diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index b37da533fa3b..5b64db236b60 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -132,6 +132,250 @@ } } }, + "/api/v1/cluster-workflow-templates": { + "get": { + "tags": [ + "ClusterWorkflowTemplateService" + ], + "operationId": "ListClusterWorkflowTemplates", + "parameters": [ + { + "type": "string", + "description": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.", + "name": "listOptions.labelSelector", + "in": "query" + }, + { + "type": "string", + "description": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional.", + "name": "listOptions.fieldSelector", + "in": "query" + }, + { + "type": "boolean", + "format": "boolean", + "description": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional.", + "name": "listOptions.watch", + "in": "query" + }, + { + "type": "boolean", + "format": "boolean", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. 
Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\nIf the feature gate WatchBookmarks is not enabled in apiserver,\nthis field is ignored.\n\nThis field is beta.\n\n+optional", + "name": "listOptions.allowWatchBookmarks", + "in": "query" + }, + { + "type": "string", + "description": "When specified with a watch call, shows changes that occur after that particular version of a resource.\nDefaults to changes from the beginning of history.\nWhen specified for list:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.\n+optional.", + "name": "listOptions.resourceVersion", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "description": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional.", + "name": "listOptions.timeoutSeconds", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. 
This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned.", + "name": "listOptions.limit", + "in": "query" + }, + { + "type": "string", + "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", + "name": "listOptions.continue", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplateList" + } + } + } + }, + "post": { + "tags": [ + "ClusterWorkflowTemplateService" + ], + "operationId": "CreateClusterWorkflowTemplate", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/clusterio.argoproj.workflow.v1alpha1.ClusterWorkflowTemplateCreateRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplate" + } + } + } + } + }, + "/api/v1/cluster-workflow-templates/lint": { + "post": { + "tags": [ + "ClusterWorkflowTemplateService" + ], + "operationId": "LintClusterWorkflowTemplate", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/clusterio.argoproj.workflow.v1alpha1.ClusterWorkflowTemplateLintRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplate" + } + } + } + } + }, + "/api/v1/cluster-workflow-templates/{name}": { + "get": { + "tags": [ + "ClusterWorkflowTemplateService" + ], + "operationId": "GetClusterWorkflowTemplate", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "When specified:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.", + "name": 
"getOptions.resourceVersion", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplate" + } + } + } + }, + "put": { + "tags": [ + "ClusterWorkflowTemplateService" + ], + "operationId": "UpdateClusterWorkflowTemplate", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/clusterio.argoproj.workflow.v1alpha1.ClusterWorkflowTemplateUpdateRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplate" + } + } + } + }, + "delete": { + "tags": [ + "ClusterWorkflowTemplateService" + ], + "operationId": "DeleteClusterWorkflowTemplate", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "int64", + "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer.\nThe value zero indicates delete immediately. If this value is nil, the default grace period for the\nspecified type will be used.\nDefaults to a per object value if not specified. zero means delete immediately.\n+optional.", + "name": "deleteOptions.gracePeriodSeconds", + "in": "query" + }, + { + "type": "string", + "description": "Specifies the target UID.\n+optional.", + "name": "deleteOptions.preconditions.uid", + "in": "query" + }, + { + "type": "string", + "description": "Specifies the target ResourceVersion\n+optional.", + "name": "deleteOptions.preconditions.resourceVersion", + "in": "query" + }, + { + "type": "boolean", + "format": "boolean", + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.\nShould the dependent objects be orphaned. 
If true/false, the \"orphan\"\nfinalizer will be added to/removed from the object's finalizers list.\nEither this field or PropagationPolicy may be set, but not both.\n+optional.", + "name": "deleteOptions.orphanDependents", + "in": "query" + }, + { + "type": "string", + "description": "Whether and how garbage collection will be performed.\nEither this field or OrphanDependents may be set, but not both.\nThe default policy is decided by the existing finalizer set in the\nmetadata.finalizers and the resource-specific default policy.\nAcceptable values are: 'Orphan' - orphan the dependents; 'Background' -\nallow the garbage collector to delete the dependents in the background;\n'Foreground' - a cascading policy that deletes all dependents in the\nforeground.\n+optional.", + "name": "deleteOptions.propagationPolicy", + "in": "query" + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. 
Valid values are:\n- All: all dry run stages will be processed\n+optional.", + "name": "deleteOptions.dryRun", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/clusterio.argoproj.workflow.v1alpha1.ClusterWorkflowTemplateDeleteResponse" + } + } + } + } + }, "/api/v1/cron-workflows/{namespace}": { "get": { "tags": [ @@ -1368,6 +1612,42 @@ } }, "definitions": { + "clusterio.argoproj.workflow.v1alpha1.ClusterWorkflowTemplateCreateRequest": { + "type": "object", + "properties": { + "createOptions": { + "$ref": "#/definitions/io.k8s.api.core.v1.CreateOptions" + }, + "template": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplate" + } + } + }, + "clusterio.argoproj.workflow.v1alpha1.ClusterWorkflowTemplateDeleteResponse": { + "type": "object" + }, + "clusterio.argoproj.workflow.v1alpha1.ClusterWorkflowTemplateLintRequest": { + "type": "object", + "properties": { + "createOptions": { + "$ref": "#/definitions/io.k8s.api.core.v1.CreateOptions" + }, + "template": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplate" + } + } + }, + "clusterio.argoproj.workflow.v1alpha1.ClusterWorkflowTemplateUpdateRequest": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "template": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplate" + } + } + }, "cronio.argoproj.workflow.v1alpha1.CreateCronWorkflowRequest": { "type": "object", "properties": { @@ -1578,6 +1858,33 @@ } } }, + "io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplate": { + "type": "object", + "title": "ClusterWorkflowTemplate is the definition of a workflow template resource in cluster scope\n+genclient\n+genclient:noStatus\n+genclient:nonNamespaced\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", + "properties": { + "metadata": { + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectMeta" + }, + "spec": { + 
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateSpec" + } + } + }, + "io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplateList": { + "type": "object", + "title": "ClusterWorkflowTemplateList is list of ClusterWorkflowTemplate resources\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplate" + } + }, + "metadata": { + "$ref": "#/definitions/io.k8s.api.core.v1.ListMeta" + } + } + }, "io.argoproj.workflow.v1alpha1.ContinueOn": { "description": "ContinueOn defines if a workflow should continue even if a task or step fails/errors.\nIt can be specified if the workflow should continue when the pod errors, fails or both.", "type": "object", @@ -2710,6 +3017,11 @@ "description": "TemplateRef is a reference of template resource.", "type": "object", "properties": { + "clusterscope": { + "description": "ClusterScope indicates the referred template is cluster scoped (i.e., a ClusterWorkflowTemplate).", + "type": "boolean", + "format": "boolean" + }, "name": { "description": "Name is the resource name of the template.", "type": "string" @@ -5930,31 +6242,5 @@ "description": "HTTP Basic authentication", "type": "basic" } - }, - "x-stream-definitions": { - "io.argoproj.workflow.v1alpha1.LogEntry": { - "properties": { - "error": { - "$ref": "#/definitions/grpc.gateway.runtime.StreamError" - }, - "result": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.LogEntry" - } - }, - "title": "Stream result of io.argoproj.workflow.v1alpha1.LogEntry", - "type": "object" - }, - "io.argoproj.workflow.v1alpha1.WorkflowWatchEvent": { - "properties": { - "error": { - "$ref": "#/definitions/grpc.gateway.runtime.StreamError" - }, - "result": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowWatchEvent" - } - }, - "title": "Stream result of io.argoproj.workflow.v1alpha1.WorkflowWatchEvent", - "type": 
"object" - } } } diff --git a/cmd/argo/commands/clustertemplate/create.go b/cmd/argo/commands/clustertemplate/create.go new file mode 100644 index 000000000000..b268dbdde0a3 --- /dev/null +++ b/cmd/argo/commands/clustertemplate/create.go @@ -0,0 +1,96 @@ +package clustertemplate + +import ( + "log" + "os" + + "github.com/argoproj/pkg/json" + "github.com/spf13/cobra" + + "github.com/argoproj/argo/cmd/argo/commands/client" + "github.com/argoproj/argo/pkg/apiclient/clusterworkflowtemplate" + wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/workflow/common" + "github.com/argoproj/argo/workflow/util" +) + +type cliCreateOpts struct { + output string // --output + strict bool // --strict +} + +func NewCreateCommand() *cobra.Command { + var ( + cliCreateOpts cliCreateOpts + ) + var command = &cobra.Command{ + Use: "create FILE1 FILE2...", + Short: "create a cluster workflow template", + Run: func(cmd *cobra.Command, args []string) { + if len(args) == 0 { + cmd.HelpFunc()(cmd, args) + os.Exit(1) + } + + createClusterWorkflowTemplates(args, &cliCreateOpts) + }, + } + command.Flags().StringVarP(&cliCreateOpts.output, "output", "o", "", "Output format. One of: name|json|yaml|wide") + command.Flags().BoolVar(&cliCreateOpts.strict, "strict", true, "perform strict workflow validation") + return command +} + +func createClusterWorkflowTemplates(filePaths []string, cliOpts *cliCreateOpts) { + if cliOpts == nil { + cliOpts = &cliCreateOpts{} + } + ctx, apiClient := client.NewAPIClient() + serviceClient := apiClient.NewClusterWorkflowTemplateServiceClient() + + fileContents, err := util.ReadManifest(filePaths...) 
+ if err != nil { + log.Fatal(err) + } + + var clusterWorkflowTemplates []wfv1.ClusterWorkflowTemplate + for _, body := range fileContents { + cwftmpls, err := unmarshalClusterWorkflowTemplates(body, cliOpts.strict) + if err != nil { + log.Fatalf("Failed to parse cluster workflow template: %v", err) + } + clusterWorkflowTemplates = append(clusterWorkflowTemplates, cwftmpls...) + } + + if len(clusterWorkflowTemplates) == 0 { + log.Println("No cluster workflow template found in given files") + os.Exit(1) + } + + for _, wftmpl := range clusterWorkflowTemplates { + created, err := serviceClient.CreateClusterWorkflowTemplate(ctx, &clusterworkflowtemplate.ClusterWorkflowTemplateCreateRequest{ + Template: &wftmpl, + }) + if err != nil { + log.Fatalf("Failed to create cluster workflow template: %s, %v", wftmpl.Name, err) + } + printClusterWorkflowTemplate(created, cliOpts.output) + } +} + +// unmarshalClusterWorkflowTemplates unmarshals the input bytes as either json or yaml +func unmarshalClusterWorkflowTemplates(wfBytes []byte, strict bool) ([]wfv1.ClusterWorkflowTemplate, error) { + var cwft wfv1.ClusterWorkflowTemplate + var jsonOpts []json.JSONOpt + if strict { + jsonOpts = append(jsonOpts, json.DisallowUnknownFields) + } + err := json.Unmarshal(wfBytes, &cwft, jsonOpts...) 
+ if err == nil { + return []wfv1.ClusterWorkflowTemplate{cwft}, nil + } + yamlWfs, err := common.SplitClusterWorkflowTemplateYAMLFile(wfBytes, strict) + if err == nil { + return yamlWfs, nil + } + return nil, err +} diff --git a/cmd/argo/commands/clustertemplate/create_test.go b/cmd/argo/commands/clustertemplate/create_test.go new file mode 100644 index 000000000000..19ea43a828e2 --- /dev/null +++ b/cmd/argo/commands/clustertemplate/create_test.go @@ -0,0 +1,47 @@ +package clustertemplate + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +const cwfts = ` +apiVersion: argoproj.io/v1alpha1 +kind: ClusterWorkflowTemplate +metadata: + name: cluster-workflow-template-whalesay-template +spec: + templates: + - name: whalesay-template + inputs: + parameters: + - name: message + container: + image: docker/whalesay + command: [cowsay] + args: ["{{inputs.parameters.message}}"] +--- +apiVersion: argoproj.io/v1alpha1 +kind: ClusterWorkflowTemplate +metadata: + name: cluster-workflow-template-whalesay-template +spec: + templates: + - name: whalesay-template + inputs: + parameters: + - name: message + container: + image: docker/whalesay + command: [cowsay] + args: ["{{inputs.parameters.message}}"] +` + +func TestUnmarshalCWFT(t *testing.T) { + + clusterwfts, err := unmarshalClusterWorkflowTemplates([]byte(cwfts), false) + if assert.NoError(t, err) { + assert.Equal(t, 2, len(clusterwfts)) + } +} diff --git a/cmd/argo/commands/clustertemplate/delete.go b/cmd/argo/commands/clustertemplate/delete.go new file mode 100644 index 000000000000..7ff1736b596f --- /dev/null +++ b/cmd/argo/commands/clustertemplate/delete.go @@ -0,0 +1,53 @@ +package clustertemplate + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/argoproj/pkg/errors" + + "github.com/argoproj/argo/cmd/argo/commands/client" + "github.com/argoproj/argo/pkg/apiclient/clusterworkflowtemplate" +) + +// NewDeleteCommand returns a new instance of an `argo delete` command +func NewDeleteCommand() 
*cobra.Command { + var ( + all bool + ) + + var command = &cobra.Command{ + Use: "delete WORKFLOW_TEMPLATE", + Short: "delete a cluster workflow template", + Run: func(cmd *cobra.Command, args []string) { + apiServerDeleteClusterWorkflowTemplates(all, args) + }, + } + + command.Flags().BoolVar(&all, "all", false, "Delete all cluster workflow templates") + return command +} + +func apiServerDeleteClusterWorkflowTemplates(allWFs bool, wfTmplNames []string) { + ctx, apiClient := client.NewAPIClient() + serviceClient := apiClient.NewClusterWorkflowTemplateServiceClient() + var delWFTmplNames []string + if allWFs { + cwftmplList, err := serviceClient.ListClusterWorkflowTemplates(ctx, &clusterworkflowtemplate.ClusterWorkflowTemplateListRequest{}) + errors.CheckError(err) + for _, cwfTmpl := range cwftmplList.Items { + delWFTmplNames = append(delWFTmplNames, cwfTmpl.Name) + } + + } else { + delWFTmplNames = wfTmplNames + } + for _, cwfTmplName := range delWFTmplNames { + _, err := serviceClient.DeleteClusterWorkflowTemplate(ctx, &clusterworkflowtemplate.ClusterWorkflowTemplateDeleteRequest{ + Name: cwfTmplName, + }) + errors.CheckError(err) + fmt.Printf("ClusterWorkflowTemplate '%s' deleted\n", cwfTmplName) + } +} diff --git a/cmd/argo/commands/clustertemplate/get.go b/cmd/argo/commands/clustertemplate/get.go new file mode 100644 index 000000000000..cb68bf720622 --- /dev/null +++ b/cmd/argo/commands/clustertemplate/get.go @@ -0,0 +1,66 @@ +package clustertemplate + +import ( + "encoding/json" + "fmt" + "log" + + "github.com/spf13/cobra" + "sigs.k8s.io/yaml" + + "github.com/argoproj/pkg/humanize" + + "github.com/argoproj/argo/cmd/argo/commands/client" + clusterworkflowtmplpkg "github.com/argoproj/argo/pkg/apiclient/clusterworkflowtemplate" + wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" +) + +func NewGetCommand() *cobra.Command { + var ( + output string + ) + + var command = &cobra.Command{ + Use: "get CLUSTER WORKFLOW_TEMPLATE...", + Short: "display details 
about a cluster workflow template", + Run: func(cmd *cobra.Command, args []string) { + ctx, apiClient := client.NewAPIClient() + serviceClient := apiClient.NewClusterWorkflowTemplateServiceClient() + for _, name := range args { + wftmpl, err := serviceClient.GetClusterWorkflowTemplate(ctx, &clusterworkflowtmplpkg.ClusterWorkflowTemplateGetRequest{ + Name: name, + }) + if err != nil { + log.Fatal(err) + } + printClusterWorkflowTemplate(wftmpl, output) + } + }, + } + + command.Flags().StringVarP(&output, "output", "o", "", "Output format. One of: json|yaml|wide") + return command +} + +func printClusterWorkflowTemplate(wf *wfv1.ClusterWorkflowTemplate, outFmt string) { + switch outFmt { + case "name": + fmt.Println(wf.ObjectMeta.Name) + case "json": + outBytes, _ := json.MarshalIndent(wf, "", " ") + fmt.Println(string(outBytes)) + case "yaml": + outBytes, _ := yaml.Marshal(wf) + fmt.Print(string(outBytes)) + case "wide", "": + printClusterWorkflowTemplateHelper(wf) + default: + log.Fatalf("Unknown output format: %s", outFmt) + } +} + +func printClusterWorkflowTemplateHelper(wf *wfv1.ClusterWorkflowTemplate) { + const fmtStr = "%-20s %v\n" + fmt.Printf(fmtStr, "Name:", wf.ObjectMeta.Name) + fmt.Printf(fmtStr, "Created:", humanize.Timestamp(wf.ObjectMeta.CreationTimestamp.Time)) +} diff --git a/cmd/argo/commands/clustertemplate/lint.go b/cmd/argo/commands/clustertemplate/lint.go new file mode 100644 index 000000000000..cd1ee7fdf666 --- /dev/null +++ b/cmd/argo/commands/clustertemplate/lint.go @@ -0,0 +1,67 @@ +package clustertemplate + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/spf13/cobra" + + "github.com/argoproj/pkg/errors" + + "github.com/argoproj/argo/cmd/argo/commands/client" + "github.com/argoproj/argo/pkg/apiclient/clusterworkflowtemplate" + "github.com/argoproj/argo/workflow/validate" +) + +func NewLintCommand() *cobra.Command { + var ( + strict bool + ) + var command = &cobra.Command{ + Use: "lint FILE...", + Short: "validate files or 
directories of cluster workflow template manifests", + Run: func(cmd *cobra.Command, args []string) { + ctx, apiClient := client.NewAPIClient() + serviceClient := apiClient.NewClusterWorkflowTemplateServiceClient() + + lint := func(file string) error { + cwfTmpls, err := validate.ParseCWfTmplFromFile(file, strict) + if err != nil { + return err + } + for _, cfwft := range cwfTmpls { + _, err := serviceClient.LintClusterWorkflowTemplate(ctx, &clusterworkflowtemplate.ClusterWorkflowTemplateLintRequest{Template: &cfwft}) + if err != nil { + return err + } + } + fmt.Printf("%s is valid\n", file) + return nil + } + + for _, file := range args { + stat, err := os.Stat(file) + errors.CheckError(err) + if stat.IsDir() { + err := filepath.Walk(file, func(path string, info os.FileInfo, err error) error { + fileExt := filepath.Ext(info.Name()) + switch fileExt { + case ".yaml", ".yml", ".json": + default: + return nil + } + return lint(path) + }) + errors.CheckError(err) + } else { + err := lint(file) + errors.CheckError(err) + } + } + fmt.Printf("Cluster Workflow Template manifests validated\n") + }, + } + command.Flags().BoolVar(&strict, "strict", true, "perform strict workflow validation") + return command +} diff --git a/cmd/argo/commands/clustertemplate/list.go b/cmd/argo/commands/clustertemplate/list.go new file mode 100644 index 000000000000..f4b3bdf538d9 --- /dev/null +++ b/cmd/argo/commands/clustertemplate/list.go @@ -0,0 +1,61 @@ +package clustertemplate + +import ( + "fmt" + "log" + "os" + "text/tabwriter" + + "github.com/spf13/cobra" + + "github.com/argoproj/argo/cmd/argo/commands/client" + "github.com/argoproj/argo/pkg/apiclient/clusterworkflowtemplate" + wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" +) + +type listFlags struct { + output string // --output +} + +func NewListCommand() *cobra.Command { + var ( + listArgs listFlags + ) + var command = &cobra.Command{ + Use: "list", + Short: "list cluster workflow templates", + Run: func(cmd 
*cobra.Command, args []string) { + ctx, apiClient := client.NewAPIClient() + serviceClient := apiClient.NewClusterWorkflowTemplateServiceClient() + + cwftmplList, err := serviceClient.ListClusterWorkflowTemplates(ctx, &clusterworkflowtemplate.ClusterWorkflowTemplateListRequest{}) + if err != nil { + log.Fatal(err) + } + switch listArgs.output { + case "", "wide": + printTable(cwftmplList.Items) + case "name": + for _, cwftmp := range cwftmplList.Items { + fmt.Println(cwftmp.ObjectMeta.Name) + } + default: + log.Fatalf("Unknown output mode: %s", listArgs.output) + } + + }, + } + command.Flags().StringVarP(&listArgs.output, "output", "o", "", "Output format. One of: wide|name") + return command +} + +func printTable(wfList []wfv1.ClusterWorkflowTemplate) { + w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0) + _, _ = fmt.Fprint(w, "NAME") + _, _ = fmt.Fprint(w, "\n") + for _, wf := range wfList { + _, _ = fmt.Fprintf(w, "%s\t", wf.ObjectMeta.Name) + _, _ = fmt.Fprintf(w, "\n") + } + _ = w.Flush() +} diff --git a/cmd/argo/commands/clustertemplate/root.go b/cmd/argo/commands/clustertemplate/root.go new file mode 100644 index 000000000000..2ea8a972737e --- /dev/null +++ b/cmd/argo/commands/clustertemplate/root.go @@ -0,0 +1,24 @@ +package clustertemplate + +import ( + "github.com/spf13/cobra" +) + +func NewClusterTemplateCommand() *cobra.Command { + var command = &cobra.Command{ + Use: "cluster-template", + Aliases: []string{"cwftmpl", "cwft"}, + Short: "manipulate cluster workflow templates", + Run: func(cmd *cobra.Command, args []string) { + cmd.HelpFunc()(cmd, args) + }, + } + + command.AddCommand(NewGetCommand()) + command.AddCommand(NewListCommand()) + command.AddCommand(NewCreateCommand()) + command.AddCommand(NewDeleteCommand()) + command.AddCommand(NewLintCommand()) + + return command +} diff --git a/cmd/argo/commands/root.go b/cmd/argo/commands/root.go index f3dbee61dfa1..9a4ea879fd9c 100644 --- a/cmd/argo/commands/root.go +++ b/cmd/argo/commands/root.go @@ 
-6,6 +6,8 @@ import ( "github.com/argoproj/pkg/cli" "github.com/spf13/cobra" + "github.com/argoproj/argo/cmd/argo/commands/clustertemplate" + "github.com/argoproj/argo/cmd/argo/commands/auth" "github.com/argoproj/argo/cmd/argo/commands/cron" "github.com/argoproj/argo/util/help" @@ -54,6 +56,8 @@ If you're using the Argo Server (e.g. because you need large workflow support or command.AddCommand(cmd.NewVersionCmd(CLIName)) command.AddCommand(template.NewTemplateCommand()) command.AddCommand(cron.NewCronWorkflowCommand()) + command.AddCommand(clustertemplate.NewClusterTemplateCommand()) + client.AddKubectlFlagsToCmd(command) client.AddArgoServerFlagsToCmd(command) diff --git a/cmd/argo/commands/submit.go b/cmd/argo/commands/submit.go index fc4eaa8bb3d7..c159e37acaec 100644 --- a/cmd/argo/commands/submit.go +++ b/cmd/argo/commands/submit.go @@ -11,6 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/argoproj/argo/cmd/argo/commands/client" + clusterworkflowtmplpkg "github.com/argoproj/argo/pkg/apiclient/clusterworkflowtemplate" cronworkflowpkg "github.com/argoproj/argo/pkg/apiclient/cronworkflow" workflowpkg "github.com/argoproj/argo/pkg/apiclient/workflow" workflowtemplatepkg "github.com/argoproj/argo/pkg/apiclient/workflowtemplate" @@ -134,6 +135,15 @@ func submitWorkflowFromResource(resourceIdentifier string, submitOpts *util.Subm log.Fatalf("Unable to get workflow template '%s': %s", name, err) } workflowToSubmit = common.ConvertWorkflowTemplateToWorkflow(template) + case workflow.ClusterWorkflowTemplateKind, workflow.ClusterWorkflowTemplateSingular, workflow.ClusterWorkflowTemplatePlural, workflow.ClusterWorkflowTemplateShortName: + serviceClient := apiClient.NewClusterWorkflowTemplateServiceClient() + template, err := serviceClient.GetClusterWorkflowTemplate(ctx, &clusterworkflowtmplpkg.ClusterWorkflowTemplateGetRequest{ + Name: name, + }) + if err != nil { + log.Fatalf("Unable to get cluster workflow template '%s': %s", name, err) + } + 
workflowToSubmit = common.ConvertClusterWorkflowTemplateToWorkflow(template) default: log.Fatalf("Resource kind '%s' is not supported with --from", kind) } diff --git a/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml b/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml new file mode 100644 index 000000000000..a252545ffd2a --- /dev/null +++ b/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml @@ -0,0 +1,56 @@ +# The following workflow executes a diamond workflow +# +# A +# / \ +# B C +# \ / +# D +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: workflow-template-dag-diamond- +spec: + entrypoint: diamond + templates: + - name: diamond + dag: + tasks: + - name: A + templateRef: + name: cluster-workflow-template-whalesay-template + template: whalesay-template + clusterscope: true + arguments: + parameters: + - name: message + value: A + - name: B + dependencies: [A] + templateRef: + name: cluster-workflow-template-whalesay-template + template: whalesay-template + clusterscope: true + arguments: + parameters: + - name: message + value: B + - name: C + dependencies: [A] + templateRef: + name: cluster-workflow-template-inner-dag + template: inner-diamond + clusterscope: true + arguments: + parameters: + - name: message + value: C + - name: D + dependencies: [B, C] + templateRef: + name: cluster-workflow-template-whalesay-template + template: whalesay-template + clusterscope: true + arguments: + parameters: + - name: message + value: D diff --git a/examples/cluster-workflow-template/clustertemplates.yaml b/examples/cluster-workflow-template/clustertemplates.yaml new file mode 100644 index 000000000000..69be047dbfc9 --- /dev/null +++ b/examples/cluster-workflow-template/clustertemplates.yaml @@ -0,0 +1,151 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ClusterWorkflowTemplate +metadata: + name: cluster-workflow-template-whalesay-template +spec: + templates: + - name: whalesay-template + inputs: + parameters: + - name: 
message + container: + image: docker/whalesay + command: [cowsay] + args: ["{{inputs.parameters.message}}"] +--- +apiVersion: argoproj.io/v1alpha1 +kind: ClusterWorkflowTemplate +metadata: + name: cluster-workflow-template-random-fail-template +spec: + templates: + - name: random-fail-template + retryStrategy: + limit: 10 + container: + image: python:alpine3.6 + command: [python, -c] + # fail with a 66% probability + args: ["import random; import sys; exit_code = random.choice([0, 1, 1]); sys.exit(exit_code)"] +--- +apiVersion: argoproj.io/v1alpha1 +kind: ClusterWorkflowTemplate +metadata: + name: cluster-workflow-template-inner-steps +spec: + templates: + - name: whalesay-template + inputs: + parameters: + - name: message + container: + image: docker/whalesay + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + - name: inner-steps + steps: + - - name: inner-hello1 + templateRef: + name: cluster-workflow-template-whalesay-template + template: whalesay-template + clusterscope: true + arguments: + parameters: + - name: message + value: "inner-hello1" + - - name: inner-hello2a + templateRef: + name: cluster-workflow-template-whalesay-template + template: whalesay-template + clusterscope: true + arguments: + parameters: + - name: message + value: "inner-hello2a" + - name: inner-hello2b + templateRef: + name: cluster-workflow-template-whalesay-template + template: whalesay-template + clusterscope: true + arguments: + parameters: + - name: message + value: "inner-hello2b" +--- +# The following workflow executes a diamond workflow +# +# A +# / \ +# B C +# \ / +# D +apiVersion: argoproj.io/v1alpha1 +kind: ClusterWorkflowTemplate +metadata: + name: cluster-workflow-template-inner-dag +spec: + templates: + - name: whalesay-template + inputs: + parameters: + - name: message + container: + image: docker/whalesay + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + - name: inner-diamond + dag: + tasks: + - name: inner-A + templateRef: + name: 
cluster-workflow-template-whalesay-template + template: whalesay-template + clusterscope: true + arguments: + parameters: + - name: message + value: inner-A + - name: inner-B + dependencies: [inner-A] + template: whalesay-template + arguments: + parameters: + - name: message + value: inner-B + - name: inner-C + dependencies: [inner-A] + template: whalesay-template + arguments: + parameters: + - name: message + value: inner-C + - name: inner-D + dependencies: [inner-B, inner-C] + templateRef: + name: cluster-workflow-template-whalesay-template + template: whalesay-template + clusterscope: true + arguments: + parameters: + - name: message + value: inner-D +--- +apiVersion: argoproj.io/v1alpha1 +kind: ClusterWorkflowTemplate +metadata: + name: cluster-workflow-template-submittable +spec: + entryPoint: whalesay-template + arguments: + parameters: + - name: message + value: hello world + templates: + - name: whalesay-template + inputs: + parameters: + - name: message + container: + image: docker/whalesay + command: [cowsay] + args: ["{{inputs.parameters.message}}"] diff --git a/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml b/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml new file mode 100644 index 000000000000..9a573028a32b --- /dev/null +++ b/examples/cluster-workflow-template/mixed-cluster-namespaced-wftmpl-steps.yaml @@ -0,0 +1,34 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: workflow-template-steps- +spec: + entrypoint: hello-hello-hello + templates: + - name: hello-hello-hello + steps: + - - name: hello1 + templateRef: + name: workflow-template-whalesay-template + template: whalesay-template + arguments: + parameters: + - name: message + value: "hello1" + - - name: hello2a + templateRef: + name: cluster-workflow-template-inner-steps + template: inner-steps + clusterscope: true + arguments: + parameters: + - name: message + value: "hello2a" + - name: hello2b + 
templateRef: + name: workflow-template-whalesay-template + template: whalesay-template + arguments: + parameters: + - name: message + value: "hello2b" diff --git a/go.mod b/go.mod index a714bf37c40a..f37d250d816c 100644 --- a/go.mod +++ b/go.mod @@ -22,8 +22,8 @@ require ( github.com/fatih/structs v1.1.0 // indirect github.com/ghodss/yaml v1.0.0 github.com/go-ini/ini v1.51.1 // indirect - github.com/go-openapi/jsonpointer v0.19.3 // indirect - github.com/go-openapi/spec v0.19.2 + github.com/go-openapi/jsonreference v0.19.3 // indirect + github.com/go-openapi/spec v0.19.3 github.com/go-sql-driver/mysql v1.4.1 github.com/gogo/protobuf v1.3.1 github.com/golang/protobuf v1.3.5 @@ -45,6 +45,7 @@ require ( github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/kr/pretty v0.2.0 // indirect github.com/lib/pq v1.3.0 // indirect + github.com/mailru/easyjson v0.7.0 // indirect github.com/mattn/go-colorable v0.1.4 // indirect github.com/minio/minio-go v6.0.14+incompatible // indirect github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b @@ -76,7 +77,7 @@ require ( golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d golang.org/x/net v0.0.0-20200301022130-244492dfa37a golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/tools v0.0.0-20200330175517-31583a0dbbc8 // indirect + golang.org/x/tools v0.0.0-20200401192744-099440627f01 // indirect google.golang.org/api v0.20.0 google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 google.golang.org/grpc v1.28.0 diff --git a/go.sum b/go.sum index 64bd129e3aaf..25d33ebaa9dd 100644 --- a/go.sum +++ b/go.sum @@ -142,11 +142,13 @@ github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9 h1:tF+aug github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= github.com/go-openapi/jsonreference v0.19.2/go.mod 
h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501 h1:C1JKChikHGpXwT5UQDFaryIpDtyyGL/CR6C2kB7F1oc= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87 h1:zP3nY8Tk2E6RTkqGYrarZXuzh+ffyLDljLxCy1iJw80= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= @@ -298,6 +300,8 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= 
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= @@ -428,6 +432,7 @@ github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDf github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI= github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= @@ -500,6 +505,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -607,8 +613,8 @@ golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200317043434-63da46f3035e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200330175517-31583a0dbbc8 h1:cFzD2CKgXeXEgq3J/Qmj40Y0vv41un09b/5M5BRqHWo= -golang.org/x/tools v0.0.0-20200330175517-31583a0dbbc8/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200401192744-099440627f01 h1:ysQJ/fU6laLOZJseIeOqXl6Mo+lw5z6b7QHnmUKjW+k= +golang.org/x/tools v0.0.0-20200401192744-099440627f01/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 8c926e601701..7e6316553f3f 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -1,9 +1,9 @@ #!/bin/bash set -eux -o pipefail -go get k8s.io/code-generator/cmd/go-to-protobuf@v0.16.7-beta.0 +go get k8s.io/code-generator/cmd/go-to-protobuf@v0.17.3 -bash ${GOPATH}/pkg/mod/k8s.io/code-generator@v0.16.7-beta.0/generate-groups.sh \ +bash ${GOPATH}/pkg/mod/k8s.io/code-generator@v0.17.3/generate-groups.sh \ "deepcopy,client,informer,lister" \ github.com/argoproj/argo/pkg/client github.com/argoproj/argo/pkg/apis \ workflow:v1alpha1 \ diff --git a/manifests/base/crds/workflow-crd.yaml b/manifests/base/crds/workflow-crd.yaml index f38016e8ac4c..efffdd9b242d 100644 --- a/manifests/base/crds/workflow-crd.yaml +++ b/manifests/base/crds/workflow-crd.yaml @@ -49,4 +49,19 @@ spec: plural: cronworkflows shortNames: - cronwf - - cwf \ No newline at end of file + - cwf +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterworkflowtemplates.argoproj.io +spec: + 
group: argoproj.io + version: v1alpha1 + scope: Cluster + names: + kind: ClusterWorkflowTemplate + plural: clusterworkflowtemplates + shortNames: + - clusterwftmpl + - cwft diff --git a/manifests/cluster-install/argo-server-rbac/argo-server-clusterole.yaml b/manifests/cluster-install/argo-server-rbac/argo-server-clusterole.yaml index 7d28311790d9..1a5ef721d4e5 100644 --- a/manifests/cluster-install/argo-server-rbac/argo-server-clusterole.yaml +++ b/manifests/cluster-install/argo-server-rbac/argo-server-clusterole.yaml @@ -40,6 +40,7 @@ rules: - workflows - workflowtemplates - cronworkflows + - clusterworkflowtemplates verbs: - create - get diff --git a/manifests/cluster-install/workflow-controller-rbac/workflow-aggregate-roles.yaml b/manifests/cluster-install/workflow-controller-rbac/workflow-aggregate-roles.yaml index c3d25cbed3f1..38d816ce6443 100644 --- a/manifests/cluster-install/workflow-controller-rbac/workflow-aggregate-roles.yaml +++ b/manifests/cluster-install/workflow-controller-rbac/workflow-aggregate-roles.yaml @@ -14,6 +14,8 @@ rules: - workflowtemplates/finalizers - cronworkflows - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers verbs: - get - list @@ -36,6 +38,8 @@ rules: - workflowtemplates/finalizers - cronworkflows - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers verbs: - create - delete @@ -63,6 +67,8 @@ rules: - workflowtemplates/finalizers - cronworkflows - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers verbs: - create - delete diff --git a/manifests/cluster-install/workflow-controller-rbac/workflow-controller-clusterrole.yaml b/manifests/cluster-install/workflow-controller-rbac/workflow-controller-clusterrole.yaml index cf0739465898..e5e78d2396fe 100644 --- a/manifests/cluster-install/workflow-controller-rbac/workflow-controller-clusterrole.yaml +++ 
b/manifests/cluster-install/workflow-controller-rbac/workflow-controller-clusterrole.yaml @@ -49,6 +49,8 @@ rules: resources: - workflowtemplates - workflowtemplates/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers verbs: - get - list diff --git a/manifests/install.yaml b/manifests/install.yaml index 20926018d382..20e31630086b 100644 --- a/manifests/install.yaml +++ b/manifests/install.yaml @@ -1,6 +1,21 @@ # This is an auto-generated file. DO NOT EDIT apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition +metadata: + name: clusterworkflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: ClusterWorkflowTemplate + plural: clusterworkflowtemplates + shortNames: + - clusterwftmpl + - cwft + scope: Cluster + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: name: cronworkflows.argoproj.io spec: @@ -90,6 +105,8 @@ rules: - workflowtemplates/finalizers - cronworkflows - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers verbs: - create - delete @@ -116,6 +133,8 @@ rules: - workflowtemplates/finalizers - cronworkflows - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers verbs: - create - delete @@ -142,6 +161,8 @@ rules: - workflowtemplates/finalizers - cronworkflows - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers verbs: - get - list @@ -198,6 +219,8 @@ rules: resources: - workflowtemplates - workflowtemplates/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers verbs: - get - list @@ -278,6 +301,7 @@ rules: - workflows - workflowtemplates - cronworkflows + - clusterworkflowtemplates verbs: - create - get diff --git a/manifests/namespace-install.yaml b/manifests/namespace-install.yaml index 707bd353bcd5..9ab771b51a40 100644 --- a/manifests/namespace-install.yaml +++ 
b/manifests/namespace-install.yaml @@ -1,6 +1,21 @@ # This is an auto-generated file. DO NOT EDIT apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition +metadata: + name: clusterworkflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: ClusterWorkflowTemplate + plural: clusterworkflowtemplates + shortNames: + - clusterwftmpl + - cwft + scope: Cluster + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: name: cronworkflows.argoproj.io spec: diff --git a/manifests/namespace-install/argo-server-rbac/argo-server-rolebinding.yaml b/manifests/namespace-install/argo-server-rbac/argo-server-rolebinding.yaml index d92f0a509653..92411422afd1 100644 --- a/manifests/namespace-install/argo-server-rbac/argo-server-rolebinding.yaml +++ b/manifests/namespace-install/argo-server-rbac/argo-server-rolebinding.yaml @@ -9,3 +9,4 @@ roleRef: subjects: - kind: ServiceAccount name: argo-server + diff --git a/manifests/namespace-install/workflow-controller-rbac/workflow-controller-role.yaml b/manifests/namespace-install/workflow-controller-rbac/workflow-controller-role.yaml index 9677d76a2c67..149255ddbbc4 100644 --- a/manifests/namespace-install/workflow-controller-rbac/workflow-controller-role.yaml +++ b/manifests/namespace-install/workflow-controller-rbac/workflow-controller-role.yaml @@ -92,3 +92,4 @@ rules: - create - get - delete + diff --git a/manifests/namespace-install/workflow-controller-rbac/workflow-controller-rolebinding.yaml b/manifests/namespace-install/workflow-controller-rbac/workflow-controller-rolebinding.yaml index 05a7b3a46e27..f8ac7ec0d606 100644 --- a/manifests/namespace-install/workflow-controller-rbac/workflow-controller-rolebinding.yaml +++ b/manifests/namespace-install/workflow-controller-rbac/workflow-controller-rolebinding.yaml @@ -8,4 +8,5 @@ roleRef: name: argo-role subjects: - kind: ServiceAccount - name: argo \ No newline at end of file + name: argo + diff --git 
a/manifests/quick-start-mysql.yaml b/manifests/quick-start-mysql.yaml index dd14f89604a2..0ccb02b5877d 100644 --- a/manifests/quick-start-mysql.yaml +++ b/manifests/quick-start-mysql.yaml @@ -1,6 +1,21 @@ # This is an auto-generated file. DO NOT EDIT apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition +metadata: + name: clusterworkflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: ClusterWorkflowTemplate + plural: clusterworkflowtemplates + shortNames: + - clusterwftmpl + - cwft + scope: Cluster + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: name: cronworkflows.argoproj.io spec: diff --git a/manifests/quick-start-no-db.yaml b/manifests/quick-start-no-db.yaml index 5ec45b361fed..e28108393279 100644 --- a/manifests/quick-start-no-db.yaml +++ b/manifests/quick-start-no-db.yaml @@ -1,6 +1,21 @@ # This is an auto-generated file. DO NOT EDIT apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition +metadata: + name: clusterworkflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: ClusterWorkflowTemplate + plural: clusterworkflowtemplates + shortNames: + - clusterwftmpl + - cwft + scope: Cluster + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: name: cronworkflows.argoproj.io spec: diff --git a/manifests/quick-start-postgres.yaml b/manifests/quick-start-postgres.yaml index 5934b9662a73..c14a9020e819 100644 --- a/manifests/quick-start-postgres.yaml +++ b/manifests/quick-start-postgres.yaml @@ -1,6 +1,21 @@ # This is an auto-generated file. 
DO NOT EDIT apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition +metadata: + name: clusterworkflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: ClusterWorkflowTemplate + plural: clusterworkflowtemplates + shortNames: + - clusterwftmpl + - cwft + scope: Cluster + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: name: cronworkflows.argoproj.io spec: diff --git a/pkg/apiclient/apiclient.go b/pkg/apiclient/apiclient.go index 32e223f162d9..36e4cbb266eb 100644 --- a/pkg/apiclient/apiclient.go +++ b/pkg/apiclient/apiclient.go @@ -5,6 +5,7 @@ import ( "k8s.io/client-go/tools/clientcmd" + clusterworkflowtmplpkg "github.com/argoproj/argo/pkg/apiclient/clusterworkflowtemplate" cronworkflowpkg "github.com/argoproj/argo/pkg/apiclient/cronworkflow" workflowpkg "github.com/argoproj/argo/pkg/apiclient/workflow" workflowarchivepkg "github.com/argoproj/argo/pkg/apiclient/workflowarchive" @@ -16,6 +17,7 @@ type Client interface { NewWorkflowServiceClient() workflowpkg.WorkflowServiceClient NewCronWorkflowServiceClient() cronworkflowpkg.CronWorkflowServiceClient NewWorkflowTemplateServiceClient() workflowtemplatepkg.WorkflowTemplateServiceClient + NewClusterWorkflowTemplateServiceClient() clusterworkflowtmplpkg.ClusterWorkflowTemplateServiceClient } func NewClient(argoServer string, authSupplier func() string, clientConfig clientcmd.ClientConfig) (context.Context, Client, error) { diff --git a/pkg/apiclient/argo-kube-client.go b/pkg/apiclient/argo-kube-client.go index 291e7bda3832..cb864236f81e 100644 --- a/pkg/apiclient/argo-kube-client.go +++ b/pkg/apiclient/argo-kube-client.go @@ -8,12 +8,14 @@ import ( "k8s.io/client-go/tools/clientcmd" "github.com/argoproj/argo/persist/sqldb" + "github.com/argoproj/argo/pkg/apiclient/clusterworkflowtemplate" "github.com/argoproj/argo/pkg/apiclient/cronworkflow" workflowpkg "github.com/argoproj/argo/pkg/apiclient/workflow" workflowarchivepkg 
"github.com/argoproj/argo/pkg/apiclient/workflowarchive" "github.com/argoproj/argo/pkg/apiclient/workflowtemplate" "github.com/argoproj/argo/pkg/client/clientset/versioned" "github.com/argoproj/argo/server/auth" + clusterworkflowtmplserver "github.com/argoproj/argo/server/clusterworkflowtemplate" cronworkflowserver "github.com/argoproj/argo/server/cronworkflow" workflowserver "github.com/argoproj/argo/server/workflow" workflowtemplateserver "github.com/argoproj/argo/server/workflowtemplate" @@ -60,3 +62,7 @@ func (a *argoKubeClient) NewWorkflowTemplateServiceClient() workflowtemplate.Wor func (a *argoKubeClient) NewArchivedWorkflowServiceClient() (workflowarchivepkg.ArchivedWorkflowServiceClient, error) { return nil, fmt.Errorf("it is impossible to interact with the workflow archive if you are not using the Argo Server, see " + help.CLI) } + +func (a *argoKubeClient) NewClusterWorkflowTemplateServiceClient() clusterworkflowtemplate.ClusterWorkflowTemplateServiceClient { + return &argoKubeWorkflowClusterTemplateServiceClient{clusterworkflowtmplserver.NewClusterWorkflowTemplateServer()} +} diff --git a/pkg/apiclient/argo-kube-cluster-workflow-template-service-client.go b/pkg/apiclient/argo-kube-cluster-workflow-template-service-client.go new file mode 100644 index 000000000000..eec66c609f7d --- /dev/null +++ b/pkg/apiclient/argo-kube-cluster-workflow-template-service-client.go @@ -0,0 +1,38 @@ +package apiclient + +import ( + "context" + + "google.golang.org/grpc" + + clusterworkflowtmplpkg "github.com/argoproj/argo/pkg/apiclient/clusterworkflowtemplate" + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" +) + +type argoKubeWorkflowClusterTemplateServiceClient struct { + delegate clusterworkflowtmplpkg.ClusterWorkflowTemplateServiceServer +} + +func (a argoKubeWorkflowClusterTemplateServiceClient) CreateClusterWorkflowTemplate(ctx context.Context, req *clusterworkflowtmplpkg.ClusterWorkflowTemplateCreateRequest, opts ...grpc.CallOption) 
(*v1alpha1.ClusterWorkflowTemplate, error) { + return a.delegate.CreateClusterWorkflowTemplate(ctx, req) +} + +func (a argoKubeWorkflowClusterTemplateServiceClient) GetClusterWorkflowTemplate(ctx context.Context, req *clusterworkflowtmplpkg.ClusterWorkflowTemplateGetRequest, opts ...grpc.CallOption) (*v1alpha1.ClusterWorkflowTemplate, error) { + return a.delegate.GetClusterWorkflowTemplate(ctx, req) +} + +func (a argoKubeWorkflowClusterTemplateServiceClient) ListClusterWorkflowTemplates(ctx context.Context, req *clusterworkflowtmplpkg.ClusterWorkflowTemplateListRequest, opts ...grpc.CallOption) (*v1alpha1.ClusterWorkflowTemplateList, error) { + return a.delegate.ListClusterWorkflowTemplates(ctx, req) +} + +func (a argoKubeWorkflowClusterTemplateServiceClient) UpdateClusterWorkflowTemplate(ctx context.Context, req *clusterworkflowtmplpkg.ClusterWorkflowTemplateUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.ClusterWorkflowTemplate, error) { + return a.delegate.UpdateClusterWorkflowTemplate(ctx, req) +} + +func (a argoKubeWorkflowClusterTemplateServiceClient) DeleteClusterWorkflowTemplate(ctx context.Context, req *clusterworkflowtmplpkg.ClusterWorkflowTemplateDeleteRequest, opts ...grpc.CallOption) (*clusterworkflowtmplpkg.ClusterWorkflowTemplateDeleteResponse, error) { + return a.delegate.DeleteClusterWorkflowTemplate(ctx, req) +} + +func (a argoKubeWorkflowClusterTemplateServiceClient) LintClusterWorkflowTemplate(ctx context.Context, req *clusterworkflowtmplpkg.ClusterWorkflowTemplateLintRequest, opts ...grpc.CallOption) (*v1alpha1.ClusterWorkflowTemplate, error) { + return a.delegate.LintClusterWorkflowTemplate(ctx, req) +} diff --git a/pkg/apiclient/argo-server-client.go b/pkg/apiclient/argo-server-client.go index 31c95dce5147..161f5dd197be 100644 --- a/pkg/apiclient/argo-server-client.go +++ b/pkg/apiclient/argo-server-client.go @@ -6,6 +6,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/metadata" + clusterworkflowtmplpkg 
"github.com/argoproj/argo/pkg/apiclient/clusterworkflowtemplate" cronworkflowpkg "github.com/argoproj/argo/pkg/apiclient/cronworkflow" workflowpkg "github.com/argoproj/argo/pkg/apiclient/workflow" workflowarchivepkg "github.com/argoproj/argo/pkg/apiclient/workflowarchive" @@ -45,6 +46,10 @@ func (a *argoServerClient) NewArchivedWorkflowServiceClient() (workflowarchivepk return workflowarchivepkg.NewArchivedWorkflowServiceClient(a.ClientConn), nil } +func (a *argoServerClient) NewClusterWorkflowTemplateServiceClient() clusterworkflowtmplpkg.ClusterWorkflowTemplateServiceClient { + return clusterworkflowtmplpkg.NewClusterWorkflowTemplateServiceClient(a.ClientConn) +} + func NewClientConn(argoServer string) (*grpc.ClientConn, error) { conn, err := grpc.Dial(argoServer, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(MaxClientGRPCMessageSize)), grpc.WithInsecure()) if err != nil { diff --git a/pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.pb.go b/pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.pb.go new file mode 100644 index 000000000000..758616c13735 --- /dev/null +++ b/pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.pb.go @@ -0,0 +1,2014 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.proto + +// Workflow Service +// +// Workflow Service API performs CRUD actions against application resources + +package clusterworkflowtemplate + +import ( + context "context" + fmt "fmt" + v1alpha1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ClusterWorkflowTemplateCreateRequest struct { + Template *v1alpha1.ClusterWorkflowTemplate `protobuf:"bytes,1,opt,name=template,proto3" json:"template,omitempty"` + CreateOptions *v1.CreateOptions `protobuf:"bytes,2,opt,name=createOptions,proto3" json:"createOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterWorkflowTemplateCreateRequest) Reset() { *m = ClusterWorkflowTemplateCreateRequest{} } +func (m *ClusterWorkflowTemplateCreateRequest) String() string { return proto.CompactTextString(m) } +func (*ClusterWorkflowTemplateCreateRequest) ProtoMessage() {} +func (*ClusterWorkflowTemplateCreateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_688d96b5f613e598, []int{0} +} +func (m *ClusterWorkflowTemplateCreateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterWorkflowTemplateCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClusterWorkflowTemplateCreateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClusterWorkflowTemplateCreateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterWorkflowTemplateCreateRequest.Merge(m, src) +} +func (m *ClusterWorkflowTemplateCreateRequest) XXX_Size() int { + return m.Size() +} +func (m *ClusterWorkflowTemplateCreateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterWorkflowTemplateCreateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterWorkflowTemplateCreateRequest proto.InternalMessageInfo + +func (m *ClusterWorkflowTemplateCreateRequest) GetTemplate() *v1alpha1.ClusterWorkflowTemplate { + if m != nil { + return m.Template + } + return nil +} + +func (m 
*ClusterWorkflowTemplateCreateRequest) GetCreateOptions() *v1.CreateOptions { + if m != nil { + return m.CreateOptions + } + return nil +} + +type ClusterWorkflowTemplateGetRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + GetOptions *v1.GetOptions `protobuf:"bytes,2,opt,name=getOptions,proto3" json:"getOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterWorkflowTemplateGetRequest) Reset() { *m = ClusterWorkflowTemplateGetRequest{} } +func (m *ClusterWorkflowTemplateGetRequest) String() string { return proto.CompactTextString(m) } +func (*ClusterWorkflowTemplateGetRequest) ProtoMessage() {} +func (*ClusterWorkflowTemplateGetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_688d96b5f613e598, []int{1} +} +func (m *ClusterWorkflowTemplateGetRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterWorkflowTemplateGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClusterWorkflowTemplateGetRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClusterWorkflowTemplateGetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterWorkflowTemplateGetRequest.Merge(m, src) +} +func (m *ClusterWorkflowTemplateGetRequest) XXX_Size() int { + return m.Size() +} +func (m *ClusterWorkflowTemplateGetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterWorkflowTemplateGetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterWorkflowTemplateGetRequest proto.InternalMessageInfo + +func (m *ClusterWorkflowTemplateGetRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ClusterWorkflowTemplateGetRequest) GetGetOptions() *v1.GetOptions { + if m != nil { + 
return m.GetOptions + } + return nil +} + +type ClusterWorkflowTemplateListRequest struct { + ListOptions *v1.ListOptions `protobuf:"bytes,1,opt,name=listOptions,proto3" json:"listOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterWorkflowTemplateListRequest) Reset() { *m = ClusterWorkflowTemplateListRequest{} } +func (m *ClusterWorkflowTemplateListRequest) String() string { return proto.CompactTextString(m) } +func (*ClusterWorkflowTemplateListRequest) ProtoMessage() {} +func (*ClusterWorkflowTemplateListRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_688d96b5f613e598, []int{2} +} +func (m *ClusterWorkflowTemplateListRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterWorkflowTemplateListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClusterWorkflowTemplateListRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClusterWorkflowTemplateListRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterWorkflowTemplateListRequest.Merge(m, src) +} +func (m *ClusterWorkflowTemplateListRequest) XXX_Size() int { + return m.Size() +} +func (m *ClusterWorkflowTemplateListRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterWorkflowTemplateListRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterWorkflowTemplateListRequest proto.InternalMessageInfo + +func (m *ClusterWorkflowTemplateListRequest) GetListOptions() *v1.ListOptions { + if m != nil { + return m.ListOptions + } + return nil +} + +type ClusterWorkflowTemplateUpdateRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Template *v1alpha1.ClusterWorkflowTemplate `protobuf:"bytes,2,opt,name=template,proto3" 
json:"template,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterWorkflowTemplateUpdateRequest) Reset() { *m = ClusterWorkflowTemplateUpdateRequest{} } +func (m *ClusterWorkflowTemplateUpdateRequest) String() string { return proto.CompactTextString(m) } +func (*ClusterWorkflowTemplateUpdateRequest) ProtoMessage() {} +func (*ClusterWorkflowTemplateUpdateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_688d96b5f613e598, []int{3} +} +func (m *ClusterWorkflowTemplateUpdateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterWorkflowTemplateUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClusterWorkflowTemplateUpdateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClusterWorkflowTemplateUpdateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterWorkflowTemplateUpdateRequest.Merge(m, src) +} +func (m *ClusterWorkflowTemplateUpdateRequest) XXX_Size() int { + return m.Size() +} +func (m *ClusterWorkflowTemplateUpdateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterWorkflowTemplateUpdateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterWorkflowTemplateUpdateRequest proto.InternalMessageInfo + +func (m *ClusterWorkflowTemplateUpdateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ClusterWorkflowTemplateUpdateRequest) GetTemplate() *v1alpha1.ClusterWorkflowTemplate { + if m != nil { + return m.Template + } + return nil +} + +type ClusterWorkflowTemplateDeleteRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + DeleteOptions *v1.DeleteOptions `protobuf:"bytes,2,opt,name=deleteOptions,proto3" json:"deleteOptions,omitempty"` 
+ XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterWorkflowTemplateDeleteRequest) Reset() { *m = ClusterWorkflowTemplateDeleteRequest{} } +func (m *ClusterWorkflowTemplateDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*ClusterWorkflowTemplateDeleteRequest) ProtoMessage() {} +func (*ClusterWorkflowTemplateDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_688d96b5f613e598, []int{4} +} +func (m *ClusterWorkflowTemplateDeleteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterWorkflowTemplateDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClusterWorkflowTemplateDeleteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClusterWorkflowTemplateDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterWorkflowTemplateDeleteRequest.Merge(m, src) +} +func (m *ClusterWorkflowTemplateDeleteRequest) XXX_Size() int { + return m.Size() +} +func (m *ClusterWorkflowTemplateDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterWorkflowTemplateDeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterWorkflowTemplateDeleteRequest proto.InternalMessageInfo + +func (m *ClusterWorkflowTemplateDeleteRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ClusterWorkflowTemplateDeleteRequest) GetDeleteOptions() *v1.DeleteOptions { + if m != nil { + return m.DeleteOptions + } + return nil +} + +type ClusterWorkflowTemplateDeleteResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterWorkflowTemplateDeleteResponse) Reset() { *m = ClusterWorkflowTemplateDeleteResponse{} } 
+func (m *ClusterWorkflowTemplateDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*ClusterWorkflowTemplateDeleteResponse) ProtoMessage() {} +func (*ClusterWorkflowTemplateDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_688d96b5f613e598, []int{5} +} +func (m *ClusterWorkflowTemplateDeleteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterWorkflowTemplateDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClusterWorkflowTemplateDeleteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClusterWorkflowTemplateDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterWorkflowTemplateDeleteResponse.Merge(m, src) +} +func (m *ClusterWorkflowTemplateDeleteResponse) XXX_Size() int { + return m.Size() +} +func (m *ClusterWorkflowTemplateDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterWorkflowTemplateDeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterWorkflowTemplateDeleteResponse proto.InternalMessageInfo + +type ClusterWorkflowTemplateLintRequest struct { + Template *v1alpha1.ClusterWorkflowTemplate `protobuf:"bytes,1,opt,name=template,proto3" json:"template,omitempty"` + CreateOptions *v1.CreateOptions `protobuf:"bytes,2,opt,name=createOptions,proto3" json:"createOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterWorkflowTemplateLintRequest) Reset() { *m = ClusterWorkflowTemplateLintRequest{} } +func (m *ClusterWorkflowTemplateLintRequest) String() string { return proto.CompactTextString(m) } +func (*ClusterWorkflowTemplateLintRequest) ProtoMessage() {} +func (*ClusterWorkflowTemplateLintRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_688d96b5f613e598, []int{6} +} +func (m *ClusterWorkflowTemplateLintRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterWorkflowTemplateLintRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClusterWorkflowTemplateLintRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClusterWorkflowTemplateLintRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterWorkflowTemplateLintRequest.Merge(m, src) +} +func (m *ClusterWorkflowTemplateLintRequest) XXX_Size() int { + return m.Size() +} +func (m *ClusterWorkflowTemplateLintRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterWorkflowTemplateLintRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterWorkflowTemplateLintRequest proto.InternalMessageInfo + +func (m *ClusterWorkflowTemplateLintRequest) GetTemplate() *v1alpha1.ClusterWorkflowTemplate { + if m != nil { + return m.Template + } + return nil +} + +func (m *ClusterWorkflowTemplateLintRequest) GetCreateOptions() *v1.CreateOptions { + if m != nil { + return m.CreateOptions + } + return nil +} + +func init() { + proto.RegisterType((*ClusterWorkflowTemplateCreateRequest)(nil), "clusterworkflowtemplate.ClusterWorkflowTemplateCreateRequest") + proto.RegisterType((*ClusterWorkflowTemplateGetRequest)(nil), "clusterworkflowtemplate.ClusterWorkflowTemplateGetRequest") + proto.RegisterType((*ClusterWorkflowTemplateListRequest)(nil), "clusterworkflowtemplate.ClusterWorkflowTemplateListRequest") + proto.RegisterType((*ClusterWorkflowTemplateUpdateRequest)(nil), "clusterworkflowtemplate.ClusterWorkflowTemplateUpdateRequest") + proto.RegisterType((*ClusterWorkflowTemplateDeleteRequest)(nil), "clusterworkflowtemplate.ClusterWorkflowTemplateDeleteRequest") + proto.RegisterType((*ClusterWorkflowTemplateDeleteResponse)(nil), 
"clusterworkflowtemplate.ClusterWorkflowTemplateDeleteResponse") + proto.RegisterType((*ClusterWorkflowTemplateLintRequest)(nil), "clusterworkflowtemplate.ClusterWorkflowTemplateLintRequest") +} + +func init() { + proto.RegisterFile("pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.proto", fileDescriptor_688d96b5f613e598) +} + +var fileDescriptor_688d96b5f613e598 = []byte{ + // 666 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x56, 0xc1, 0x6f, 0xd3, 0x3e, + 0x14, 0x96, 0xab, 0x9f, 0x7e, 0x02, 0x4f, 0xbb, 0xf8, 0xc2, 0x14, 0xd6, 0x0a, 0xac, 0xa1, 0x42, + 0xa1, 0x0e, 0xe9, 0x76, 0x40, 0x45, 0xec, 0x40, 0x87, 0x7a, 0xa9, 0xc4, 0x94, 0x81, 0xd0, 0xb8, + 0xb9, 0xa9, 0x97, 0x86, 0xa6, 0x71, 0x48, 0xdc, 0x4e, 0x13, 0xe2, 0xc2, 0x8d, 0x33, 0xe2, 0x3f, + 0xe0, 0x8f, 0xe1, 0x84, 0x40, 0x9c, 0x10, 0x1c, 0x50, 0xc5, 0x05, 0xf1, 0x4f, 0xa0, 0x38, 0x49, + 0x9b, 0x0a, 0xdc, 0xa6, 0x55, 0x76, 0xe1, 0xe6, 0xd4, 0x7e, 0xdf, 0xfb, 0xbe, 0xf7, 0x9e, 0xbf, + 0x1a, 0x3e, 0xf0, 0x07, 0xb6, 0x4e, 0x7d, 0xc7, 0x72, 0x1d, 0xe6, 0x09, 0xdd, 0x72, 0x47, 0xa1, + 0x60, 0xc1, 0x29, 0x0f, 0x06, 0x27, 0x2e, 0x3f, 0x15, 0x6c, 0xe8, 0xbb, 0x54, 0xb0, 0xf4, 0xf7, + 0x7a, 0xba, 0x51, 0x4f, 0x77, 0x88, 0x1f, 0x70, 0xc1, 0xd1, 0x25, 0x45, 0xa0, 0x56, 0xb7, 0x1d, + 0xd1, 0x1f, 0x75, 0x89, 0xc5, 0x87, 0xba, 0xcd, 0x6d, 0xae, 0xcb, 0xf3, 0xdd, 0xd1, 0x89, 0xfc, + 0x92, 0x1f, 0x72, 0x15, 0xe3, 0x68, 0xdb, 0x36, 0xe7, 0xb6, 0xcb, 0x22, 0x46, 0x3a, 0xf5, 0x3c, + 0x2e, 0xa8, 0x70, 0xb8, 0x17, 0x26, 0xbb, 0x7b, 0x83, 0x3b, 0x21, 0x71, 0x78, 0xb4, 0x3b, 0xa4, + 0x56, 0xdf, 0xf1, 0x58, 0x70, 0xa6, 0x27, 0x02, 0x42, 0x7d, 0xc8, 0x04, 0xd5, 0xc7, 0x86, 0x6e, + 0x33, 0x8f, 0x05, 0x54, 0xb0, 0x5e, 0x12, 0xd5, 0xca, 0x50, 0xa0, 0x81, 0x4c, 0xfa, 0x4c, 0x2e, + 0x66, 0xa1, 0x29, 0x6d, 0x7d, 0x6c, 0x50, 0xd7, 0xef, 0xd3, 0x3f, 0x40, 0xf0, 0x2f, 0x00, 0x77, + 0x5a, 0xb1, 0xc6, 0x27, 0xc9, 0xe1, 0x47, 0x89, 0xc6, 0x56, 0xc0, 0xa8, 0x60, 0x26, 
0x7b, 0x3e, + 0x62, 0xa1, 0x40, 0x7d, 0x78, 0x21, 0x15, 0xbf, 0x05, 0xae, 0x80, 0xeb, 0x1b, 0x8d, 0x0e, 0x99, + 0x11, 0x20, 0x29, 0x01, 0xb9, 0x20, 0xfe, 0xc0, 0x26, 0x11, 0x01, 0x92, 0x12, 0x20, 0x29, 0x01, + 0xa2, 0x48, 0x66, 0x4e, 0xd1, 0xd1, 0x31, 0xdc, 0xb4, 0x64, 0xea, 0x87, 0xbe, 0x2c, 0xd2, 0x56, + 0x49, 0xa6, 0xdb, 0x25, 0x71, 0x95, 0x48, 0xb6, 0x4a, 0xb3, 0x4c, 0x51, 0x95, 0xc8, 0xd8, 0x20, + 0xad, 0x6c, 0xa8, 0x39, 0x8f, 0x84, 0x5f, 0x03, 0x78, 0x55, 0x41, 0xa0, 0xcd, 0x44, 0x2a, 0x15, + 0xc1, 0xff, 0x3c, 0x3a, 0x8c, 0x65, 0x5e, 0x34, 0xe5, 0x1a, 0x1d, 0x42, 0x68, 0x33, 0x31, 0xcf, + 0xe8, 0x76, 0x3e, 0x46, 0xed, 0x69, 0x9c, 0x99, 0xc1, 0xc0, 0x67, 0x10, 0x2b, 0xa8, 0x74, 0x9c, + 0x70, 0xca, 0xe5, 0x08, 0x6e, 0xb8, 0x4e, 0x38, 0x4d, 0x1c, 0x57, 0xde, 0xc8, 0x97, 0xb8, 0x33, + 0x0b, 0x34, 0xb3, 0x28, 0xf8, 0x9d, 0xba, 0xe9, 0x8f, 0xfd, 0x5e, 0xa6, 0xe9, 0x7f, 0xab, 0x44, + 0x76, 0x10, 0x4a, 0xe7, 0x39, 0x08, 0xf8, 0xad, 0x9a, 0xe6, 0x01, 0x73, 0xd9, 0x62, 0x9a, 0xc7, + 0x70, 0xb3, 0x27, 0x0f, 0xad, 0x35, 0x45, 0x07, 0xd9, 0x50, 0x73, 0x1e, 0x09, 0x57, 0xe1, 0xb5, + 0x25, 0xb4, 0x42, 0x9f, 0x7b, 0x21, 0xc3, 0x3f, 0xc1, 0x82, 0x1e, 0x7b, 0xe2, 0x5f, 0xba, 0x5a, + 0x8d, 0x0f, 0x10, 0x56, 0x14, 0x04, 0x8e, 0x58, 0x30, 0x76, 0x2c, 0x86, 0xbe, 0x02, 0x58, 0x8e, + 0x31, 0x14, 0x07, 0xd1, 0x3d, 0xa2, 0xf0, 0x5b, 0x92, 0xc7, 0xa3, 0xb4, 0x42, 0xcb, 0x86, 0xeb, + 0xaf, 0x3e, 0xff, 0x78, 0x53, 0xaa, 0x62, 0x2c, 0x5d, 0x7b, 0x6c, 0xa8, 0xff, 0x2c, 0xc2, 0x26, + 0xa8, 0xa1, 0x2f, 0x00, 0x6a, 0x6d, 0x26, 0x54, 0xd2, 0x9a, 0xab, 0x4a, 0x9b, 0x19, 0x52, 0xc1, + 0xba, 0x0c, 0xa9, 0xeb, 0x26, 0xba, 0xb1, 0x5c, 0x97, 0xfe, 0x22, 0xba, 0x4b, 0x2f, 0x23, 0x6d, + 0xdb, 0x91, 0x9b, 0x28, 0x20, 0x43, 0x74, 0x77, 0x55, 0x75, 0x19, 0x8f, 0xd3, 0x0e, 0x8b, 0x94, + 0x17, 0x01, 0xe3, 0x9a, 0x94, 0xb8, 0x83, 0x72, 0xb4, 0x0e, 0x4d, 0x00, 0x2c, 0xc7, 0xae, 0x57, + 0xd8, 0x54, 0xce, 0x99, 0x68, 0xc1, 0xdd, 0xdb, 0x93, 0xd2, 0x88, 0x96, 0xbf, 0x7b, 0xd1, 0x70, + 0x7e, 0x02, 0xb0, 0x1c, 
0x9b, 0x53, 0x61, 0x22, 0xe7, 0x2c, 0x58, 0xdb, 0x5f, 0x37, 0x3c, 0xb1, + 0xca, 0x64, 0x28, 0x6b, 0x2b, 0x0c, 0xe5, 0x37, 0x00, 0x2f, 0x47, 0x36, 0xaa, 0x52, 0xb4, 0xc6, + 0x4c, 0x7a, 0xe7, 0x74, 0xe5, 0x1a, 0x52, 0xdd, 0x2d, 0x5c, 0xcd, 0xa1, 0xce, 0x75, 0x3c, 0xd1, + 0x04, 0xb5, 0xfb, 0x9d, 0xf7, 0x93, 0x0a, 0xf8, 0x38, 0xa9, 0x80, 0xef, 0x93, 0x0a, 0x78, 0xba, + 0xbf, 0xec, 0xb1, 0xb7, 0xf8, 0xa1, 0xdb, 0xfd, 0x5f, 0x3e, 0xf7, 0x76, 0x7f, 0x07, 0x00, 0x00, + 0xff, 0xff, 0x00, 0x14, 0x9b, 0x01, 0x18, 0x0b, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ClusterWorkflowTemplateServiceClient is the client API for ClusterWorkflowTemplateService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type ClusterWorkflowTemplateServiceClient interface { + CreateClusterWorkflowTemplate(ctx context.Context, in *ClusterWorkflowTemplateCreateRequest, opts ...grpc.CallOption) (*v1alpha1.ClusterWorkflowTemplate, error) + GetClusterWorkflowTemplate(ctx context.Context, in *ClusterWorkflowTemplateGetRequest, opts ...grpc.CallOption) (*v1alpha1.ClusterWorkflowTemplate, error) + ListClusterWorkflowTemplates(ctx context.Context, in *ClusterWorkflowTemplateListRequest, opts ...grpc.CallOption) (*v1alpha1.ClusterWorkflowTemplateList, error) + UpdateClusterWorkflowTemplate(ctx context.Context, in *ClusterWorkflowTemplateUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.ClusterWorkflowTemplate, error) + DeleteClusterWorkflowTemplate(ctx context.Context, in *ClusterWorkflowTemplateDeleteRequest, opts ...grpc.CallOption) (*ClusterWorkflowTemplateDeleteResponse, error) + LintClusterWorkflowTemplate(ctx context.Context, in *ClusterWorkflowTemplateLintRequest, opts ...grpc.CallOption) (*v1alpha1.ClusterWorkflowTemplate, error) +} + +type clusterWorkflowTemplateServiceClient struct { + cc *grpc.ClientConn +} + +func NewClusterWorkflowTemplateServiceClient(cc *grpc.ClientConn) ClusterWorkflowTemplateServiceClient { + return &clusterWorkflowTemplateServiceClient{cc} +} + +func (c *clusterWorkflowTemplateServiceClient) CreateClusterWorkflowTemplate(ctx context.Context, in *ClusterWorkflowTemplateCreateRequest, opts ...grpc.CallOption) (*v1alpha1.ClusterWorkflowTemplate, error) { + out := new(v1alpha1.ClusterWorkflowTemplate) + err := c.cc.Invoke(ctx, "/clusterworkflowtemplate.ClusterWorkflowTemplateService/CreateClusterWorkflowTemplate", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterWorkflowTemplateServiceClient) GetClusterWorkflowTemplate(ctx context.Context, in *ClusterWorkflowTemplateGetRequest, opts ...grpc.CallOption) (*v1alpha1.ClusterWorkflowTemplate, error) { + out := new(v1alpha1.ClusterWorkflowTemplate) + err := c.cc.Invoke(ctx, "/clusterworkflowtemplate.ClusterWorkflowTemplateService/GetClusterWorkflowTemplate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterWorkflowTemplateServiceClient) ListClusterWorkflowTemplates(ctx context.Context, in *ClusterWorkflowTemplateListRequest, opts ...grpc.CallOption) (*v1alpha1.ClusterWorkflowTemplateList, error) { + out := new(v1alpha1.ClusterWorkflowTemplateList) + err := c.cc.Invoke(ctx, "/clusterworkflowtemplate.ClusterWorkflowTemplateService/ListClusterWorkflowTemplates", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterWorkflowTemplateServiceClient) UpdateClusterWorkflowTemplate(ctx context.Context, in *ClusterWorkflowTemplateUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.ClusterWorkflowTemplate, error) { + out := new(v1alpha1.ClusterWorkflowTemplate) + err := c.cc.Invoke(ctx, "/clusterworkflowtemplate.ClusterWorkflowTemplateService/UpdateClusterWorkflowTemplate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterWorkflowTemplateServiceClient) DeleteClusterWorkflowTemplate(ctx context.Context, in *ClusterWorkflowTemplateDeleteRequest, opts ...grpc.CallOption) (*ClusterWorkflowTemplateDeleteResponse, error) { + out := new(ClusterWorkflowTemplateDeleteResponse) + err := c.cc.Invoke(ctx, "/clusterworkflowtemplate.ClusterWorkflowTemplateService/DeleteClusterWorkflowTemplate", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterWorkflowTemplateServiceClient) LintClusterWorkflowTemplate(ctx context.Context, in *ClusterWorkflowTemplateLintRequest, opts ...grpc.CallOption) (*v1alpha1.ClusterWorkflowTemplate, error) { + out := new(v1alpha1.ClusterWorkflowTemplate) + err := c.cc.Invoke(ctx, "/clusterworkflowtemplate.ClusterWorkflowTemplateService/LintClusterWorkflowTemplate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ClusterWorkflowTemplateServiceServer is the server API for ClusterWorkflowTemplateService service. +type ClusterWorkflowTemplateServiceServer interface { + CreateClusterWorkflowTemplate(context.Context, *ClusterWorkflowTemplateCreateRequest) (*v1alpha1.ClusterWorkflowTemplate, error) + GetClusterWorkflowTemplate(context.Context, *ClusterWorkflowTemplateGetRequest) (*v1alpha1.ClusterWorkflowTemplate, error) + ListClusterWorkflowTemplates(context.Context, *ClusterWorkflowTemplateListRequest) (*v1alpha1.ClusterWorkflowTemplateList, error) + UpdateClusterWorkflowTemplate(context.Context, *ClusterWorkflowTemplateUpdateRequest) (*v1alpha1.ClusterWorkflowTemplate, error) + DeleteClusterWorkflowTemplate(context.Context, *ClusterWorkflowTemplateDeleteRequest) (*ClusterWorkflowTemplateDeleteResponse, error) + LintClusterWorkflowTemplate(context.Context, *ClusterWorkflowTemplateLintRequest) (*v1alpha1.ClusterWorkflowTemplate, error) +} + +// UnimplementedClusterWorkflowTemplateServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedClusterWorkflowTemplateServiceServer struct { +} + +func (*UnimplementedClusterWorkflowTemplateServiceServer) CreateClusterWorkflowTemplate(ctx context.Context, req *ClusterWorkflowTemplateCreateRequest) (*v1alpha1.ClusterWorkflowTemplate, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateClusterWorkflowTemplate not implemented") +} +func (*UnimplementedClusterWorkflowTemplateServiceServer) GetClusterWorkflowTemplate(ctx context.Context, req *ClusterWorkflowTemplateGetRequest) (*v1alpha1.ClusterWorkflowTemplate, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetClusterWorkflowTemplate not implemented") +} +func (*UnimplementedClusterWorkflowTemplateServiceServer) ListClusterWorkflowTemplates(ctx context.Context, req *ClusterWorkflowTemplateListRequest) (*v1alpha1.ClusterWorkflowTemplateList, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListClusterWorkflowTemplates not implemented") +} +func (*UnimplementedClusterWorkflowTemplateServiceServer) UpdateClusterWorkflowTemplate(ctx context.Context, req *ClusterWorkflowTemplateUpdateRequest) (*v1alpha1.ClusterWorkflowTemplate, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateClusterWorkflowTemplate not implemented") +} +func (*UnimplementedClusterWorkflowTemplateServiceServer) DeleteClusterWorkflowTemplate(ctx context.Context, req *ClusterWorkflowTemplateDeleteRequest) (*ClusterWorkflowTemplateDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteClusterWorkflowTemplate not implemented") +} +func (*UnimplementedClusterWorkflowTemplateServiceServer) LintClusterWorkflowTemplate(ctx context.Context, req *ClusterWorkflowTemplateLintRequest) (*v1alpha1.ClusterWorkflowTemplate, error) { + return nil, status.Errorf(codes.Unimplemented, "method LintClusterWorkflowTemplate not implemented") +} + +func RegisterClusterWorkflowTemplateServiceServer(s *grpc.Server, srv 
ClusterWorkflowTemplateServiceServer) { + s.RegisterService(&_ClusterWorkflowTemplateService_serviceDesc, srv) +} + +func _ClusterWorkflowTemplateService_CreateClusterWorkflowTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClusterWorkflowTemplateCreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterWorkflowTemplateServiceServer).CreateClusterWorkflowTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/clusterworkflowtemplate.ClusterWorkflowTemplateService/CreateClusterWorkflowTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterWorkflowTemplateServiceServer).CreateClusterWorkflowTemplate(ctx, req.(*ClusterWorkflowTemplateCreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterWorkflowTemplateService_GetClusterWorkflowTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClusterWorkflowTemplateGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterWorkflowTemplateServiceServer).GetClusterWorkflowTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/clusterworkflowtemplate.ClusterWorkflowTemplateService/GetClusterWorkflowTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterWorkflowTemplateServiceServer).GetClusterWorkflowTemplate(ctx, req.(*ClusterWorkflowTemplateGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterWorkflowTemplateService_ListClusterWorkflowTemplates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(ClusterWorkflowTemplateListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterWorkflowTemplateServiceServer).ListClusterWorkflowTemplates(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/clusterworkflowtemplate.ClusterWorkflowTemplateService/ListClusterWorkflowTemplates", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterWorkflowTemplateServiceServer).ListClusterWorkflowTemplates(ctx, req.(*ClusterWorkflowTemplateListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterWorkflowTemplateService_UpdateClusterWorkflowTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClusterWorkflowTemplateUpdateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterWorkflowTemplateServiceServer).UpdateClusterWorkflowTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/clusterworkflowtemplate.ClusterWorkflowTemplateService/UpdateClusterWorkflowTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterWorkflowTemplateServiceServer).UpdateClusterWorkflowTemplate(ctx, req.(*ClusterWorkflowTemplateUpdateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterWorkflowTemplateService_DeleteClusterWorkflowTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClusterWorkflowTemplateDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterWorkflowTemplateServiceServer).DeleteClusterWorkflowTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, 
+ FullMethod: "/clusterworkflowtemplate.ClusterWorkflowTemplateService/DeleteClusterWorkflowTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterWorkflowTemplateServiceServer).DeleteClusterWorkflowTemplate(ctx, req.(*ClusterWorkflowTemplateDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterWorkflowTemplateService_LintClusterWorkflowTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClusterWorkflowTemplateLintRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterWorkflowTemplateServiceServer).LintClusterWorkflowTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/clusterworkflowtemplate.ClusterWorkflowTemplateService/LintClusterWorkflowTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterWorkflowTemplateServiceServer).LintClusterWorkflowTemplate(ctx, req.(*ClusterWorkflowTemplateLintRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ClusterWorkflowTemplateService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "clusterworkflowtemplate.ClusterWorkflowTemplateService", + HandlerType: (*ClusterWorkflowTemplateServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateClusterWorkflowTemplate", + Handler: _ClusterWorkflowTemplateService_CreateClusterWorkflowTemplate_Handler, + }, + { + MethodName: "GetClusterWorkflowTemplate", + Handler: _ClusterWorkflowTemplateService_GetClusterWorkflowTemplate_Handler, + }, + { + MethodName: "ListClusterWorkflowTemplates", + Handler: _ClusterWorkflowTemplateService_ListClusterWorkflowTemplates_Handler, + }, + { + MethodName: "UpdateClusterWorkflowTemplate", + Handler: _ClusterWorkflowTemplateService_UpdateClusterWorkflowTemplate_Handler, + }, + { + 
MethodName: "DeleteClusterWorkflowTemplate", + Handler: _ClusterWorkflowTemplateService_DeleteClusterWorkflowTemplate_Handler, + }, + { + MethodName: "LintClusterWorkflowTemplate", + Handler: _ClusterWorkflowTemplateService_LintClusterWorkflowTemplate_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.proto", +} + +func (m *ClusterWorkflowTemplateCreateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterWorkflowTemplateCreateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterWorkflowTemplateCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CreateOptions != nil { + { + size, err := m.CreateOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintClusterWorkflowTemplate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Template != nil { + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintClusterWorkflowTemplate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterWorkflowTemplateGetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterWorkflowTemplateGetRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterWorkflowTemplateGetRequest) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.GetOptions != nil { + { + size, err := m.GetOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintClusterWorkflowTemplate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintClusterWorkflowTemplate(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterWorkflowTemplateListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterWorkflowTemplateListRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterWorkflowTemplateListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ListOptions != nil { + { + size, err := m.ListOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintClusterWorkflowTemplate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterWorkflowTemplateUpdateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterWorkflowTemplateUpdateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterWorkflowTemplateUpdateRequest) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Template != nil { + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintClusterWorkflowTemplate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintClusterWorkflowTemplate(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterWorkflowTemplateDeleteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterWorkflowTemplateDeleteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterWorkflowTemplateDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.DeleteOptions != nil { + { + size, err := m.DeleteOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintClusterWorkflowTemplate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintClusterWorkflowTemplate(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterWorkflowTemplateDeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ClusterWorkflowTemplateDeleteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterWorkflowTemplateDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ClusterWorkflowTemplateLintRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterWorkflowTemplateLintRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterWorkflowTemplateLintRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CreateOptions != nil { + { + size, err := m.CreateOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintClusterWorkflowTemplate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Template != nil { + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintClusterWorkflowTemplate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintClusterWorkflowTemplate(dAtA []byte, offset int, v uint64) int { + offset -= sovClusterWorkflowTemplate(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClusterWorkflowTemplateCreateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Template != nil { + l = 
m.Template.Size() + n += 1 + l + sovClusterWorkflowTemplate(uint64(l)) + } + if m.CreateOptions != nil { + l = m.CreateOptions.Size() + n += 1 + l + sovClusterWorkflowTemplate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ClusterWorkflowTemplateGetRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovClusterWorkflowTemplate(uint64(l)) + } + if m.GetOptions != nil { + l = m.GetOptions.Size() + n += 1 + l + sovClusterWorkflowTemplate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ClusterWorkflowTemplateListRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListOptions != nil { + l = m.ListOptions.Size() + n += 1 + l + sovClusterWorkflowTemplate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ClusterWorkflowTemplateUpdateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovClusterWorkflowTemplate(uint64(l)) + } + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovClusterWorkflowTemplate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ClusterWorkflowTemplateDeleteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovClusterWorkflowTemplate(uint64(l)) + } + if m.DeleteOptions != nil { + l = m.DeleteOptions.Size() + n += 1 + l + sovClusterWorkflowTemplate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ClusterWorkflowTemplateDeleteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m 
*ClusterWorkflowTemplateLintRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovClusterWorkflowTemplate(uint64(l)) + } + if m.CreateOptions != nil { + l = m.CreateOptions.Size() + n += 1 + l + sovClusterWorkflowTemplate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovClusterWorkflowTemplate(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozClusterWorkflowTemplate(x uint64) (n int) { + return sovClusterWorkflowTemplate(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ClusterWorkflowTemplateCreateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterWorkflowTemplateCreateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterWorkflowTemplateCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Template == nil { + m.Template = &v1alpha1.ClusterWorkflowTemplate{} + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreateOptions == nil { + m.CreateOptions = &v1.CreateOptions{} + } + if err := m.CreateOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipClusterWorkflowTemplate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterWorkflowTemplateGetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterWorkflowTemplateGetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterWorkflowTemplateGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthClusterWorkflowTemplate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetOptions == nil { + m.GetOptions = &v1.GetOptions{} + } + if err := m.GetOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipClusterWorkflowTemplate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterWorkflowTemplateListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterWorkflowTemplateListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterWorkflowTemplateListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ListOptions == nil { + m.ListOptions = &v1.ListOptions{} + } + if err := m.ListOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipClusterWorkflowTemplate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterWorkflowTemplateUpdateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterWorkflowTemplateUpdateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterWorkflowTemplateUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Template == nil { + m.Template = &v1alpha1.ClusterWorkflowTemplate{} + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipClusterWorkflowTemplate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterWorkflowTemplateDeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterWorkflowTemplateDeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterWorkflowTemplateDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthClusterWorkflowTemplate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeleteOptions == nil { + m.DeleteOptions = &v1.DeleteOptions{} + } + if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipClusterWorkflowTemplate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterWorkflowTemplateDeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterWorkflowTemplateDeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterWorkflowTemplateDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipClusterWorkflowTemplate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterWorkflowTemplateLintRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterWorkflowTemplateLintRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterWorkflowTemplateLintRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Template == nil { + m.Template = &v1alpha1.ClusterWorkflowTemplate{} + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClusterWorkflowTemplate 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreateOptions == nil { + m.CreateOptions = &v1.CreateOptions{} + } + if err := m.CreateOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipClusterWorkflowTemplate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthClusterWorkflowTemplate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipClusterWorkflowTemplate(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowClusterWorkflowTemplate + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthClusterWorkflowTemplate + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupClusterWorkflowTemplate + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthClusterWorkflowTemplate + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthClusterWorkflowTemplate = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowClusterWorkflowTemplate = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupClusterWorkflowTemplate = fmt.Errorf("proto: unexpected end of group") +) diff --git a/pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.pb.gw.go b/pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.pb.gw.go new file mode 100644 index 000000000000..08d59169dabc --- /dev/null +++ b/pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.pb.gw.go @@ -0,0 +1,658 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.proto + +/* +Package clusterworkflowtemplate is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package clusterworkflowtemplate + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_ClusterWorkflowTemplateService_CreateClusterWorkflowTemplate_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterWorkflowTemplateServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ClusterWorkflowTemplateCreateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.CreateClusterWorkflowTemplate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ClusterWorkflowTemplateService_CreateClusterWorkflowTemplate_0(ctx context.Context, marshaler runtime.Marshaler, server ClusterWorkflowTemplateServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ClusterWorkflowTemplateCreateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := 
marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.CreateClusterWorkflowTemplate(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_ClusterWorkflowTemplateService_GetClusterWorkflowTemplate_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_ClusterWorkflowTemplateService_GetClusterWorkflowTemplate_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterWorkflowTemplateServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ClusterWorkflowTemplateGetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ClusterWorkflowTemplateService_GetClusterWorkflowTemplate_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetClusterWorkflowTemplate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ClusterWorkflowTemplateService_GetClusterWorkflowTemplate_0(ctx context.Context, marshaler runtime.Marshaler, server ClusterWorkflowTemplateServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq 
ClusterWorkflowTemplateGetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_ClusterWorkflowTemplateService_GetClusterWorkflowTemplate_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetClusterWorkflowTemplate(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_ClusterWorkflowTemplateService_ListClusterWorkflowTemplates_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_ClusterWorkflowTemplateService_ListClusterWorkflowTemplates_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterWorkflowTemplateServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ClusterWorkflowTemplateListRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ClusterWorkflowTemplateService_ListClusterWorkflowTemplates_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ListClusterWorkflowTemplates(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ClusterWorkflowTemplateService_ListClusterWorkflowTemplates_0(ctx context.Context, marshaler runtime.Marshaler, server 
ClusterWorkflowTemplateServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ClusterWorkflowTemplateListRequest + var metadata runtime.ServerMetadata + + if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_ClusterWorkflowTemplateService_ListClusterWorkflowTemplates_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ListClusterWorkflowTemplates(ctx, &protoReq) + return msg, metadata, err + +} + +func request_ClusterWorkflowTemplateService_UpdateClusterWorkflowTemplate_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterWorkflowTemplateServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ClusterWorkflowTemplateUpdateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + msg, err := client.UpdateClusterWorkflowTemplate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ClusterWorkflowTemplateService_UpdateClusterWorkflowTemplate_0(ctx context.Context, marshaler runtime.Marshaler, server ClusterWorkflowTemplateServiceServer, req 
*http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ClusterWorkflowTemplateUpdateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + msg, err := server.UpdateClusterWorkflowTemplate(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_ClusterWorkflowTemplateService_DeleteClusterWorkflowTemplate_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_ClusterWorkflowTemplateService_DeleteClusterWorkflowTemplate_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterWorkflowTemplateServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ClusterWorkflowTemplateDeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + if err := req.ParseForm(); err != nil { + return nil, 
metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ClusterWorkflowTemplateService_DeleteClusterWorkflowTemplate_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.DeleteClusterWorkflowTemplate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ClusterWorkflowTemplateService_DeleteClusterWorkflowTemplate_0(ctx context.Context, marshaler runtime.Marshaler, server ClusterWorkflowTemplateServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ClusterWorkflowTemplateDeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_ClusterWorkflowTemplateService_DeleteClusterWorkflowTemplate_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.DeleteClusterWorkflowTemplate(ctx, &protoReq) + return msg, metadata, err + +} + +func request_ClusterWorkflowTemplateService_LintClusterWorkflowTemplate_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterWorkflowTemplateServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ClusterWorkflowTemplateLintRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, 
metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.LintClusterWorkflowTemplate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ClusterWorkflowTemplateService_LintClusterWorkflowTemplate_0(ctx context.Context, marshaler runtime.Marshaler, server ClusterWorkflowTemplateServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ClusterWorkflowTemplateLintRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.LintClusterWorkflowTemplate(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterClusterWorkflowTemplateServiceHandlerServer registers the http handlers for service ClusterWorkflowTemplateService to "mux". +// UnaryRPC :call ClusterWorkflowTemplateServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
+func RegisterClusterWorkflowTemplateServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ClusterWorkflowTemplateServiceServer) error { + + mux.Handle("POST", pattern_ClusterWorkflowTemplateService_CreateClusterWorkflowTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ClusterWorkflowTemplateService_CreateClusterWorkflowTemplate_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_ClusterWorkflowTemplateService_CreateClusterWorkflowTemplate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_ClusterWorkflowTemplateService_GetClusterWorkflowTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ClusterWorkflowTemplateService_GetClusterWorkflowTemplate_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_ClusterWorkflowTemplateService_GetClusterWorkflowTemplate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_ClusterWorkflowTemplateService_ListClusterWorkflowTemplates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ClusterWorkflowTemplateService_ListClusterWorkflowTemplates_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_ClusterWorkflowTemplateService_ListClusterWorkflowTemplates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_ClusterWorkflowTemplateService_UpdateClusterWorkflowTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ClusterWorkflowTemplateService_UpdateClusterWorkflowTemplate_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_ClusterWorkflowTemplateService_UpdateClusterWorkflowTemplate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_ClusterWorkflowTemplateService_DeleteClusterWorkflowTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ClusterWorkflowTemplateService_DeleteClusterWorkflowTemplate_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_ClusterWorkflowTemplateService_DeleteClusterWorkflowTemplate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_ClusterWorkflowTemplateService_LintClusterWorkflowTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ClusterWorkflowTemplateService_LintClusterWorkflowTemplate_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_ClusterWorkflowTemplateService_LintClusterWorkflowTemplate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +// RegisterClusterWorkflowTemplateServiceHandlerFromEndpoint is same as RegisterClusterWorkflowTemplateServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterClusterWorkflowTemplateServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterClusterWorkflowTemplateServiceHandler(ctx, mux, conn) +} + +// RegisterClusterWorkflowTemplateServiceHandler registers the http handlers for service ClusterWorkflowTemplateService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterClusterWorkflowTemplateServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterClusterWorkflowTemplateServiceHandlerClient(ctx, mux, NewClusterWorkflowTemplateServiceClient(conn)) +} + +// RegisterClusterWorkflowTemplateServiceHandlerClient registers the http handlers for service ClusterWorkflowTemplateService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ClusterWorkflowTemplateServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ClusterWorkflowTemplateServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "ClusterWorkflowTemplateServiceClient" to call the correct interceptors. 
+func RegisterClusterWorkflowTemplateServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ClusterWorkflowTemplateServiceClient) error { + + mux.Handle("POST", pattern_ClusterWorkflowTemplateService_CreateClusterWorkflowTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ClusterWorkflowTemplateService_CreateClusterWorkflowTemplate_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_ClusterWorkflowTemplateService_CreateClusterWorkflowTemplate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_ClusterWorkflowTemplateService_GetClusterWorkflowTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ClusterWorkflowTemplateService_GetClusterWorkflowTemplate_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_ClusterWorkflowTemplateService_GetClusterWorkflowTemplate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_ClusterWorkflowTemplateService_ListClusterWorkflowTemplates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ClusterWorkflowTemplateService_ListClusterWorkflowTemplates_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_ClusterWorkflowTemplateService_ListClusterWorkflowTemplates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_ClusterWorkflowTemplateService_UpdateClusterWorkflowTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ClusterWorkflowTemplateService_UpdateClusterWorkflowTemplate_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_ClusterWorkflowTemplateService_UpdateClusterWorkflowTemplate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_ClusterWorkflowTemplateService_DeleteClusterWorkflowTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ClusterWorkflowTemplateService_DeleteClusterWorkflowTemplate_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_ClusterWorkflowTemplateService_DeleteClusterWorkflowTemplate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_ClusterWorkflowTemplateService_LintClusterWorkflowTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ClusterWorkflowTemplateService_LintClusterWorkflowTemplate_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_ClusterWorkflowTemplateService_LintClusterWorkflowTemplate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_ClusterWorkflowTemplateService_CreateClusterWorkflowTemplate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "cluster-workflow-templates"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_ClusterWorkflowTemplateService_GetClusterWorkflowTemplate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "cluster-workflow-templates", "name"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_ClusterWorkflowTemplateService_ListClusterWorkflowTemplates_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "cluster-workflow-templates"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_ClusterWorkflowTemplateService_UpdateClusterWorkflowTemplate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "cluster-workflow-templates", "name"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_ClusterWorkflowTemplateService_DeleteClusterWorkflowTemplate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "cluster-workflow-templates", "name"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_ClusterWorkflowTemplateService_LintClusterWorkflowTemplate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "cluster-workflow-templates", "lint"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_ClusterWorkflowTemplateService_CreateClusterWorkflowTemplate_0 = runtime.ForwardResponseMessage + + forward_ClusterWorkflowTemplateService_GetClusterWorkflowTemplate_0 = runtime.ForwardResponseMessage + + forward_ClusterWorkflowTemplateService_ListClusterWorkflowTemplates_0 = runtime.ForwardResponseMessage + + forward_ClusterWorkflowTemplateService_UpdateClusterWorkflowTemplate_0 = runtime.ForwardResponseMessage + + 
forward_ClusterWorkflowTemplateService_DeleteClusterWorkflowTemplate_0 = runtime.ForwardResponseMessage + + forward_ClusterWorkflowTemplateService_LintClusterWorkflowTemplate_0 = runtime.ForwardResponseMessage +) diff --git a/pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.proto b/pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.proto new file mode 100644 index 000000000000..f690fe7e01db --- /dev/null +++ b/pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.proto @@ -0,0 +1,79 @@ +syntax = "proto3"; +option go_package = "github.com/argoproj/argo/pkg/apiclient/clusterworkflowtemplate"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1/generated.proto"; + +// Workflow Service +// +// Workflow Service API performs CRUD actions against application resources +package clusterworkflowtemplate; + + +message ClusterWorkflowTemplateCreateRequest { + github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate template = 1; + k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions createOptions = 2; +} + +message ClusterWorkflowTemplateGetRequest { + string name = 1; + k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions getOptions = 2; +} + +message ClusterWorkflowTemplateListRequest { + k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions listOptions = 1; +} + +message ClusterWorkflowTemplateUpdateRequest { + string name = 1; + github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate template = 2; +} + +message ClusterWorkflowTemplateDeleteRequest { + string name = 1; + k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; +} +message ClusterWorkflowTemplateDeleteResponse { +} +message ClusterWorkflowTemplateLintRequest { + github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate template = 1; + 
k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions createOptions = 2; +} + +service ClusterWorkflowTemplateService { + rpc CreateClusterWorkflowTemplate (ClusterWorkflowTemplateCreateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate) { + option (google.api.http) = { + post: "/api/v1/cluster-workflow-templates" + body: "*" + }; + } + + rpc GetClusterWorkflowTemplate (ClusterWorkflowTemplateGetRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate) { + option (google.api.http).get = "/api/v1/cluster-workflow-templates/{name}"; + } + + rpc ListClusterWorkflowTemplates (ClusterWorkflowTemplateListRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplateList) { + option (google.api.http).get = "/api/v1/cluster-workflow-templates"; + } + + rpc UpdateClusterWorkflowTemplate (ClusterWorkflowTemplateUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate) { + option (google.api.http) = { + put: "/api/v1/cluster-workflow-templates/{name}" + body: "*" + }; + } + + rpc DeleteClusterWorkflowTemplate (ClusterWorkflowTemplateDeleteRequest) returns (ClusterWorkflowTemplateDeleteResponse) { + option (google.api.http).delete = "/api/v1/cluster-workflow-templates/{name}"; + } + + rpc LintClusterWorkflowTemplate (ClusterWorkflowTemplateLintRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate) { + option (google.api.http) = { + post: "/api/v1/cluster-workflow-templates/lint" + body: "*" + }; + } + +} \ No newline at end of file diff --git a/pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json b/pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json new file mode 100644 index 000000000000..e0de56adfb96 --- /dev/null +++ b/pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json @@ -0,0 +1,4189 @@ +{ + "swagger": "2.0", + 
"info": { + "title": "Workflow Service", + "description": "Workflow Service API performs CRUD actions against application resources", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/api/v1/cluster-workflow-templates": { + "get": { + "operationId": "ListClusterWorkflowTemplates", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplateList" + } + } + }, + "parameters": [ + { + "name": "listOptions.labelSelector", + "description": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "listOptions.fieldSelector", + "description": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional.", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "listOptions.watch", + "description": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional.", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" + }, + { + "name": "listOptions.allowWatchBookmarks", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. 
Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\nIf the feature gate WatchBookmarks is not enabled in apiserver,\nthis field is ignored.\n\nThis field is beta.\n\n+optional", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" + }, + { + "name": "listOptions.resourceVersion", + "description": "When specified with a watch call, shows changes that occur after that particular version of a resource.\nDefaults to changes from the beginning of history.\nWhen specified for list:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.\n+optional.", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "listOptions.timeoutSeconds", + "description": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional.", + "in": "query", + "required": false, + "type": "string", + "format": "int64" + }, + { + "name": "listOptions.limit", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. 
If limit is specified and the continue field is empty, clients may\nassume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned.", + "in": "query", + "required": false, + "type": "string", + "format": "int64" + }, + { + "name": "listOptions.continue", + "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. 
Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", + "in": "query", + "required": false, + "type": "string" + } + ], + "tags": [ + "ClusterWorkflowTemplateService" + ] + }, + "post": { + "operationId": "CreateClusterWorkflowTemplate", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/clusterworkflowtemplate.ClusterWorkflowTemplateCreateRequest" + } + } + ], + "tags": [ + "ClusterWorkflowTemplateService" + ] + } + }, + "/api/v1/cluster-workflow-templates/lint": { + "post": { + "operationId": "LintClusterWorkflowTemplate", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/clusterworkflowtemplate.ClusterWorkflowTemplateLintRequest" + } + } + ], + "tags": [ + "ClusterWorkflowTemplateService" + ] + } + }, + "/api/v1/cluster-workflow-templates/{name}": { + "get": { + "operationId": "GetClusterWorkflowTemplate", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": 
"#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate" + } + } + }, + "parameters": [ + { + "name": "name", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "getOptions.resourceVersion", + "description": "When specified:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.", + "in": "query", + "required": false, + "type": "string" + } + ], + "tags": [ + "ClusterWorkflowTemplateService" + ] + }, + "delete": { + "operationId": "DeleteClusterWorkflowTemplate", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/clusterworkflowtemplate.ClusterWorkflowTemplateDeleteResponse" + } + } + }, + "parameters": [ + { + "name": "name", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "deleteOptions.gracePeriodSeconds", + "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer.\nThe value zero indicates delete immediately. If this value is nil, the default grace period for the\nspecified type will be used.\nDefaults to a per object value if not specified. 
zero means delete immediately.\n+optional.", + "in": "query", + "required": false, + "type": "string", + "format": "int64" + }, + { + "name": "deleteOptions.preconditions.uid", + "description": "Specifies the target UID.\n+optional.", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "deleteOptions.preconditions.resourceVersion", + "description": "Specifies the target ResourceVersion\n+optional.", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "deleteOptions.orphanDependents", + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.\nShould the dependent objects be orphaned. If true/false, the \"orphan\"\nfinalizer will be added to/removed from the object's finalizers list.\nEither this field or PropagationPolicy may be set, but not both.\n+optional.", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" + }, + { + "name": "deleteOptions.propagationPolicy", + "description": "Whether and how garbage collection will be performed.\nEither this field or OrphanDependents may be set, but not both.\nThe default policy is decided by the existing finalizer set in the\nmetadata.finalizers and the resource-specific default policy.\nAcceptable values are: 'Orphan' - orphan the dependents; 'Background' -\nallow the garbage collector to delete the dependents in the background;\n'Foreground' - a cascading policy that deletes all dependents in the\nforeground.\n+optional.", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "deleteOptions.dryRun", + "description": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. 
Valid values are:\n- All: all dry run stages will be processed\n+optional.", + "in": "query", + "required": false, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi" + } + ], + "tags": [ + "ClusterWorkflowTemplateService" + ] + }, + "put": { + "operationId": "UpdateClusterWorkflowTemplate", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate" + } + } + }, + "parameters": [ + { + "name": "name", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/clusterworkflowtemplate.ClusterWorkflowTemplateUpdateRequest" + } + } + ], + "tags": [ + "ClusterWorkflowTemplateService" + ] + } + } + }, + "definitions": { + "clusterworkflowtemplate.ClusterWorkflowTemplateCreateRequest": { + "type": "object", + "properties": { + "template": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate" + }, + "createOptions": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions" + } + } + }, + "clusterworkflowtemplate.ClusterWorkflowTemplateDeleteResponse": { + "type": "object" + }, + "clusterworkflowtemplate.ClusterWorkflowTemplateLintRequest": { + "type": "object", + "properties": { + "template": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate" + }, + "createOptions": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions" + } + } + }, + "clusterworkflowtemplate.ClusterWorkflowTemplateUpdateRequest": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "template": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate" + } + } + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArchiveStrategy": 
{ + "type": "object", + "properties": { + "tar": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.TarStrategy" + }, + "none": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.NoneStrategy" + } + }, + "title": "ArchiveStrategy describes how to archive files/directory when saving artifacts" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Arguments": { + "type": "object", + "properties": { + "parameters": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Parameter" + }, + "title": "Parameters is the list of parameters to pass to the template or workflow\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "artifacts": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Artifact" + }, + "title": "Artifacts is the list of artifacts to pass to the template or workflow\n+patchStrategy=merge\n+patchMergeKey=name" + } + }, + "title": "Arguments to a template" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Artifact": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "name of the artifact. must be unique within a template's inputs/outputs." + }, + "path": { + "type": "string", + "title": "Path is the container path to the artifact" + }, + "mode": { + "type": "integer", + "format": "int32", + "description": "mode bits to use on this file, must be a value between 0 and 0777\nset when loading input artifacts." 
+ }, + "from": { + "type": "string", + "title": "From allows an artifact to reference an artifact from a previous step" + }, + "artifactLocation": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArtifactLocation", + "title": "ArtifactLocation contains the location of the artifact" + }, + "globalName": { + "type": "string", + "title": "GlobalName exports an output artifact to the global scope, making it available as\n'{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts" + }, + "archive": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArchiveStrategy", + "description": "Archive controls how the artifact will be saved to the artifact repository." + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Make Artifacts optional, if Artifacts doesn't generate or exist" + } + }, + "title": "Artifact indicates an artifact to place at a specified path" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArtifactLocation": { + "type": "object", + "properties": { + "archiveLogs": { + "type": "boolean", + "format": "boolean", + "title": "ArchiveLogs indicates if the container logs should be archived" + }, + "s3": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.S3Artifact", + "title": "S3 contains S3 artifact location details" + }, + "git": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.GitArtifact", + "title": "Git contains git artifact location details" + }, + "http": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.HTTPArtifact", + "title": "HTTP contains HTTP artifact location details" + }, + "artifactory": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArtifactoryArtifact", + "title": "Artifactory contains artifactory artifact location details" + }, + "hdfs": { + "$ref": 
"#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.HDFSArtifact", + "title": "HDFS contains HDFS artifact location details" + }, + "raw": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.RawArtifact", + "title": "Raw contains raw artifact location details" + }, + "oss": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.OSSArtifact", + "title": "OSS contains OSS artifact location details" + }, + "gcs": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.GCSArtifact", + "title": "GCS contains GCS artifact location details" + } + }, + "description": "ArtifactLocation describes a location for a single or multiple artifacts.\nIt is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname).\nIt is also used to describe the location of multiple artifacts such as the archive location\nof a single workflow step, which the executor will use as a default location to store its files." 
+ }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArtifactRepositoryRef": { + "type": "object", + "properties": { + "configMap": { + "type": "string" + }, + "key": { + "type": "string" + } + } + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArtifactoryArtifact": { + "type": "object", + "properties": { + "url": { + "type": "string", + "title": "URL of the artifact" + }, + "artifactoryAuth": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArtifactoryAuth" + } + }, + "title": "ArtifactoryArtifact is the location of an artifactory artifact" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArtifactoryAuth": { + "type": "object", + "properties": { + "usernameSecret": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecretKeySelector", + "title": "UsernameSecret is the secret selector to the repository username" + }, + "passwordSecret": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecretKeySelector", + "title": "PasswordSecret is the secret selector to the repository password" + } + }, + "title": "ArtifactoryAuth describes the secret selectors required for authenticating to artifactory" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Backoff": { + "type": "object", + "properties": { + "duration": { + "type": "string" + }, + "factor": { + "type": "integer", + "format": "int32" + }, + "maxDuration": { + "type": "string" + } + } + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowTemplateSpec" + } + }, + "title": "ClusterWorkflowTemplate is the definition of a workflow template resource in cluster scope\n+genclient\n+genclient:noStatus\n+genclient:nonNamespaced\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object" + }, + 
"github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplateList": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate" + } + } + }, + "title": "ClusterWorkflowTemplateList is list of ClusterWorkflowTemplate resources\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ContinueOn": { + "type": "object", + "properties": { + "error": { + "type": "boolean", + "format": "boolean", + "title": "+optional" + }, + "failed": { + "type": "boolean", + "format": "boolean", + "title": "+optional" + } + }, + "description": "ContinueOn defines if a workflow should continue even if a task or step fails/errors.\nIt can be specified if the workflow should continue when the pod errors, fails or both." + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Counter": { + "type": "object", + "properties": { + "value": { + "type": "string", + "title": "Value is the value of the metric" + } + }, + "title": "Counter is a Counter prometheus metric" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.DAGTask": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name is the name of the target" + }, + "template": { + "type": "string", + "title": "Name of template to execute" + }, + "arguments": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Arguments", + "title": "Arguments are the parameter and artifact arguments to the template" + }, + "templateRef": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.TemplateRef", + "description": "TemplateRef is the reference to the template resource to execute." 
+ }, + "dependencies": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Dependencies are name of other targets which this depends on" + }, + "withItems": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Item" + }, + "title": "WithItems expands a task into multiple parallel tasks from the items in the list" + }, + "withParam": { + "type": "string", + "description": "WithParam expands a task into multiple parallel tasks from the value in the parameter,\nwhich is expected to be a JSON list." + }, + "withSequence": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Sequence", + "title": "WithSequence expands a task into a numeric sequence" + }, + "when": { + "type": "string", + "title": "When is an expression in which the task should conditionally execute" + }, + "continueOn": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ContinueOn", + "title": "ContinueOn makes argo to proceed with the following step even if this step fails.\nErrors and Failed states can be specified" + }, + "onExit": { + "type": "string", + "description": "OnExit is a template reference which is invoked at the end of the\ntemplate, irrespective of the success, failure, or error of the\nprimary template." + } + }, + "title": "DAGTask represents a node in the graph during DAG execution" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.DAGTemplate": { + "type": "object", + "properties": { + "target": { + "type": "string", + "title": "Target are one or more names of targets to execute in a DAG" + }, + "tasks": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.DAGTask" + }, + "title": "Tasks are a list of DAG tasks\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "failFast": { + "type": "boolean", + "format": "boolean", + "title": "This flag is for DAG logic. 
The DAG logic has a built-in \"fail fast\" feature to stop scheduling new steps,\nas soon as it detects that one of the DAG nodes is failed. Then it waits until all DAG nodes are completed\nbefore failing the DAG itself.\nThe FailFast flag default is true, if set to false, it will allow a DAG to run all branches of the DAG to\ncompletion (either success or failure), regardless of the failed outcomes of branches in the DAG.\nMore info and example about this feature at https://github.com/argoproj/argo/issues/1442" + } + }, + "title": "DAGTemplate is a template subtype for directed acyclic graph templates" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ExecutorConfig": { + "type": "object", + "properties": { + "serviceAccountName": { + "type": "string", + "description": "ServiceAccountName specifies the service account name of the executor container." + } + }, + "description": "ExecutorConfig holds configurations of an executor container." + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.GCSArtifact": { + "type": "object", + "properties": { + "gCSBucket": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.GCSBucket" + }, + "key": { + "type": "string", + "title": "Key is the path in the bucket where the artifact resides" + } + }, + "title": "GCSArtifact is the location of a GCS artifact" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.GCSBucket": { + "type": "object", + "properties": { + "bucket": { + "type": "string", + "title": "Bucket is the name of the bucket" + }, + "serviceAccountKeySecret": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecretKeySelector", + "title": "ServiceAccountKeySecret is the secret selector to the bucket's service account key" + } + }, + "title": "GCSBucket contains the access information for interfacing with a GCS bucket" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Gauge": { + "type": "object", + "properties": { + "value": { + "type": "string", + "title": 
"Value is the value of the metric" + }, + "realtime": { + "type": "boolean", + "format": "boolean", + "title": "Realtime emits this metric in real time if applicable" + } + }, + "title": "Gauge is a Gauge prometheus metric" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.GitArtifact": { + "type": "object", + "properties": { + "repo": { + "type": "string", + "title": "Repo is the git repository" + }, + "revision": { + "type": "string", + "title": "Revision is the git commit, tag, branch to checkout" + }, + "depth": { + "type": "string", + "format": "uint64", + "title": "Depth specifies clones/fetches should be shallow and include the given\nnumber of commits from the branch tip" + }, + "fetch": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Fetch specifies a number of refs that should be fetched before checkout" + }, + "usernameSecret": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecretKeySelector", + "title": "UsernameSecret is the secret selector to the repository username" + }, + "passwordSecret": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecretKeySelector", + "title": "PasswordSecret is the secret selector to the repository password" + }, + "sshPrivateKeySecret": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecretKeySelector", + "title": "SSHPrivateKeySecret is the secret selector to the repository ssh private key" + }, + "insecureIgnoreHostKey": { + "type": "boolean", + "format": "boolean", + "title": "InsecureIgnoreHostKey disables SSH strict host key checking during git clone" + } + }, + "title": "GitArtifact is the location of an git artifact" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.HDFSArtifact": { + "type": "object", + "properties": { + "hDFSConfig": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.HDFSConfig" + }, + "path": { + "type": "string", + "title": "Path is a file path in HDFS" + }, + "force": { + "type": "boolean", + "format": "boolean", + "title": "Force 
copies a file forcibly even if it exists (default: false)" + } + }, + "title": "HDFSArtifact is the location of an HDFS artifact" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.HDFSConfig": { + "type": "object", + "properties": { + "hDFSKrbConfig": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.HDFSKrbConfig" + }, + "addresses": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Addresses is accessible addresses of HDFS name nodes" + }, + "hdfsUser": { + "type": "string", + "description": "HDFSUser is the user to access HDFS file system.\nIt is ignored if either ccache or keytab is used." + } + }, + "title": "HDFSConfig is configurations for HDFS" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.HDFSKrbConfig": { + "type": "object", + "properties": { + "krbCCacheSecret": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecretKeySelector", + "description": "KrbCCacheSecret is the secret selector for Kerberos ccache\nEither ccache or keytab can be set to use Kerberos." + }, + "krbKeytabSecret": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecretKeySelector", + "description": "KrbKeytabSecret is the secret selector for Kerberos keytab\nEither ccache or keytab can be set to use Kerberos." + }, + "krbUsername": { + "type": "string", + "description": "KrbUsername is the Kerberos username used with Kerberos keytab\nIt must be set if keytab is used." + }, + "krbRealm": { + "type": "string", + "description": "KrbRealm is the Kerberos realm used with Kerberos keytab\nIt must be set if keytab is used." + }, + "krbConfigConfigMap": { + "$ref": "#/definitions/k8s.io.api.core.v1.ConfigMapKeySelector", + "description": "KrbConfig is the configmap selector for Kerberos config as string\nIt must be set if either ccache or keytab is used." 
+ }, + "krbServicePrincipalName": { + "type": "string", + "description": "KrbServicePrincipalName is the principal name of Kerberos service\nIt must be set if either ccache or keytab is used." + } + }, + "title": "HDFSKrbConfig is auth configurations for Kerberos" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.HTTPArtifact": { + "type": "object", + "properties": { + "url": { + "type": "string", + "title": "URL of the artifact" + } + }, + "title": "HTTPArtifact allows an file served on HTTP to be placed as an input artifact in a container" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Histogram": { + "type": "object", + "properties": { + "value": { + "type": "string", + "title": "Value is the value of the metric" + }, + "buckets": { + "type": "array", + "items": { + "type": "number", + "format": "double" + }, + "title": "Buckets is a list of bucket divisors for the histogram" + } + }, + "title": "Histogram is a Histogram prometheus metric" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Inputs": { + "type": "object", + "properties": { + "parameters": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Parameter" + }, + "title": "Parameters are a list of parameters passed as inputs\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "artifacts": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Artifact" + }, + "title": "Artifact are a list of artifacts passed as inputs\n+patchStrategy=merge\n+patchMergeKey=name" + } + }, + "title": "Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Item": { + "type": "object", + "properties": { + "type": { + "type": "string", + "format": "int64" + }, + "numVal": { + "type": "string" + }, + "boolVal": { + "type": "boolean", + "format": "boolean" + }, + "strVal": { + 
"type": "string" + }, + "mapVal": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ItemValue" + } + }, + "listVal": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ItemValue" + } + } + }, + "description": "+protobuf=true\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:openapi-gen=true", + "title": "Item expands a single workflow step into multiple parallel steps\nThe value of Item can be a map, string, bool, or number" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ItemValue": { + "type": "object", + "properties": { + "type": { + "type": "string", + "format": "int64" + }, + "numVal": { + "type": "string" + }, + "boolVal": { + "type": "boolean", + "format": "boolean" + }, + "strVal": { + "type": "string" + }, + "mapVal": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "listVal": { + "type": "array", + "items": { + "type": "string", + "format": "byte" + } + } + }, + "title": "+protobuf=true\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:openapi-gen=true" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Metadata": { + "type": "object", + "properties": { + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "title": "Pod metadata" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.MetricLabel": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "title": "MetricLabel is a single label for a prometheus metric" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Metrics": { + "type": "object", + "properties": { + "prometheus": { + "type": "array", + "items": { + "$ref": 
"#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Prometheus" + }, + "title": "Prometheus is a list of prometheus metrics to be emitted" + } + }, + "title": "Metrics are a list of metrics emitted from a Workflow/Template" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.NoneStrategy": { + "type": "object", + "description": "NoneStrategy indicates to skip tar process and upload the files or directory tree as independent\nfiles. Note that if the artifact is a directory, the artifact driver must support the ability to\nsave/load the directory appropriately." + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.OSSArtifact": { + "type": "object", + "properties": { + "oSSBucket": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.OSSBucket" + }, + "key": { + "type": "string", + "title": "Key is the path in the bucket where the artifact resides" + } + }, + "title": "OSSArtifact is the location of an OSS artifact" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.OSSBucket": { + "type": "object", + "properties": { + "endpoint": { + "type": "string", + "title": "Endpoint is the hostname of the bucket endpoint" + }, + "bucket": { + "type": "string", + "title": "Bucket is the name of the bucket" + }, + "accessKeySecret": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecretKeySelector", + "title": "AccessKeySecret is the secret selector to the bucket's access key" + }, + "secretKeySecret": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecretKeySelector", + "title": "SecretKeySecret is the secret selector to the bucket's secret key" + } + }, + "title": "OSSBucket contains the access information required for interfacing with an OSS bucket" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Outputs": { + "type": "object", + "properties": { + "parameters": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Parameter" + }, + "title": 
"Parameters holds the list of output parameters produced by a step\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "artifacts": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Artifact" + }, + "title": "Artifacts holds the list of output artifacts produced by a step\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "result": { + "type": "string", + "title": "Result holds the result (stdout) of a script template" + } + }, + "title": "Outputs hold parameters, artifacts, and results from a step" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ParallelSteps": { + "type": "object", + "properties": { + "steps": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowStep" + } + } + } + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Parameter": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name is the parameter name" + }, + "default": { + "type": "string", + "title": "Default is the default value to use for an input parameter if a value was not supplied\nDEPRECATED: This field is not used" + }, + "value": { + "type": "string", + "title": "Value is the literal value to use for the parameter.\nIf specified in the context of an input parameter, the value takes precedence over any passed values" + }, + "valueFrom": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ValueFrom", + "title": "ValueFrom is the source for the output parameter's value" + }, + "globalName": { + "type": "string", + "title": "GlobalName exports an output parameter to the global scope, making it available as\n'{{workflow.outputs.parameters.XXXX}} and in workflow.status.outputs.parameters" + } + }, + "title": "Parameter indicate a passed string parameter to a service template with an optional default value" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.PodGC": { + "type": 
"object", + "properties": { + "strategy": { + "type": "string" + } + }, + "title": "PodGC describes how to delete completed pods as they complete" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Prometheus": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name is the name of the metric" + }, + "labels": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.MetricLabel" + }, + "title": "Labels is a list of metric labels" + }, + "help": { + "type": "string", + "title": "Help is a string that describes the metric" + }, + "when": { + "type": "string", + "title": "When is a conditional statement that decides when to emit the metric" + }, + "gauge": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Gauge", + "title": "Gauge is a gauge metric" + }, + "histogram": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Histogram", + "title": "Histogram is a histogram metric" + }, + "counter": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Counter", + "title": "Counter is a counter metric" + } + }, + "title": "Prometheus is a prometheus metric to be emitted" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.RawArtifact": { + "type": "object", + "properties": { + "data": { + "type": "string", + "title": "Data is the string contents of the artifact" + } + }, + "title": "RawArtifact allows raw string content to be placed as an artifact in a container" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ResourceTemplate": { + "type": "object", + "properties": { + "action": { + "type": "string", + "title": "Action is the action to perform to the resource.\nMust be one of: get, create, apply, delete, replace, patch" + }, + "mergeStrategy": { + "type": "string", + "title": "MergeStrategy is the strategy used to merge a patch. 
It defaults to \"strategic\"\nMust be one of: strategic, merge, json" + }, + "manifest": { + "type": "string", + "title": "Manifest contains the kubernetes manifest" + }, + "setOwnerReference": { + "type": "boolean", + "format": "boolean", + "description": "SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource." + }, + "successCondition": { + "type": "string", + "title": "SuccessCondition is a label selector expression which describes the conditions\nof the k8s resource in which it is acceptable to proceed to the following step" + }, + "failureCondition": { + "type": "string", + "title": "FailureCondition is a label selector expression which describes the conditions\nof the k8s resource in which the step was considered failed" + } + }, + "title": "ResourceTemplate is a template subtype to manipulate kubernetes resources" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.RetryStrategy": { + "type": "object", + "properties": { + "limit": { + "type": "integer", + "format": "int32", + "title": "Limit is the maximum number of attempts when retrying a container" + }, + "retryPolicy": { + "type": "string", + "title": "RetryPolicy is a policy of NodePhase statuses that will be retried" + }, + "backoff": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Backoff", + "title": "Backoff is a backoff strategy" + } + }, + "title": "RetryStrategy provides controls on how to retry a workflow step" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.S3Artifact": { + "type": "object", + "properties": { + "s3Bucket": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.S3Bucket" + }, + "key": { + "type": "string", + "title": "Key is the key in the bucket where the artifact resides" + } + }, + "title": "S3Artifact is the location of an S3 artifact" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.S3Bucket": { + "type": "object", + "properties": { + "endpoint": 
{ + "type": "string", + "title": "Endpoint is the hostname of the bucket endpoint" + }, + "bucket": { + "type": "string", + "title": "Bucket is the name of the bucket" + }, + "region": { + "type": "string", + "title": "Region contains the optional bucket region" + }, + "insecure": { + "type": "boolean", + "format": "boolean", + "title": "Insecure will connect to the service with TLS" + }, + "accessKeySecret": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecretKeySelector", + "title": "AccessKeySecret is the secret selector to the bucket's access key" + }, + "secretKeySecret": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecretKeySelector", + "title": "SecretKeySecret is the secret selector to the bucket's secret key" + }, + "roleARN": { + "type": "string", + "description": "RoleARN is the Amazon Resource Name (ARN) of the role to assume." + }, + "useSDKCreds": { + "type": "boolean", + "format": "boolean", + "description": "UseSDKCreds tells the driver to figure out credentials based on sdk defaults." + } + }, + "title": "S3Bucket contains the access information required for interfacing with an S3 bucket" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ScriptTemplate": { + "type": "object", + "properties": { + "container": { + "$ref": "#/definitions/k8s.io.api.core.v1.Container" + }, + "source": { + "type": "string", + "title": "Source contains the source code of the script to execute" + } + }, + "title": "ScriptTemplate is a template subtype to enable scripting through code steps" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Sequence": { + "type": "object", + "properties": { + "count": { + "type": "string", + "title": "Count is number of elements in the sequence (default: 0). Not to be used with end" + }, + "start": { + "type": "string", + "title": "Number at which to start the sequence (default: 0)" + }, + "end": { + "type": "string", + "title": "Number at which to end the sequence (default: 0). 
Not to be used with Count" + }, + "format": { + "type": "string", + "title": "Format is a printf format string to format the value in the sequence" + } + }, + "title": "Sequence expands a workflow step into numeric range" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.SuspendTemplate": { + "type": "object", + "properties": { + "duration": { + "type": "string", + "title": "Duration is the seconds to wait before automatically resuming a template" + } + }, + "title": "SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.TTLStrategy": { + "type": "object", + "properties": { + "secondsAfterCompletion": { + "type": "integer", + "format": "int32" + }, + "secondsAfterSuccess": { + "type": "integer", + "format": "int32" + }, + "secondsAfterFailure": { + "type": "integer", + "format": "int32" + } + }, + "title": "TTLStrategy is the strategy for the time to live depending on if the workflow succeeded or failed" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.TarStrategy": { + "type": "object", + "title": "TarStrategy will tar and gzip the file or directory when saving" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Template": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name is the name of the template" + }, + "template": { + "type": "string", + "description": "Template is the name of the template which is used as the base of this template.\nDEPRECATED: This field is not used." + }, + "arguments": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Arguments", + "description": "Arguments hold arguments to the template.\nDEPRECATED: This field is not used." 
+ }, + "templateRef": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.TemplateRef", + "description": "TemplateRef is the reference to the template resource which is used as the base of this template.\nDEPRECATED: This field is not used." + }, + "inputs": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Inputs", + "title": "Inputs describe what inputs parameters and artifacts are supplied to this template" + }, + "outputs": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Outputs", + "title": "Outputs describe the parameters and artifacts that this template produces" + }, + "nodeSelector": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "NodeSelector is a selector to schedule this step of the workflow to be\nrun on the selected node(s). Overrides the selector set at the workflow level." + }, + "affinity": { + "$ref": "#/definitions/k8s.io.api.core.v1.Affinity", + "title": "Affinity sets the pod's scheduling constraints\nOverrides the affinity set at the workflow level (if any)" + }, + "metadata": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Metadata", + "title": "Metadata sets the pod's metadata, i.e. 
annotations and labels" + }, + "daemon": { + "type": "boolean", + "format": "boolean", + "title": "Deamon will allow a workflow to proceed to the next step so long as the container reaches readiness" + }, + "steps": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ParallelSteps" + }, + "title": "Steps define a series of sequential/parallel workflow steps" + }, + "container": { + "$ref": "#/definitions/k8s.io.api.core.v1.Container", + "title": "Container is the main container image to run in the pod" + }, + "script": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ScriptTemplate", + "title": "Script runs a portion of code against an interpreter" + }, + "resource": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ResourceTemplate", + "title": "Resource template subtype which can run k8s resources" + }, + "dag": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.DAGTemplate", + "title": "DAG template subtype which runs a DAG" + }, + "suspend": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.SuspendTemplate", + "title": "Suspend template subtype which can suspend a workflow when reaching the step" + }, + "volumes": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.Volume" + }, + "title": "Volumes is a list of volumes that can be mounted by containers in a template.\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "initContainers": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.UserContainer" + }, + "title": "InitContainers is a list of containers which run before the main container.\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "sidecars": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.UserContainer" + }, + "title": "Sidecars is a 
list of containers which run alongside the main container\nSidecars are automatically killed when the main container completes\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "archiveLocation": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArtifactLocation", + "description": "Location in which all files related to the step will be stored (logs, artifacts, etc...).\nCan be overridden by individual items in Outputs. If omitted, will use the default\nartifact repository location configured in the controller, appended with the\n\u003cworkflowname\u003e/\u003cnodename\u003e in the key." + }, + "activeDeadlineSeconds": { + "type": "string", + "format": "int64", + "description": "Optional duration in seconds relative to the StartTime that the pod may be active on a node\nbefore the system actively tries to terminate the pod; value must be positive integer\nThis field is only applicable to container and script templates." + }, + "retryStrategy": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.RetryStrategy", + "title": "RetryStrategy describes how to retry a template when it fails" + }, + "parallelism": { + "type": "string", + "format": "int64", + "description": "Parallelism limits the max total parallel pods that can execute at the same time within the\nboundaries of this template invocation. If additional steps/dag templates are invoked, the\npods created by those templates will not be counted towards this total." 
+ }, + "tolerations": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.Toleration" + }, + "title": "Tolerations to apply to workflow pods.\n+patchStrategy=merge\n+patchMergeKey=key" + }, + "schedulerName": { + "type": "string", + "title": "If specified, the pod will be dispatched by specified scheduler.\nOr it will be dispatched by workflow scope scheduler if specified.\nIf neither specified, the pod will be dispatched by default scheduler.\n+optional" + }, + "priorityClassName": { + "type": "string", + "description": "PriorityClassName to apply to workflow pods." + }, + "priority": { + "type": "integer", + "format": "int32", + "description": "Priority to apply to workflow pods." + }, + "serviceAccountName": { + "type": "string", + "title": "ServiceAccountName to apply to workflow pods" + }, + "automountServiceAccountToken": { + "type": "boolean", + "format": "boolean", + "description": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods.\nServiceAccountName of ExecutorConfig must be specified if this value is false." + }, + "executor": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ExecutorConfig", + "description": "Executor holds configurations of the executor container." + }, + "hostAliases": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.HostAlias" + }, + "title": "HostAliases is an optional list of hosts and IPs that will be injected into the pod spec\n+patchStrategy=merge\n+patchMergeKey=ip" + }, + "securityContext": { + "$ref": "#/definitions/k8s.io.api.core.v1.PodSecurityContext", + "title": "SecurityContext holds pod-level security attributes and common container settings.\nOptional: Defaults to empty. See type description for default values of each field.\n+optional" + }, + "podSpecPatch": { + "type": "string", + "description": "PodSpecPatch holds strategic merge patch to apply against the pod spec. 
Allows parameterization of\ncontainer fields which are not strings (e.g. resource limits)." + }, + "resubmitPendingPods": { + "type": "boolean", + "format": "boolean", + "title": "ResubmitPendingPods is a flag to enable resubmitting pods that remain Pending after initial submission" + }, + "metrics": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Metrics", + "title": "Metrics are a list of metrics emitted from this template" + } + }, + "title": "Template is a reusable and composable unit of execution in a workflow" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.TemplateRef": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name is the resource name of the template." + }, + "template": { + "type": "string", + "description": "Template is the name of referred template in the resource." + }, + "runtimeResolution": { + "type": "boolean", + "format": "boolean", + "description": "RuntimeResolution skips validation at creation time.\nBy enabling this option, you can create the referred workflow template before the actual runtime." + }, + "clusterscope": { + "type": "boolean", + "format": "boolean", + "description": "ClusterScope indicates the referred template is cluster scoped (i.e., a ClusterWorkflowTemplate)." + } + }, + "description": "TemplateRef is a reference of template resource." + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.UserContainer": { + "type": "object", + "properties": { + "container": { + "$ref": "#/definitions/k8s.io.api.core.v1.Container" + }, + "mirrorVolumeMounts": { + "type": "boolean", + "format": "boolean", + "title": "MirrorVolumeMounts will mount the same volumes specified in the main container\nto the container (including artifacts), at the same mountPaths. 
This enables\ndind daemon to partially see the same filesystem as the main container in\norder to use features such as docker volume binding" + } + }, + "description": "UserContainer is a container specified by a user." + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ValueFrom": { + "type": "object", + "properties": { + "path": { + "type": "string", + "title": "Path in the container to retrieve an output parameter value from in container templates" + }, + "jsonPath": { + "type": "string", + "title": "JSONPath of a resource to retrieve an output parameter value from in resource templates" + }, + "jqFilter": { + "type": "string", + "title": "JQFilter expression against the resource object in resource templates" + }, + "parameter": { + "type": "string", + "title": "Parameter reference to a step or dag task in which to retrieve an output parameter value from\n(e.g. '{{steps.mystep.outputs.myparam}}')" + }, + "default": { + "type": "string", + "title": "Default specifies a value to be used if retrieving the value from the specified source fails" + } + }, + "title": "ValueFrom describes a location in which to obtain the value to a parameter" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowSpec": { + "type": "object", + "properties": { + "templates": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Template" + }, + "title": "Templates is a list of workflow templates used in a workflow\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "entrypoint": { + "type": "string", + "description": "Entrypoint is a template reference to the starting point of the workflow." + }, + "arguments": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Arguments", + "title": "Arguments contain the parameters and artifacts sent to the workflow entrypoint\nParameters are referencable globally using the 'workflow' variable prefix.\ne.g. 
{{workflow.parameters.myparam}}" + }, + "serviceAccountName": { + "type": "string", + "description": "ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as." + }, + "automountServiceAccountToken": { + "type": "boolean", + "format": "boolean", + "description": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods.\nServiceAccountName of ExecutorConfig must be specified if this value is false." + }, + "executor": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ExecutorConfig", + "description": "Executor holds configurations of executor containers of the workflow." + }, + "volumes": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.Volume" + }, + "title": "Volumes is a list of volumes that can be mounted by containers in a workflow.\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "volumeClaimTemplates": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.PersistentVolumeClaim" + }, + "title": "VolumeClaimTemplates is a list of claims that containers are allowed to reference.\nThe Workflow controller will create the claims at the beginning of the workflow\nand delete the claims upon completion of the workflow\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "parallelism": { + "type": "string", + "format": "int64", + "title": "Parallelism limits the max total parallel pods that can execute at the same time in a workflow" + }, + "artifactRepositoryRef": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArtifactRepositoryRef", + "description": "ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config." 
+ }, + "suspend": { + "type": "boolean", + "format": "boolean", + "title": "Suspend will suspend the workflow and prevent execution of any future steps in the workflow" + }, + "nodeSelector": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "NodeSelector is a selector which will result in all pods of the workflow\nto be scheduled on the selected node(s). This is able to be overridden by\na nodeSelector specified in the template." + }, + "affinity": { + "$ref": "#/definitions/k8s.io.api.core.v1.Affinity", + "title": "Affinity sets the scheduling constraints for all pods in the workflow.\nCan be overridden by an affinity specified in the template" + }, + "tolerations": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.Toleration" + }, + "title": "Tolerations to apply to workflow pods.\n+patchStrategy=merge\n+patchMergeKey=key" + }, + "imagePullSecrets": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference" + }, + "title": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images\nin pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets\ncan be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.\nMore info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "hostNetwork": { + "type": "boolean", + "format": "boolean", + "description": "Host networking requested for this workflow pod. Default to false." 
+ }, + "dnsPolicy": { + "type": "string", + "description": "Set DNS policy for the pod.\nDefaults to \"ClusterFirst\".\nValid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.\nDNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.\nTo have DNS options set along with hostNetwork, you have to specify DNS policy\nexplicitly to 'ClusterFirstWithHostNet'." + }, + "dnsConfig": { + "$ref": "#/definitions/k8s.io.api.core.v1.PodDNSConfig", + "description": "PodDNSConfig defines the DNS parameters of a pod in addition to\nthose generated from DNSPolicy." + }, + "onExit": { + "type": "string", + "description": "OnExit is a template reference which is invoked at the end of the\nworkflow, irrespective of the success, failure, or error of the\nprimary workflow." + }, + "ttlSecondsAfterFinished": { + "type": "integer", + "format": "int32", + "description": "TTLSecondsAfterFinished limits the lifetime of a Workflow that has finished execution\n(Succeeded, Failed, Error). If this field is set, once the Workflow finishes, it will be\ndeleted after ttlSecondsAfterFinished expires. If this field is unset,\nttlSecondsAfterFinished will not expire. If this field is set to zero,\nttlSecondsAfterFinished expires immediately after the Workflow finishes.\nDEPRECATED: Use TTLStrategy.SecondsAfterCompletion instead." + }, + "ttlStrategy": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.TTLStrategy", + "description": "TTLStrategy limits the lifetime of a Workflow that has finished execution depending on if it\nSucceeded or Failed. If this struct is set, once the Workflow finishes, it will be\ndeleted after the time to live expires. If this field is unset,\nthe controller config map will hold the default values." 
+ }, + "activeDeadlineSeconds": { + "type": "string", + "format": "int64", + "title": "Optional duration in seconds relative to the workflow start time which the workflow is\nallowed to run before the controller terminates the workflow. A value of zero is used to\nterminate a Running workflow" + }, + "priority": { + "type": "integer", + "format": "int32", + "description": "Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first." + }, + "schedulerName": { + "type": "string", + "title": "Set scheduler name for all pods.\nWill be overridden if container/script template's scheduler name is set.\nDefault scheduler will be used if neither specified.\n+optional" + }, + "podGC": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.PodGC", + "title": "PodGC describes the strategy to use when to deleting completed pods" + }, + "podPriorityClassName": { + "type": "string", + "description": "PriorityClassName to apply to workflow pods." + }, + "podPriority": { + "type": "integer", + "format": "int32", + "description": "Priority to apply to workflow pods." + }, + "hostAliases": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.HostAlias" + }, + "title": "+patchStrategy=merge\n+patchMergeKey=ip" + }, + "securityContext": { + "$ref": "#/definitions/k8s.io.api.core.v1.PodSecurityContext", + "title": "SecurityContext holds pod-level security attributes and common container settings.\nOptional: Defaults to empty. See type description for default values of each field.\n+optional" + }, + "podSpecPatch": { + "type": "string", + "description": "PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of\ncontainer fields which are not strings (e.g. resource limits)." 
+ }, + "podDisruptionBudget": { + "$ref": "#/definitions/k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec", + "title": "PodDisruptionBudget holds the number of concurrent disruptions that you allow for Workflow's Pods.\nController will automatically add the selector with workflow name, if selector is empty.\nOptional: Defaults to empty.\n+optional" + }, + "metrics": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Metrics", + "title": "Metrics are a list of metrics emitted from this Workflow" + }, + "shutdown": { + "type": "string", + "title": "Shutdown will shutdown the workflow according to its ShutdownStrategy" + } + }, + "description": "WorkflowSpec is the specification of a Workflow." + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowStep": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name of the step" + }, + "template": { + "type": "string", + "title": "Template is the name of the template to execute as the step" + }, + "arguments": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Arguments", + "title": "Arguments hold arguments to the template" + }, + "templateRef": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.TemplateRef", + "description": "TemplateRef is the reference to the template resource to execute as the step." + }, + "withItems": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Item" + }, + "title": "WithItems expands a step into multiple parallel steps from the items in the list" + }, + "withParam": { + "type": "string", + "description": "WithParam expands a step into multiple parallel steps from the value in the parameter,\nwhich is expected to be a JSON list." 
+ }, + "withSequence": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Sequence", + "title": "WithSequence expands a step into a numeric sequence" + }, + "when": { + "type": "string", + "title": "When is an expression in which the step should conditionally execute" + }, + "continueOn": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ContinueOn", + "title": "ContinueOn makes argo to proceed with the following step even if this step fails.\nErrors and Failed states can be specified" + }, + "onExit": { + "type": "string", + "description": "OnExit is a template reference which is invoked at the end of the\ntemplate, irrespective of the success, failure, or error of the\nprimary template." + } + }, + "title": "WorkflowStep is a reference to a template to execute in a series of step" + }, + "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowTemplateSpec": { + "type": "object", + "properties": { + "workflowSpec": { + "$ref": "#/definitions/github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowSpec" + } + }, + "description": "WorkflowTemplateSpec is a spec of WorkflowTemplate." + }, + "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource": { + "type": "object", + "properties": { + "volumeID": { + "type": "string", + "title": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + }, + "fsType": { + "type": "string", + "title": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + }, + "partition": { + "type": "integer", + "format": "int32", + "title": "The partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\".\nIf omitted, the default is \"false\".\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\n+optional" + } + }, + "description": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk\nmust also be in the same AWS zone as the kubelet. An AWS EBS disk\ncan only be mounted as read/write once. AWS EBS volumes support\nownership management and SELinux relabeling." + }, + "k8s.io.api.core.v1.Affinity": { + "type": "object", + "properties": { + "nodeAffinity": { + "$ref": "#/definitions/k8s.io.api.core.v1.NodeAffinity", + "title": "Describes node affinity scheduling rules for the pod.\n+optional" + }, + "podAffinity": { + "$ref": "#/definitions/k8s.io.api.core.v1.PodAffinity", + "title": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).\n+optional" + }, + "podAntiAffinity": { + "$ref": "#/definitions/k8s.io.api.core.v1.PodAntiAffinity", + "title": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).\n+optional" + } + }, + "description": "Affinity is a group of affinity scheduling rules." 
+ }, + "k8s.io.api.core.v1.AzureDiskVolumeSource": { + "type": "object", + "properties": { + "diskName": { + "type": "string", + "title": "The Name of the data disk in the blob storage" + }, + "diskURI": { + "type": "string", + "title": "The URI the data disk in the blob storage" + }, + "cachingMode": { + "type": "string", + "title": "Host Caching mode: None, Read Only, Read Write.\n+optional" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + }, + "kind": { + "type": "string", + "title": "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared" + } + }, + "description": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod." + }, + "k8s.io.api.core.v1.AzureFileVolumeSource": { + "type": "object", + "properties": { + "secretName": { + "type": "string", + "title": "the name of secret that contains Azure Storage Account Name and Key" + }, + "shareName": { + "type": "string", + "title": "Share Name" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + } + }, + "description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod." 
+ }, + "k8s.io.api.core.v1.CSIVolumeSource": { + "type": "object", + "properties": { + "driver": { + "type": "string", + "description": "Driver is the name of the CSI driver that handles this volume.\nConsult with your admin for the correct name as registered in the cluster." + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Specifies a read-only configuration for the volume.\nDefaults to false (read/write).\n+optional" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\".\nIf not provided, the empty value is passed to the associated CSI driver\nwhich will determine the default filesystem to apply.\n+optional" + }, + "volumeAttributes": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "VolumeAttributes stores driver-specific properties that are passed to the CSI\ndriver. Consult your driver's documentation for supported values.\n+optional" + }, + "nodePublishSecretRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", + "title": "NodePublishSecretRef is a reference to the secret object containing\nsensitive information to pass to the CSI driver to complete the CSI\nNodePublishVolume and NodeUnpublishVolume calls.\nThis field is optional, and may be empty if no secret is required. If the\nsecret object contains more than one secret, all secret references are passed.\n+optional" + } + }, + "title": "Represents a source location of a volume to mount, managed by an external CSI driver" + }, + "k8s.io.api.core.v1.Capabilities": { + "type": "object", + "properties": { + "add": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Added capabilities\n+optional" + }, + "drop": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Removed capabilities\n+optional" + } + }, + "description": "Adds and removes POSIX capabilities from running containers." 
+ }, + "k8s.io.api.core.v1.CephFSVolumeSource": { + "type": "object", + "properties": { + "monitors": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Required: Monitors is a collection of Ceph monitors\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + }, + "path": { + "type": "string", + "title": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /\n+optional" + }, + "user": { + "type": "string", + "title": "Optional: User is the rados user name, default is admin\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" + }, + "secretFile": { + "type": "string", + "title": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", + "title": "Optional: SecretRef is reference to the authentication secret for User, default is empty.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" + } + }, + "description": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod\nCephfs volumes do not support ownership management or SELinux relabeling." 
+ }, + "k8s.io.api.core.v1.CinderVolumeSource": { + "type": "object", + "properties": { + "volumeID": { + "type": "string", + "title": "volume id used to identify the volume in cinder.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", + "title": "Optional: points to a secret object containing parameters used to connect\nto OpenStack.\n+optional" + } + }, + "description": "Represents a cinder volume resource in Openstack.\nA Cinder volume must exist before mounting to a container.\nThe volume must also be in the same region as the kubelet.\nCinder volumes support ownership management and SELinux relabeling." + }, + "k8s.io.api.core.v1.ConfigMapEnvSource": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", + "description": "The ConfigMap to select from." + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the ConfigMap must be defined\n+optional" + } + }, + "description": "ConfigMapEnvSource selects a ConfigMap to populate the environment\nvariables with.\n\nThe contents of the target ConfigMap's Data field will represent the\nkey-value pairs as environment variables." 
+ }, + "k8s.io.api.core.v1.ConfigMapKeySelector": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", + "description": "The ConfigMap to select from." + }, + "key": { + "type": "string", + "description": "The key to select." + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the ConfigMap or its key must be defined\n+optional" + } + }, + "description": "Selects a key from a ConfigMap." + }, + "k8s.io.api.core.v1.ConfigMapProjection": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.KeyToPath" + }, + "title": "If unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the ConfigMap or its keys must be defined\n+optional" + } + }, + "description": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a\nprojected volume as files using the keys in the Data field as the file names,\nunless the items element is populated with specific mappings of keys to paths.\nNote that this is identical to a configmap volume source without the default\nmode." 
+ }, + "k8s.io.api.core.v1.ConfigMapVolumeSource": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.KeyToPath" + }, + "title": "If unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" + }, + "defaultMode": { + "type": "integer", + "format": "int32", + "title": "Optional: mode bits to use on created files by default. Must be a\nvalue between 0 and 0777. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the ConfigMap or its keys must be defined\n+optional" + } + }, + "description": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a\nvolume as files using the keys in the Data field as the file names, unless\nthe items element is populated with specific mappings of keys to paths.\nConfigMap volumes support ownership management and SELinux relabeling." + }, + "k8s.io.api.core.v1.Container": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the container specified as a DNS_LABEL.\nEach container in a pod must have a unique name (DNS_LABEL).\nCannot be updated." 
+ }, + "image": { + "type": "string", + "title": "Docker image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets.\n+optional" + }, + "command": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Entrypoint array. Not executed within a shell.\nThe docker image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax\ncan be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,\nregardless of whether the variable exists or not.\nCannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional" + }, + "args": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Arguments to the entrypoint.\nThe docker image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax\ncan be escaped with a double $$, ie: $$(VAR_NAME). 
Escaped references will never be expanded,\nregardless of whether the variable exists or not.\nCannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional" + }, + "workingDir": { + "type": "string", + "title": "Container's working directory.\nIf not specified, the container runtime's default will be used, which\nmight be configured in the container image.\nCannot be updated.\n+optional" + }, + "ports": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.ContainerPort" + }, + "title": "List of ports to expose from the container. Exposing a port here gives\nthe system additional information about the network connections a\ncontainer uses, but is primarily informational. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default \"0.0.0.0\" address inside a container will be\naccessible from the network.\nCannot be updated.\n+optional\n+patchMergeKey=containerPort\n+patchStrategy=merge\n+listType=map\n+listMapKey=containerPort\n+listMapKey=protocol" + }, + "envFrom": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.EnvFromSource" + }, + "title": "List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. 
When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated.\n+optional" + }, + "env": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.EnvVar" + }, + "title": "List of environment variables to set in the container.\nCannot be updated.\n+optional\n+patchMergeKey=name\n+patchStrategy=merge" + }, + "resources": { + "$ref": "#/definitions/k8s.io.api.core.v1.ResourceRequirements", + "title": "Compute Resources required by this container.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/\n+optional" + }, + "volumeMounts": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.VolumeMount" + }, + "title": "Pod volumes to mount into the container's filesystem.\nCannot be updated.\n+optional\n+patchMergeKey=mountPath\n+patchStrategy=merge" + }, + "volumeDevices": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.VolumeDevice" + }, + "title": "volumeDevices is the list of block devices to be used by the container.\nThis is a beta feature.\n+patchMergeKey=devicePath\n+patchStrategy=merge\n+optional" + }, + "livenessProbe": { + "$ref": "#/definitions/k8s.io.api.core.v1.Probe", + "title": "Periodic probe of container liveness.\nContainer will be restarted if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional" + }, + "readinessProbe": { + "$ref": "#/definitions/k8s.io.api.core.v1.Probe", + "title": "Periodic probe of container service readiness.\nContainer will be removed from service endpoints if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional" + }, + "startupProbe": { + "$ref": "#/definitions/k8s.io.api.core.v1.Probe", 
+ "title": "StartupProbe indicates that the Pod has successfully initialized.\nIf specified, no other probes are executed until this completes successfully.\nIf this probe fails, the Pod will be restarted, just as if the livenessProbe failed.\nThis can be used to provide different probe parameters at the beginning of a Pod's lifecycle,\nwhen it might take a long time to load data or warm a cache, than during steady-state operation.\nThis cannot be updated.\nThis is an alpha feature enabled by the StartupProbe feature flag.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional" + }, + "lifecycle": { + "$ref": "#/definitions/k8s.io.api.core.v1.Lifecycle", + "title": "Actions that the management system should take in response to container lifecycle events.\nCannot be updated.\n+optional" + }, + "terminationMessagePath": { + "type": "string", + "title": "Optional: Path at which the file to which the container's termination message\nwill be written is mounted into the container's filesystem.\nMessage written is intended to be brief final status, such as an assertion failure message.\nWill be truncated by the node if greater than 4096 bytes. The total message length across\nall containers will be limited to 12kb.\nDefaults to /dev/termination-log.\nCannot be updated.\n+optional" + }, + "terminationMessagePolicy": { + "type": "string", + "title": "Indicate how the termination message should be populated. 
File will use the contents of\nterminationMessagePath to populate the container status message on both success and failure.\nFallbackToLogsOnError will use the last chunk of container log output if the termination\nmessage file is empty and the container exited with an error.\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\nDefaults to File.\nCannot be updated.\n+optional" + }, + "imagePullPolicy": { + "type": "string", + "title": "Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images\n+optional" + }, + "securityContext": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecurityContext", + "title": "Security options the pod should run with.\nMore info: https://kubernetes.io/docs/concepts/policy/security-context/\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/\n+optional" + }, + "stdin": { + "type": "boolean", + "format": "boolean", + "title": "Whether this container should allocate a buffer for stdin in the container runtime. If this\nis not set, reads from stdin in the container will always result in EOF.\nDefault is false.\n+optional" + }, + "stdinOnce": { + "type": "boolean", + "format": "boolean", + "title": "Whether the container runtime should close the stdin channel after it has been opened by\na single attach. When stdin is true the stdin stream will remain open across multiple attach\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\nat which time stdin is closed and remains closed until the container is restarted. 
If this\nflag is false, a container process that reads from stdin will never receive an EOF.\nDefault is false\n+optional" + }, + "tty": { + "type": "boolean", + "format": "boolean", + "title": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\nDefault is false.\n+optional" + } + }, + "description": "A single application container that you want to run within a pod." + }, + "k8s.io.api.core.v1.ContainerPort": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services.\n+optional" + }, + "hostPort": { + "type": "integer", + "format": "int32", + "title": "Number of port to expose on the host.\nIf specified, this must be a valid port number, 0 \u003c x \u003c 65536.\nIf HostNetwork is specified, this must match ContainerPort.\nMost containers do not need this.\n+optional" + }, + "containerPort": { + "type": "integer", + "format": "int32", + "description": "Number of port to expose on the pod's IP address.\nThis must be a valid port number, 0 \u003c x \u003c 65536." + }, + "protocol": { + "type": "string", + "title": "Protocol for port. Must be UDP, TCP, or SCTP.\nDefaults to \"TCP\".\n+optional" + }, + "hostIP": { + "type": "string", + "title": "What host IP to bind the external port to.\n+optional" + } + }, + "description": "ContainerPort represents a network port in a single container." 
+ }, + "k8s.io.api.core.v1.DownwardAPIProjection": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.DownwardAPIVolumeFile" + }, + "title": "Items is a list of DownwardAPIVolume file\n+optional" + } + }, + "description": "Represents downward API info for projecting into a projected volume.\nNote that this is identical to a downwardAPI volume source without the default\nmode." + }, + "k8s.io.api.core.v1.DownwardAPIVolumeFile": { + "type": "object", + "properties": { + "path": { + "type": "string", + "title": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'" + }, + "fieldRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.ObjectFieldSelector", + "title": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.\n+optional" + }, + "resourceFieldRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.ResourceFieldSelector", + "title": "Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.\n+optional" + }, + "mode": { + "type": "integer", + "format": "int32", + "title": "Optional: mode bits to use on this file, must be a value between 0\nand 0777. 
If not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + } + }, + "title": "DownwardAPIVolumeFile represents information to create the file containing the pod field" + }, + "k8s.io.api.core.v1.DownwardAPIVolumeSource": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.DownwardAPIVolumeFile" + }, + "title": "Items is a list of downward API volume file\n+optional" + }, + "defaultMode": { + "type": "integer", + "format": "int32", + "title": "Optional: mode bits to use on created files by default. Must be a\nvalue between 0 and 0777. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + } + }, + "description": "DownwardAPIVolumeSource represents a volume containing downward API info.\nDownward API volumes support ownership management and SELinux relabeling." 
+ }, + "k8s.io.api.core.v1.EmptyDirVolumeSource": { + "type": "object", + "properties": { + "medium": { + "type": "string", + "title": "What type of storage medium should back this directory.\nThe default is \"\" which means to use the node's default medium.\nMust be an empty string (default) or Memory.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\n+optional" + }, + "sizeLimit": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.api.resource.Quantity", + "title": "Total amount of local storage required for this EmptyDir volume.\nThe size limit is also applicable for memory medium.\nThe maximum usage on memory medium EmptyDir would be the minimum value between\nthe SizeLimit specified here and the sum of memory limits of all containers in a pod.\nThe default is nil which means that the limit is undefined.\nMore info: http://kubernetes.io/docs/user-guide/volumes#emptydir\n+optional" + } + }, + "description": "Represents an empty directory for a pod.\nEmpty directory volumes support ownership management and SELinux relabeling." + }, + "k8s.io.api.core.v1.EnvFromSource": { + "type": "object", + "properties": { + "prefix": { + "type": "string", + "title": "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.\n+optional" + }, + "configMapRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.ConfigMapEnvSource", + "title": "The ConfigMap to select from\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecretEnvSource", + "title": "The Secret to select from\n+optional" + } + }, + "title": "EnvFromSource represents the source of a set of ConfigMaps" + }, + "k8s.io.api.core.v1.EnvVar": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the environment variable. Must be a C_IDENTIFIER." 
+ }, + "value": { + "type": "string", + "title": "Variable references $(VAR_NAME) are expanded\nusing the previous defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. The $(VAR_NAME)\nsyntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped\nreferences will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to \"\".\n+optional" + }, + "valueFrom": { + "$ref": "#/definitions/k8s.io.api.core.v1.EnvVarSource", + "title": "Source for the environment variable's value. Cannot be used if value is not empty.\n+optional" + } + }, + "description": "EnvVar represents an environment variable present in a Container." + }, + "k8s.io.api.core.v1.EnvVarSource": { + "type": "object", + "properties": { + "fieldRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.ObjectFieldSelector", + "title": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.\n+optional" + }, + "resourceFieldRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.ResourceFieldSelector", + "title": "Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.\n+optional" + }, + "configMapKeyRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.ConfigMapKeySelector", + "title": "Selects a key of a ConfigMap.\n+optional" + }, + "secretKeyRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecretKeySelector", + "title": "Selects a key of a secret in the pod's namespace\n+optional" + } + }, + "description": "EnvVarSource represents a source for the value of an EnvVar." 
+ }, + "k8s.io.api.core.v1.ExecAction": { + "type": "object", + "properties": { + "command": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\n+optional" + } + }, + "description": "ExecAction describes a \"run in container\" action." + }, + "k8s.io.api.core.v1.FCVolumeSource": { + "type": "object", + "properties": { + "targetWWNs": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Optional: FC target worldwide names (WWNs)\n+optional" + }, + "lun": { + "type": "integer", + "format": "int32", + "title": "Optional: FC target lun number\n+optional" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + }, + "wwids": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Optional: FC volume world wide identifiers (wwids)\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously.\n+optional" + } + }, + "description": "Represents a Fibre Channel volume.\nFibre Channel volumes can only be mounted as read/write once.\nFibre Channel volumes support ownership management and SELinux relabeling." 
+ }, + "k8s.io.api.core.v1.FlexVolumeSource": { + "type": "object", + "properties": { + "driver": { + "type": "string", + "description": "Driver is the name of the driver to use for this volume." + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", + "title": "Optional: SecretRef is reference to the secret object containing\nsensitive information to pass to the plugin scripts. This may be\nempty if no secret object is specified. If the secret object\ncontains more than one secret, all secrets are passed to the plugin\nscripts.\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + }, + "options": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Optional: Extra command options if any.\n+optional" + } + }, + "description": "FlexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based plugin." + }, + "k8s.io.api.core.v1.FlockerVolumeSource": { + "type": "object", + "properties": { + "datasetName": { + "type": "string", + "title": "Name of the dataset stored as metadata -\u003e name on the dataset for Flocker\nshould be considered as deprecated\n+optional" + }, + "datasetUUID": { + "type": "string", + "title": "UUID of the dataset. This is unique identifier of a Flocker dataset\n+optional" + } + }, + "description": "Represents a Flocker volume mounted by the Flocker agent.\nOne and only one of datasetName and datasetUUID should be set.\nFlocker volumes do not support ownership management or SELinux relabeling." 
+ }, + "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource": { + "type": "object", + "properties": { + "pdName": { + "type": "string", + "title": "Unique name of the PD resource in GCE. Used to identify the disk in GCE.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + }, + "fsType": { + "type": "string", + "title": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + }, + "partition": { + "type": "integer", + "format": "int32", + "title": "The partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional" + } + }, + "description": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must\nalso be in the same GCE project and zone as the kubelet. A GCE PD\ncan only be mounted as read/write once or read-only many times. GCE\nPDs support ownership management and SELinux relabeling." 
+ }, + "k8s.io.api.core.v1.GitRepoVolumeSource": { + "type": "object", + "properties": { + "repository": { + "type": "string", + "title": "Repository URL" + }, + "revision": { + "type": "string", + "title": "Commit hash for the specified revision.\n+optional" + }, + "directory": { + "type": "string", + "title": "Target directory name.\nMust not contain or start with '..'. If '.' is supplied, the volume directory will be the\ngit repository. Otherwise, if specified, the volume will contain the git repository in\nthe subdirectory with the given name.\n+optional" + } + }, + "description": "Represents a volume that is populated with the contents of a git repository.\nGit repo volumes do not support ownership management.\nGit repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container." + }, + "k8s.io.api.core.v1.GlusterfsVolumeSource": { + "type": "object", + "properties": { + "endpoints": { + "type": "string", + "title": "EndpointsName is the endpoint name that details Glusterfs topology.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + }, + "path": { + "type": "string", + "title": "Path is the Glusterfs volume path.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod\n+optional" + } + }, + "description": "Represents a Glusterfs mount that lasts the lifetime of a pod.\nGlusterfs volumes do not support ownership management or SELinux relabeling." 
+ }, + "k8s.io.api.core.v1.HTTPGetAction": { + "type": "object", + "properties": { + "path": { + "type": "string", + "title": "Path to access on the HTTP server.\n+optional" + }, + "port": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.util.intstr.IntOrString", + "description": "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + }, + "host": { + "type": "string", + "title": "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead.\n+optional" + }, + "scheme": { + "type": "string", + "title": "Scheme to use for connecting to the host.\nDefaults to HTTP.\n+optional" + }, + "httpHeaders": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.HTTPHeader" + }, + "title": "Custom headers to set in the request. HTTP allows repeated headers.\n+optional" + } + }, + "description": "HTTPGetAction describes an action based on HTTP Get requests." 
+ }, + "k8s.io.api.core.v1.HTTPHeader": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "The header field name" + }, + "value": { + "type": "string", + "title": "The header field value" + } + }, + "title": "HTTPHeader describes a custom header to be used in HTTP probes" + }, + "k8s.io.api.core.v1.Handler": { + "type": "object", + "properties": { + "exec": { + "$ref": "#/definitions/k8s.io.api.core.v1.ExecAction", + "title": "One and only one of the following should be specified.\nExec specifies the action to take.\n+optional" + }, + "httpGet": { + "$ref": "#/definitions/k8s.io.api.core.v1.HTTPGetAction", + "title": "HTTPGet specifies the http request to perform.\n+optional" + }, + "tcpSocket": { + "$ref": "#/definitions/k8s.io.api.core.v1.TCPSocketAction", + "title": "TCPSocket specifies an action involving a TCP port.\nTCP hooks not yet supported\nTODO: implement a realistic TCP lifecycle hook\n+optional" + } + }, + "description": "Handler defines a specific action that should be taken\nTODO: pass structured data to these actions, and document that data here." + }, + "k8s.io.api.core.v1.HostAlias": { + "type": "object", + "properties": { + "ip": { + "type": "string", + "description": "IP address of the host file entry." + }, + "hostnames": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Hostnames for the above IP address." + } + }, + "description": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the\npod's hosts file." 
+ }, + "k8s.io.api.core.v1.HostPathVolumeSource": { + "type": "object", + "properties": { + "path": { + "type": "string", + "title": "Path of the directory on the host.\nIf the path is a symlink, it will follow the link to the real path.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" + }, + "type": { + "type": "string", + "title": "Type for HostPath Volume\nDefaults to \"\"\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n+optional" + } + }, + "description": "Represents a host path mapped into a pod.\nHost path volumes do not support ownership management or SELinux relabeling." + }, + "k8s.io.api.core.v1.ISCSIVolumeSource": { + "type": "object", + "properties": { + "targetPortal": { + "type": "string", + "description": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260)." + }, + "iqn": { + "type": "string", + "description": "Target iSCSI Qualified Name." + }, + "lun": { + "type": "integer", + "format": "int32", + "description": "iSCSI Target Lun number." + }, + "iscsiInterface": { + "type": "string", + "title": "iSCSI Interface Name that uses an iSCSI transport.\nDefaults to 'default' (tcp).\n+optional" + }, + "fsType": { + "type": "string", + "title": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\n+optional" + }, + "portals": { + "type": "array", + "items": { + "type": "string" + }, + "title": "iSCSI Target Portal List. 
The portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260).\n+optional" + }, + "chapAuthDiscovery": { + "type": "boolean", + "format": "boolean", + "title": "whether support iSCSI Discovery CHAP authentication\n+optional" + }, + "chapAuthSession": { + "type": "boolean", + "format": "boolean", + "title": "whether support iSCSI Session CHAP authentication\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", + "title": "CHAP Secret for iSCSI target and initiator authentication\n+optional" + }, + "initiatorName": { + "type": "string", + "title": "Custom iSCSI Initiator Name.\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\n\u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.\n+optional" + } + }, + "description": "Represents an ISCSI disk.\nISCSI volumes can only be mounted as read/write once.\nISCSI volumes support ownership management and SELinux relabeling." + }, + "k8s.io.api.core.v1.KeyToPath": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "The key to project." + }, + "path": { + "type": "string", + "description": "The relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." + }, + "mode": { + "type": "integer", + "format": "int32", + "title": "Optional: mode bits to use on this file, must be a value between 0\nand 0777. If not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + } + }, + "description": "Maps a string key to a path within a volume." 
 + }, + "k8s.io.api.core.v1.Lifecycle": { + "type": "object", + "properties": { + "postStart": { + "$ref": "#/definitions/k8s.io.api.core.v1.Handler", + "title": "PostStart is called immediately after a container is created. If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks\n+optional" + }, + "preStop": { + "$ref": "#/definitions/k8s.io.api.core.v1.Handler", + "title": "PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The reason for termination is passed to the\nhandler. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod. Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks\n+optional" + } + }, + "description": "Lifecycle describes actions that the management system should take in response to container lifecycle\nevents. For the PostStart and PreStop lifecycle handlers, management of the container blocks\nuntil the action is complete, unless the container process fails, in which case the handler is aborted." + }, + "k8s.io.api.core.v1.LocalObjectReference": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. 
apiVersion, kind, uid?\n+optional" + } + }, + "description": "LocalObjectReference contains enough information to let you locate the\nreferenced object inside the same namespace." + }, + "k8s.io.api.core.v1.NFSVolumeSource": { + "type": "object", + "properties": { + "server": { + "type": "string", + "title": "Server is the hostname or IP address of the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + }, + "path": { + "type": "string", + "title": "Path that is exported by the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force\nthe NFS export to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\n+optional" + } + }, + "description": "Represents an NFS mount that lasts the lifetime of a pod.\nNFS volumes do not support ownership management or SELinux relabeling." + }, + "k8s.io.api.core.v1.NodeAffinity": { + "type": "object", + "properties": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "$ref": "#/definitions/k8s.io.api.core.v1.NodeSelector", + "title": "If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to an update), the system\nmay or may not try to eventually evict the pod from its node.\n+optional" + }, + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.PreferredSchedulingTerm" + }, + "title": "The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. 
The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node matches the corresponding matchExpressions; the\nnode(s) with the highest sum are the most preferred.\n+optional" + } + }, + "description": "Node affinity is a group of node affinity scheduling rules." + }, + "k8s.io.api.core.v1.NodeSelector": { + "type": "object", + "properties": { + "nodeSelectorTerms": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.NodeSelectorTerm" + }, + "description": "Required. A list of node selector terms. The terms are ORed." + } + }, + "description": "A node selector represents the union of the results of one or more label queries\nover a set of nodes; that is, it represents the OR of the selectors represented\nby the node selector terms." + }, + "k8s.io.api.core.v1.NodeSelectorRequirement": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "The label key that the selector applies to." + }, + "operator": { + "type": "string", + "description": "Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt." + }, + "values": { + "type": "array", + "items": { + "type": "string" + }, + "title": "An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch.\n+optional" + } + }, + "description": "A node selector requirement is a selector that contains values, a key, and an operator\nthat relates the key and values." 
+ }, + "k8s.io.api.core.v1.NodeSelectorTerm": { + "type": "object", + "properties": { + "matchExpressions": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.NodeSelectorRequirement" + }, + "title": "A list of node selector requirements by node's labels.\n+optional" + }, + "matchFields": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.NodeSelectorRequirement" + }, + "title": "A list of node selector requirements by node's fields.\n+optional" + } + }, + "description": "A null or empty node selector term matches no objects. The requirements of\nthem are ANDed.\nThe TopologySelectorTerm type implements a subset of the NodeSelectorTerm." + }, + "k8s.io.api.core.v1.ObjectFieldSelector": { + "type": "object", + "properties": { + "apiVersion": { + "type": "string", + "title": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".\n+optional" + }, + "fieldPath": { + "type": "string", + "description": "Path of the field to select in the specified API version." + } + }, + "description": "ObjectFieldSelector selects an APIVersioned field of an object." 
+ }, + "k8s.io.api.core.v1.PersistentVolumeClaim": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "title": "Standard object's metadata.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n+optional" + }, + "spec": { + "$ref": "#/definitions/k8s.io.api.core.v1.PersistentVolumeClaimSpec", + "title": "Spec defines the desired characteristics of a volume requested by a pod author.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional" + }, + "status": { + "$ref": "#/definitions/k8s.io.api.core.v1.PersistentVolumeClaimStatus", + "title": "Status represents the current information/status of a persistent volume claim.\nRead-only.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional" + } + }, + "title": "PersistentVolumeClaim is a user's request for and claim to a persistent volume" + }, + "k8s.io.api.core.v1.PersistentVolumeClaimCondition": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "status": { + "type": "string" + }, + "lastProbeTime": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.Time", + "title": "Last time we probed the condition.\n+optional" + }, + "lastTransitionTime": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.Time", + "title": "Last time the condition transitioned from one status to another.\n+optional" + }, + "reason": { + "type": "string", + "title": "Unique, this should be a short, machine understandable string that gives the reason\nfor condition's last transition. 
If it reports \"ResizeStarted\" that means the underlying\npersistent volume is being resized.\n+optional" + }, + "message": { + "type": "string", + "title": "Human-readable message indicating details about last transition.\n+optional" + } + }, + "title": "PersistentVolumeClaimCondition contains details about state of pvc" + }, + "k8s.io.api.core.v1.PersistentVolumeClaimSpec": { + "type": "object", + "properties": { + "accessModes": { + "type": "array", + "items": { + "type": "string" + }, + "title": "AccessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+optional" + }, + "selector": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector", + "title": "A label query over volumes to consider for binding.\n+optional" + }, + "resources": { + "$ref": "#/definitions/k8s.io.api.core.v1.ResourceRequirements", + "title": "Resources represents the minimum resources the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources\n+optional" + }, + "volumeName": { + "type": "string", + "title": "VolumeName is the binding reference to the PersistentVolume backing this claim.\n+optional" + }, + "storageClassName": { + "type": "string", + "title": "Name of the StorageClass required by the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1\n+optional" + }, + "volumeMode": { + "type": "string", + "title": "volumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec.\nThis is a beta feature.\n+optional" + }, + "dataSource": { + "$ref": "#/definitions/k8s.io.api.core.v1.TypedLocalObjectReference", + "title": "This field requires the VolumeSnapshotDataSource alpha feature gate to be\nenabled and currently VolumeSnapshot is the only supported data 
source, it will create\na new volume and data will be restored to the volume at the same time.\nIf the provisioner does not support VolumeSnapshot data source, volume will\nnot be created and the failure will be reported as an event.\nIn the future, we plan to support more data source types and the behavior\nof the provisioner may change.\n+optional" + } + }, + "title": "PersistentVolumeClaimSpec describes the common attributes of storage devices\nand allows a Source for provider-specific attributes" + }, + "k8s.io.api.core.v1.PersistentVolumeClaimStatus": { + "type": "object", + "properties": { + "phase": { + "type": "string", + "title": "Phase represents the current phase of PersistentVolumeClaim.\n+optional" + }, + "accessModes": { + "type": "array", + "items": { + "type": "string" + }, + "title": "AccessModes contains the actual access modes the volume backing the PVC has.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+optional" + }, + "capacity": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.api.resource.Quantity" + }, + "title": "Represents the actual resources of the underlying volume.\n+optional" + }, + "conditions": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.PersistentVolumeClaimCondition" + }, + "title": "Current Condition of persistent volume claim. If underlying persistent volume is being\nresized then the Condition will be set to 'ResizeStarted'.\n+optional\n+patchMergeKey=type\n+patchStrategy=merge" + } + }, + "description": "PersistentVolumeClaimStatus is the current status of a persistent volume claim." 
+ }, + "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource": { + "type": "object", + "properties": { + "claimName": { + "type": "string", + "title": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Will force the ReadOnly setting in VolumeMounts.\nDefault false.\n+optional" + } + }, + "description": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.\nThis volume finds the bound PV and mounts that volume for the pod. A\nPersistentVolumeClaimVolumeSource is, essentially, a wrapper around another\ntype of volume that is owned by someone else (the system)." + }, + "k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource": { + "type": "object", + "properties": { + "pdID": { + "type": "string", + "title": "ID that identifies Photon Controller persistent disk" + }, + "fsType": { + "type": "string", + "description": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." + } + }, + "description": "Represents a Photon Controller persistent disk resource." + }, + "k8s.io.api.core.v1.PodAffinity": { + "type": "object", + "properties": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.PodAffinityTerm" + }, + "title": "If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. 
due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied.\n+optional" + }, + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.WeightedPodAffinityTerm" + }, + "title": "The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred.\n+optional" + } + }, + "description": "Pod affinity is a group of inter pod affinity scheduling rules." 
+ }, + "k8s.io.api.core.v1.PodAffinityTerm": { + "type": "object", + "properties": { + "labelSelector": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector", + "title": "A label query over a set of resources, in this case pods.\n+optional" + }, + "namespaces": { + "type": "array", + "items": { + "type": "string" + }, + "title": "namespaces specifies which namespaces the labelSelector applies to (matches against);\nnull or empty list means \"this pod's namespace\"\n+optional" + }, + "topologyKey": { + "type": "string", + "description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." + } + }, + "title": "Defines a set of pods (namely those matching the labelSelector\nrelative to the given namespace(s)) that this pod should be\nco-located (affinity) or not co-located (anti-affinity) with,\nwhere co-located is defined as running on a node whose value of\nthe label with key \u003ctopologyKey\u003e matches that of any node on which\na pod of the set of pods is running" + }, + "k8s.io.api.core.v1.PodAntiAffinity": { + "type": "object", + "properties": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.PodAffinityTerm" + }, + "title": "If the anti-affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the anti-affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. 
due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied.\n+optional" + }, + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.WeightedPodAffinityTerm" + }, + "title": "The scheduler will prefer to schedule pods to nodes that satisfy\nthe anti-affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred.\n+optional" + } + }, + "description": "Pod anti affinity is a group of inter pod anti affinity scheduling rules." 
+ }, + "k8s.io.api.core.v1.PodDNSConfig": { + "type": "object", + "properties": { + "nameservers": { + "type": "array", + "items": { + "type": "string" + }, + "title": "A list of DNS name server IP addresses.\nThis will be appended to the base nameservers generated from DNSPolicy.\nDuplicated nameservers will be removed.\n+optional" + }, + "searches": { + "type": "array", + "items": { + "type": "string" + }, + "title": "A list of DNS search domains for host-name lookup.\nThis will be appended to the base search paths generated from DNSPolicy.\nDuplicated search paths will be removed.\n+optional" + }, + "options": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.PodDNSConfigOption" + }, + "title": "A list of DNS resolver options.\nThis will be merged with the base options generated from DNSPolicy.\nDuplicated entries will be removed. Resolution options given in Options\nwill override those that appear in the base DNSPolicy.\n+optional" + } + }, + "description": "PodDNSConfig defines the DNS parameters of a pod in addition to\nthose generated from DNSPolicy." + }, + "k8s.io.api.core.v1.PodDNSConfigOption": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Required." + }, + "value": { + "type": "string", + "title": "+optional" + } + }, + "description": "PodDNSConfigOption defines DNS resolver options of a pod." + }, + "k8s.io.api.core.v1.PodSecurityContext": { + "type": "object", + "properties": { + "seLinuxOptions": { + "$ref": "#/definitions/k8s.io.api.core.v1.SELinuxOptions", + "title": "The SELinux context to be applied to all containers.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in SecurityContext. 
If set in\nboth SecurityContext and PodSecurityContext, the value specified in SecurityContext\ntakes precedence for that container.\n+optional" + }, + "windowsOptions": { + "$ref": "#/definitions/k8s.io.api.core.v1.WindowsSecurityContextOptions", + "title": "The Windows specific settings applied to all containers.\nIf unspecified, the options within a container's SecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "runAsUser": { + "type": "string", + "format": "int64", + "title": "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\n+optional" + }, + "runAsGroup": { + "type": "string", + "format": "int64", + "title": "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\n+optional" + }, + "runAsNonRoot": { + "type": "boolean", + "format": "boolean", + "title": "Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "supplementalGroups": { + "type": "array", + "items": { + "type": "string", + "format": "int64" + }, + "title": "A list of groups applied to the first process run in each container, in addition\nto the container's primary GID. 
If unspecified, no groups will be added to\nany container.\n+optional" + }, + "fsGroup": { + "type": "string", + "format": "int64", + "description": "1. The owning GID will be the FSGroup\n2. The setgid bit is set (new files created in the volume will be owned by FSGroup)\n3. The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\n+optional", + "title": "A special supplemental group that applies to all containers in a pod.\nSome volume types allow the Kubelet to change the ownership of that volume\nto be owned by the pod:" + }, + "sysctls": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.Sysctl" + }, + "title": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\nsysctls (by the container runtime) might fail to launch.\n+optional" + } + }, + "description": "PodSecurityContext holds pod-level security attributes and common container settings.\nSome fields are also present in container.securityContext. Field values of\ncontainer.securityContext take precedence over field values of PodSecurityContext." + }, + "k8s.io.api.core.v1.PortworxVolumeSource": { + "type": "object", + "properties": { + "volumeID": { + "type": "string", + "title": "VolumeID uniquely identifies a Portworx volume" + }, + "fsType": { + "type": "string", + "description": "FSType represents the filesystem type to mount\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified." + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + } + }, + "description": "PortworxVolumeSource represents a Portworx volume resource." 
+ }, + "k8s.io.api.core.v1.PreferredSchedulingTerm": { + "type": "object", + "properties": { + "weight": { + "type": "integer", + "format": "int32", + "description": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100." + }, + "preference": { + "$ref": "#/definitions/k8s.io.api.core.v1.NodeSelectorTerm", + "description": "A node selector term, associated with the corresponding weight." + } + }, + "description": "An empty preferred scheduling term matches all objects with implicit weight 0\n(i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op)." + }, + "k8s.io.api.core.v1.Probe": { + "type": "object", + "properties": { + "handler": { + "$ref": "#/definitions/k8s.io.api.core.v1.Handler", + "title": "The action taken to determine the health of a container" + }, + "initialDelaySeconds": { + "type": "integer", + "format": "int32", + "title": "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional" + }, + "timeoutSeconds": { + "type": "integer", + "format": "int32", + "title": "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional" + }, + "periodSeconds": { + "type": "integer", + "format": "int32", + "title": "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1.\n+optional" + }, + "successThreshold": { + "type": "integer", + "format": "int32", + "title": "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. 
Minimum value is 1.\n+optional" + }, + "failureThreshold": { + "type": "integer", + "format": "int32", + "title": "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1.\n+optional" + } + }, + "description": "Probe describes a health check to be performed against a container to determine whether it is\nalive or ready to receive traffic." + }, + "k8s.io.api.core.v1.ProjectedVolumeSource": { + "type": "object", + "properties": { + "sources": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.VolumeProjection" + }, + "title": "list of volume projections" + }, + "defaultMode": { + "type": "integer", + "format": "int32", + "title": "Mode bits to use on created files by default. Must be a value between\n0 and 0777.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + } + }, + "title": "Represents a projected volume source" + }, + "k8s.io.api.core.v1.QuobyteVolumeSource": { + "type": "object", + "properties": { + "registry": { + "type": "string", + "title": "Registry represents a single or multiple Quobyte Registry services\nspecified as a string as host:port pair (multiple entries are separated with commas)\nwhich acts as the central registry for volumes" + }, + "volume": { + "type": "string", + "description": "Volume is a string that references an already created Quobyte volume by name." 
+ }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions.\nDefaults to false.\n+optional" + }, + "user": { + "type": "string", + "title": "User to map volume access to\nDefaults to serviceaccount user\n+optional" + }, + "group": { + "type": "string", + "title": "Group to map volume access to\nDefault is no group\n+optional" + }, + "tenant": { + "type": "string", + "title": "Tenant owning the given Quobyte volume in the Backend\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin\n+optional" + } + }, + "description": "Represents a Quobyte mount that lasts the lifetime of a pod.\nQuobyte volumes do not support ownership management or SELinux relabeling." + }, + "k8s.io.api.core.v1.RBDVolumeSource": { + "type": "object", + "properties": { + "monitors": { + "type": "array", + "items": { + "type": "string" + }, + "title": "A collection of Ceph monitors.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + }, + "image": { + "type": "string", + "title": "The rados image name.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + }, + "fsType": { + "type": "string", + "title": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + }, + "pool": { + "type": "string", + "title": "The rados pool name.\nDefault is rbd.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" + }, + "user": { + "type": "string", + "title": "The rados user name.\nDefault is admin.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" + }, + "keyring": { + "type": "string", + "title": "Keyring is the path to key ring for RBDUser.\nDefault is /etc/ceph/keyring.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", + "title": "SecretRef is name of the authentication secret for RBDUser. If provided\noverrides keyring.\nDefault is nil.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" + } + }, + "description": "Represents a Rados Block Device mount that lasts the lifetime of a pod.\nRBD volumes support ownership management and SELinux relabeling." 
+ }, + "k8s.io.api.core.v1.ResourceFieldSelector": { + "type": "object", + "properties": { + "containerName": { + "type": "string", + "title": "Container name: required for volumes, optional for env vars\n+optional" + }, + "resource": { + "type": "string", + "title": "Required: resource to select" + }, + "divisor": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.api.resource.Quantity", + "title": "Specifies the output format of the exposed resources, defaults to \"1\"\n+optional" + } + }, + "title": "ResourceFieldSelector represents container resources (cpu, memory) and their output format" + }, + "k8s.io.api.core.v1.ResourceRequirements": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.api.resource.Quantity" + }, + "title": "Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/\n+optional" + }, + "requests": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.api.resource.Quantity" + }, + "title": "Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/\n+optional" + } + }, + "description": "ResourceRequirements describes the compute resource requirements." 
+ }, + "k8s.io.api.core.v1.SELinuxOptions": { + "type": "object", + "properties": { + "user": { + "type": "string", + "title": "User is a SELinux user label that applies to the container.\n+optional" + }, + "role": { + "type": "string", + "title": "Role is a SELinux role label that applies to the container.\n+optional" + }, + "type": { + "type": "string", + "title": "Type is a SELinux type label that applies to the container.\n+optional" + }, + "level": { + "type": "string", + "title": "Level is SELinux level label that applies to the container.\n+optional" + } + }, + "title": "SELinuxOptions are the labels to be applied to the container" + }, + "k8s.io.api.core.v1.ScaleIOVolumeSource": { + "type": "object", + "properties": { + "gateway": { + "type": "string", + "description": "The host address of the ScaleIO API Gateway." + }, + "system": { + "type": "string", + "description": "The name of the storage system as configured in ScaleIO." + }, + "secretRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", + "description": "SecretRef references to the secret for ScaleIO user and other\nsensitive information. If this is not provided, Login operation will fail." + }, + "sslEnabled": { + "type": "boolean", + "format": "boolean", + "title": "Flag to enable/disable SSL communication with Gateway, default false\n+optional" + }, + "protectionDomain": { + "type": "string", + "title": "The name of the ScaleIO Protection Domain for the configured storage.\n+optional" + }, + "storagePool": { + "type": "string", + "title": "The ScaleIO Storage Pool associated with the protection domain.\n+optional" + }, + "storageMode": { + "type": "string", + "title": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\nDefault is ThinProvisioned.\n+optional" + }, + "volumeName": { + "type": "string", + "description": "The name of a volume already created in the ScaleIO system\nthat is associated with this volume source." 
+ }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\".\nDefault is \"xfs\".\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + } + }, + "title": "ScaleIOVolumeSource represents a persistent ScaleIO volume" + }, + "k8s.io.api.core.v1.SecretEnvSource": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", + "description": "The Secret to select from." + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the Secret must be defined\n+optional" + } + }, + "description": "SecretEnvSource selects a Secret to populate the environment\nvariables with.\n\nThe contents of the target Secret's Data field will represent the\nkey-value pairs as environment variables." + }, + "k8s.io.api.core.v1.SecretKeySelector": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", + "description": "The name of the secret in the pod's namespace to select from." + }, + "key": { + "type": "string", + "description": "The key of the secret to select from. Must be a valid secret key." + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the Secret or its key must be defined\n+optional" + } + }, + "description": "SecretKeySelector selects a key of a Secret." 
+ }, + "k8s.io.api.core.v1.SecretProjection": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.KeyToPath" + }, + "title": "If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the Secret or its key must be defined\n+optional" + } + }, + "description": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a\nprojected volume as files using the keys in the Data field as the file names.\nNote that this is identical to a secret volume source without the default\nmode." + }, + "k8s.io.api.core.v1.SecretVolumeSource": { + "type": "object", + "properties": { + "secretName": { + "type": "string", + "title": "Name of the secret in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\n+optional" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.KeyToPath" + }, + "title": "If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. 
If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" + }, + "defaultMode": { + "type": "integer", + "format": "int32", + "title": "Optional: mode bits to use on created files by default. Must be a\nvalue between 0 and 0777. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the Secret or its keys must be defined\n+optional" + } + }, + "description": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume\nas files using the keys in the Data field as the file names.\nSecret volumes support ownership management and SELinux relabeling." + }, + "k8s.io.api.core.v1.SecurityContext": { + "type": "object", + "properties": { + "capabilities": { + "$ref": "#/definitions/k8s.io.api.core.v1.Capabilities", + "title": "The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\n+optional" + }, + "privileged": { + "type": "boolean", + "format": "boolean", + "title": "Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\n+optional" + }, + "seLinuxOptions": { + "$ref": "#/definitions/k8s.io.api.core.v1.SELinuxOptions", + "title": "The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. 
If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "windowsOptions": { + "$ref": "#/definitions/k8s.io.api.core.v1.WindowsSecurityContextOptions", + "title": "The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "runAsUser": { + "type": "string", + "format": "int64", + "title": "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "runAsGroup": { + "type": "string", + "format": "int64", + "title": "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "runAsNonRoot": { + "type": "boolean", + "format": "boolean", + "title": "Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. 
If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "readOnlyRootFilesystem": { + "type": "boolean", + "format": "boolean", + "title": "Whether this container has a read-only root filesystem.\nDefault is false.\n+optional" + }, + "allowPrivilegeEscalation": { + "type": "boolean", + "format": "boolean", + "title": "AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\n+optional" + }, + "procMount": { + "type": "string", + "title": "procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\n+optional" + } + }, + "description": "SecurityContext holds security configuration that will be applied to a container.\nSome fields are present in both SecurityContext and PodSecurityContext. When both\nare set, the values in SecurityContext take precedence." + }, + "k8s.io.api.core.v1.ServiceAccountTokenProjection": { + "type": "object", + "properties": { + "audience": { + "type": "string", + "title": "Audience is the intended audience of the token. A recipient of a token\nmust identify itself with an identifier specified in the audience of the\ntoken, and otherwise should reject the token. The audience defaults to the\nidentifier of the apiserver.\n+optional" + }, + "expirationSeconds": { + "type": "string", + "format": "int64", + "title": "ExpirationSeconds is the requested duration of validity of the service\naccount token. As the token approaches expiration, the kubelet volume\nplugin will proactively rotate the service account token. 
The kubelet will\nstart trying to rotate the token if the token is older than 80 percent of\nits time to live or if the token is older than 24 hours.Defaults to 1 hour\nand must be at least 10 minutes.\n+optional" + }, + "path": { + "type": "string", + "description": "Path is the path relative to the mount point of the file to project the\ntoken into." + } + }, + "description": "ServiceAccountTokenProjection represents a projected service account token\nvolume. This projection can be used to insert a service account token into\nthe pods runtime filesystem for use against APIs (Kubernetes API Server or\notherwise)." + }, + "k8s.io.api.core.v1.StorageOSVolumeSource": { + "type": "object", + "properties": { + "volumeName": { + "type": "string", + "description": "VolumeName is the human-readable name of the StorageOS volume. Volume\nnames are only unique within a namespace." + }, + "volumeNamespace": { + "type": "string", + "title": "VolumeNamespace specifies the scope of the volume within StorageOS. If no\nnamespace is specified then the Pod's namespace will be used. This allows the\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\nSet VolumeName to any name to override the default behaviour.\nSet to \"default\" if you are not using namespaces within StorageOS.\nNamespaces that do not pre-exist within StorageOS will be created.\n+optional" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Defaults to false (read/write). 
ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/k8s.io.api.core.v1.LocalObjectReference", + "title": "SecretRef specifies the secret to use for obtaining the StorageOS API\ncredentials. If not specified, default values will be attempted.\n+optional" + } + }, + "description": "Represents a StorageOS persistent volume resource." + }, + "k8s.io.api.core.v1.Sysctl": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name of a property to set" + }, + "value": { + "type": "string", + "title": "Value of a property to set" + } + }, + "title": "Sysctl defines a kernel parameter to be set" + }, + "k8s.io.api.core.v1.TCPSocketAction": { + "type": "object", + "properties": { + "port": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.util.intstr.IntOrString", + "description": "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + }, + "host": { + "type": "string", + "title": "Optional: Host name to connect to, defaults to the pod IP.\n+optional" + } + }, + "title": "TCPSocketAction describes an action based on opening a socket" + }, + "k8s.io.api.core.v1.Toleration": { + "type": "object", + "properties": { + "key": { + "type": "string", + "title": "Key is the taint key that the toleration applies to. Empty means match all taint keys.\nIf the key is empty, operator must be Exists; this combination means to match all values and all keys.\n+optional" + }, + "operator": { + "type": "string", + "title": "Operator represents a key's relationship to the value.\nValid operators are Exists and Equal. 
Defaults to Equal.\nExists is equivalent to wildcard for value, so that a pod can\ntolerate all taints of a particular category.\n+optional" + }, + "value": { + "type": "string", + "title": "Value is the taint value the toleration matches to.\nIf the operator is Exists, the value should be empty, otherwise just a regular string.\n+optional" + }, + "effect": { + "type": "string", + "title": "Effect indicates the taint effect to match. Empty means match all taint effects.\nWhen specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.\n+optional" + }, + "tolerationSeconds": { + "type": "string", + "format": "int64", + "title": "TolerationSeconds represents the period of time the toleration (which must be\nof effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,\nit is not set, which means tolerate the taint forever (do not evict). Zero and\nnegative values will be treated as 0 (evict immediately) by the system.\n+optional" + } + }, + "description": "The pod this Toleration is attached to tolerates any taint that matches\nthe triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e." + }, + "k8s.io.api.core.v1.TypedLocalObjectReference": { + "type": "object", + "properties": { + "apiGroup": { + "type": "string", + "title": "APIGroup is the group for the resource being referenced.\nIf APIGroup is not specified, the specified Kind must be in the core API group.\nFor any other third-party types, APIGroup is required.\n+optional" + }, + "kind": { + "type": "string", + "title": "Kind is the type of resource being referenced" + }, + "name": { + "type": "string", + "title": "Name is the name of resource being referenced" + } + }, + "description": "TypedLocalObjectReference contains enough information to let you locate the\ntyped referenced object inside the same namespace." 
+ }, + "k8s.io.api.core.v1.Volume": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Volume's name.\nMust be a DNS_LABEL and unique within the pod.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + }, + "volumeSource": { + "$ref": "#/definitions/k8s.io.api.core.v1.VolumeSource", + "description": "VolumeSource represents the location and type of the mounted volume.\nIf not specified, the Volume is implied to be an EmptyDir.\nThis implied behavior is deprecated and will be removed in a future version." + } + }, + "description": "Volume represents a named volume in a pod that may be accessed by any container in the pod." + }, + "k8s.io.api.core.v1.VolumeDevice": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name must match the name of a persistentVolumeClaim in the pod" + }, + "devicePath": { + "type": "string", + "description": "devicePath is the path inside of the container that the device will be mapped to." + } + }, + "description": "volumeDevice describes a mapping of a raw block device within a container." + }, + "k8s.io.api.core.v1.VolumeMount": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "This must match the Name of a Volume." + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false.\n+optional" + }, + "mountPath": { + "type": "string", + "description": "Path within the container at which the volume should be mounted. Must\nnot contain ':'." 
+ }, + "subPath": { + "type": "string", + "title": "Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root).\n+optional" + }, + "mountPropagation": { + "type": "string", + "title": "mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10.\n+optional" + }, + "subPathExpr": { + "type": "string", + "title": "Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive.\nThis field is beta in 1.15.\n+optional" + } + }, + "description": "VolumeMount describes a mounting of a Volume within a container." + }, + "k8s.io.api.core.v1.VolumeProjection": { + "type": "object", + "properties": { + "secret": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecretProjection", + "title": "information about the secret data to project\n+optional" + }, + "downwardAPI": { + "$ref": "#/definitions/k8s.io.api.core.v1.DownwardAPIProjection", + "title": "information about the downwardAPI data to project\n+optional" + }, + "configMap": { + "$ref": "#/definitions/k8s.io.api.core.v1.ConfigMapProjection", + "title": "information about the configMap data to project\n+optional" + }, + "serviceAccountToken": { + "$ref": "#/definitions/k8s.io.api.core.v1.ServiceAccountTokenProjection", + "title": "information about the serviceAccountToken data to project\n+optional" + } + }, + "title": "Projection that may be projected along with other supported volume types" + }, + "k8s.io.api.core.v1.VolumeSource": { + "type": "object", + "properties": { + "hostPath": { + "$ref": "#/definitions/k8s.io.api.core.v1.HostPathVolumeSource", + "title": "HostPath represents a pre-existing file or directory on the 
host\nmachine that is directly exposed to the container. This is generally\nused for system agents or other privileged things that are allowed\nto see the host machine. Most containers will NOT need this.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n---\nTODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not\nmount host directories as read/write.\n+optional" + }, + "emptyDir": { + "$ref": "#/definitions/k8s.io.api.core.v1.EmptyDirVolumeSource", + "title": "EmptyDir represents a temporary directory that shares a pod's lifetime.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\n+optional" + }, + "gcePersistentDisk": { + "$ref": "#/definitions/k8s.io.api.core.v1.GCEPersistentDiskVolumeSource", + "title": "GCEPersistentDisk represents a GCE Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional" + }, + "awsElasticBlockStore": { + "$ref": "#/definitions/k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource", + "title": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\n+optional" + }, + "gitRepo": { + "$ref": "#/definitions/k8s.io.api.core.v1.GitRepoVolumeSource", + "title": "GitRepo represents a git repository at a particular revision.\nDEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container.\n+optional" + }, + "secret": { + "$ref": "#/definitions/k8s.io.api.core.v1.SecretVolumeSource", + "title": "Secret represents a secret that should populate this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\n+optional" + }, + "nfs": { + "$ref": "#/definitions/k8s.io.api.core.v1.NFSVolumeSource", + "title": "NFS represents an NFS mount on the host that shares a pod's lifetime\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\n+optional" + }, + "iscsi": { + "$ref": "#/definitions/k8s.io.api.core.v1.ISCSIVolumeSource", + "title": "ISCSI represents an ISCSI Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://examples.k8s.io/volumes/iscsi/README.md\n+optional" + }, + "glusterfs": { + "$ref": "#/definitions/k8s.io.api.core.v1.GlusterfsVolumeSource", + "title": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md\n+optional" + }, + "persistentVolumeClaim": { + "$ref": "#/definitions/k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource", + "title": "PersistentVolumeClaimVolumeSource represents a reference to a\nPersistentVolumeClaim in the same namespace.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional" + }, + "rbd": { + "$ref": "#/definitions/k8s.io.api.core.v1.RBDVolumeSource", + "title": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/rbd/README.md\n+optional" + }, + "flexVolume": { + "$ref": "#/definitions/k8s.io.api.core.v1.FlexVolumeSource", + "title": "FlexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based 
plugin.\n+optional" + }, + "cinder": { + "$ref": "#/definitions/k8s.io.api.core.v1.CinderVolumeSource", + "title": "Cinder represents a cinder volume attached and mounted on kubelets host machine.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional" + }, + "cephfs": { + "$ref": "#/definitions/k8s.io.api.core.v1.CephFSVolumeSource", + "title": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime\n+optional" + }, + "flocker": { + "$ref": "#/definitions/k8s.io.api.core.v1.FlockerVolumeSource", + "title": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running\n+optional" + }, + "downwardAPI": { + "$ref": "#/definitions/k8s.io.api.core.v1.DownwardAPIVolumeSource", + "title": "DownwardAPI represents downward API about the pod that should populate this volume\n+optional" + }, + "fc": { + "$ref": "#/definitions/k8s.io.api.core.v1.FCVolumeSource", + "title": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.\n+optional" + }, + "azureFile": { + "$ref": "#/definitions/k8s.io.api.core.v1.AzureFileVolumeSource", + "title": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.\n+optional" + }, + "configMap": { + "$ref": "#/definitions/k8s.io.api.core.v1.ConfigMapVolumeSource", + "title": "ConfigMap represents a configMap that should populate this volume\n+optional" + }, + "vsphereVolume": { + "$ref": "#/definitions/k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource", + "title": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine\n+optional" + }, + "quobyte": { + "$ref": "#/definitions/k8s.io.api.core.v1.QuobyteVolumeSource", + "title": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime\n+optional" + }, + "azureDisk": { + "$ref": "#/definitions/k8s.io.api.core.v1.AzureDiskVolumeSource", + "title": 
"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.\n+optional" + }, + "photonPersistentDisk": { + "$ref": "#/definitions/k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource", + "title": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine" + }, + "projected": { + "$ref": "#/definitions/k8s.io.api.core.v1.ProjectedVolumeSource", + "title": "Items for all in one resources secrets, configmaps, and downward API" + }, + "portworxVolume": { + "$ref": "#/definitions/k8s.io.api.core.v1.PortworxVolumeSource", + "title": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine\n+optional" + }, + "scaleIO": { + "$ref": "#/definitions/k8s.io.api.core.v1.ScaleIOVolumeSource", + "title": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.\n+optional" + }, + "storageos": { + "$ref": "#/definitions/k8s.io.api.core.v1.StorageOSVolumeSource", + "title": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.\n+optional" + }, + "csi": { + "$ref": "#/definitions/k8s.io.api.core.v1.CSIVolumeSource", + "title": "CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).\n+optional" + } + }, + "description": "Represents the source of a volume to mount.\nOnly one of its members may be specified." + }, + "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource": { + "type": "object", + "properties": { + "volumePath": { + "type": "string", + "title": "Path that identifies vSphere volume vmdk" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.\n+optional" + }, + "storagePolicyName": { + "type": "string", + "title": "Storage Policy Based Management (SPBM) profile name.\n+optional" + }, + "storagePolicyID": { + "type": "string", + "title": "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.\n+optional" + } + }, + "description": "Represents a vSphere volume resource." + }, + "k8s.io.api.core.v1.WeightedPodAffinityTerm": { + "type": "object", + "properties": { + "weight": { + "type": "integer", + "format": "int32", + "description": "weight associated with matching the corresponding podAffinityTerm,\nin the range 1-100." + }, + "podAffinityTerm": { + "$ref": "#/definitions/k8s.io.api.core.v1.PodAffinityTerm", + "description": "Required. A pod affinity term, associated with the corresponding weight." + } + }, + "title": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)" + }, + "k8s.io.api.core.v1.WindowsSecurityContextOptions": { + "type": "object", + "properties": { + "gmsaCredentialSpecName": { + "type": "string", + "title": "GMSACredentialSpecName is the name of the GMSA credential spec to use.\nThis field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.\n+optional" + }, + "gmsaCredentialSpec": { + "type": "string", + "title": "GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field.\nThis field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.\n+optional" + }, + "runAsUserName": { + "type": "string", + "title": "The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. 
If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nThis field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.\n+optional" + } + }, + "description": "WindowsSecurityContextOptions contain Windows-specific options and credentials." + }, + "k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec": { + "type": "object", + "properties": { + "minAvailable": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.util.intstr.IntOrString", + "title": "An eviction is allowed if at least \"minAvailable\" pods selected by\n\"selector\" will still be available after the eviction, i.e. even in the\nabsence of the evicted pod. So for example you can prevent all voluntary\nevictions by specifying \"100%\".\n+optional" + }, + "selector": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector", + "title": "Label query over pods whose evictions are managed by the disruption\nbudget.\n+optional" + }, + "maxUnavailable": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.util.intstr.IntOrString", + "title": "An eviction is allowed if at most \"maxUnavailable\" pods selected by\n\"selector\" are unavailable after the eviction, i.e. even in absence of\nthe evicted pod. For example, one can prevent all voluntary evictions\nby specifying 0. This is a mutually exclusive setting with \"minAvailable\".\n+optional" + } + }, + "description": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget." 
+ }, + "k8s.io.apimachinery.pkg.api.resource.Quantity": { + "type": "object", + "properties": { + "string": { + "type": "string" + } + }, + "description": "Quantity is a fixed-point representation of a number.\nIt provides convenient marshaling/unmarshaling in JSON and YAML,\nin addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9\n\u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e\n\u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e\n\u003csign\u003e ::= \"+\" | \"-\"\n\u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e\n\u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e\n\u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent\na number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal\nplaces. 
Numbers larger or more precise will be capped or rounded up.\n(E.g.: 0.1m will rounded up to 1m.)\nThis may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix\nit had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\".\nThis means that Exponent/suffix will be adjusted up or down (with a\ncorresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a\nfloating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed,\nbut will be re-emitted in their canonical form. (So always use canonical\nform, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without\nwriting some sort of special handling code in the hopes that that will\ncause implementors to also use a fixed point implementation.\n\n+protobuf=true\n+protobuf.embed=string\n+protobuf.options.marshal=false\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:deepcopy-gen=true\n+k8s:openapi-gen=true" + }, + "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions": { + "type": "object", + "properties": { + "dryRun": { + "type": "array", + "items": { + "type": "string" + }, + "title": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. 
Valid values are:\n- All: all dry run stages will be processed\n+optional" + }, + "fieldManager": { + "type": "string", + "title": "fieldManager is a name associated with the actor or entity\nthat is making these changes. The value must be less than or\n128 characters long, and only contain printable characters,\nas defined by https://golang.org/pkg/unicode/#IsPrint.\n+optional" + } + }, + "description": "CreateOptions may be provided when creating an API object." + }, + "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions": { + "type": "object", + "properties": { + "gracePeriodSeconds": { + "type": "string", + "format": "int64", + "title": "The duration in seconds before the object should be deleted. Value must be non-negative integer.\nThe value zero indicates delete immediately. If this value is nil, the default grace period for the\nspecified type will be used.\nDefaults to a per object value if not specified. zero means delete immediately.\n+optional" + }, + "preconditions": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions", + "title": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be\nreturned.\n+optional" + }, + "orphanDependents": { + "type": "boolean", + "format": "boolean", + "title": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.\nShould the dependent objects be orphaned. 
If true/false, the \"orphan\"\nfinalizer will be added to/removed from the object's finalizers list.\nEither this field or PropagationPolicy may be set, but not both.\n+optional" + }, + "propagationPolicy": { + "type": "string", + "title": "Whether and how garbage collection will be performed.\nEither this field or OrphanDependents may be set, but not both.\nThe default policy is decided by the existing finalizer set in the\nmetadata.finalizers and the resource-specific default policy.\nAcceptable values are: 'Orphan' - orphan the dependents; 'Background' -\nallow the garbage collector to delete the dependents in the background;\n'Foreground' - a cascading policy that deletes all dependents in the\nforeground.\n+optional" + }, + "dryRun": { + "type": "array", + "items": { + "type": "string" + }, + "title": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional" + } + }, + "description": "DeleteOptions may be provided when deleting an API object." + }, + "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1": { + "type": "object", + "properties": { + "Raw": { + "type": "string", + "format": "byte", + "description": "Raw is the underlying serialization of this object." + } + }, + "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set,\nor a string representing a sub-field or item. 
The string will follow one of these four formats:\n'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map\n'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item\n'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list\n'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values\nIf a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff" + }, + "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions": { + "type": "object", + "properties": { + "resourceVersion": { + "type": "string", + "description": "When specified:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv." + } + }, + "description": "GetOptions is the standard query options to the standard REST get call." + }, + "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector": { + "type": "object", + "properties": { + "matchLabels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed.\n+optional" + }, + "matchExpressions": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement" + }, + "title": "matchExpressions is a list of label selector requirements. The requirements are ANDed.\n+optional" + } + }, + "description": "A label selector is a label query over a set of resources. 
The result of matchLabels and\nmatchExpressions are ANDed. An empty label selector matches all objects. A null\nlabel selector matches no objects." + }, + "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement": { + "type": "object", + "properties": { + "key": { + "type": "string", + "title": "key is the label key that the selector applies to.\n+patchMergeKey=key\n+patchStrategy=merge" + }, + "operator": { + "type": "string", + "description": "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + }, + "values": { + "type": "array", + "items": { + "type": "string" + }, + "title": "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch.\n+optional" + } + }, + "description": "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." 
+ }, + "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta": { + "type": "object", + "properties": { + "selfLink": { + "type": "string", + "description": "selfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n\nDEPRECATED\nKubernetes will stop propagating this field in 1.20 release and the field is planned\nto be removed in 1.21 release.\n+optional" + }, + "resourceVersion": { + "type": "string", + "title": "String that identifies the server's internal version of this object that\ncan be used by clients to determine when objects have changed.\nValue must be treated as opaque by clients and passed unmodified back to the server.\nPopulated by the system.\nRead-only.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional" + }, + "continue": { + "type": "string", + "description": "continue may be set if the user set a limit on the number of items returned, and indicates that\nthe server has more data available. The value is opaque and may be used to issue another request\nto the endpoint that served this list to retrieve the next set of available objects. Continuing a\nconsistent list may not be possible if the server configuration has changed or more than a few\nminutes have passed. The resourceVersion field returned when using this continue value will be\nidentical to the value in the first response, unless you have received this token from an error\nmessage." + }, + "remainingItemCount": { + "type": "string", + "format": "int64", + "title": "remainingItemCount is the number of subsequent items in the list which are not included in this\nlist response. 
If the list request contained label or field selectors, then the number of\nremaining items is unknown and the field will be left unset and omitted during serialization.\nIf the list is complete (either because it is not chunking or because this is the last chunk),\nthen there are no more remaining items and this field will be left unset and omitted during\nserialization.\nServers older than v1.15 do not set this field.\nThe intended use of the remainingItemCount is *estimating* the size of a collection. Clients\nshould not rely on the remainingItemCount to be set or to be exact.\n+optional" + } + }, + "description": "ListMeta describes metadata that synthetic resources must have, including lists and\nvarious status objects. A resource may have only one of {ObjectMeta, ListMeta}." + }, + "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions": { + "type": "object", + "properties": { + "labelSelector": { + "type": "string", + "title": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional" + }, + "fieldSelector": { + "type": "string", + "title": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional" + }, + "watch": { + "type": "boolean", + "format": "boolean", + "title": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional" + }, + "allowWatchBookmarks": { + "type": "boolean", + "format": "boolean", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. 
Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\nIf the feature gate WatchBookmarks is not enabled in apiserver,\nthis field is ignored.\n\nThis field is beta.\n\n+optional" + }, + "resourceVersion": { + "type": "string", + "title": "When specified with a watch call, shows changes that occur after that particular version of a resource.\nDefaults to changes from the beginning of history.\nWhen specified for list:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.\n+optional" + }, + "timeoutSeconds": { + "type": "string", + "format": "int64", + "title": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional" + }, + "limit": { + "type": "string", + "format": "int64", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. 
This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned." + }, + "continue": { + "type": "string", + "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications." 
+ } + }, + "description": "ListOptions is the query options to a standard REST list call." + }, + "k8s.io.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry": { + "type": "object", + "properties": { + "manager": { + "type": "string", + "description": "Manager is an identifier of the workflow managing these fields." + }, + "operation": { + "type": "string", + "description": "Operation is the type of operation which lead to this ManagedFieldsEntry being created.\nThe only valid values for this field are 'Apply' and 'Update'." + }, + "apiVersion": { + "type": "string", + "description": "APIVersion defines the version of this resource that this field set\napplies to. The format is \"group/version\" just like the top-level\nAPIVersion field. It is necessary to track the version of a field\nset because it cannot be automatically converted." + }, + "time": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.Time", + "title": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'\n+optional" + }, + "fieldsType": { + "type": "string", + "title": "FieldsType is the discriminator for the different fields format and version.\nThere is currently only one possible value: \"FieldsV1\"" + }, + "fieldsV1": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1", + "title": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.\n+optional" + } + }, + "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource\nthat the fieldset applies to." + }, + "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. 
Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names\n+optional" + }, + "generateName": { + "type": "string", + "description": "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\nIf this field is specified and the generated name exists, the server will\nNOT return a 409 - instead, it will either return 201 Created or 500 with Reason\nServerTimeout indicating a unique name could not be found in the time allotted, and the client\nshould retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n+optional" + }, + "namespace": { + "type": "string", + "description": "Namespace defines the space within each name must be unique. 
An empty namespace is\nequivalent to the \"default\" namespace, but \"default\" is the canonical representation.\nNot all objects are required to be scoped to a namespace - the value of this field for\nthose objects will be empty.\n\nMust be a DNS_LABEL.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/namespaces\n+optional" + }, + "selfLink": { + "type": "string", + "description": "SelfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n\nDEPRECATED\nKubernetes will stop propagating this field in 1.20 release and the field is planned\nto be removed in 1.21 release.\n+optional" + }, + "uid": { + "type": "string", + "description": "UID is the unique in time and space value for this object. It is typically generated by\nthe server on successful creation of a resource and is not allowed to change on PUT\noperations.\n\nPopulated by the system.\nRead-only.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#uids\n+optional" + }, + "resourceVersion": { + "type": "string", + "description": "An opaque value that represents the internal version of this object that can\nbe used by clients to determine when objects have changed. May be used for optimistic\nconcurrency, change detection, and the watch operation on a resource or set of resources.\nClients must treat these values as opaque and passed unmodified back to the server.\nThey may only be valid for a particular resource or set of resources.\n\nPopulated by the system.\nRead-only.\nValue must be treated as opaque by clients and .\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional" + }, + "generation": { + "type": "string", + "format": "int64", + "title": "A sequence number representing a specific generation of the desired state.\nPopulated by the system. 
Read-only.\n+optional" + }, + "creationTimestamp": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.Time", + "description": "CreationTimestamp is a timestamp representing the server time when this object was\ncreated. It is not guaranteed to be set in happens-before order across separate operations.\nClients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system.\nRead-only.\nNull for lists.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n+optional" + }, + "deletionTimestamp": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.Time", + "description": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This\nfield is set by the server when a graceful deletion is requested by the user, and is not\ndirectly settable by a client. The resource is expected to be deleted (no longer visible\nfrom resource lists, and not reachable by name) after the time in this field, once the\nfinalizers list is empty. As long as the finalizers list contains items, deletion is blocked.\nOnce the deletionTimestamp is set, this value may not be unset or be set further into the\nfuture, although it may be shortened or the resource may be deleted prior to this time.\nFor example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react\nby sending a graceful termination signal to the containers in the pod. After that 30 seconds,\nthe Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup,\nremove the pod from the API. 
In the presence of network partitions, this object may still\nexist after this timestamp, until an administrator or automated process can determine the\nresource is fully terminated.\nIf not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested.\nRead-only.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n+optional" + }, + "deletionGracePeriodSeconds": { + "type": "string", + "format": "int64", + "title": "Number of seconds allowed for this object to gracefully terminate before\nit will be removed from the system. Only set when deletionTimestamp is also set.\nMay only be shortened.\nRead-only.\n+optional" + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: http://kubernetes.io/docs/user-guide/labels\n+optional" + }, + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. They are not\nqueryable and should be preserved when modifying objects.\nMore info: http://kubernetes.io/docs/user-guide/annotations\n+optional" + }, + "ownerReferences": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference" + }, + "title": "List of objects depended by this object. If ALL objects in the list have\nbeen deleted, this object will be garbage collected. 
If this object is managed by a controller,\nthen an entry in this list will point to this controller, with the controller field set to true.\nThere cannot be more than one managing controller.\n+optional\n+patchMergeKey=uid\n+patchStrategy=merge" + }, + "finalizers": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Must be empty before the object is deleted from the registry. Each entry\nis an identifier for the responsible component that will remove the entry\nfrom the list. If the deletionTimestamp of the object is non-nil, entries\nin this list can only be removed.\n+optional\n+patchStrategy=merge" + }, + "clusterName": { + "type": "string", + "title": "The name of the cluster which the object belongs to.\nThis is used to distinguish resources with same name and namespace in different clusters.\nThis field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.\n+optional" + }, + "managedFields": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry" + }, + "description": "ManagedFields maps workflow-id and version to the set of fields\nthat are managed by that workflow. This is mostly for internal\nhousekeeping, and users typically shouldn't need to set or\nunderstand this field. A workflow can be the user's name, a\ncontroller's name, or the name of a specific apply path like\n\"ci-cd\". The set of fields is always in the version that the\nworkflow used when modifying the object.\n\n+optional" + } + }, + "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects\nusers must create." + }, + "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference": { + "type": "object", + "properties": { + "apiVersion": { + "type": "string", + "description": "API version of the referent." 
+ }, + "kind": { + "type": "string", + "title": "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + }, + "name": { + "type": "string", + "title": "Name of the referent.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names" + }, + "uid": { + "type": "string", + "title": "UID of the referent.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#uids" + }, + "controller": { + "type": "boolean", + "format": "boolean", + "title": "If true, this reference points to the managing controller.\n+optional" + }, + "blockOwnerDeletion": { + "type": "boolean", + "format": "boolean", + "title": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then\nthe owner cannot be deleted from the key-value store until this\nreference is removed.\nDefaults to false.\nTo set this field, a user needs \"delete\" permission of the owner,\notherwise 422 (Unprocessable Entity) will be returned.\n+optional" + } + }, + "description": "OwnerReference contains enough information to let you identify an owning\nobject. An owning object must be in the same namespace as the dependent, or\nbe cluster-scoped, so there is no namespace field." + }, + "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions": { + "type": "object", + "properties": { + "uid": { + "type": "string", + "title": "Specifies the target UID.\n+optional" + }, + "resourceVersion": { + "type": "string", + "title": "Specifies the target ResourceVersion\n+optional" + } + }, + "description": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out." + }, + "k8s.io.apimachinery.pkg.apis.meta.v1.Time": { + "type": "object", + "properties": { + "seconds": { + "type": "string", + "format": "int64", + "description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive." 
+ }, + "nanos": { + "type": "integer", + "format": "int32", + "description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. This field may be limited in precision depending on context." + } + }, + "description": "Time is a wrapper around time.Time which supports correct\nmarshaling to YAML and JSON. Wrappers are provided for many\nof the factory methods that the time package offers.\n\n+protobuf.options.marshal=false\n+protobuf.as=Timestamp\n+protobuf.options.(gogoproto.goproto_stringer)=false" + }, + "k8s.io.apimachinery.pkg.util.intstr.IntOrString": { + "type": "object", + "properties": { + "type": { + "type": "string", + "format": "int64" + }, + "intVal": { + "type": "integer", + "format": "int32" + }, + "strVal": { + "type": "string" + } + }, + "description": "+protobuf=true\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:openapi-gen=true", + "title": "IntOrString is a type that can hold an int32 or a string. When used in\nJSON or YAML marshalling and unmarshalling, it produces or consumes the\ninner type. This allows you to have, for example, a JSON field that can\naccept a name or number.\nTODO: Rename to Int32OrString" + } + } +} diff --git a/pkg/apiclient/cronworkflow/cron-workflow.swagger.json b/pkg/apiclient/cronworkflow/cron-workflow.swagger.json index d6a980844bcd..afa0ebd3637a 100644 --- a/pkg/apiclient/cronworkflow/cron-workflow.swagger.json +++ b/pkg/apiclient/cronworkflow/cron-workflow.swagger.json @@ -1547,6 +1547,11 @@ "type": "boolean", "format": "boolean", "description": "RuntimeResolution skips validation at creation time.\nBy enabling this option, you can create the referred workflow template before the actual runtime." 
+ }, + "clusterscope": { + "type": "boolean", + "format": "boolean", + "description": "ClusterScope indicates the referred template is cluster scoped (i.e., a ClusterWorkflowTemplate)." } }, "description": "TemplateRef is a reference of template resource." diff --git a/pkg/apiclient/workflow/workflow.swagger.json b/pkg/apiclient/workflow/workflow.swagger.json index 0677449eeaf1..f277ed5eb4e8 100644 --- a/pkg/apiclient/workflow/workflow.swagger.json +++ b/pkg/apiclient/workflow/workflow.swagger.json @@ -1913,6 +1913,11 @@ "type": "boolean", "format": "boolean", "description": "RuntimeResolution skips validation at creation time.\nBy enabling this option, you can create the referred workflow template before the actual runtime." + }, + "clusterscope": { + "type": "boolean", + "format": "boolean", + "description": "ClusterScope indicates the referred template is cluster scoped (i.e., a ClusterWorkflowTemplate)." } }, "description": "TemplateRef is a reference of template resource." diff --git a/pkg/apiclient/workflowarchive/workflow-archive.swagger.json b/pkg/apiclient/workflowarchive/workflow-archive.swagger.json index c9ad42b7e19c..09ebc1062064 100644 --- a/pkg/apiclient/workflowarchive/workflow-archive.swagger.json +++ b/pkg/apiclient/workflowarchive/workflow-archive.swagger.json @@ -1344,6 +1344,11 @@ "type": "boolean", "format": "boolean", "description": "RuntimeResolution skips validation at creation time.\nBy enabling this option, you can create the referred workflow template before the actual runtime." + }, + "clusterscope": { + "type": "boolean", + "format": "boolean", + "description": "ClusterScope indicates the referred template is cluster scoped (i.e., a ClusterWorkflowTemplate)." } }, "description": "TemplateRef is a reference of template resource." 
diff --git a/pkg/apiclient/workflowtemplate/workflow-template.swagger.json b/pkg/apiclient/workflowtemplate/workflow-template.swagger.json index be7b258b7236..a7e8c03fcce4 100644 --- a/pkg/apiclient/workflowtemplate/workflow-template.swagger.json +++ b/pkg/apiclient/workflowtemplate/workflow-template.swagger.json @@ -1415,6 +1415,11 @@ "type": "boolean", "format": "boolean", "description": "RuntimeResolution skips validation at creation time.\nBy enabling this option, you can create the referred workflow template before the actual runtime." + }, + "clusterscope": { + "type": "boolean", + "format": "boolean", + "description": "ClusterScope indicates the referred template is cluster scoped (i.e., a ClusterWorkflowTemplate)." } }, "description": "TemplateRef is a reference of template resource." diff --git a/pkg/apis/workflow/register.go b/pkg/apis/workflow/register.go index 0aade43f5998..edb0d7d88bd3 100644 --- a/pkg/apis/workflow/register.go +++ b/pkg/apis/workflow/register.go @@ -2,20 +2,25 @@ package workflow // Workflow constants const ( - Group string = "argoproj.io" - WorkflowKind string = "Workflow" - WorkflowSingular string = "workflow" - WorkflowPlural string = "workflows" - WorkflowShortName string = "wf" - WorkflowFullName string = WorkflowPlural + "." + Group - WorkflowTemplateKind string = "WorkflowTemplate" - WorkflowTemplateSingular string = "workflowtemplate" - WorkflowTemplatePlural string = "workflowtemplates" - WorkflowTemplateShortName string = "wftmpl" - WorkflowTemplateFullName string = WorkflowTemplatePlural + "." + Group - CronWorkflowKind string = "CronWorkflow" - CronWorkflowSingular string = "cronworkflow" - CronWorkflowPlural string = "cronworkflows" - CronWorkflowShortName string = "cronwf" - CronWorkflowFullName string = WorkflowTemplatePlural + "." 
+ Group + Group string = "argoproj.io" + WorkflowKind string = "Workflow" + WorkflowSingular string = "workflow" + WorkflowPlural string = "workflows" + WorkflowShortName string = "wf" + WorkflowFullName string = WorkflowPlural + "." + Group + WorkflowTemplateKind string = "WorkflowTemplate" + WorkflowTemplateSingular string = "workflowtemplate" + WorkflowTemplatePlural string = "workflowtemplates" + WorkflowTemplateShortName string = "wftmpl" + WorkflowTemplateFullName string = WorkflowTemplatePlural + "." + Group + CronWorkflowKind string = "CronWorkflow" + CronWorkflowSingular string = "cronworkflow" + CronWorkflowPlural string = "cronworkflows" + CronWorkflowShortName string = "cronwf" + CronWorkflowFullName string = CronWorkflowPlural + "." + Group + ClusterWorkflowTemplateKind string = "ClusterWorkflowTemplate" + ClusterWorkflowTemplateSingular string = "clusterworkflowtemplate" + ClusterWorkflowTemplatePlural string = "clusterworkflowtemplates" + ClusterWorkflowTemplateShortName string = "cwftmpl" + ClusterWorkflowTemplateFullName string = ClusterWorkflowTemplatePlural + "." 
+ Group ) diff --git a/pkg/apis/workflow/v1alpha1/cluster_workflow_template_types.go b/pkg/apis/workflow/v1alpha1/cluster_workflow_template_types.go new file mode 100644 index 000000000000..b8322d55b1fa --- /dev/null +++ b/pkg/apis/workflow/v1alpha1/cluster_workflow_template_types.go @@ -0,0 +1,62 @@ +package v1alpha1 + +import ( + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClusterWorkflowTemplate is the definition of a workflow template resource in cluster scope +// +genclient +// +genclient:noStatus +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ClusterWorkflowTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Spec WorkflowTemplateSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +type ClusterWorkflowTemplates []ClusterWorkflowTemplate + +func (w ClusterWorkflowTemplates) Len() int { + return len(w) +} + +func (w ClusterWorkflowTemplates) Less(i, j int) bool { + return strings.Compare(w[j].ObjectMeta.Name, w[i].ObjectMeta.Name) > 0 +} + +func (w ClusterWorkflowTemplates) Swap(i, j int) { + w[i], w[j] = w[j], w[i] +} + +// ClusterWorkflowTemplateList is list of ClusterWorkflowTemplate resources +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ClusterWorkflowTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + Items ClusterWorkflowTemplates `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +var _ TemplateGetter = &ClusterWorkflowTemplate{} + +// GetTemplateByName retrieves a defined template by its name +func (cwftmpl *ClusterWorkflowTemplate) GetTemplateByName(name string) *Template { + for _, t := range cwftmpl.Spec.Templates { + if t.Name == name { + return &t + } + } + return nil +} + +// GetTemplateScope returns the template scope of workflow template. 
+func (cwftmpl *ClusterWorkflowTemplate) GetTemplateScope() string { + return "cluster/" + cwftmpl.Name +} + +// GetAllTemplates returns the list of templates of cluster workflow template +func (cwftmpl *ClusterWorkflowTemplate) GetAllTemplates() []Template { + return cwftmpl.Spec.Templates +} diff --git a/pkg/apis/workflow/v1alpha1/common.go b/pkg/apis/workflow/v1alpha1/common.go new file mode 100644 index 000000000000..da7ce4cfa6fb --- /dev/null +++ b/pkg/apis/workflow/v1alpha1/common.go @@ -0,0 +1,34 @@ +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// TemplateGetter is an interface to get templates. +type TemplateGetter interface { + GetNamespace() string + GetName() string + GroupVersionKind() schema.GroupVersionKind + GetTemplateByName(name string) *Template + GetTemplateScope() string + GetAllTemplates() []Template +} + +// TemplateHolder is an interface for holders of templates. +type TemplateHolder interface { + GetTemplateName() string + GetTemplateRef() *TemplateRef + IsResolvable() bool +} + +// TemplateStorage is an interface of template storage getter and setter. 
+type TemplateStorage interface { + GetStoredTemplate(templateScope string, holder TemplateHolder) *Template + SetStoredTemplate(templateScope string, holder TemplateHolder, tmpl *Template) (bool, error) +} + +// WorkflowTemplateInterface is an simplified TemplateGetter +type WorkflowTemplateInterface interface { + GetTemplateByName(name string) *Template + GetTemplateScope() string +} diff --git a/pkg/apis/workflow/v1alpha1/generated.pb.go b/pkg/apis/workflow/v1alpha1/generated.pb.go index 6c24664b831e..237c0eab50d9 100644 --- a/pkg/apis/workflow/v1alpha1/generated.pb.go +++ b/pkg/apis/workflow/v1alpha1/generated.pb.go @@ -259,10 +259,66 @@ func (m *Backoff) XXX_DiscardUnknown() { var xxx_messageInfo_Backoff proto.InternalMessageInfo +func (m *ClusterWorkflowTemplate) Reset() { *m = ClusterWorkflowTemplate{} } +func (*ClusterWorkflowTemplate) ProtoMessage() {} +func (*ClusterWorkflowTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{8} +} +func (m *ClusterWorkflowTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterWorkflowTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterWorkflowTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterWorkflowTemplate.Merge(m, src) +} +func (m *ClusterWorkflowTemplate) XXX_Size() int { + return m.Size() +} +func (m *ClusterWorkflowTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterWorkflowTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterWorkflowTemplate proto.InternalMessageInfo + +func (m *ClusterWorkflowTemplateList) Reset() { *m = ClusterWorkflowTemplateList{} } +func (*ClusterWorkflowTemplateList) ProtoMessage() {} +func (*ClusterWorkflowTemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{9} +} +func (m *ClusterWorkflowTemplateList) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterWorkflowTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterWorkflowTemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterWorkflowTemplateList.Merge(m, src) +} +func (m *ClusterWorkflowTemplateList) XXX_Size() int { + return m.Size() +} +func (m *ClusterWorkflowTemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterWorkflowTemplateList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterWorkflowTemplateList proto.InternalMessageInfo + func (m *ContinueOn) Reset() { *m = ContinueOn{} } func (*ContinueOn) ProtoMessage() {} func (*ContinueOn) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{8} + return fileDescriptor_c23edafa7e7ea072, []int{10} } func (m *ContinueOn) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -290,7 +346,7 @@ var xxx_messageInfo_ContinueOn proto.InternalMessageInfo func (m *Counter) Reset() { *m = Counter{} } func (*Counter) ProtoMessage() {} func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{9} + return fileDescriptor_c23edafa7e7ea072, []int{11} } func (m *Counter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -318,7 +374,7 @@ var xxx_messageInfo_Counter proto.InternalMessageInfo func (m *CronWorkflow) Reset() { *m = CronWorkflow{} } func (*CronWorkflow) ProtoMessage() {} func (*CronWorkflow) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{10} + return fileDescriptor_c23edafa7e7ea072, []int{12} } func (m *CronWorkflow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -346,7 +402,7 @@ var xxx_messageInfo_CronWorkflow proto.InternalMessageInfo func (m *CronWorkflowList) Reset() { *m = CronWorkflowList{} } func (*CronWorkflowList) ProtoMessage() {} func 
(*CronWorkflowList) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{11} + return fileDescriptor_c23edafa7e7ea072, []int{13} } func (m *CronWorkflowList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -374,7 +430,7 @@ var xxx_messageInfo_CronWorkflowList proto.InternalMessageInfo func (m *CronWorkflowSpec) Reset() { *m = CronWorkflowSpec{} } func (*CronWorkflowSpec) ProtoMessage() {} func (*CronWorkflowSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{12} + return fileDescriptor_c23edafa7e7ea072, []int{14} } func (m *CronWorkflowSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -402,7 +458,7 @@ var xxx_messageInfo_CronWorkflowSpec proto.InternalMessageInfo func (m *CronWorkflowStatus) Reset() { *m = CronWorkflowStatus{} } func (*CronWorkflowStatus) ProtoMessage() {} func (*CronWorkflowStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{13} + return fileDescriptor_c23edafa7e7ea072, []int{15} } func (m *CronWorkflowStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -430,7 +486,7 @@ var xxx_messageInfo_CronWorkflowStatus proto.InternalMessageInfo func (m *DAGTask) Reset() { *m = DAGTask{} } func (*DAGTask) ProtoMessage() {} func (*DAGTask) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{14} + return fileDescriptor_c23edafa7e7ea072, []int{16} } func (m *DAGTask) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -458,7 +514,7 @@ var xxx_messageInfo_DAGTask proto.InternalMessageInfo func (m *DAGTemplate) Reset() { *m = DAGTemplate{} } func (*DAGTemplate) ProtoMessage() {} func (*DAGTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{15} + return fileDescriptor_c23edafa7e7ea072, []int{17} } func (m *DAGTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -486,7 +542,7 @@ var xxx_messageInfo_DAGTemplate proto.InternalMessageInfo func (m 
*ExecutorConfig) Reset() { *m = ExecutorConfig{} } func (*ExecutorConfig) ProtoMessage() {} func (*ExecutorConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{16} + return fileDescriptor_c23edafa7e7ea072, []int{18} } func (m *ExecutorConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -514,7 +570,7 @@ var xxx_messageInfo_ExecutorConfig proto.InternalMessageInfo func (m *GCSArtifact) Reset() { *m = GCSArtifact{} } func (*GCSArtifact) ProtoMessage() {} func (*GCSArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{17} + return fileDescriptor_c23edafa7e7ea072, []int{19} } func (m *GCSArtifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -542,7 +598,7 @@ var xxx_messageInfo_GCSArtifact proto.InternalMessageInfo func (m *GCSBucket) Reset() { *m = GCSBucket{} } func (*GCSBucket) ProtoMessage() {} func (*GCSBucket) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{18} + return fileDescriptor_c23edafa7e7ea072, []int{20} } func (m *GCSBucket) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -570,7 +626,7 @@ var xxx_messageInfo_GCSBucket proto.InternalMessageInfo func (m *Gauge) Reset() { *m = Gauge{} } func (*Gauge) ProtoMessage() {} func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{19} + return fileDescriptor_c23edafa7e7ea072, []int{21} } func (m *Gauge) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -598,7 +654,7 @@ var xxx_messageInfo_Gauge proto.InternalMessageInfo func (m *GitArtifact) Reset() { *m = GitArtifact{} } func (*GitArtifact) ProtoMessage() {} func (*GitArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{20} + return fileDescriptor_c23edafa7e7ea072, []int{22} } func (m *GitArtifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -626,7 +682,7 @@ var xxx_messageInfo_GitArtifact proto.InternalMessageInfo func (m *HDFSArtifact) 
Reset() { *m = HDFSArtifact{} } func (*HDFSArtifact) ProtoMessage() {} func (*HDFSArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{21} + return fileDescriptor_c23edafa7e7ea072, []int{23} } func (m *HDFSArtifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -654,7 +710,7 @@ var xxx_messageInfo_HDFSArtifact proto.InternalMessageInfo func (m *HDFSConfig) Reset() { *m = HDFSConfig{} } func (*HDFSConfig) ProtoMessage() {} func (*HDFSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{22} + return fileDescriptor_c23edafa7e7ea072, []int{24} } func (m *HDFSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -682,7 +738,7 @@ var xxx_messageInfo_HDFSConfig proto.InternalMessageInfo func (m *HDFSKrbConfig) Reset() { *m = HDFSKrbConfig{} } func (*HDFSKrbConfig) ProtoMessage() {} func (*HDFSKrbConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{23} + return fileDescriptor_c23edafa7e7ea072, []int{25} } func (m *HDFSKrbConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -710,7 +766,7 @@ var xxx_messageInfo_HDFSKrbConfig proto.InternalMessageInfo func (m *HTTPArtifact) Reset() { *m = HTTPArtifact{} } func (*HTTPArtifact) ProtoMessage() {} func (*HTTPArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{24} + return fileDescriptor_c23edafa7e7ea072, []int{26} } func (m *HTTPArtifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -738,7 +794,7 @@ var xxx_messageInfo_HTTPArtifact proto.InternalMessageInfo func (m *Histogram) Reset() { *m = Histogram{} } func (*Histogram) ProtoMessage() {} func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{25} + return fileDescriptor_c23edafa7e7ea072, []int{27} } func (m *Histogram) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -766,7 +822,7 @@ var xxx_messageInfo_Histogram proto.InternalMessageInfo func 
(m *Inputs) Reset() { *m = Inputs{} } func (*Inputs) ProtoMessage() {} func (*Inputs) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{26} + return fileDescriptor_c23edafa7e7ea072, []int{28} } func (m *Inputs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -794,7 +850,7 @@ var xxx_messageInfo_Inputs proto.InternalMessageInfo func (m *Item) Reset() { *m = Item{} } func (*Item) ProtoMessage() {} func (*Item) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{27} + return fileDescriptor_c23edafa7e7ea072, []int{29} } func (m *Item) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -822,7 +878,7 @@ var xxx_messageInfo_Item proto.InternalMessageInfo func (m *ItemValue) Reset() { *m = ItemValue{} } func (*ItemValue) ProtoMessage() {} func (*ItemValue) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{28} + return fileDescriptor_c23edafa7e7ea072, []int{30} } func (m *ItemValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -850,7 +906,7 @@ var xxx_messageInfo_ItemValue proto.InternalMessageInfo func (m *Link) Reset() { *m = Link{} } func (*Link) ProtoMessage() {} func (*Link) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{29} + return fileDescriptor_c23edafa7e7ea072, []int{31} } func (m *Link) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -878,7 +934,7 @@ var xxx_messageInfo_Link proto.InternalMessageInfo func (m *Metadata) Reset() { *m = Metadata{} } func (*Metadata) ProtoMessage() {} func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{30} + return fileDescriptor_c23edafa7e7ea072, []int{32} } func (m *Metadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -906,7 +962,7 @@ var xxx_messageInfo_Metadata proto.InternalMessageInfo func (m *MetricLabel) Reset() { *m = MetricLabel{} } func (*MetricLabel) ProtoMessage() {} func (*MetricLabel) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_c23edafa7e7ea072, []int{31} + return fileDescriptor_c23edafa7e7ea072, []int{33} } func (m *MetricLabel) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -934,7 +990,7 @@ var xxx_messageInfo_MetricLabel proto.InternalMessageInfo func (m *Metrics) Reset() { *m = Metrics{} } func (*Metrics) ProtoMessage() {} func (*Metrics) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{32} + return fileDescriptor_c23edafa7e7ea072, []int{34} } func (m *Metrics) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -962,7 +1018,7 @@ var xxx_messageInfo_Metrics proto.InternalMessageInfo func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} func (*NodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{33} + return fileDescriptor_c23edafa7e7ea072, []int{35} } func (m *NodeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -990,7 +1046,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo func (m *NoneStrategy) Reset() { *m = NoneStrategy{} } func (*NoneStrategy) ProtoMessage() {} func (*NoneStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{34} + return fileDescriptor_c23edafa7e7ea072, []int{36} } func (m *NoneStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1018,7 +1074,7 @@ var xxx_messageInfo_NoneStrategy proto.InternalMessageInfo func (m *OSSArtifact) Reset() { *m = OSSArtifact{} } func (*OSSArtifact) ProtoMessage() {} func (*OSSArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{35} + return fileDescriptor_c23edafa7e7ea072, []int{37} } func (m *OSSArtifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1046,7 +1102,7 @@ var xxx_messageInfo_OSSArtifact proto.InternalMessageInfo func (m *OSSBucket) Reset() { *m = OSSBucket{} } func (*OSSBucket) ProtoMessage() {} func (*OSSBucket) Descriptor() ([]byte, []int) { - 
return fileDescriptor_c23edafa7e7ea072, []int{36} + return fileDescriptor_c23edafa7e7ea072, []int{38} } func (m *OSSBucket) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1074,7 +1130,7 @@ var xxx_messageInfo_OSSBucket proto.InternalMessageInfo func (m *Outputs) Reset() { *m = Outputs{} } func (*Outputs) ProtoMessage() {} func (*Outputs) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{37} + return fileDescriptor_c23edafa7e7ea072, []int{39} } func (m *Outputs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1102,7 +1158,7 @@ var xxx_messageInfo_Outputs proto.InternalMessageInfo func (m *ParallelSteps) Reset() { *m = ParallelSteps{} } func (*ParallelSteps) ProtoMessage() {} func (*ParallelSteps) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{38} + return fileDescriptor_c23edafa7e7ea072, []int{40} } func (m *ParallelSteps) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1130,7 +1186,7 @@ var xxx_messageInfo_ParallelSteps proto.InternalMessageInfo func (m *Parameter) Reset() { *m = Parameter{} } func (*Parameter) ProtoMessage() {} func (*Parameter) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{39} + return fileDescriptor_c23edafa7e7ea072, []int{41} } func (m *Parameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1158,7 +1214,7 @@ var xxx_messageInfo_Parameter proto.InternalMessageInfo func (m *PodGC) Reset() { *m = PodGC{} } func (*PodGC) ProtoMessage() {} func (*PodGC) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{40} + return fileDescriptor_c23edafa7e7ea072, []int{42} } func (m *PodGC) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1186,7 +1242,7 @@ var xxx_messageInfo_PodGC proto.InternalMessageInfo func (m *Prometheus) Reset() { *m = Prometheus{} } func (*Prometheus) ProtoMessage() {} func (*Prometheus) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, 
[]int{41} + return fileDescriptor_c23edafa7e7ea072, []int{43} } func (m *Prometheus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1214,7 +1270,7 @@ var xxx_messageInfo_Prometheus proto.InternalMessageInfo func (m *RawArtifact) Reset() { *m = RawArtifact{} } func (*RawArtifact) ProtoMessage() {} func (*RawArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{42} + return fileDescriptor_c23edafa7e7ea072, []int{44} } func (m *RawArtifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1242,7 +1298,7 @@ var xxx_messageInfo_RawArtifact proto.InternalMessageInfo func (m *ResourceTemplate) Reset() { *m = ResourceTemplate{} } func (*ResourceTemplate) ProtoMessage() {} func (*ResourceTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{43} + return fileDescriptor_c23edafa7e7ea072, []int{45} } func (m *ResourceTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1270,7 +1326,7 @@ var xxx_messageInfo_ResourceTemplate proto.InternalMessageInfo func (m *RetryStrategy) Reset() { *m = RetryStrategy{} } func (*RetryStrategy) ProtoMessage() {} func (*RetryStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{44} + return fileDescriptor_c23edafa7e7ea072, []int{46} } func (m *RetryStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1298,7 +1354,7 @@ var xxx_messageInfo_RetryStrategy proto.InternalMessageInfo func (m *S3Artifact) Reset() { *m = S3Artifact{} } func (*S3Artifact) ProtoMessage() {} func (*S3Artifact) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{45} + return fileDescriptor_c23edafa7e7ea072, []int{47} } func (m *S3Artifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1326,7 +1382,7 @@ var xxx_messageInfo_S3Artifact proto.InternalMessageInfo func (m *S3Bucket) Reset() { *m = S3Bucket{} } func (*S3Bucket) ProtoMessage() {} func (*S3Bucket) Descriptor() ([]byte, []int) { 
- return fileDescriptor_c23edafa7e7ea072, []int{46} + return fileDescriptor_c23edafa7e7ea072, []int{48} } func (m *S3Bucket) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1354,7 +1410,7 @@ var xxx_messageInfo_S3Bucket proto.InternalMessageInfo func (m *ScriptTemplate) Reset() { *m = ScriptTemplate{} } func (*ScriptTemplate) ProtoMessage() {} func (*ScriptTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{47} + return fileDescriptor_c23edafa7e7ea072, []int{49} } func (m *ScriptTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1382,7 +1438,7 @@ var xxx_messageInfo_ScriptTemplate proto.InternalMessageInfo func (m *Sequence) Reset() { *m = Sequence{} } func (*Sequence) ProtoMessage() {} func (*Sequence) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{48} + return fileDescriptor_c23edafa7e7ea072, []int{50} } func (m *Sequence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1410,7 +1466,7 @@ var xxx_messageInfo_Sequence proto.InternalMessageInfo func (m *SuspendTemplate) Reset() { *m = SuspendTemplate{} } func (*SuspendTemplate) ProtoMessage() {} func (*SuspendTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{49} + return fileDescriptor_c23edafa7e7ea072, []int{51} } func (m *SuspendTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1438,7 +1494,7 @@ var xxx_messageInfo_SuspendTemplate proto.InternalMessageInfo func (m *TTLStrategy) Reset() { *m = TTLStrategy{} } func (*TTLStrategy) ProtoMessage() {} func (*TTLStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{50} + return fileDescriptor_c23edafa7e7ea072, []int{52} } func (m *TTLStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1466,7 +1522,7 @@ var xxx_messageInfo_TTLStrategy proto.InternalMessageInfo func (m *TarStrategy) Reset() { *m = TarStrategy{} } func (*TarStrategy) ProtoMessage() {} func 
(*TarStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{51} + return fileDescriptor_c23edafa7e7ea072, []int{53} } func (m *TarStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1494,7 +1550,7 @@ var xxx_messageInfo_TarStrategy proto.InternalMessageInfo func (m *Template) Reset() { *m = Template{} } func (*Template) ProtoMessage() {} func (*Template) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{52} + return fileDescriptor_c23edafa7e7ea072, []int{54} } func (m *Template) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1522,7 +1578,7 @@ var xxx_messageInfo_Template proto.InternalMessageInfo func (m *TemplateRef) Reset() { *m = TemplateRef{} } func (*TemplateRef) ProtoMessage() {} func (*TemplateRef) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{53} + return fileDescriptor_c23edafa7e7ea072, []int{55} } func (m *TemplateRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1550,7 +1606,7 @@ var xxx_messageInfo_TemplateRef proto.InternalMessageInfo func (m *UserContainer) Reset() { *m = UserContainer{} } func (*UserContainer) ProtoMessage() {} func (*UserContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{54} + return fileDescriptor_c23edafa7e7ea072, []int{56} } func (m *UserContainer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1578,7 +1634,7 @@ var xxx_messageInfo_UserContainer proto.InternalMessageInfo func (m *ValueFrom) Reset() { *m = ValueFrom{} } func (*ValueFrom) ProtoMessage() {} func (*ValueFrom) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{55} + return fileDescriptor_c23edafa7e7ea072, []int{57} } func (m *ValueFrom) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1606,7 +1662,7 @@ var xxx_messageInfo_ValueFrom proto.InternalMessageInfo func (m *Workflow) Reset() { *m = Workflow{} } func (*Workflow) ProtoMessage() {} func 
(*Workflow) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{56} + return fileDescriptor_c23edafa7e7ea072, []int{58} } func (m *Workflow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1634,7 +1690,7 @@ var xxx_messageInfo_Workflow proto.InternalMessageInfo func (m *WorkflowCondition) Reset() { *m = WorkflowCondition{} } func (*WorkflowCondition) ProtoMessage() {} func (*WorkflowCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{57} + return fileDescriptor_c23edafa7e7ea072, []int{59} } func (m *WorkflowCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1662,7 +1718,7 @@ var xxx_messageInfo_WorkflowCondition proto.InternalMessageInfo func (m *WorkflowList) Reset() { *m = WorkflowList{} } func (*WorkflowList) ProtoMessage() {} func (*WorkflowList) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{58} + return fileDescriptor_c23edafa7e7ea072, []int{60} } func (m *WorkflowList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1690,7 +1746,7 @@ var xxx_messageInfo_WorkflowList proto.InternalMessageInfo func (m *WorkflowSpec) Reset() { *m = WorkflowSpec{} } func (*WorkflowSpec) ProtoMessage() {} func (*WorkflowSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{59} + return fileDescriptor_c23edafa7e7ea072, []int{61} } func (m *WorkflowSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1718,7 +1774,7 @@ var xxx_messageInfo_WorkflowSpec proto.InternalMessageInfo func (m *WorkflowStatus) Reset() { *m = WorkflowStatus{} } func (*WorkflowStatus) ProtoMessage() {} func (*WorkflowStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{60} + return fileDescriptor_c23edafa7e7ea072, []int{62} } func (m *WorkflowStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1746,7 +1802,7 @@ var xxx_messageInfo_WorkflowStatus proto.InternalMessageInfo func (m 
*WorkflowStep) Reset() { *m = WorkflowStep{} } func (*WorkflowStep) ProtoMessage() {} func (*WorkflowStep) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{61} + return fileDescriptor_c23edafa7e7ea072, []int{63} } func (m *WorkflowStep) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1774,7 +1830,7 @@ var xxx_messageInfo_WorkflowStep proto.InternalMessageInfo func (m *WorkflowTemplate) Reset() { *m = WorkflowTemplate{} } func (*WorkflowTemplate) ProtoMessage() {} func (*WorkflowTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{62} + return fileDescriptor_c23edafa7e7ea072, []int{64} } func (m *WorkflowTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1802,7 +1858,7 @@ var xxx_messageInfo_WorkflowTemplate proto.InternalMessageInfo func (m *WorkflowTemplateList) Reset() { *m = WorkflowTemplateList{} } func (*WorkflowTemplateList) ProtoMessage() {} func (*WorkflowTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{63} + return fileDescriptor_c23edafa7e7ea072, []int{65} } func (m *WorkflowTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1830,7 +1886,7 @@ var xxx_messageInfo_WorkflowTemplateList proto.InternalMessageInfo func (m *WorkflowTemplateSpec) Reset() { *m = WorkflowTemplateSpec{} } func (*WorkflowTemplateSpec) ProtoMessage() {} func (*WorkflowTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c23edafa7e7ea072, []int{64} + return fileDescriptor_c23edafa7e7ea072, []int{66} } func (m *WorkflowTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1864,6 +1920,8 @@ func init() { proto.RegisterType((*ArtifactoryArtifact)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArtifactoryArtifact") proto.RegisterType((*ArtifactoryAuth)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArtifactoryAuth") proto.RegisterType((*Backoff)(nil), 
"github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Backoff") + proto.RegisterType((*ClusterWorkflowTemplate)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate") + proto.RegisterType((*ClusterWorkflowTemplateList)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplateList") proto.RegisterType((*ContinueOn)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ContinueOn") proto.RegisterType((*Counter)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Counter") proto.RegisterType((*CronWorkflow)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.CronWorkflow") @@ -1938,381 +1996,385 @@ func init() { } var fileDescriptor_c23edafa7e7ea072 = []byte{ - // 5979 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7c, 0x4d, 0x6c, 0x1c, 0xc9, - 0x75, 0xbf, 0x86, 0xe4, 0x0c, 0x67, 0xde, 0x90, 0x22, 0x55, 0xa2, 0xa4, 0x59, 0x5a, 0xcb, 0x91, - 0x7b, 0xff, 0xbb, 0x7f, 0x6d, 0xb2, 0x26, 0xbd, 0x92, 0x37, 0x59, 0x7f, 0xec, 0x07, 0x87, 0x14, - 0x25, 0xea, 0x83, 0xa4, 0xdf, 0x50, 0x52, 0xec, 0x5d, 0xd8, 0x69, 0x4e, 0xd7, 0xcc, 0xb4, 0x38, - 0xd3, 0xdd, 0xee, 0xea, 0x11, 0x97, 0xde, 0x04, 0xd9, 0x18, 0x09, 0x9c, 0x0f, 0x18, 0x88, 0x11, - 0xc0, 0x30, 0x60, 0x04, 0x08, 0x7c, 0x48, 0x2e, 0xc9, 0x31, 0x39, 0xfa, 0x60, 0xe4, 0x60, 0xf8, - 0x12, 0x27, 0x97, 0x38, 0x41, 0x40, 0x7b, 0x19, 0x24, 0x30, 0xe0, 0x00, 0x3e, 0xe4, 0x10, 0x80, - 0xc8, 0x21, 0xa8, 0x8f, 0xae, 0xfe, 0x98, 0x1e, 0x89, 0x9a, 0xa1, 0x04, 0x07, 0xf6, 0x89, 0x9c, - 0xf7, 0x5e, 0xfd, 0x5e, 0x75, 0x55, 0xf5, 0xab, 0xf7, 0x5e, 0xbd, 0x6a, 0x58, 0x69, 0xd9, 0x41, - 0xbb, 0xb7, 0xb3, 0xd8, 0x70, 0xbb, 0x4b, 0xa6, 0xdf, 0x72, 0x3d, 0xdf, 0x7d, 0x20, 0xfe, 0x59, - 0xf2, 0x76, 0x5b, 0x4b, 0xa6, 0x67, 0xb3, 0xa5, 0x3d, 0xd7, 0xdf, 0x6d, 0x76, 0xdc, 0xbd, 0xa5, - 0x87, 0xaf, 0x9a, 0x1d, 0xaf, 0x6d, 0xbe, 0xba, 0xd4, 0xa2, 0x0e, 0xf5, 0xcd, 0x80, 0x5a, 0x8b, - 0x9e, 0xef, 0x06, 0x2e, 0xb9, 
0x1a, 0x81, 0x2c, 0x86, 0x20, 0xe2, 0x9f, 0x45, 0x6f, 0xb7, 0xb5, - 0xc8, 0x41, 0x16, 0x43, 0x90, 0xc5, 0x10, 0x64, 0xfe, 0x63, 0x31, 0xcd, 0x2d, 0x97, 0x2b, 0xe4, - 0x58, 0x3b, 0xbd, 0xa6, 0xf8, 0x25, 0x7e, 0x88, 0xff, 0xa4, 0x8e, 0x79, 0x63, 0xf7, 0x75, 0xb6, - 0x68, 0xbb, 0xbc, 0x4b, 0x4b, 0x0d, 0xd7, 0xa7, 0x4b, 0x0f, 0xfb, 0xfa, 0x31, 0xff, 0x72, 0x4c, - 0xc6, 0x73, 0x3b, 0x76, 0x63, 0x7f, 0xe9, 0xe1, 0xab, 0x3b, 0x34, 0xe8, 0xef, 0xf2, 0xfc, 0x27, - 0x22, 0xd1, 0xae, 0xd9, 0x68, 0xdb, 0x0e, 0xf5, 0xf7, 0xa3, 0x47, 0xee, 0xd2, 0xc0, 0xcc, 0x52, - 0xb0, 0x34, 0xa8, 0x95, 0xdf, 0x73, 0x02, 0xbb, 0x4b, 0xfb, 0x1a, 0xfc, 0xda, 0xe3, 0x1a, 0xb0, - 0x46, 0x9b, 0x76, 0xcd, 0x74, 0x3b, 0xe3, 0xef, 0x73, 0x30, 0xb3, 0xec, 0x37, 0xda, 0xf6, 0x43, - 0x5a, 0x0f, 0x38, 0xa3, 0xb5, 0x4f, 0xde, 0x81, 0xf1, 0xc0, 0xf4, 0x2b, 0xb9, 0x4b, 0xb9, 0xcb, - 0xe5, 0x2b, 0x6f, 0x2f, 0x0e, 0x31, 0xe6, 0x8b, 0xdb, 0xa6, 0x1f, 0xc2, 0xd5, 0x26, 0x0f, 0x0f, - 0xaa, 0xe3, 0xdb, 0xa6, 0x8f, 0x1c, 0x95, 0x7c, 0x11, 0x26, 0x1c, 0xd7, 0xa1, 0x95, 0x31, 0x81, - 0xbe, 0x3c, 0x14, 0xfa, 0x86, 0xeb, 0xe8, 0xde, 0xd6, 0x8a, 0x87, 0x07, 0xd5, 0x09, 0x4e, 0x41, - 0x01, 0x6c, 0xfc, 0x2c, 0x07, 0xa5, 0x65, 0xbf, 0xd5, 0xeb, 0x52, 0x27, 0x60, 0xc4, 0x07, 0xf0, - 0x4c, 0xdf, 0xec, 0xd2, 0x80, 0xfa, 0xac, 0x92, 0xbb, 0x34, 0x7e, 0xb9, 0x7c, 0xe5, 0xcd, 0xa1, - 0x94, 0x6e, 0x85, 0x30, 0x35, 0xf2, 0xbd, 0x83, 0xea, 0xa9, 0xc3, 0x83, 0x2a, 0x68, 0x12, 0xc3, - 0x98, 0x16, 0xe2, 0x40, 0xc9, 0xf4, 0x03, 0xbb, 0x69, 0x36, 0x02, 0x56, 0x19, 0x13, 0x2a, 0xdf, - 0x18, 0x4a, 0xe5, 0xb2, 0x42, 0xa9, 0x9d, 0x51, 0x1a, 0x4b, 0x21, 0x85, 0x61, 0xa4, 0xc2, 0xf8, - 0xe9, 0x38, 0x14, 0x43, 0x06, 0xb9, 0x04, 0x13, 0x8e, 0xd9, 0xa5, 0x62, 0xf6, 0x4a, 0xb5, 0x29, - 0xd5, 0x70, 0x62, 0xc3, 0xec, 0xf2, 0x01, 0x32, 0xbb, 0x94, 0x4b, 0x78, 0x66, 0xd0, 0x16, 0x33, - 0x10, 0x93, 0xd8, 0x32, 0x83, 0x36, 0x0a, 0x0e, 0xb9, 0x08, 0x13, 0x5d, 0xd7, 0xa2, 0x95, 0xf1, - 0x4b, 0xb9, 0xcb, 0x79, 0x39, 0xc0, 0x77, 0x5c, 0x8b, 0xa2, 0xa0, 
0xf2, 0xf6, 0x4d, 0xdf, 0xed, - 0x56, 0x26, 0x92, 0xed, 0xd7, 0x7c, 0xb7, 0x8b, 0x82, 0x43, 0xfe, 0x38, 0x07, 0xb3, 0x61, 0xf7, - 0x6e, 0xbb, 0x0d, 0x33, 0xb0, 0x5d, 0xa7, 0x92, 0x17, 0x13, 0x7e, 0x6d, 0xa4, 0x81, 0x08, 0xc1, - 0x6a, 0x15, 0xa5, 0x75, 0x36, 0xcd, 0xc1, 0x3e, 0xc5, 0xe4, 0x0a, 0x40, 0xab, 0xe3, 0xee, 0x98, - 0x1d, 0x3e, 0x06, 0x95, 0x82, 0xe8, 0xb5, 0x9e, 0xc2, 0xeb, 0x9a, 0x83, 0x31, 0x29, 0xb2, 0x0b, - 0x93, 0xa6, 0x7c, 0x2b, 0x2a, 0x93, 0xa2, 0xdf, 0xab, 0x43, 0xf6, 0x3b, 0xf1, 0x66, 0xd5, 0xca, - 0x87, 0x07, 0xd5, 0x49, 0x45, 0xc4, 0x50, 0x03, 0x79, 0x05, 0x8a, 0xae, 0xc7, 0xbb, 0x6a, 0x76, - 0x2a, 0xc5, 0x4b, 0xb9, 0xcb, 0xc5, 0xda, 0xac, 0xea, 0x5e, 0x71, 0x53, 0xd1, 0x51, 0x4b, 0x18, - 0xff, 0x50, 0x80, 0xbe, 0xa7, 0x26, 0xaf, 0x42, 0x59, 0xa1, 0xdd, 0x76, 0x5b, 0x4c, 0x4c, 0x7e, - 0xb1, 0x36, 0x73, 0x78, 0x50, 0x2d, 0x2f, 0x47, 0x64, 0x8c, 0xcb, 0x90, 0xfb, 0x30, 0xc6, 0xae, - 0xaa, 0xd7, 0xf0, 0xad, 0xa1, 0x9e, 0xae, 0x7e, 0x55, 0x2f, 0xd0, 0xc2, 0xe1, 0x41, 0x75, 0xac, - 0x7e, 0x15, 0xc7, 0xd8, 0x55, 0x6e, 0x3e, 0x5a, 0x76, 0x20, 0x16, 0xcf, 0xb0, 0xe6, 0xe3, 0xba, - 0x1d, 0x68, 0x68, 0x61, 0x3e, 0xae, 0xdb, 0x01, 0x72, 0x54, 0x6e, 0x3e, 0xda, 0x41, 0xe0, 0x89, - 0xc5, 0x37, 0xac, 0xf9, 0xb8, 0xb1, 0xbd, 0xbd, 0xa5, 0xe1, 0xc5, 0xea, 0xe6, 0x14, 0x14, 0xc0, - 0xe4, 0x7d, 0x3e, 0x92, 0x92, 0xe7, 0xfa, 0xfb, 0x6a, 0xd5, 0xde, 0x18, 0x69, 0xd5, 0xba, 0xfe, - 0xbe, 0x56, 0xa7, 0xe6, 0x44, 0x33, 0x30, 0xae, 0x4d, 0x3c, 0x9d, 0xd5, 0x64, 0x62, 0x91, 0x0e, - 0xfd, 0x74, 0xab, 0x6b, 0xf5, 0xd4, 0xd3, 0xad, 0xae, 0xd5, 0x51, 0x00, 0xf3, 0xb9, 0xf1, 0xcd, - 0x3d, 0xb5, 0xa6, 0x87, 0x9b, 0x1b, 0x34, 0xf7, 0x92, 0x73, 0x83, 0xe6, 0x1e, 0x72, 0x54, 0x0e, - 0xee, 0x32, 0x26, 0x96, 0xf0, 0xb0, 0xe0, 0x9b, 0xf5, 0x7a, 0x12, 0x7c, 0xb3, 0x5e, 0x47, 0x8e, - 0x2a, 0x56, 0x55, 0x83, 0x55, 0x4a, 0xa3, 0xac, 0xaa, 0x95, 0x14, 0xf8, 0xf5, 0x95, 0x3a, 0x72, - 0x54, 0xa3, 0x05, 0xe7, 0x42, 0x0e, 0x52, 0xcf, 0x65, 0xb6, 0x98, 0x1a, 0xda, 0x24, 0x4b, 0x50, - 0x6a, 
0xb8, 0x4e, 0xd3, 0x6e, 0xdd, 0x31, 0x3d, 0x65, 0x52, 0xb5, 0x2d, 0x5e, 0x09, 0x19, 0x18, - 0xc9, 0x90, 0xe7, 0x61, 0x7c, 0x97, 0xee, 0x2b, 0xdb, 0x5a, 0x56, 0xa2, 0xe3, 0xb7, 0xe8, 0x3e, - 0x72, 0xba, 0xf1, 0x9d, 0x1c, 0x9c, 0xcd, 0x58, 0x16, 0xbc, 0x59, 0xcf, 0xef, 0x28, 0x0d, 0xba, - 0xd9, 0x5d, 0xbc, 0x8d, 0x9c, 0x4e, 0xbe, 0x9a, 0x83, 0x99, 0xd8, 0x3a, 0x59, 0xee, 0x29, 0xf3, - 0x3d, 0xbc, 0x5d, 0x4a, 0x60, 0xd5, 0x2e, 0x28, 0x8d, 0x33, 0x29, 0x06, 0xa6, 0xb5, 0x1a, 0xff, - 0x24, 0xfc, 0x85, 0x04, 0x8d, 0x98, 0x70, 0xba, 0xc7, 0xa8, 0xcf, 0x37, 0x97, 0x3a, 0x6d, 0xf8, - 0x34, 0x50, 0xae, 0xc3, 0x8b, 0x8b, 0xd2, 0x29, 0xe1, 0xbd, 0x58, 0xe4, 0xae, 0xd4, 0xe2, 0xc3, - 0x57, 0x17, 0xa5, 0xc4, 0x2d, 0xba, 0x5f, 0xa7, 0x1d, 0xca, 0x31, 0x6a, 0xe4, 0xf0, 0xa0, 0x7a, - 0xfa, 0x6e, 0x02, 0x00, 0x53, 0x80, 0x5c, 0x85, 0x67, 0x32, 0xb6, 0xe7, 0xfa, 0x96, 0x52, 0x31, - 0xf6, 0xc4, 0x2a, 0xb6, 0x12, 0x00, 0x98, 0x02, 0x34, 0xbe, 0x91, 0x83, 0xc9, 0x9a, 0xd9, 0xd8, - 0x75, 0x9b, 0x4d, 0x6e, 0x91, 0xad, 0x9e, 0x2f, 0xf7, 0x2d, 0x39, 0x27, 0xda, 0x22, 0xaf, 0x2a, - 0x3a, 0x6a, 0x09, 0xf2, 0x12, 0x14, 0xe4, 0x70, 0x88, 0x4e, 0xe5, 0x6b, 0xa7, 0x95, 0x6c, 0x61, - 0x4d, 0x50, 0x51, 0x71, 0xc9, 0x6b, 0x50, 0xee, 0x9a, 0xef, 0x85, 0x00, 0xc2, 0x40, 0x96, 0x6a, - 0x67, 0x95, 0x70, 0xf9, 0x4e, 0xc4, 0xc2, 0xb8, 0x9c, 0xf1, 0x39, 0x80, 0x15, 0xd7, 0x09, 0x6c, - 0xa7, 0x47, 0x37, 0x1d, 0xf2, 0x02, 0xe4, 0xa9, 0xef, 0xbb, 0xbe, 0xb2, 0xf1, 0xd3, 0xaa, 0x79, - 0xfe, 0x1a, 0x27, 0xa2, 0xe4, 0xc9, 0x1e, 0xd9, 0x1d, 0x6a, 0x89, 0x1e, 0x15, 0xe3, 0x3d, 0xe2, - 0x54, 0x54, 0x5c, 0x63, 0x11, 0x26, 0x57, 0xdc, 0x9e, 0x13, 0x50, 0x9f, 0xe3, 0x3e, 0x34, 0x3b, - 0xbd, 0xd0, 0x71, 0xd0, 0xb8, 0xf7, 0x38, 0x11, 0x25, 0xcf, 0xf8, 0xfe, 0x18, 0x4c, 0xad, 0xf8, - 0xae, 0x73, 0x5f, 0xad, 0x28, 0xf2, 0x9b, 0x50, 0xe4, 0x2e, 0xac, 0x65, 0x06, 0xa6, 0x9a, 0xf4, - 0x8f, 0xc7, 0x66, 0x44, 0x7b, 0xa2, 0xd1, 0x5a, 0xe4, 0xd2, 0x7c, 0x8e, 0x36, 0x77, 0x1e, 0xd0, - 0x46, 0x70, 0x87, 0x06, 0x66, 0xb4, 0x17, 
0x47, 0x34, 0xd4, 0xa8, 0xa4, 0x05, 0x13, 0xcc, 0xa3, - 0x0d, 0x35, 0xdf, 0xc3, 0xb9, 0x0f, 0xf1, 0x2e, 0xd7, 0x3d, 0xda, 0x88, 0x9c, 0x16, 0xfe, 0x0b, - 0x85, 0x02, 0xe2, 0x42, 0x81, 0x05, 0x66, 0xd0, 0x63, 0x6a, 0xe7, 0xba, 0x3e, 0xba, 0x2a, 0x01, - 0x17, 0x0d, 0xbe, 0xfc, 0x8d, 0x4a, 0x8d, 0xf1, 0xc3, 0x1c, 0xcc, 0xc6, 0xc5, 0x6f, 0xdb, 0x2c, - 0x20, 0xef, 0xf6, 0x0d, 0xe8, 0xe2, 0xf1, 0x06, 0x94, 0xb7, 0x16, 0xc3, 0xa9, 0x57, 0x6a, 0x48, - 0x89, 0x0d, 0x66, 0x13, 0xf2, 0x76, 0x40, 0xbb, 0xa1, 0x57, 0xba, 0x3c, 0xf2, 0x23, 0x46, 0xeb, - 0x64, 0x9d, 0xe3, 0xa2, 0x84, 0x37, 0xfe, 0x27, 0x9f, 0x7c, 0x34, 0x3e, 0xcc, 0xdc, 0x2b, 0x9c, - 0xda, 0x8b, 0x11, 0xd4, 0xf3, 0x0d, 0xd7, 0x89, 0xc4, 0x74, 0xfe, 0x3f, 0xd5, 0x89, 0xa9, 0x38, - 0xf5, 0x28, 0xf5, 0x1b, 0x13, 0xca, 0xf9, 0x2b, 0xce, 0x43, 0x22, 0xab, 0xd7, 0xa1, 0xca, 0x5a, - 0xeb, 0x81, 0xab, 0x2b, 0x3a, 0x6a, 0x09, 0xf2, 0x2e, 0x9c, 0x69, 0xb8, 0x4e, 0xa3, 0xe7, 0xfb, - 0xd4, 0x69, 0xec, 0x6f, 0x89, 0x90, 0x4f, 0xbd, 0xc0, 0x8b, 0xaa, 0xd9, 0x99, 0x95, 0xb4, 0xc0, - 0x51, 0x16, 0x11, 0xfb, 0x81, 0xc8, 0xcb, 0x30, 0xc9, 0x7a, 0xcc, 0xa3, 0x8e, 0x25, 0xfc, 0x9a, - 0x62, 0x6d, 0x46, 0x61, 0x4e, 0xd6, 0x25, 0x19, 0x43, 0x3e, 0xb9, 0x0b, 0x17, 0x58, 0xc0, 0x8d, - 0xb2, 0xd3, 0x5a, 0xa5, 0xa6, 0xd5, 0xb1, 0x1d, 0x6e, 0x22, 0x5d, 0xc7, 0x62, 0xc2, 0x55, 0x19, - 0xaf, 0x7d, 0xe4, 0xf0, 0xa0, 0x7a, 0xa1, 0x9e, 0x2d, 0x82, 0x83, 0xda, 0x92, 0x2f, 0xc0, 0x3c, - 0xeb, 0x35, 0x1a, 0x94, 0xb1, 0x66, 0xaf, 0x73, 0xd3, 0xdd, 0x61, 0x37, 0x6c, 0xc6, 0xed, 0xfb, - 0x6d, 0xbb, 0x6b, 0x07, 0xc2, 0x1d, 0xc9, 0xd7, 0x16, 0x0e, 0x0f, 0xaa, 0xf3, 0xf5, 0x81, 0x52, - 0xf8, 0x08, 0x04, 0x82, 0x70, 0x5e, 0x9a, 0x9c, 0x3e, 0xec, 0x49, 0x81, 0x3d, 0x7f, 0x78, 0x50, - 0x3d, 0xbf, 0x96, 0x29, 0x81, 0x03, 0x5a, 0xf2, 0x19, 0xe4, 0x91, 0xed, 0x97, 0x79, 0x34, 0x59, - 0x4c, 0xce, 0xe0, 0xb6, 0xa2, 0xa3, 0x96, 0x20, 0x0f, 0xa2, 0xc5, 0xc7, 0x5f, 0x0a, 0xe5, 0x48, - 0x3c, 0xb9, 0xb5, 0x9a, 0xe3, 0x51, 0xc7, 0xfd, 0x18, 0x12, 0x7f, 0xb1, 0x30, 
0x81, 0x6d, 0xfc, - 0x63, 0x0e, 0x48, 0xbf, 0x21, 0x20, 0xb7, 0xa0, 0x60, 0x36, 0x02, 0x1e, 0x53, 0xc8, 0x38, 0xf4, - 0x85, 0xac, 0xcd, 0x4b, 0xaa, 0x42, 0xda, 0xa4, 0x7c, 0x85, 0xd0, 0xc8, 0x7a, 0x2c, 0x8b, 0xa6, - 0xa8, 0x20, 0x88, 0x0b, 0x67, 0x3a, 0x26, 0x0b, 0xc2, 0xb5, 0x6a, 0xf1, 0x47, 0x56, 0x46, 0xf2, - 0x57, 0x8e, 0xf7, 0x50, 0xbc, 0x45, 0xed, 0x1c, 0x5f, 0xb9, 0xb7, 0xd3, 0x40, 0xd8, 0x8f, 0x6d, - 0x7c, 0xb7, 0x00, 0x93, 0xab, 0xcb, 0xd7, 0xb7, 0x4d, 0xb6, 0x7b, 0x8c, 0x20, 0x93, 0x4f, 0x0e, - 0xed, 0x7a, 0x1d, 0x33, 0xe8, 0x7b, 0xbd, 0xb6, 0x15, 0x1d, 0xb5, 0x04, 0x71, 0x79, 0xc4, 0xac, - 0x42, 0x76, 0x65, 0x7e, 0xdf, 0x1c, 0xd2, 0xb1, 0x51, 0x28, 0xf1, 0x90, 0x59, 0x91, 0x30, 0xd2, - 0x41, 0x18, 0x94, 0x43, 0xe5, 0x48, 0x9b, 0x2a, 0x9a, 0x18, 0x32, 0xd5, 0x11, 0xe1, 0x48, 0xef, - 0x3e, 0x46, 0xc0, 0xb8, 0x16, 0xf2, 0x09, 0x98, 0xb2, 0x28, 0x7f, 0x8b, 0xa9, 0xd3, 0xb0, 0x29, - 0x7f, 0x61, 0xc7, 0xf9, 0xb8, 0x70, 0xc3, 0xb5, 0x1a, 0xa3, 0x63, 0x42, 0x8a, 0x3c, 0x80, 0xd2, - 0x9e, 0x1d, 0xb4, 0x85, 0x7d, 0xad, 0x14, 0xc4, 0xc2, 0xf9, 0xe4, 0x50, 0x1d, 0xe5, 0x08, 0xd1, - 0xb0, 0xdc, 0x0f, 0x31, 0x31, 0x82, 0xe7, 0xee, 0x2e, 0xff, 0x21, 0xf2, 0x1a, 0xe2, 0xcd, 0x2c, - 0x25, 0x1b, 0x08, 0x06, 0x46, 0x32, 0x84, 0xc1, 0x14, 0xff, 0x51, 0xa7, 0x5f, 0xea, 0xf1, 0xd5, - 0xaa, 0x7c, 0xff, 0xe1, 0xb2, 0x1d, 0x21, 0x88, 0x1c, 0x91, 0xfb, 0x31, 0x58, 0x4c, 0x28, 0xe1, - 0xab, 0x6f, 0xaf, 0x4d, 0x1d, 0xf1, 0x0a, 0xc7, 0x56, 0xdf, 0xfd, 0x36, 0x75, 0x50, 0x70, 0x88, - 0x0b, 0xd0, 0xd0, 0x2e, 0x53, 0x05, 0x46, 0x88, 0x71, 0x23, 0xcf, 0xab, 0x76, 0x9a, 0xfb, 0x28, - 0xd1, 0x6f, 0x8c, 0xa9, 0xe0, 0x0e, 0x97, 0xeb, 0x5c, 0x7b, 0xcf, 0x0e, 0x2a, 0x65, 0xd1, 0x29, - 0xfd, 0xd6, 0x6e, 0x0a, 0x2a, 0x2a, 0xae, 0xf1, 0xdd, 0x1c, 0x94, 0xf9, 0x4b, 0x14, 0x2e, 0xfc, - 0x97, 0xa0, 0x10, 0x98, 0x7e, 0x4b, 0xb9, 0xcc, 0xb1, 0x76, 0xdb, 0x82, 0x8a, 0x8a, 0x4b, 0x4c, - 0xc8, 0x07, 0x26, 0xdb, 0x0d, 0x37, 0xee, 0xcf, 0x0c, 0xf5, 0x2c, 0xea, 0xed, 0x8d, 0xf6, 0x6c, - 0xfe, 0x8b, 0xa1, 
0x44, 0x26, 0x97, 0xa1, 0xc8, 0x0d, 0xed, 0x9a, 0xc9, 0x64, 0xec, 0x5e, 0xac, - 0x4d, 0xf1, 0xb7, 0x75, 0x4d, 0xd1, 0x50, 0x73, 0x8d, 0x77, 0xe1, 0xf4, 0xb5, 0xf7, 0x68, 0xa3, - 0x17, 0xb8, 0xbe, 0x8c, 0x81, 0xc8, 0x4d, 0x20, 0x8c, 0xfa, 0x0f, 0xed, 0x06, 0x5d, 0x6e, 0x34, - 0xb8, 0x43, 0xb9, 0x11, 0x59, 0x87, 0x79, 0xa5, 0x8d, 0xd4, 0xfb, 0x24, 0x30, 0xa3, 0x95, 0xf1, - 0x67, 0x39, 0x28, 0xc7, 0x22, 0x35, 0x6e, 0x1b, 0x5a, 0x2b, 0xf5, 0x5a, 0xaf, 0xb1, 0xab, 0x03, - 0x8b, 0x37, 0x87, 0x0d, 0xff, 0x24, 0x4a, 0xb4, 0xa6, 0x35, 0x09, 0x23, 0x1d, 0x8f, 0x0b, 0xe1, - 0xfe, 0x36, 0x07, 0x51, 0x3b, 0x3e, 0x81, 0x3b, 0x51, 0xd7, 0x62, 0x13, 0xa8, 0x70, 0x15, 0x97, - 0x7c, 0x90, 0x83, 0x0b, 0xc9, 0x87, 0x15, 0xf1, 0xc9, 0x93, 0x87, 0x32, 0x55, 0xa5, 0xe0, 0x42, - 0x3d, 0x1b, 0x0d, 0x07, 0xa9, 0x31, 0xee, 0x41, 0xfe, 0xba, 0xd9, 0x6b, 0xd1, 0x63, 0xb9, 0xfa, - 0x7c, 0x39, 0xf8, 0xd4, 0xec, 0x04, 0xe1, 0xb6, 0xa2, 0x96, 0x03, 0x2a, 0x1a, 0x6a, 0xae, 0xf1, - 0x57, 0x13, 0x50, 0x8e, 0x25, 0x6c, 0xf8, 0xeb, 0xe9, 0x53, 0xcf, 0x4d, 0x6f, 0x0e, 0x3c, 0xb0, - 0x46, 0xc1, 0xe1, 0x9b, 0x83, 0x4f, 0x1f, 0xda, 0x8c, 0x47, 0x41, 0xa9, 0xcd, 0x01, 0x15, 0x1d, - 0xb5, 0x04, 0xa9, 0x42, 0xde, 0xa2, 0x5e, 0xd0, 0x16, 0xab, 0x72, 0xa2, 0x56, 0xe2, 0x5d, 0x5d, - 0xe5, 0x04, 0x94, 0x74, 0x2e, 0xd0, 0xa4, 0x41, 0xa3, 0x5d, 0x99, 0x10, 0x06, 0x55, 0x08, 0xac, - 0x71, 0x02, 0x4a, 0x7a, 0x46, 0x80, 0x9a, 0x7f, 0xfa, 0x01, 0x6a, 0xe1, 0x84, 0x03, 0x54, 0xe2, - 0xc1, 0x59, 0xc6, 0xda, 0x5b, 0xbe, 0xfd, 0xd0, 0x0c, 0x68, 0xb4, 0x7a, 0x26, 0x9f, 0x44, 0xcf, - 0x85, 0xc3, 0x83, 0xea, 0xd9, 0x7a, 0xfd, 0x46, 0x1a, 0x05, 0xb3, 0xa0, 0x49, 0x1d, 0xce, 0xd9, - 0x0e, 0xa3, 0x8d, 0x9e, 0x4f, 0xd7, 0x5b, 0x8e, 0xeb, 0xd3, 0x1b, 0x2e, 0xe3, 0x70, 0x2a, 0x4b, - 0xf9, 0xbc, 0x9a, 0xb4, 0x73, 0xeb, 0x59, 0x42, 0x98, 0xdd, 0xd6, 0xf8, 0x7e, 0x0e, 0xa6, 0xe2, - 0x39, 0x2a, 0xc2, 0x00, 0xda, 0xab, 0x6b, 0x75, 0x69, 0x4a, 0xd4, 0x1b, 0xfe, 0xd6, 0xd0, 0xa9, - 0x2f, 0x09, 0x13, 0x05, 0x95, 0x11, 0x0d, 0x63, 0x6a, 
0x8e, 0x91, 0x04, 0x7f, 0x01, 0xf2, 0x4d, - 0xd7, 0x6f, 0x50, 0x65, 0x0c, 0xf5, 0x5b, 0xb2, 0xc6, 0x89, 0x28, 0x79, 0xc6, 0x4f, 0x72, 0x10, - 0xd3, 0x40, 0x7e, 0x07, 0xa6, 0xb9, 0x8e, 0x5b, 0xfe, 0x4e, 0xe2, 0x69, 0x6a, 0x43, 0x3f, 0x8d, - 0x46, 0xaa, 0x9d, 0x53, 0xfa, 0xa7, 0x13, 0x64, 0x4c, 0xea, 0x23, 0xbf, 0x0a, 0x25, 0xd3, 0xb2, - 0x7c, 0xca, 0x18, 0x95, 0x7b, 0x45, 0xa9, 0x36, 0x2d, 0x9c, 0xa0, 0x90, 0x88, 0x11, 0x9f, 0xbf, - 0x86, 0x6d, 0xab, 0xc9, 0xf8, 0xca, 0x56, 0xb1, 0x8c, 0x7e, 0x0d, 0xb9, 0x12, 0x4e, 0x47, 0x2d, - 0x61, 0x7c, 0x6d, 0x02, 0x92, 0xba, 0x89, 0x05, 0x33, 0xbb, 0xfe, 0xce, 0xca, 0x8a, 0xd9, 0x68, - 0x0f, 0x95, 0xf8, 0x39, 0x7b, 0x78, 0x50, 0x9d, 0xb9, 0x95, 0x44, 0xc0, 0x34, 0xa4, 0xd2, 0x72, - 0x8b, 0xee, 0x07, 0xe6, 0xce, 0x30, 0x06, 0x33, 0xd4, 0x12, 0x47, 0xc0, 0x34, 0x24, 0x79, 0x0d, - 0xca, 0xbb, 0xfe, 0x4e, 0xf8, 0x92, 0xa7, 0x73, 0x33, 0xb7, 0x22, 0x16, 0xc6, 0xe5, 0xf8, 0x10, - 0xee, 0xfa, 0x3b, 0xdc, 0x28, 0x86, 0xe7, 0x21, 0x7a, 0x08, 0x6f, 0x29, 0x3a, 0x6a, 0x09, 0xe2, - 0x01, 0xd9, 0x0d, 0x47, 0x4f, 0x67, 0x0f, 0x95, 0x2d, 0xba, 0x9c, 0xf5, 0x34, 0x5a, 0x28, 0xfe, - 0x40, 0xe7, 0xf9, 0x66, 0x7a, 0xab, 0x0f, 0x07, 0x33, 0xb0, 0xc9, 0xe7, 0xe0, 0xc2, 0xae, 0xbf, - 0xa3, 0xb6, 0x8a, 0x2d, 0xdf, 0x76, 0x1a, 0xb6, 0x97, 0x38, 0x08, 0xd1, 0xdb, 0xc9, 0xad, 0x6c, - 0x31, 0x1c, 0xd4, 0xde, 0xf8, 0x18, 0x4c, 0xc5, 0x13, 0xe9, 0x8f, 0x49, 0x61, 0x1a, 0xf7, 0xa1, - 0x24, 0xa2, 0xb7, 0x16, 0x77, 0x1b, 0xf5, 0x0e, 0x34, 0xfe, 0x88, 0x1d, 0xe8, 0x45, 0x98, 0x94, - 0x9b, 0x27, 0x13, 0x86, 0x3d, 0x27, 0x4f, 0x4f, 0xe4, 0xbe, 0xca, 0x30, 0xe4, 0x19, 0xff, 0x99, - 0x83, 0xc2, 0xba, 0xe3, 0xf5, 0x7e, 0x41, 0x0e, 0xfb, 0xbe, 0x3d, 0x01, 0x13, 0xdc, 0x59, 0x27, - 0x97, 0x61, 0x22, 0xd8, 0xf7, 0xe4, 0x26, 0x3e, 0x5e, 0x9b, 0x0b, 0x2d, 0xd8, 0xf6, 0xbe, 0x47, - 0x8f, 0xd4, 0x5f, 0x14, 0x12, 0xe4, 0x4d, 0x28, 0x38, 0xbd, 0xee, 0x3d, 0xb3, 0xa3, 0xac, 0xdd, - 0x4b, 0xa1, 0x8f, 0xb2, 0x21, 0xa8, 0x47, 0x07, 0xd5, 0x39, 0xea, 0x34, 0x5c, 0xcb, 0x76, 
0x5a, - 0x4b, 0x0f, 0x98, 0xeb, 0x2c, 0x6e, 0xf4, 0xba, 0x3b, 0xd4, 0x47, 0xd5, 0x8a, 0xbc, 0x0c, 0x93, - 0x3b, 0xae, 0xdb, 0xe1, 0x00, 0xe3, 0xc9, 0xf4, 0x44, 0x4d, 0x92, 0x31, 0xe4, 0x73, 0x77, 0x88, - 0x05, 0x3e, 0x97, 0x9c, 0x48, 0xba, 0x43, 0x75, 0x41, 0x45, 0xc5, 0x25, 0x5d, 0x28, 0x74, 0x4d, - 0x8f, 0xcb, 0xe5, 0xc5, 0x90, 0x5d, 0x1b, 0x3a, 0xa2, 0x59, 0xbc, 0x23, 0x70, 0xae, 0x39, 0x81, - 0xbf, 0x1f, 0xa9, 0x93, 0x44, 0x54, 0x4a, 0x88, 0x0d, 0x93, 0x1d, 0x9b, 0x05, 0x5c, 0x5f, 0x61, - 0x84, 0x55, 0xc1, 0xf5, 0x89, 0x25, 0x1a, 0x8d, 0xc0, 0x6d, 0x09, 0x8b, 0x21, 0xfe, 0xfc, 0x3e, - 0x94, 0x63, 0x3d, 0x22, 0xb3, 0xd2, 0x99, 0x14, 0x6f, 0x85, 0xf0, 0x1f, 0xc9, 0x76, 0xb8, 0xf6, - 0xc7, 0x46, 0xf0, 0x65, 0x75, 0x4f, 0xd4, 0xcb, 0xf2, 0xa9, 0xb1, 0xd7, 0x73, 0x9f, 0x2a, 0x7e, - 0xf3, 0xcf, 0xab, 0xa7, 0x3e, 0xf8, 0xd7, 0x4b, 0xa7, 0x8c, 0xbf, 0x1b, 0x87, 0x92, 0x16, 0xf9, - 0xbf, 0xbd, 0x52, 0xfc, 0xd4, 0x4a, 0xb9, 0x39, 0xda, 0x78, 0x1d, 0x6b, 0xb9, 0x2c, 0x27, 0x97, - 0xcb, 0x54, 0xed, 0xff, 0xc7, 0xa6, 0xfa, 0xe8, 0xa0, 0x5a, 0x49, 0x0e, 0x02, 0x9a, 0x7b, 0x77, - 0x28, 0x63, 0x66, 0x8b, 0x46, 0xcb, 0xe0, 0x93, 0x8f, 0x5b, 0x06, 0x73, 0xf1, 0x65, 0x50, 0xca, - 0x9e, 0xc6, 0x0e, 0x4c, 0xdc, 0xb6, 0x9d, 0xe3, 0xa4, 0x5b, 0x5e, 0x80, 0x3c, 0x6b, 0xb8, 0x5e, - 0x98, 0x6b, 0xd1, 0x06, 0xb5, 0xce, 0x89, 0x28, 0x79, 0xa1, 0x85, 0x1e, 0x1f, 0x60, 0xa1, 0x3f, - 0x18, 0x87, 0x62, 0x98, 0xd0, 0x22, 0xbf, 0x9f, 0x83, 0xb2, 0xe9, 0x38, 0x6e, 0x20, 0xce, 0x20, - 0x42, 0x63, 0xba, 0x31, 0xd4, 0xe0, 0x87, 0xa0, 0x8b, 0xcb, 0x11, 0xa0, 0x9c, 0x00, 0xbd, 0xc1, - 0xc6, 0x38, 0x18, 0xd7, 0x4b, 0xbe, 0x04, 0x85, 0x8e, 0xb9, 0x43, 0x3b, 0xa1, 0x6d, 0x5d, 0x1f, - 0xad, 0x07, 0xb7, 0x05, 0x56, 0x6a, 0xf6, 0x25, 0x11, 0x95, 0xa2, 0xf9, 0x37, 0x61, 0x36, 0xdd, - 0xd1, 0x27, 0x99, 0x3f, 0x3e, 0xf5, 0x31, 0x35, 0x4f, 0xd2, 0xd4, 0xf8, 0x2c, 0x94, 0xef, 0xd0, - 0xc0, 0xb7, 0x1b, 0x02, 0x20, 0x8c, 0x44, 0x73, 0xd9, 0x91, 0x68, 0xb4, 0x8b, 0x8e, 0x3d, 0xe2, - 0xc8, 0xe6, 0xcb, 0x30, 0x29, 
0x21, 0x19, 0x71, 0x01, 0x3c, 0xdf, 0xed, 0xd2, 0xa0, 0x4d, 0x7b, - 0xe1, 0x8c, 0x0e, 0xe7, 0x68, 0x6f, 0x69, 0x18, 0x99, 0x15, 0x89, 0x7e, 0x63, 0x4c, 0x85, 0xf1, - 0xd7, 0x53, 0x00, 0x1b, 0xae, 0x45, 0x55, 0xfe, 0x73, 0x1e, 0xc6, 0x6c, 0x4b, 0x3d, 0x0d, 0xa8, - 0xce, 0x8e, 0xad, 0xaf, 0xe2, 0x98, 0x6d, 0xe9, 0x25, 0x3e, 0x36, 0x70, 0x89, 0xbf, 0x06, 0x65, - 0xcb, 0x66, 0x5e, 0xc7, 0xdc, 0xdf, 0xc8, 0xf0, 0xd0, 0x56, 0x23, 0x16, 0xc6, 0xe5, 0xc8, 0x2b, - 0xca, 0xf8, 0x49, 0x2b, 0x53, 0x49, 0x19, 0xbf, 0x22, 0xef, 0x5e, 0xcc, 0x00, 0xbe, 0x0e, 0x53, - 0x61, 0xc6, 0x4e, 0x68, 0xc9, 0x8b, 0x56, 0xa1, 0xc9, 0x9c, 0xda, 0x8e, 0xf1, 0x30, 0x21, 0x99, - 0xce, 0x28, 0x16, 0x9e, 0x49, 0x46, 0x71, 0x15, 0x66, 0x59, 0xe0, 0xfa, 0xd4, 0x0a, 0x25, 0xd6, - 0x57, 0x2b, 0x24, 0xf1, 0xa0, 0xb3, 0xf5, 0x14, 0x1f, 0xfb, 0x5a, 0x90, 0x2d, 0x98, 0x0b, 0x3b, - 0x11, 0x7f, 0xc0, 0xca, 0x59, 0x81, 0x74, 0x51, 0x21, 0xcd, 0xdd, 0xcf, 0x90, 0xc1, 0xcc, 0x96, - 0xe4, 0xd3, 0x30, 0x1d, 0x76, 0x53, 0x58, 0xa0, 0xca, 0x9c, 0x80, 0xd2, 0x31, 0xcc, 0x76, 0x9c, - 0x89, 0x49, 0x59, 0xf2, 0x71, 0xc8, 0x7b, 0x6d, 0x93, 0x51, 0x95, 0x80, 0x0c, 0xf3, 0x47, 0xf9, - 0x2d, 0x4e, 0x3c, 0x3a, 0xa8, 0x96, 0xf8, 0x9c, 0x89, 0x1f, 0x28, 0x05, 0xc9, 0x15, 0x80, 0x1d, - 0xb7, 0xe7, 0x58, 0xa6, 0xbf, 0xbf, 0xbe, 0xaa, 0xce, 0x02, 0xb4, 0xdf, 0x56, 0xd3, 0x1c, 0x8c, - 0x49, 0xf1, 0xad, 0xaa, 0x2b, 0x8d, 0xb6, 0xca, 0x23, 0xea, 0xad, 0x4a, 0xdb, 0x72, 0xc5, 0x27, - 0xef, 0x40, 0x49, 0x9c, 0x9b, 0x50, 0x6b, 0x39, 0x50, 0xc9, 0xc4, 0x27, 0x49, 0xb1, 0x6b, 0x7f, - 0xae, 0x1e, 0x82, 0x60, 0x84, 0x47, 0xbe, 0x00, 0xd0, 0xb4, 0x1d, 0x9b, 0xb5, 0x05, 0x7a, 0xf9, - 0x89, 0xd1, 0xf5, 0x73, 0xae, 0x69, 0x14, 0x8c, 0x21, 0x92, 0x9f, 0xe6, 0xe0, 0x8c, 0x4f, 0x99, - 0xdb, 0xf3, 0x1b, 0x94, 0xe9, 0xb3, 0xe7, 0x73, 0xe2, 0xe5, 0xbf, 0x37, 0x64, 0xf5, 0x5d, 0xf8, - 0x46, 0x2f, 0x62, 0x1a, 0x58, 0x5a, 0x56, 0x1a, 0x1e, 0x89, 0xf5, 0xf1, 0x8f, 0xb2, 0x88, 0x5f, - 0xf9, 0x51, 0xb5, 0xda, 0x5f, 0x74, 0xa9, 0xc1, 0xf9, 0x8a, 0xfa, 
0xa3, 0x1f, 0x55, 0x67, 0xc3, - 0xdf, 0xfa, 0x94, 0xbc, 0xff, 0xb9, 0xb8, 0x49, 0xf4, 0x5c, 0x6b, 0x7d, 0xab, 0x32, 0x95, 0x34, - 0x89, 0x5b, 0x9c, 0x88, 0x92, 0x47, 0x2e, 0x43, 0xd1, 0x32, 0x69, 0xd7, 0x75, 0xa8, 0x55, 0x99, - 0x8e, 0x52, 0x5b, 0xab, 0x8a, 0x86, 0x9a, 0x4b, 0xbe, 0x08, 0x05, 0x5b, 0x84, 0x16, 0x95, 0xd3, - 0x62, 0x62, 0x3e, 0x3d, 0x9c, 0xf3, 0x21, 0x20, 0x6a, 0xc0, 0xf7, 0x1a, 0xf9, 0x3f, 0x2a, 0x58, - 0xd2, 0x80, 0x49, 0xb7, 0x17, 0x08, 0x0d, 0x33, 0x42, 0xc3, 0x70, 0x99, 0xdd, 0x4d, 0x89, 0x21, - 0x23, 0x24, 0xf5, 0x03, 0x43, 0x64, 0xfe, 0xbc, 0x8d, 0xb6, 0xdd, 0xb1, 0x7c, 0xea, 0x54, 0x66, - 0x45, 0x4e, 0x40, 0x3c, 0xef, 0x8a, 0xa2, 0xa1, 0xe6, 0x92, 0x5f, 0x87, 0x69, 0xb7, 0x17, 0x88, - 0xb7, 0x84, 0xcf, 0x32, 0xab, 0x9c, 0x11, 0xe2, 0x67, 0xf8, 0x3b, 0xbb, 0x19, 0x67, 0x60, 0x52, - 0x6e, 0x7e, 0x15, 0xce, 0x67, 0xaf, 0x85, 0xc7, 0x6d, 0x7f, 0xe3, 0xf1, 0xed, 0xef, 0x34, 0x4c, - 0xc5, 0x4b, 0x3b, 0x45, 0x2a, 0x38, 0x56, 0x11, 0x44, 0x5c, 0x28, 0xb9, 0xf5, 0x93, 0x48, 0x05, - 0x6f, 0xd6, 0xfb, 0x52, 0xc1, 0x9a, 0x84, 0x91, 0x8e, 0xc7, 0xa5, 0x82, 0xff, 0x66, 0x0c, 0xa2, - 0x76, 0xe4, 0x15, 0x28, 0x52, 0xc7, 0xf2, 0x5c, 0xdb, 0x09, 0xd2, 0x45, 0x23, 0xd7, 0x14, 0x1d, - 0xb5, 0x44, 0x2c, 0x71, 0x3c, 0xf6, 0xc8, 0xc4, 0x71, 0x1b, 0x66, 0x4c, 0x71, 0xac, 0x1a, 0x65, - 0xfc, 0xc6, 0x9f, 0x28, 0xe3, 0xa7, 0x4b, 0x7b, 0x92, 0x28, 0x98, 0x86, 0xe5, 0x9a, 0x58, 0xd4, - 0x5c, 0x68, 0x9a, 0x18, 0x4a, 0x53, 0x3d, 0x89, 0x82, 0x69, 0x58, 0xe3, 0xeb, 0x63, 0x10, 0xae, - 0xd2, 0x5f, 0x84, 0x98, 0x9d, 0x18, 0x50, 0xf0, 0x29, 0xeb, 0x75, 0x02, 0xe5, 0xb5, 0x08, 0x4b, - 0x80, 0x82, 0x82, 0x8a, 0x63, 0xec, 0xc1, 0x34, 0xef, 0x6d, 0xa7, 0x43, 0x3b, 0xf5, 0x80, 0x7a, - 0x8c, 0x34, 0x21, 0xcf, 0xf8, 0x3f, 0x6a, 0x4c, 0x46, 0x2c, 0x93, 0x08, 0xa8, 0x17, 0x8b, 0x0a, - 0x38, 0x2e, 0x4a, 0x78, 0xe3, 0x1b, 0x63, 0x50, 0xd2, 0xe3, 0x74, 0x8c, 0x50, 0xe3, 0x45, 0x98, - 0xb4, 0x68, 0xd3, 0xe4, 0x4f, 0xa3, 0xde, 0x0b, 0x6e, 0x74, 0x56, 0x25, 0x09, 0x43, 0x1e, 0xa9, - 0x26, 
0x53, 0x3c, 0xa5, 0xbe, 0xf4, 0xce, 0x2e, 0x94, 0xc4, 0x3f, 0x6b, 0x61, 0x2d, 0xf1, 0xb0, - 0xf3, 0x7e, 0x2f, 0x44, 0x91, 0xa9, 0x4e, 0xfd, 0x13, 0x23, 0xfc, 0x54, 0x0d, 0x70, 0xfe, 0x38, - 0x35, 0xc0, 0xc6, 0x1a, 0xf0, 0x6d, 0xe3, 0xfa, 0x0a, 0x79, 0x03, 0x8a, 0x4c, 0x99, 0x24, 0x35, - 0x2e, 0x1f, 0xd5, 0xa5, 0x22, 0x8a, 0x7e, 0x74, 0x50, 0x9d, 0x16, 0xc2, 0x21, 0x01, 0x75, 0x13, - 0xe3, 0xab, 0x13, 0x10, 0x73, 0x90, 0x8f, 0x31, 0xc2, 0x56, 0x2a, 0xe6, 0x79, 0x7b, 0xd8, 0x98, - 0x27, 0x0c, 0x24, 0xe4, 0x82, 0x4b, 0x86, 0x39, 0xbc, 0x1f, 0x6d, 0xda, 0xf1, 0xd4, 0xfc, 0xe8, - 0x7e, 0xdc, 0xa0, 0x1d, 0x0f, 0x05, 0x47, 0x9f, 0xb3, 0x4e, 0x0c, 0x3c, 0x67, 0x7d, 0x07, 0xf2, - 0x2d, 0xb3, 0xd7, 0xa2, 0x2a, 0x87, 0xf9, 0xa9, 0xe1, 0xce, 0xe5, 0x38, 0x82, 0x5c, 0x20, 0xe2, - 0x5f, 0x94, 0x98, 0x7c, 0x81, 0xb4, 0xc3, 0x8c, 0xa1, 0xf2, 0xa7, 0x87, 0x5b, 0x20, 0x3a, 0xef, - 0x28, 0x17, 0x88, 0xfe, 0x89, 0x11, 0x3e, 0xdf, 0x88, 0x1b, 0xb2, 0x12, 0x4e, 0x1d, 0xa8, 0x7c, - 0x66, 0xc8, 0xe3, 0x62, 0x81, 0x21, 0xdf, 0x09, 0xf5, 0x03, 0x43, 0x64, 0x63, 0x09, 0xca, 0xb1, - 0xea, 0x59, 0x3e, 0xbe, 0xba, 0xce, 0x2b, 0x36, 0xbe, 0xab, 0x66, 0x60, 0xa2, 0xe0, 0x18, 0x47, - 0x63, 0xa0, 0xdd, 0x9e, 0xf8, 0x99, 0xb1, 0xd9, 0x88, 0x95, 0x26, 0x26, 0x2a, 0x44, 0x5c, 0x07, - 0x15, 0x97, 0x3b, 0xe1, 0x5d, 0xea, 0xb7, 0xf4, 0x76, 0xaa, 0x5e, 0x57, 0xed, 0x84, 0xdf, 0x89, - 0x33, 0x31, 0x29, 0xcb, 0x37, 0xb3, 0xae, 0xe9, 0xd8, 0x4d, 0xca, 0x82, 0xf4, 0xd9, 0xc0, 0x1d, - 0x45, 0x47, 0x2d, 0x41, 0xae, 0xc3, 0x19, 0x46, 0x83, 0xcd, 0x3d, 0x87, 0xfa, 0xba, 0x72, 0x45, - 0x95, 0x32, 0x3d, 0x17, 0xfa, 0x82, 0xf5, 0xb4, 0x00, 0xf6, 0xb7, 0x11, 0x01, 0x8d, 0xac, 0x22, - 0x5a, 0x71, 0x1d, 0xcb, 0xd6, 0x17, 0x07, 0xe2, 0x01, 0x4d, 0x8a, 0x8f, 0x7d, 0x2d, 0x38, 0x4a, - 0xd3, 0xb4, 0x3b, 0x3d, 0x9f, 0x46, 0x28, 0x85, 0x24, 0xca, 0x5a, 0x8a, 0x8f, 0x7d, 0x2d, 0x8c, - 0xff, 0xc8, 0xc1, 0x34, 0xd2, 0xc0, 0xdf, 0xd7, 0x83, 0x52, 0x85, 0x7c, 0x47, 0x14, 0x2d, 0xe5, - 0x44, 0xd1, 0x92, 0x58, 0xb2, 0xb2, 0x46, 
0x49, 0xd2, 0xc9, 0x2a, 0x94, 0x7d, 0xde, 0x42, 0x15, - 0x88, 0xc9, 0x01, 0x37, 0xc2, 0x18, 0x15, 0x23, 0xd6, 0x51, 0xf2, 0x27, 0xc6, 0x9b, 0x11, 0x07, - 0x26, 0x77, 0x64, 0x21, 0xaa, 0xda, 0xea, 0x87, 0x5b, 0x8b, 0xaa, 0x98, 0x55, 0x9c, 0x17, 0x84, - 0x95, 0xad, 0x47, 0xd1, 0xbf, 0x18, 0x2a, 0x31, 0xbe, 0x99, 0x03, 0x88, 0x6a, 0xf9, 0xc9, 0x2e, - 0x14, 0xd9, 0xd5, 0x84, 0x93, 0x35, 0x64, 0x3d, 0x87, 0x02, 0x89, 0x15, 0xd6, 0x29, 0x0a, 0x6a, - 0x05, 0x8f, 0xf3, 0xb0, 0x7e, 0x32, 0x0e, 0xba, 0xd5, 0x53, 0x72, 0xb0, 0x5e, 0xe2, 0x9b, 0x73, - 0x2b, 0x2a, 0xc8, 0xd5, 0x72, 0x28, 0xa8, 0xa8, 0xb8, 0xdc, 0x8b, 0x0e, 0x0f, 0x34, 0xd5, 0xd2, - 0x16, 0x5e, 0x74, 0x78, 0xf6, 0x89, 0x9a, 0x9b, 0xe5, 0xb2, 0xe5, 0x9f, 0x99, 0xcb, 0x56, 0x78, - 0x2a, 0x2e, 0x1b, 0x0f, 0x97, 0x7d, 0xb7, 0x43, 0x97, 0x71, 0x43, 0x85, 0xe5, 0x3a, 0x5c, 0x46, - 0x49, 0xc6, 0x90, 0x4f, 0x5e, 0x83, 0x72, 0x8f, 0xd1, 0xfa, 0xea, 0xad, 0x15, 0x9f, 0x5a, 0x4c, - 0x9d, 0x15, 0xeb, 0x44, 0xcd, 0xdd, 0x88, 0x85, 0x71, 0x39, 0xe3, 0x0f, 0x72, 0x70, 0xba, 0xde, - 0xf0, 0x6d, 0x2f, 0xd0, 0x96, 0x6e, 0x43, 0x54, 0xdf, 0x07, 0x26, 0x8f, 0x7f, 0xd5, 0x52, 0x7c, - 0x7e, 0xc0, 0x31, 0x99, 0x14, 0x4a, 0x14, 0xe7, 0x4b, 0x12, 0x46, 0x10, 0x22, 0xe7, 0x2c, 0x6c, - 0x69, 0x7a, 0x49, 0xd4, 0x05, 0x15, 0x15, 0xd7, 0xf8, 0x56, 0x0e, 0x8a, 0xba, 0xda, 0xe8, 0x05, - 0xc8, 0x0b, 0xfb, 0x9d, 0xae, 0x96, 0x10, 0xd6, 0x1d, 0x25, 0x4f, 0xe4, 0x5f, 0x79, 0x48, 0xdf, - 0x97, 0x7f, 0xe5, 0x44, 0x94, 0x3c, 0xbe, 0xd6, 0xa9, 0x63, 0xa5, 0xf3, 0xaf, 0xd7, 0x1c, 0x0b, - 0x39, 0x5d, 0x14, 0x6d, 0xbb, 0x7e, 0xd7, 0x0c, 0xd2, 0x19, 0xf1, 0x35, 0x41, 0x45, 0xc5, 0x35, - 0xde, 0x82, 0x19, 0x55, 0x16, 0xaa, 0x07, 0xea, 0x89, 0xea, 0xd5, 0x8d, 0xff, 0xce, 0x41, 0x79, - 0x7b, 0xfb, 0xb6, 0x36, 0x6b, 0x08, 0xe7, 0x99, 0xac, 0x03, 0x5d, 0x6e, 0x06, 0xd4, 0x5f, 0x71, - 0xbb, 0x5e, 0x87, 0x6a, 0x2c, 0x55, 0x9c, 0x59, 0xcf, 0x94, 0xc0, 0x01, 0x2d, 0xc9, 0x3a, 0x9c, - 0x8d, 0x73, 0x94, 0xd1, 0x56, 0x05, 0xf2, 0xb2, 0x0a, 0xa1, 0x9f, 0x8d, 0x59, 
0x6d, 0xd2, 0x50, - 0xca, 0x72, 0xab, 0xcb, 0x69, 0x7d, 0x50, 0x8a, 0x8d, 0x59, 0x6d, 0x8c, 0x69, 0x28, 0xc7, 0x6e, - 0x26, 0x1a, 0xdf, 0x7e, 0x0e, 0x74, 0x35, 0xe2, 0x2f, 0x6b, 0x1a, 0x87, 0xca, 0x40, 0x36, 0x74, - 0x86, 0x24, 0x3f, 0x7a, 0x86, 0x44, 0xaf, 0xf8, 0x54, 0x96, 0xa4, 0x15, 0x65, 0x49, 0x0a, 0x27, - 0x90, 0x25, 0xd1, 0xa6, 0xab, 0x2f, 0x53, 0xf2, 0x87, 0x39, 0x98, 0x72, 0x5c, 0x8b, 0x86, 0x06, - 0xb2, 0x32, 0x29, 0x1c, 0xf0, 0xcd, 0x91, 0x06, 0x51, 0x26, 0xcc, 0x14, 0xa2, 0x4c, 0x90, 0xe9, - 0x84, 0x72, 0x9c, 0x85, 0x09, 0xd5, 0x64, 0x0d, 0x8a, 0x66, 0xb3, 0x69, 0x3b, 0x76, 0xb0, 0xaf, - 0xca, 0x2a, 0x2f, 0x66, 0xd9, 0xbe, 0x65, 0x25, 0x23, 0x77, 0xa3, 0xf0, 0x17, 0xea, 0xb6, 0x7c, - 0x3b, 0xd7, 0x37, 0x0a, 0x4a, 0x23, 0x6c, 0xe7, 0xe1, 0x19, 0x4a, 0xcc, 0x11, 0x0c, 0xab, 0x9f, - 0xa3, 0x0b, 0x06, 0x06, 0x14, 0x64, 0xf2, 0x4c, 0xe4, 0x49, 0x8b, 0x32, 0xf0, 0x90, 0x89, 0x35, - 0x54, 0x1c, 0xd2, 0x0a, 0x03, 0xdb, 0xb2, 0x18, 0xdc, 0xda, 0xd0, 0xc1, 0xbe, 0x8e, 0x95, 0xb3, - 0x23, 0x5b, 0x72, 0x33, 0xbe, 0x7d, 0x4c, 0x1d, 0x67, 0xfb, 0x98, 0x1e, 0xb8, 0x75, 0xb4, 0xa0, - 0xc0, 0xc4, 0xe6, 0x24, 0x32, 0x86, 0xe5, 0x2b, 0x2b, 0xc3, 0xb9, 0x44, 0x89, 0xfd, 0x4d, 0x8e, - 0x8e, 0xa4, 0xa1, 0x82, 0x27, 0x2e, 0x14, 0xc3, 0xb4, 0xa6, 0x4a, 0x3a, 0x0e, 0x77, 0x36, 0x9e, - 0x0e, 0x1b, 0xc2, 0xf2, 0x3d, 0x49, 0x45, 0xad, 0x84, 0xbc, 0x03, 0xe3, 0x96, 0xd9, 0x52, 0xe9, - 0xc7, 0xb7, 0x87, 0x2e, 0x2c, 0x0d, 0xd5, 0x88, 0x8b, 0x75, 0xab, 0xcb, 0xd7, 0x91, 0xa3, 0x92, - 0xdd, 0xe8, 0x66, 0xc3, 0xec, 0x08, 0xf7, 0xd5, 0x52, 0xfb, 0x9d, 0x0c, 0xaf, 0xfa, 0xee, 0x46, - 0x5c, 0x83, 0xc9, 0x87, 0x6e, 0xa7, 0xd7, 0x55, 0x79, 0xcb, 0xf2, 0x95, 0xf9, 0xac, 0xd9, 0xbe, - 0x27, 0x44, 0x22, 0x23, 0x20, 0x7f, 0x33, 0x0c, 0xdb, 0x92, 0xaf, 0xe4, 0xe0, 0x34, 0x7f, 0x75, - 0xf4, 0x3a, 0x60, 0x15, 0x32, 0xc2, 0x4a, 0xbd, 0xcb, 0xf8, 0xc6, 0x18, 0xae, 0xb0, 0xf3, 0x4a, - 0xed, 0xe9, 0xf5, 0x84, 0x06, 0x4c, 0x69, 0x24, 0x1e, 0x14, 0x99, 0x6d, 0xd1, 0x86, 0xe9, 0xb3, - 0xca, 0xd9, 0x13, 
0xd3, 0x1e, 0x79, 0xe2, 0x0a, 0x1b, 0xb5, 0x16, 0xf2, 0x7b, 0xe2, 0x8e, 0xa1, - 0xba, 0x1f, 0xac, 0xee, 0x6c, 0xcf, 0x9d, 0xe4, 0x9d, 0xed, 0xb3, 0xf2, 0x82, 0x61, 0x42, 0x03, - 0xa6, 0x55, 0x92, 0x4d, 0x38, 0x27, 0x6f, 0x38, 0xa4, 0xaf, 0xb7, 0x9c, 0x13, 0xd5, 0x0b, 0xcf, - 0x1d, 0x1e, 0x54, 0xcf, 0x2d, 0x67, 0x09, 0x60, 0x76, 0x3b, 0xf2, 0x3e, 0x4c, 0xfb, 0xf1, 0x28, - 0xae, 0x72, 0x7e, 0x84, 0x9a, 0xbc, 0x44, 0x3c, 0x28, 0xf3, 0xe2, 0x09, 0x12, 0x26, 0x75, 0x91, - 0x57, 0xa1, 0xec, 0x29, 0x4b, 0x65, 0xb3, 0x6e, 0xe5, 0x82, 0x78, 0x06, 0xb1, 0xa3, 0x6e, 0x45, - 0x64, 0x8c, 0xcb, 0x90, 0xbb, 0x50, 0x0e, 0xdc, 0x0e, 0xf5, 0xd5, 0xc1, 0x7b, 0x45, 0x4c, 0xfe, - 0x42, 0xd6, 0x4a, 0xde, 0xd6, 0x62, 0x91, 0x7b, 0x1d, 0xd1, 0x18, 0xc6, 0x71, 0xc8, 0xa7, 0x61, - 0x3a, 0xbc, 0xcd, 0xe4, 0x8b, 0x24, 0xd8, 0x73, 0xc9, 0x6c, 0x40, 0x3d, 0xce, 0xc4, 0xa4, 0x2c, - 0x8f, 0xef, 0x3d, 0xdf, 0x76, 0x7d, 0x3b, 0xd8, 0x5f, 0xe9, 0x98, 0x8c, 0x09, 0x80, 0x79, 0x01, - 0xa0, 0xe3, 0xfb, 0xad, 0xb4, 0x00, 0xf6, 0xb7, 0xe1, 0x41, 0x54, 0x48, 0xac, 0x7c, 0x44, 0x38, - 0x70, 0xc2, 0x2c, 0x85, 0x6d, 0x51, 0x73, 0x07, 0x94, 0x94, 0x5f, 0x1c, 0xa6, 0xa4, 0x9c, 0x58, - 0x70, 0xd1, 0xec, 0x05, 0x6e, 0x97, 0x13, 0x92, 0x4d, 0xb6, 0xdd, 0x5d, 0xea, 0x54, 0x2e, 0x89, - 0xbd, 0xea, 0xd2, 0xe1, 0x41, 0xf5, 0xe2, 0xf2, 0x23, 0xe4, 0xf0, 0x91, 0x28, 0xa4, 0x0b, 0x45, - 0xaa, 0xca, 0xe2, 0x2b, 0x1f, 0x1d, 0x61, 0x93, 0x48, 0xd6, 0xd6, 0xcb, 0x01, 0x0a, 0x69, 0xa8, - 0x55, 0x90, 0x6d, 0x28, 0xb7, 0x5d, 0x16, 0x2c, 0x77, 0x6c, 0x93, 0x51, 0x56, 0x79, 0x5e, 0xac, - 0x93, 0xcc, 0xfd, 0xed, 0x46, 0x28, 0x16, 0x2d, 0x93, 0x1b, 0x51, 0x4b, 0x8c, 0xc3, 0x10, 0x2a, - 0x22, 0xca, 0x9e, 0x98, 0x35, 0xd7, 0x09, 0xe8, 0x7b, 0x41, 0x65, 0x41, 0x3c, 0xcb, 0x4b, 0x59, - 0xc8, 0x5b, 0xae, 0x55, 0x4f, 0x4a, 0xcb, 0xb7, 0x3c, 0x45, 0xc4, 0x34, 0x26, 0x79, 0x1d, 0xa6, - 0x3c, 0xd7, 0xaa, 0x7b, 0xb4, 0xb1, 0x65, 0x06, 0x8d, 0x76, 0xa5, 0x9a, 0x3c, 0x67, 0xdf, 0x8a, - 0xf1, 0x30, 0x21, 0xc9, 0xa3, 0x01, 0x9f, 0xb2, 0xde, 
0x4e, 0xd7, 0x0e, 0xb6, 0xa8, 0x63, 0xd9, - 0x4e, 0x6b, 0xcb, 0xb5, 0x58, 0xc5, 0x10, 0x53, 0x28, 0xa2, 0x01, 0xec, 0x67, 0x63, 0x56, 0x1b, - 0xd2, 0x80, 0xc9, 0xae, 0x2c, 0x8d, 0xa8, 0xbc, 0x30, 0x82, 0x5b, 0xa9, 0xca, 0x2b, 0xe4, 0xa6, - 0xa4, 0x7e, 0x60, 0x88, 0x3c, 0xff, 0x16, 0x9c, 0xe9, 0xf3, 0xff, 0x9e, 0xa8, 0x26, 0xe4, 0x2f, - 0x78, 0xb4, 0x16, 0xf3, 0xb8, 0x4f, 0x3a, 0x4e, 0xb9, 0x0e, 0x67, 0xd4, 0x37, 0x62, 0xb8, 0x73, - 0xd0, 0xe9, 0xe9, 0xbb, 0xc9, 0xb1, 0xdc, 0x1d, 0xa6, 0x05, 0xb0, 0xbf, 0x8d, 0xf1, 0x97, 0x39, - 0x98, 0x4e, 0x6c, 0x37, 0x27, 0x1e, 0xbf, 0xaf, 0x01, 0xe9, 0xda, 0xbe, 0xef, 0xfa, 0x72, 0xcf, - 0xbe, 0xc3, 0xdf, 0x3d, 0xa6, 0x6e, 0x27, 0x88, 0xaa, 0xd8, 0x3b, 0x7d, 0x5c, 0xcc, 0x68, 0x61, - 0xfc, 0x7b, 0x0e, 0xa2, 0x63, 0x02, 0x5d, 0x0a, 0x9e, 0x1b, 0x58, 0x0a, 0xfe, 0x0a, 0x14, 0x1f, - 0x30, 0xd7, 0xd9, 0x8a, 0x0a, 0xc6, 0xf5, 0x80, 0xde, 0xac, 0x6f, 0x6e, 0x08, 0x49, 0x2d, 0x21, - 0xa4, 0xbf, 0xb4, 0x66, 0x77, 0x82, 0xfe, 0xb2, 0xea, 0x9b, 0x9f, 0x95, 0x74, 0xd4, 0x12, 0x64, - 0x09, 0x4a, 0xfa, 0x64, 0x4a, 0x05, 0xfe, 0x7a, 0x10, 0xf4, 0xb1, 0x0c, 0x46, 0x32, 0xe4, 0xe5, - 0xe8, 0xfc, 0x25, 0x9f, 0xcc, 0xc4, 0xa4, 0xcf, 0x60, 0x8c, 0xef, 0x8c, 0x41, 0xf1, 0x19, 0x5e, - 0xd5, 0x6e, 0x24, 0xae, 0x6a, 0x9f, 0xc0, 0xbd, 0xde, 0xac, 0x6b, 0xda, 0xbb, 0xa9, 0x6b, 0xda, - 0x2b, 0x23, 0x9e, 0x8b, 0x3d, 0xf2, 0x8a, 0xf6, 0x3f, 0xe7, 0xe0, 0x4c, 0x28, 0x1a, 0xa5, 0x97, - 0x3f, 0x19, 0xab, 0xa7, 0x2c, 0xd5, 0x5e, 0x4c, 0x95, 0x14, 0x9d, 0xeb, 0x6b, 0x10, 0xab, 0x2f, - 0xfa, 0x82, 0xee, 0xbd, 0x5c, 0x47, 0x6b, 0x49, 0xc5, 0x47, 0x07, 0xd5, 0x63, 0x7d, 0x2f, 0x6a, - 0x51, 0x63, 0x27, 0x3b, 0x1c, 0xaf, 0x6a, 0x19, 0x7f, 0x74, 0x55, 0x8b, 0xf1, 0x83, 0x1c, 0x4c, - 0x3d, 0xc3, 0xab, 0xe7, 0x3b, 0xc9, 0xab, 0xe7, 0x6f, 0x8c, 0x34, 0x6d, 0x03, 0xae, 0x9d, 0xff, - 0xcb, 0x79, 0x48, 0x5c, 0xf9, 0x26, 0x0e, 0x94, 0x42, 0x3b, 0x17, 0x9e, 0xa3, 0xbe, 0x31, 0x52, - 0x2c, 0x1f, 0xbd, 0x9b, 0x21, 0x85, 0x61, 0xa4, 0x82, 0x5c, 0x01, 0xa0, 0xdc, 0xc0, 0xcb, 
0x1c, - 0xf5, 0x58, 0xf2, 0x98, 0xf1, 0x9a, 0xe6, 0x60, 0x4c, 0xea, 0xd9, 0xe7, 0x89, 0xb2, 0x3d, 0xab, - 0x89, 0xa7, 0xe2, 0x59, 0x5d, 0x3c, 0x71, 0xcf, 0xea, 0xf9, 0xa7, 0xef, 0x59, 0xc5, 0xe2, 0xc8, - 0xfc, 0x08, 0x71, 0xe4, 0xfb, 0x30, 0x27, 0xff, 0x5d, 0xe9, 0x98, 0x76, 0x57, 0xaf, 0x17, 0x55, - 0x81, 0xfe, 0x72, 0xa6, 0x3f, 0x45, 0x7d, 0x66, 0xb3, 0x80, 0x3a, 0xc1, 0xbd, 0xa8, 0x65, 0x54, - 0x81, 0x77, 0x2f, 0x03, 0x0e, 0x33, 0x95, 0xa4, 0x03, 0x8f, 0xc9, 0x63, 0x04, 0x1e, 0xdf, 0xca, - 0xc1, 0x39, 0x33, 0xeb, 0x2b, 0x38, 0x2a, 0xfd, 0x74, 0x73, 0xa4, 0x30, 0x30, 0x81, 0xa8, 0xc2, - 0xb8, 0x2c, 0x16, 0x66, 0xf7, 0x81, 0xbc, 0x18, 0x65, 0x12, 0x4a, 0x62, 0x51, 0x65, 0xe7, 0x00, - 0xbe, 0x96, 0xce, 0xe0, 0x81, 0x18, 0xed, 0xfa, 0xc8, 0x9b, 0xd1, 0x09, 0x64, 0xf1, 0xca, 0x23, - 0x64, 0xf1, 0x52, 0x51, 0xe1, 0xd4, 0x09, 0x45, 0x85, 0x0e, 0xcc, 0xda, 0x5d, 0xb3, 0x45, 0xb7, - 0x7a, 0x9d, 0x8e, 0x3c, 0xe9, 0x61, 0x95, 0x69, 0x81, 0x9d, 0x79, 0x1f, 0x89, 0x47, 0xe9, 0x9d, - 0xf4, 0x17, 0x0a, 0xf4, 0x99, 0xea, 0x7a, 0x0a, 0x09, 0xfb, 0xb0, 0xf9, 0xb2, 0xe4, 0xd1, 0xc6, - 0x06, 0x0d, 0xf8, 0x68, 0x8b, 0x04, 0x97, 0xfa, 0x4e, 0xd9, 0x8d, 0x88, 0x8c, 0x71, 0x19, 0x72, - 0x0b, 0x4a, 0x96, 0xc3, 0xd4, 0x89, 0xea, 0x8c, 0xb0, 0x52, 0x1f, 0xe3, 0xb6, 0x6d, 0x75, 0xa3, - 0xae, 0xcf, 0x52, 0x2f, 0x66, 0x94, 0x0f, 0x6a, 0x3e, 0x46, 0xed, 0xc9, 0x1d, 0x01, 0xa6, 0x2e, - 0xe7, 0xc9, 0x8c, 0xd4, 0xa5, 0x01, 0x81, 0xcd, 0xea, 0x46, 0x78, 0x97, 0x70, 0x5a, 0xa9, 0x53, - 0x57, 0xee, 0x22, 0x84, 0xd8, 0xb5, 0xef, 0x33, 0x8f, 0xba, 0xf6, 0x4d, 0xee, 0xc2, 0x85, 0x20, - 0xe8, 0x24, 0x8e, 0x29, 0x54, 0x85, 0xa6, 0x28, 0xd7, 0xcd, 0xcb, 0xaf, 0x76, 0x6c, 0x6f, 0xdf, - 0xce, 0x12, 0xc1, 0x41, 0x6d, 0x45, 0xc6, 0x3f, 0xe8, 0xe8, 0xc4, 0xc6, 0xc2, 0x28, 0x19, 0xff, - 0xe8, 0x3c, 0x48, 0x65, 0xfc, 0x23, 0x02, 0xc6, 0xb5, 0x0c, 0x4e, 0xd0, 0x9c, 0x1d, 0x32, 0x41, - 0x13, 0xcf, 0x09, 0xcc, 0x3d, 0x32, 0x27, 0xd0, 0x97, 0xc3, 0x38, 0xf7, 0x04, 0x39, 0x8c, 0x77, - 0x44, 0x69, 0xe8, 0xf5, 0x15, 
0x95, 0xff, 0x19, 0xae, 0x56, 0x45, 0xd4, 0xf8, 0xc8, 0x83, 0x7f, - 0xf1, 0x2f, 0x4a, 0x4c, 0xb2, 0x05, 0x73, 0x9e, 0x6b, 0xf5, 0xa5, 0x40, 0x44, 0xc2, 0x27, 0x56, - 0x42, 0xbd, 0x95, 0x21, 0x83, 0x99, 0x2d, 0x85, 0x01, 0x8f, 0xe8, 0x95, 0x8a, 0x18, 0x18, 0x69, - 0xc0, 0x23, 0x32, 0xc6, 0x65, 0xd2, 0x19, 0x81, 0xe7, 0x9e, 0x5a, 0x46, 0x60, 0xfe, 0x19, 0x64, - 0x04, 0x3e, 0x72, 0xec, 0x8c, 0xc0, 0x6f, 0xc3, 0x59, 0xcf, 0xb5, 0x56, 0x6d, 0xe6, 0xf7, 0xc4, - 0x37, 0x12, 0x6b, 0x3d, 0xab, 0x45, 0x03, 0x91, 0x52, 0x28, 0x5f, 0xb9, 0x12, 0xef, 0xa4, 0xfc, - 0x54, 0xeb, 0xa2, 0xfa, 0x54, 0xab, 0x78, 0xc9, 0x53, 0xad, 0x44, 0xd8, 0x21, 0xb2, 0x08, 0x19, - 0x4c, 0xcc, 0xd2, 0x13, 0xcf, 0x22, 0x5c, 0x7a, 0x5a, 0x59, 0x04, 0xf2, 0x36, 0x14, 0x59, 0xbb, - 0x17, 0x58, 0xee, 0x9e, 0x23, 0x72, 0x4b, 0x25, 0xfd, 0xcd, 0xa3, 0x62, 0x5d, 0xd1, 0x8f, 0x0e, - 0xaa, 0xb3, 0xe1, 0xff, 0xb1, 0x2a, 0x34, 0x45, 0x19, 0x3d, 0x0f, 0xf1, 0x5f, 0x53, 0x70, 0x3a, - 0xf5, 0x41, 0x1b, 0x5d, 0xa9, 0x9f, 0x3b, 0x6e, 0xa5, 0x7e, 0xa2, 0x94, 0x7e, 0xec, 0xa9, 0x96, - 0xd2, 0x8f, 0x9f, 0x78, 0x29, 0x7d, 0x2c, 0xb8, 0x9a, 0x78, 0xcc, 0x95, 0x81, 0x65, 0x98, 0x69, - 0xb8, 0x5d, 0x4f, 0x5c, 0xb4, 0x56, 0xa5, 0xd4, 0x32, 0x58, 0xd7, 0x15, 0x17, 0x2b, 0x49, 0x36, - 0xa6, 0xe5, 0xc9, 0x6f, 0x41, 0xde, 0x11, 0x0d, 0x0b, 0x23, 0x5c, 0xbd, 0x4a, 0x4e, 0x98, 0xf0, - 0x61, 0xd4, 0xed, 0xa7, 0xf0, 0x20, 0x22, 0x2f, 0x68, 0x47, 0xe1, 0x3f, 0x28, 0x95, 0x92, 0x77, - 0xa1, 0xe2, 0x36, 0x9b, 0x1d, 0xd7, 0xb4, 0xa2, 0x72, 0xff, 0x7b, 0xdc, 0x3b, 0x55, 0x47, 0x7b, - 0xa5, 0xda, 0x25, 0x05, 0x50, 0xd9, 0x1c, 0x20, 0x87, 0x03, 0x11, 0xb8, 0xab, 0x39, 0x93, 0xbc, - 0x86, 0xc2, 0x2a, 0x25, 0xf1, 0x98, 0xbf, 0x71, 0x12, 0x8f, 0x99, 0xbc, 0xf3, 0xa2, 0x1e, 0x38, - 0xaa, 0x75, 0x49, 0x72, 0x31, 0xdd, 0x13, 0xe2, 0xc3, 0x79, 0x2f, 0xcb, 0x11, 0x67, 0xea, 0x38, - 0xf8, 0x51, 0xe1, 0xc0, 0x82, 0xd2, 0x72, 0x3e, 0xd3, 0x95, 0x67, 0x38, 0x00, 0x39, 0x7e, 0x11, - 0xa0, 0xf8, 0xd4, 0x2e, 0x02, 0x7c, 0x59, 0x7c, 0x16, 0x47, 0x26, 
0x0e, 0x42, 0x3f, 0x6f, 0x6d, - 0xa4, 0x01, 0xd7, 0x79, 0x88, 0xe8, 0xe5, 0xd1, 0x24, 0x86, 0x31, 0x6d, 0xe4, 0x67, 0x99, 0xf7, - 0x50, 0xa4, 0x1f, 0xfb, 0xf9, 0x93, 0x98, 0xf4, 0x9f, 0xb7, 0xbb, 0x28, 0xf3, 0xfb, 0xf2, 0xf2, - 0xdb, 0xc0, 0x6b, 0x80, 0x77, 0x93, 0x17, 0x81, 0xdf, 0x1a, 0xf1, 0x32, 0x4e, 0xfc, 0x0a, 0xe2, - 0xef, 0xe6, 0x60, 0x2e, 0xeb, 0x25, 0xc8, 0xe8, 0x45, 0x3d, 0xd9, 0x8b, 0xd1, 0xd2, 0x23, 0xf1, - 0x3e, 0x9c, 0xcc, 0x95, 0x90, 0xaf, 0x17, 0x62, 0x29, 0x9d, 0x80, 0x7a, 0xbf, 0x2c, 0xd3, 0x19, - 0xaa, 0x4c, 0x27, 0xf1, 0x11, 0xb1, 0xfc, 0x33, 0xfc, 0x88, 0x58, 0x61, 0x88, 0x8f, 0x88, 0x4d, - 0x3e, 0xcb, 0x8f, 0x88, 0x15, 0x8f, 0xf9, 0x11, 0xb1, 0xd2, 0xcf, 0xcf, 0x47, 0xc4, 0x3e, 0xcc, - 0xc1, 0x6c, 0xfa, 0x32, 0xe6, 0x33, 0x48, 0xef, 0xef, 0x26, 0xd2, 0xfb, 0xeb, 0x23, 0xd9, 0x6c, - 0x7d, 0x01, 0x74, 0x40, 0x9a, 0xdf, 0xf8, 0x71, 0x0e, 0xfa, 0x2e, 0x9c, 0x3e, 0x83, 0x2c, 0xf5, - 0x83, 0x64, 0x96, 0xfa, 0xda, 0x89, 0x3c, 0xe4, 0x80, 0x6c, 0xf5, 0x9f, 0x66, 0x3c, 0xa2, 0xc8, - 0x5a, 0xbf, 0xff, 0xb4, 0xbe, 0x93, 0x3a, 0x97, 0xf5, 0x9d, 0xd4, 0xe4, 0x77, 0x51, 0x6b, 0x8b, - 0xdf, 0xfb, 0x70, 0xe1, 0xd4, 0x0f, 0x3e, 0x5c, 0x38, 0xf5, 0xc3, 0x0f, 0x17, 0x4e, 0x7d, 0x70, - 0xb8, 0x90, 0xfb, 0xde, 0xe1, 0x42, 0xee, 0x07, 0x87, 0x0b, 0xb9, 0x1f, 0x1e, 0x2e, 0xe4, 0x7e, - 0x7c, 0xb8, 0x90, 0xfb, 0x93, 0x7f, 0x5b, 0x38, 0xf5, 0xf9, 0x62, 0xa8, 0xe0, 0x7f, 0x03, 0x00, - 0x00, 0xff, 0xff, 0x8c, 0x99, 0xd1, 0x6c, 0xd8, 0x63, 0x00, 0x00, + // 6044 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7c, 0x5b, 0x6c, 0x1c, 0xd9, + 0x71, 0xa8, 0x86, 0xe4, 0x0c, 0x67, 0x6a, 0x48, 0x91, 0x3a, 0xa2, 0xa4, 0x59, 0xae, 0x96, 0x23, + 0xf7, 0xde, 0xdd, 0xab, 0xbd, 0x77, 0x4d, 0x7a, 0xb5, 0xde, 0x7b, 0xd7, 0x8f, 0x7d, 0x70, 0x48, + 0x51, 0xa2, 0x1e, 0x24, 0x5d, 0x43, 0x49, 0xb1, 0x77, 0x61, 0xa7, 0x39, 0x7d, 0x66, 0xa6, 0xc5, + 0x99, 0xee, 0xde, 0x3e, 0x3d, 0xe2, 0xd2, 0x9b, 0x20, 0x1b, 0x23, 0x81, 0xf3, 0x80, 
0x81, 0x18, + 0x01, 0x0c, 0x03, 0x46, 0x80, 0xc0, 0x3f, 0xf9, 0x49, 0x3e, 0x93, 0x4f, 0x7f, 0x18, 0xf9, 0x30, + 0xfc, 0x13, 0x27, 0x3f, 0x71, 0x82, 0x80, 0xf6, 0x32, 0x48, 0x60, 0xc0, 0x41, 0xfc, 0x91, 0x8f, + 0x00, 0x44, 0x3e, 0x82, 0xf3, 0xe8, 0xd3, 0x8f, 0xe9, 0x91, 0xa8, 0x19, 0x4a, 0x71, 0x62, 0x7f, + 0x91, 0x53, 0x55, 0xa7, 0xea, 0x3c, 0xea, 0xd4, 0xa9, 0xaa, 0x53, 0xa7, 0x61, 0xa5, 0x65, 0x07, + 0xed, 0xde, 0xce, 0x62, 0xc3, 0xed, 0x2e, 0x99, 0x7e, 0xcb, 0xf5, 0x7c, 0xf7, 0xbe, 0xf8, 0x67, + 0xc9, 0xdb, 0x6d, 0x2d, 0x99, 0x9e, 0xcd, 0x96, 0xf6, 0x5c, 0x7f, 0xb7, 0xd9, 0x71, 0xf7, 0x96, + 0x1e, 0xbc, 0x62, 0x76, 0xbc, 0xb6, 0xf9, 0xca, 0x52, 0x8b, 0x3a, 0xd4, 0x37, 0x03, 0x6a, 0x2d, + 0x7a, 0xbe, 0x1b, 0xb8, 0xe4, 0xd5, 0x88, 0xc9, 0x62, 0xc8, 0x44, 0xfc, 0xb3, 0xe8, 0xed, 0xb6, + 0x16, 0x39, 0x93, 0xc5, 0x90, 0xc9, 0x62, 0xc8, 0x64, 0xfe, 0xe3, 0x31, 0xc9, 0x2d, 0x97, 0x0b, + 0xe4, 0xbc, 0x76, 0x7a, 0x4d, 0xf1, 0x4b, 0xfc, 0x10, 0xff, 0x49, 0x19, 0xf3, 0xc6, 0xee, 0xeb, + 0x6c, 0xd1, 0x76, 0x79, 0x97, 0x96, 0x1a, 0xae, 0x4f, 0x97, 0x1e, 0xf4, 0xf5, 0x63, 0xfe, 0xa5, + 0x18, 0x8d, 0xe7, 0x76, 0xec, 0xc6, 0xfe, 0xd2, 0x83, 0x57, 0x76, 0x68, 0xd0, 0xdf, 0xe5, 0xf9, + 0x4f, 0x46, 0xa4, 0x5d, 0xb3, 0xd1, 0xb6, 0x1d, 0xea, 0xef, 0x47, 0x43, 0xee, 0xd2, 0xc0, 0xcc, + 0x12, 0xb0, 0x34, 0xa8, 0x95, 0xdf, 0x73, 0x02, 0xbb, 0x4b, 0xfb, 0x1a, 0xfc, 0xbf, 0x47, 0x35, + 0x60, 0x8d, 0x36, 0xed, 0x9a, 0xe9, 0x76, 0xc6, 0x5f, 0xe5, 0x60, 0x66, 0xd9, 0x6f, 0xb4, 0xed, + 0x07, 0xb4, 0x1e, 0x70, 0x44, 0x6b, 0x9f, 0xbc, 0x03, 0xe3, 0x81, 0xe9, 0x57, 0x72, 0x97, 0x72, + 0x97, 0xcb, 0x57, 0xde, 0x5e, 0x1c, 0x62, 0xce, 0x17, 0xb7, 0x4d, 0x3f, 0x64, 0x57, 0x9b, 0x3c, + 0x3c, 0xa8, 0x8e, 0x6f, 0x9b, 0x3e, 0x72, 0xae, 0xe4, 0x4b, 0x30, 0xe1, 0xb8, 0x0e, 0xad, 0x8c, + 0x09, 0xee, 0xcb, 0x43, 0x71, 0xdf, 0x70, 0x1d, 0xdd, 0xdb, 0x5a, 0xf1, 0xf0, 0xa0, 0x3a, 0xc1, + 0x21, 0x28, 0x18, 0x1b, 0x3f, 0xcb, 0x41, 0x69, 0xd9, 0x6f, 0xf5, 0xba, 0xd4, 0x09, 0x18, 0xf1, + 0x01, 0x3c, 0xd3, 0x37, 
0xbb, 0x34, 0xa0, 0x3e, 0xab, 0xe4, 0x2e, 0x8d, 0x5f, 0x2e, 0x5f, 0x79, + 0x73, 0x28, 0xa1, 0x5b, 0x21, 0x9b, 0x1a, 0xf9, 0xde, 0x41, 0xf5, 0xd4, 0xe1, 0x41, 0x15, 0x34, + 0x88, 0x61, 0x4c, 0x0a, 0x71, 0xa0, 0x64, 0xfa, 0x81, 0xdd, 0x34, 0x1b, 0x01, 0xab, 0x8c, 0x09, + 0x91, 0x6f, 0x0c, 0x25, 0x72, 0x59, 0x71, 0xa9, 0x9d, 0x51, 0x12, 0x4b, 0x21, 0x84, 0x61, 0x24, + 0xc2, 0xf8, 0xe9, 0x38, 0x14, 0x43, 0x04, 0xb9, 0x04, 0x13, 0x8e, 0xd9, 0xa5, 0x62, 0xf5, 0x4a, + 0xb5, 0x29, 0xd5, 0x70, 0x62, 0xc3, 0xec, 0xf2, 0x09, 0x32, 0xbb, 0x94, 0x53, 0x78, 0x66, 0xd0, + 0x16, 0x2b, 0x10, 0xa3, 0xd8, 0x32, 0x83, 0x36, 0x0a, 0x0c, 0xb9, 0x08, 0x13, 0x5d, 0xd7, 0xa2, + 0x95, 0xf1, 0x4b, 0xb9, 0xcb, 0x79, 0x39, 0xc1, 0xb7, 0x5d, 0x8b, 0xa2, 0x80, 0xf2, 0xf6, 0x4d, + 0xdf, 0xed, 0x56, 0x26, 0x92, 0xed, 0xd7, 0x7c, 0xb7, 0x8b, 0x02, 0x43, 0x7e, 0x3f, 0x07, 0xb3, + 0x61, 0xf7, 0x6e, 0xb9, 0x0d, 0x33, 0xb0, 0x5d, 0xa7, 0x92, 0x17, 0x0b, 0x7e, 0x75, 0xa4, 0x89, + 0x08, 0x99, 0xd5, 0x2a, 0x4a, 0xea, 0x6c, 0x1a, 0x83, 0x7d, 0x82, 0xc9, 0x15, 0x80, 0x56, 0xc7, + 0xdd, 0x31, 0x3b, 0x7c, 0x0e, 0x2a, 0x05, 0xd1, 0x6b, 0xbd, 0x84, 0xd7, 0x34, 0x06, 0x63, 0x54, + 0x64, 0x17, 0x26, 0x4d, 0xb9, 0x2b, 0x2a, 0x93, 0xa2, 0xdf, 0xab, 0x43, 0xf6, 0x3b, 0xb1, 0xb3, + 0x6a, 0xe5, 0xc3, 0x83, 0xea, 0xa4, 0x02, 0x62, 0x28, 0x81, 0xbc, 0x0c, 0x45, 0xd7, 0xe3, 0x5d, + 0x35, 0x3b, 0x95, 0xe2, 0xa5, 0xdc, 0xe5, 0x62, 0x6d, 0x56, 0x75, 0xaf, 0xb8, 0xa9, 0xe0, 0xa8, + 0x29, 0x8c, 0xbf, 0x2e, 0x40, 0xdf, 0xa8, 0xc9, 0x2b, 0x50, 0x56, 0xdc, 0x6e, 0xb9, 0x2d, 0x26, + 0x16, 0xbf, 0x58, 0x9b, 0x39, 0x3c, 0xa8, 0x96, 0x97, 0x23, 0x30, 0xc6, 0x69, 0xc8, 0x3d, 0x18, + 0x63, 0xaf, 0xaa, 0x6d, 0xf8, 0xd6, 0x50, 0xa3, 0xab, 0xbf, 0xaa, 0x15, 0xb4, 0x70, 0x78, 0x50, + 0x1d, 0xab, 0xbf, 0x8a, 0x63, 0xec, 0x55, 0x6e, 0x3e, 0x5a, 0x76, 0x20, 0x94, 0x67, 0x58, 0xf3, + 0x71, 0xcd, 0x0e, 0x34, 0x6b, 0x61, 0x3e, 0xae, 0xd9, 0x01, 0x72, 0xae, 0xdc, 0x7c, 0xb4, 0x83, + 0xc0, 0x13, 0xca, 0x37, 0xac, 0xf9, 0xb8, 0xbe, 0xbd, 0xbd, 
0xa5, 0xd9, 0x0b, 0xed, 0xe6, 0x10, + 0x14, 0x8c, 0xc9, 0x07, 0x7c, 0x26, 0x25, 0xce, 0xf5, 0xf7, 0x95, 0xd6, 0x5e, 0x1f, 0x49, 0x6b, + 0x5d, 0x7f, 0x5f, 0x8b, 0x53, 0x6b, 0xa2, 0x11, 0x18, 0x97, 0x26, 0x46, 0x67, 0x35, 0x99, 0x50, + 0xd2, 0xa1, 0x47, 0xb7, 0xba, 0x56, 0x4f, 0x8d, 0x6e, 0x75, 0xad, 0x8e, 0x82, 0x31, 0x5f, 0x1b, + 0xdf, 0xdc, 0x53, 0x3a, 0x3d, 0xdc, 0xda, 0xa0, 0xb9, 0x97, 0x5c, 0x1b, 0x34, 0xf7, 0x90, 0x73, + 0xe5, 0xcc, 0x5d, 0xc6, 0x84, 0x0a, 0x0f, 0xcb, 0x7c, 0xb3, 0x5e, 0x4f, 0x32, 0xdf, 0xac, 0xd7, + 0x91, 0x73, 0x15, 0x5a, 0xd5, 0x60, 0x95, 0xd2, 0x28, 0x5a, 0xb5, 0x92, 0x62, 0x7e, 0x6d, 0xa5, + 0x8e, 0x9c, 0xab, 0xd1, 0x82, 0x73, 0x21, 0x06, 0xa9, 0xe7, 0x32, 0x5b, 0x2c, 0x0d, 0x6d, 0x92, + 0x25, 0x28, 0x35, 0x5c, 0xa7, 0x69, 0xb7, 0x6e, 0x9b, 0x9e, 0x32, 0xa9, 0xda, 0x16, 0xaf, 0x84, + 0x08, 0x8c, 0x68, 0xc8, 0x73, 0x30, 0xbe, 0x4b, 0xf7, 0x95, 0x6d, 0x2d, 0x2b, 0xd2, 0xf1, 0x9b, + 0x74, 0x1f, 0x39, 0xdc, 0xf8, 0x4e, 0x0e, 0xce, 0x66, 0xa8, 0x05, 0x6f, 0xd6, 0xf3, 0x3b, 0x4a, + 0x82, 0x6e, 0x76, 0x07, 0x6f, 0x21, 0x87, 0x93, 0xaf, 0xe6, 0x60, 0x26, 0xa6, 0x27, 0xcb, 0x3d, + 0x65, 0xbe, 0x87, 0xb7, 0x4b, 0x09, 0x5e, 0xb5, 0x0b, 0x4a, 0xe2, 0x4c, 0x0a, 0x81, 0x69, 0xa9, + 0xc6, 0xdf, 0x0a, 0x7f, 0x21, 0x01, 0x23, 0x26, 0x9c, 0xee, 0x31, 0xea, 0xf3, 0xc3, 0xa5, 0x4e, + 0x1b, 0x3e, 0x0d, 0x94, 0xeb, 0xf0, 0xc2, 0xa2, 0x74, 0x4a, 0x78, 0x2f, 0x16, 0xb9, 0x2b, 0xb5, + 0xf8, 0xe0, 0x95, 0x45, 0x49, 0x71, 0x93, 0xee, 0xd7, 0x69, 0x87, 0x72, 0x1e, 0x35, 0x72, 0x78, + 0x50, 0x3d, 0x7d, 0x27, 0xc1, 0x00, 0x53, 0x0c, 0xb9, 0x08, 0xcf, 0x64, 0x6c, 0xcf, 0xf5, 0x2d, + 0x25, 0x62, 0xec, 0xb1, 0x45, 0x6c, 0x25, 0x18, 0x60, 0x8a, 0xa1, 0xf1, 0x8d, 0x1c, 0x4c, 0xd6, + 0xcc, 0xc6, 0xae, 0xdb, 0x6c, 0x72, 0x8b, 0x6c, 0xf5, 0x7c, 0x79, 0x6e, 0xc9, 0x35, 0xd1, 0x16, + 0x79, 0x55, 0xc1, 0x51, 0x53, 0x90, 0x17, 0xa1, 0x20, 0xa7, 0x43, 0x74, 0x2a, 0x5f, 0x3b, 0xad, + 0x68, 0x0b, 0x6b, 0x02, 0x8a, 0x0a, 0x4b, 0x5e, 0x83, 0x72, 0xd7, 0x7c, 0x3f, 0x64, 0x20, 0x0c, + 
0x64, 0xa9, 0x76, 0x56, 0x11, 0x97, 0x6f, 0x47, 0x28, 0x8c, 0xd3, 0x19, 0x3f, 0xc9, 0xc1, 0x85, + 0x95, 0x4e, 0x8f, 0x05, 0xd4, 0xbf, 0xa7, 0x56, 0x72, 0x9b, 0x76, 0xbd, 0x8e, 0x19, 0x50, 0xf2, + 0xab, 0x50, 0xe4, 0x2e, 0xa4, 0x65, 0x06, 0xa6, 0x9a, 0xf4, 0x4f, 0xc4, 0x66, 0x44, 0x7b, 0x82, + 0x91, 0x2e, 0x70, 0x6a, 0x3e, 0x47, 0x9b, 0x3b, 0xf7, 0x69, 0x23, 0xb8, 0x4d, 0x03, 0x33, 0x3a, + 0x0b, 0x23, 0x18, 0x6a, 0xae, 0x64, 0x17, 0x26, 0x98, 0x47, 0x1b, 0x6a, 0xbe, 0xd7, 0x87, 0x52, + 0xb7, 0x74, 0xb7, 0xeb, 0x1e, 0x6d, 0x44, 0x8e, 0x03, 0xff, 0x85, 0x42, 0x88, 0xf1, 0xaf, 0x39, + 0x78, 0x76, 0xc0, 0x50, 0x6f, 0xd9, 0x2c, 0x20, 0xef, 0xf6, 0x0d, 0x77, 0xf1, 0x78, 0xc3, 0xe5, + 0xad, 0xc5, 0x60, 0xf5, 0x3a, 0x86, 0x90, 0xd8, 0x50, 0xdf, 0x83, 0xbc, 0x1d, 0xd0, 0x6e, 0xe8, + 0xb3, 0xdd, 0x1a, 0x6a, 0xac, 0x03, 0xba, 0x5f, 0x9b, 0x56, 0x82, 0xf3, 0xeb, 0x5c, 0x04, 0x4a, + 0x49, 0xc6, 0xe7, 0x01, 0x56, 0x5c, 0x27, 0xb0, 0x9d, 0x1e, 0xdd, 0x74, 0xc8, 0xf3, 0x90, 0xa7, + 0xbe, 0xef, 0xfa, 0xea, 0xfc, 0xd6, 0x4d, 0xae, 0x72, 0x20, 0x4a, 0x9c, 0xd4, 0x36, 0xbb, 0x43, + 0x2d, 0xb1, 0x24, 0xc5, 0xb8, 0xb6, 0x71, 0x28, 0x2a, 0xac, 0xb1, 0x08, 0x93, 0x2b, 0x6e, 0xcf, + 0x09, 0xa8, 0xcf, 0xf9, 0x3e, 0x30, 0x3b, 0xbd, 0xd0, 0x29, 0xd4, 0x7c, 0xef, 0x72, 0x20, 0x4a, + 0x9c, 0xf1, 0xfd, 0x31, 0x98, 0x5a, 0xf1, 0x5d, 0x27, 0xec, 0xf9, 0x53, 0xd0, 0xad, 0x56, 0x42, + 0xb7, 0x86, 0x73, 0x0d, 0xe3, 0x5d, 0x1e, 0xa4, 0x57, 0xc4, 0x85, 0x02, 0x0b, 0xcc, 0xa0, 0xc7, + 0x94, 0x57, 0x72, 0x6d, 0x74, 0x51, 0x82, 0x5d, 0x34, 0xf9, 0xf2, 0x37, 0x2a, 0x31, 0xc6, 0x0f, + 0x73, 0x30, 0x1b, 0x27, 0x7f, 0x0a, 0xda, 0xdb, 0x4c, 0x6a, 0xef, 0xf2, 0xc8, 0x43, 0x1c, 0xa0, + 0xb2, 0xff, 0x91, 0x4f, 0x0e, 0x8d, 0x4f, 0x33, 0xf7, 0xf8, 0xa7, 0xf6, 0x62, 0x00, 0x35, 0xbe, + 0xe5, 0x91, 0xcc, 0x85, 0x58, 0xce, 0xff, 0xa5, 0x3a, 0x31, 0x15, 0x87, 0x1e, 0xa5, 0x7e, 0x63, + 0x42, 0x38, 0x37, 0xdf, 0x3c, 0xdc, 0xb5, 0x7a, 0x1d, 0xaa, 0x4e, 0x62, 0x3d, 0x71, 0x75, 0x05, + 0x47, 0x4d, 0x41, 0xde, 0x85, 0x33, 
0x0d, 0xd7, 0x69, 0xf4, 0x7c, 0x9f, 0x3a, 0x8d, 0xfd, 0x2d, + 0x11, 0xce, 0x2b, 0xe3, 0xbc, 0xa8, 0x9a, 0x9d, 0x59, 0x49, 0x13, 0x1c, 0x65, 0x01, 0xb1, 0x9f, + 0x11, 0x79, 0x09, 0x26, 0x59, 0x8f, 0x79, 0xd4, 0xb1, 0x84, 0xcf, 0x5a, 0xac, 0xcd, 0x28, 0x9e, + 0x93, 0x75, 0x09, 0xc6, 0x10, 0x4f, 0xee, 0xc0, 0x05, 0x16, 0xf0, 0x03, 0xd7, 0x69, 0xad, 0x52, + 0xd3, 0xea, 0xd8, 0x0e, 0x3f, 0xfe, 0x5c, 0xc7, 0x62, 0xc2, 0x0d, 0x1d, 0xaf, 0x3d, 0x7b, 0x78, + 0x50, 0xbd, 0x50, 0xcf, 0x26, 0xc1, 0x41, 0x6d, 0xc9, 0x17, 0x61, 0x9e, 0xf5, 0x1a, 0x0d, 0xca, + 0x58, 0xb3, 0xd7, 0xb9, 0xe1, 0xee, 0xb0, 0xeb, 0x36, 0xe3, 0x67, 0xf7, 0x2d, 0xbb, 0x6b, 0x07, + 0xc2, 0xd5, 0xcc, 0xd7, 0x16, 0x0e, 0x0f, 0xaa, 0xf3, 0xf5, 0x81, 0x54, 0xf8, 0x10, 0x0e, 0x04, + 0xe1, 0xbc, 0x34, 0x39, 0x7d, 0xbc, 0x27, 0x05, 0xef, 0xf9, 0xc3, 0x83, 0xea, 0xf9, 0xb5, 0x4c, + 0x0a, 0x1c, 0xd0, 0x92, 0xaf, 0x60, 0x60, 0x77, 0xe9, 0x97, 0x5d, 0x87, 0x0a, 0x7f, 0x32, 0xb6, + 0x82, 0xdb, 0x0a, 0x8e, 0x9a, 0x82, 0xdc, 0x8f, 0x94, 0x8f, 0x6f, 0x0a, 0xe5, 0x24, 0x3e, 0xbe, + 0xb5, 0x9a, 0xe3, 0x11, 0xe5, 0xbd, 0x18, 0x27, 0xbe, 0xb1, 0x30, 0xc1, 0xdb, 0xf8, 0x9b, 0x1c, + 0x90, 0x7e, 0x43, 0x40, 0x6e, 0x42, 0xc1, 0x6c, 0x04, 0x3c, 0x5e, 0x94, 0x39, 0x86, 0xe7, 0xb3, + 0x1c, 0x13, 0x29, 0x0a, 0x69, 0x93, 0x72, 0x0d, 0xa1, 0x91, 0xf5, 0x58, 0x16, 0x4d, 0x51, 0xb1, + 0x20, 0x2e, 0x9c, 0xe9, 0x98, 0x2c, 0x08, 0x75, 0xd5, 0xe2, 0x43, 0x56, 0x46, 0xf2, 0xff, 0x1c, + 0x6f, 0x50, 0xbc, 0x45, 0xed, 0x1c, 0xd7, 0xdc, 0x5b, 0x69, 0x46, 0xd8, 0xcf, 0xdb, 0xf8, 0x6e, + 0x01, 0x26, 0x57, 0x97, 0xaf, 0x6d, 0x9b, 0x6c, 0xf7, 0x18, 0x09, 0x04, 0xbe, 0x38, 0xea, 0x58, + 0x4b, 0x6f, 0xaf, 0xf0, 0xb8, 0x43, 0x4d, 0x41, 0x5c, 0x28, 0x99, 0x61, 0x3a, 0x46, 0x99, 0xdf, + 0x37, 0x87, 0x74, 0x5a, 0x15, 0x97, 0x78, 0x3a, 0x44, 0x81, 0x30, 0x92, 0x41, 0x18, 0x94, 0x43, + 0xe1, 0x48, 0x9b, 0x2a, 0x52, 0x1c, 0x32, 0x8d, 0x15, 0xf1, 0x91, 0x91, 0x5b, 0x0c, 0x80, 0x71, + 0x29, 0xe4, 0x93, 0x30, 0x65, 0x51, 0xbe, 0x8b, 0xa9, 0xd3, 0xb0, 0x29, 
0xdf, 0xb0, 0xe3, 0x7c, + 0x5e, 0xb8, 0xe1, 0x5a, 0x8d, 0xc1, 0x31, 0x41, 0x45, 0xee, 0x43, 0x69, 0xcf, 0x0e, 0xda, 0xc2, + 0xbe, 0x56, 0x0a, 0x42, 0x71, 0x3e, 0x35, 0x54, 0x47, 0x39, 0x87, 0x68, 0x5a, 0xee, 0x85, 0x3c, + 0x31, 0x62, 0xcf, 0x43, 0x19, 0xfe, 0x43, 0xe4, 0xac, 0xc4, 0xce, 0x2c, 0x25, 0x1b, 0x08, 0x04, + 0x46, 0x34, 0x84, 0xc1, 0x14, 0xff, 0x51, 0xa7, 0xef, 0xf5, 0xb8, 0xb6, 0xaa, 0xb8, 0x6e, 0xb8, + 0x4c, 0x56, 0xc8, 0x44, 0xce, 0xc8, 0xbd, 0x18, 0x5b, 0x4c, 0x08, 0xe1, 0xda, 0xb7, 0xd7, 0xa6, + 0x8e, 0xd8, 0xc2, 0x31, 0xed, 0xbb, 0xd7, 0xa6, 0x0e, 0x0a, 0x0c, 0x71, 0x01, 0x1a, 0xda, 0x65, + 0xaa, 0xc0, 0x08, 0xf9, 0x8b, 0xc8, 0xf3, 0xaa, 0x9d, 0xe6, 0x3e, 0x4a, 0xf4, 0x1b, 0x63, 0x22, + 0xb8, 0xc3, 0xe5, 0x3a, 0x57, 0xdf, 0xb7, 0x83, 0x4a, 0x59, 0x74, 0x4a, 0xef, 0xda, 0x4d, 0x01, + 0x45, 0x85, 0x35, 0xbe, 0x9b, 0x83, 0x32, 0xdf, 0x44, 0xa1, 0xe2, 0xbf, 0x08, 0x85, 0xc0, 0xf4, + 0x5b, 0x2a, 0x1c, 0x8a, 0xb5, 0xdb, 0x16, 0x50, 0x54, 0x58, 0x62, 0x42, 0x3e, 0x30, 0xd9, 0x6e, + 0x78, 0x70, 0x7f, 0x76, 0xa8, 0xb1, 0xa8, 0xdd, 0x1b, 0x9d, 0xd9, 0xfc, 0x17, 0x43, 0xc9, 0x99, + 0x5c, 0x86, 0x22, 0x37, 0xb4, 0x6b, 0x26, 0x93, 0x79, 0x99, 0x62, 0x6d, 0x8a, 0xef, 0xd6, 0x35, + 0x05, 0x43, 0x8d, 0x35, 0xde, 0x85, 0xd3, 0x57, 0xdf, 0xa7, 0x8d, 0x5e, 0xe0, 0xfa, 0x32, 0xbe, + 0x25, 0x37, 0x80, 0x30, 0xea, 0x3f, 0xb0, 0x1b, 0x74, 0xb9, 0xd1, 0xe0, 0x0e, 0xe5, 0x46, 0x64, + 0x1d, 0xe6, 0x95, 0x34, 0x52, 0xef, 0xa3, 0xc0, 0x8c, 0x56, 0xc6, 0x1f, 0xe5, 0xa0, 0x1c, 0x8b, + 0xc2, 0xb9, 0x6d, 0x68, 0xad, 0xd4, 0x6b, 0xbd, 0xc6, 0xae, 0x0e, 0x1a, 0xdf, 0x1c, 0x36, 0xb4, + 0x97, 0x5c, 0x22, 0x9d, 0xd6, 0x20, 0x8c, 0x64, 0x3c, 0x2a, 0x3c, 0xff, 0x8b, 0x1c, 0x44, 0xed, + 0xf8, 0x02, 0xee, 0x44, 0x5d, 0x8b, 0x2d, 0xa0, 0xe2, 0xab, 0xb0, 0xe4, 0xc3, 0x1c, 0x5c, 0x48, + 0x0e, 0x56, 0xc4, 0x9e, 0x8f, 0x1f, 0xa6, 0x56, 0x95, 0x80, 0x0b, 0xf5, 0x6c, 0x6e, 0x38, 0x48, + 0x8c, 0x71, 0x17, 0xf2, 0xd7, 0xcc, 0x5e, 0x8b, 0x1e, 0xcb, 0xd5, 0xe7, 0xea, 0xe0, 0x53, 0xb3, + 0x13, 0x84, 
0xc7, 0x8a, 0x52, 0x07, 0x54, 0x30, 0xd4, 0x58, 0xe3, 0x4f, 0x27, 0xa0, 0x1c, 0x4b, + 0xc6, 0xf1, 0xed, 0xe9, 0x53, 0xcf, 0x4d, 0x1f, 0x0e, 0x48, 0x3d, 0x17, 0x05, 0x86, 0x1f, 0x0e, + 0x3e, 0x7d, 0x60, 0x33, 0x1e, 0xe1, 0xa6, 0x0e, 0x07, 0x54, 0x70, 0xd4, 0x14, 0xa4, 0x0a, 0x79, + 0x8b, 0x7a, 0x41, 0x5b, 0x68, 0xe5, 0x44, 0xad, 0xc4, 0xbb, 0xba, 0xca, 0x01, 0x28, 0xe1, 0x9c, + 0xa0, 0x49, 0x83, 0x46, 0xbb, 0x32, 0x21, 0x0c, 0xaa, 0x20, 0x58, 0xe3, 0x00, 0x94, 0xf0, 0x8c, + 0xe4, 0x43, 0xfe, 0xc9, 0x27, 0x1f, 0x0a, 0x27, 0x9c, 0x7c, 0x20, 0x1e, 0x9c, 0x65, 0xac, 0xbd, + 0xe5, 0xdb, 0x0f, 0xcc, 0x80, 0x46, 0xda, 0x33, 0xf9, 0x38, 0x72, 0x2e, 0x1c, 0x1e, 0x54, 0xcf, + 0xd6, 0xeb, 0xd7, 0xd3, 0x5c, 0x30, 0x8b, 0x35, 0xa9, 0xc3, 0x39, 0xdb, 0x61, 0xb4, 0xd1, 0xf3, + 0xe9, 0x7a, 0xcb, 0x71, 0x7d, 0x7a, 0xdd, 0x65, 0x9c, 0x9d, 0xca, 0x40, 0x3f, 0xa7, 0x16, 0xed, + 0xdc, 0x7a, 0x16, 0x11, 0x66, 0xb7, 0x35, 0xbe, 0x9f, 0x83, 0xa9, 0x78, 0xfe, 0x91, 0x30, 0x80, + 0xf6, 0xea, 0x5a, 0x5d, 0x9a, 0x12, 0xb5, 0xc3, 0xdf, 0x1a, 0x3a, 0xad, 0x29, 0xd9, 0x44, 0x41, + 0x65, 0x04, 0xc3, 0x98, 0x98, 0x63, 0x5c, 0x70, 0x3c, 0x0f, 0xf9, 0xa6, 0xeb, 0x37, 0xa8, 0x32, + 0x86, 0x7a, 0x97, 0xac, 0x71, 0x20, 0x4a, 0x9c, 0xf1, 0x93, 0x1c, 0xc4, 0x24, 0x90, 0xdf, 0x80, + 0x69, 0x2e, 0xe3, 0xa6, 0xbf, 0x93, 0x18, 0x4d, 0x6d, 0xe8, 0xd1, 0x68, 0x4e, 0xb5, 0x73, 0x4a, + 0xfe, 0x74, 0x02, 0x8c, 0x49, 0x79, 0xe4, 0xff, 0x42, 0xc9, 0xb4, 0x2c, 0x9f, 0x32, 0x46, 0xe5, + 0x59, 0x51, 0xaa, 0x4d, 0x0b, 0x27, 0x28, 0x04, 0x62, 0x84, 0xe7, 0xdb, 0xb0, 0x6d, 0x35, 0x19, + 0xd7, 0x6c, 0x15, 0xcb, 0xe8, 0x6d, 0xc8, 0x85, 0x70, 0x38, 0x6a, 0x0a, 0xe3, 0x6b, 0x13, 0x90, + 0x94, 0x4d, 0x2c, 0x98, 0xd9, 0xf5, 0x77, 0x56, 0x56, 0xcc, 0x46, 0x7b, 0xa8, 0xa4, 0xde, 0xd9, + 0xc3, 0x83, 0xea, 0xcc, 0xcd, 0x24, 0x07, 0x4c, 0xb3, 0x54, 0x52, 0x6e, 0xd2, 0xfd, 0xc0, 0xdc, + 0x19, 0xc6, 0x60, 0x86, 0x52, 0xe2, 0x1c, 0x30, 0xcd, 0x92, 0xbc, 0x06, 0xe5, 0x5d, 0x7f, 0x27, + 0xdc, 0xe4, 0xe9, 0xbc, 0xdb, 0xcd, 0x08, 0x85, 
0x71, 0x3a, 0x3e, 0x85, 0xbb, 0xfe, 0x0e, 0x37, + 0x8a, 0xe1, 0x5d, 0x97, 0x9e, 0xc2, 0x9b, 0x0a, 0x8e, 0x9a, 0x82, 0x78, 0x40, 0x76, 0xc3, 0xd9, + 0xd3, 0x99, 0x61, 0x65, 0x8b, 0x2e, 0x67, 0x8d, 0x46, 0x13, 0xc5, 0x07, 0x74, 0x9e, 0x1f, 0xa6, + 0x37, 0xfb, 0xf8, 0x60, 0x06, 0x6f, 0xf2, 0x79, 0xb8, 0xb0, 0xeb, 0xef, 0xa8, 0xa3, 0x62, 0xcb, + 0xb7, 0x9d, 0x86, 0xed, 0x25, 0x2e, 0xb9, 0xf4, 0x71, 0x72, 0x33, 0x9b, 0x0c, 0x07, 0xb5, 0x37, + 0x3e, 0x0e, 0x53, 0xf1, 0x4b, 0x92, 0x47, 0xa4, 0xa7, 0x8d, 0x7b, 0x50, 0x12, 0xd1, 0x5b, 0x8b, + 0xbb, 0x8d, 0xfa, 0x04, 0x1a, 0x7f, 0xc8, 0x09, 0xf4, 0x02, 0x4c, 0xca, 0xc3, 0x93, 0x09, 0xc3, + 0x9e, 0x93, 0x37, 0x63, 0xf2, 0x5c, 0x65, 0x18, 0xe2, 0x8c, 0x7f, 0xc9, 0x41, 0x61, 0xdd, 0xf1, + 0x7a, 0xbf, 0x20, 0x17, 0xb9, 0xdf, 0x9e, 0x80, 0x09, 0xee, 0xac, 0x93, 0xcb, 0x30, 0x11, 0xec, + 0x7b, 0xf2, 0x10, 0x1f, 0xaf, 0xcd, 0x85, 0x16, 0x6c, 0x7b, 0xdf, 0xa3, 0x47, 0xea, 0x2f, 0x0a, + 0x0a, 0xf2, 0x26, 0x14, 0x9c, 0x5e, 0xf7, 0xae, 0xd9, 0x51, 0xd6, 0xee, 0xc5, 0xd0, 0x47, 0xd9, + 0x10, 0xd0, 0xa3, 0x83, 0xea, 0x1c, 0x75, 0x1a, 0xae, 0x65, 0x3b, 0xad, 0xa5, 0xfb, 0xcc, 0x75, + 0x16, 0x37, 0x7a, 0xdd, 0x1d, 0xea, 0xa3, 0x6a, 0x45, 0x5e, 0x82, 0xc9, 0x1d, 0xd7, 0xed, 0x70, + 0x06, 0xe3, 0xc9, 0xf4, 0x44, 0x4d, 0x82, 0x31, 0xc4, 0x73, 0x77, 0x88, 0x05, 0x3e, 0xa7, 0x9c, + 0x48, 0xba, 0x43, 0x75, 0x01, 0x45, 0x85, 0x25, 0x5d, 0x28, 0x74, 0x4d, 0x8f, 0xd3, 0xe5, 0xc5, + 0x94, 0x5d, 0x1d, 0x3a, 0xa2, 0x59, 0xbc, 0x2d, 0xf8, 0x5c, 0x75, 0x02, 0x7f, 0x3f, 0x12, 0x27, + 0x81, 0xa8, 0x84, 0x10, 0x1b, 0x26, 0x3b, 0x36, 0x0b, 0xb8, 0xbc, 0xc2, 0x08, 0x5a, 0xc1, 0xe5, + 0x09, 0x15, 0x8d, 0x66, 0xe0, 0x96, 0x64, 0x8b, 0x21, 0xff, 0xf9, 0x7d, 0x28, 0xc7, 0x7a, 0x44, + 0x66, 0xa5, 0x33, 0x29, 0x76, 0x85, 0xf0, 0x1f, 0xc9, 0x76, 0xa8, 0xfb, 0x63, 0x23, 0xf8, 0xb2, + 0xba, 0x27, 0x6a, 0xb3, 0x7c, 0x7a, 0xec, 0xf5, 0xdc, 0xa7, 0x8b, 0xdf, 0xfc, 0xe3, 0xea, 0xa9, + 0x0f, 0xff, 0xe1, 0xd2, 0x29, 0xe3, 0x2f, 0xc7, 0xa1, 0xa4, 0x49, 0xfe, 0x7b, 0x6b, 
0x8a, 0x9f, + 0xd2, 0x94, 0x1b, 0xa3, 0xcd, 0xd7, 0xb1, 0xd4, 0x65, 0x39, 0xa9, 0x2e, 0x53, 0xb5, 0xff, 0x1d, + 0x5b, 0xea, 0xa3, 0x83, 0x6a, 0x25, 0x39, 0x09, 0x68, 0xee, 0xdd, 0xa6, 0x8c, 0x99, 0x2d, 0x1a, + 0xa9, 0xc1, 0xa7, 0x1e, 0xa5, 0x06, 0x73, 0x71, 0x35, 0x28, 0x65, 0x2f, 0x63, 0x07, 0x26, 0x6e, + 0xd9, 0xce, 0x71, 0xd2, 0x2d, 0xcf, 0x43, 0x9e, 0x35, 0x5c, 0x2f, 0xcc, 0xb5, 0x68, 0x83, 0x5a, + 0xe7, 0x40, 0x94, 0xb8, 0xd0, 0x42, 0x8f, 0x0f, 0xb0, 0xd0, 0x1f, 0x8e, 0x43, 0x31, 0x4c, 0x68, + 0x91, 0xdf, 0xce, 0x41, 0xd9, 0x74, 0x1c, 0x37, 0x10, 0xf7, 0x4b, 0xa1, 0x31, 0xdd, 0x18, 0x6a, + 0xf2, 0x43, 0xa6, 0x8b, 0xcb, 0x11, 0x43, 0xb9, 0x00, 0xfa, 0x80, 0x8d, 0x61, 0x30, 0x2e, 0x97, + 0xbc, 0x07, 0x85, 0x8e, 0xb9, 0x43, 0x3b, 0xa1, 0x6d, 0x5d, 0x1f, 0xad, 0x07, 0xb7, 0x04, 0xaf, + 0xd4, 0xea, 0x4b, 0x20, 0x2a, 0x41, 0xf3, 0x6f, 0xc2, 0x6c, 0xba, 0xa3, 0x8f, 0xb3, 0x7e, 0x7c, + 0xe9, 0x63, 0x62, 0x1e, 0xa7, 0xa9, 0xf1, 0x39, 0x28, 0xdf, 0xa6, 0x81, 0x6f, 0x37, 0x04, 0x83, + 0x30, 0x12, 0xcd, 0x65, 0x47, 0xa2, 0xd1, 0x29, 0x3a, 0xf6, 0x90, 0x2b, 0x9b, 0x2f, 0xc3, 0xa4, + 0x64, 0xc9, 0x88, 0x0b, 0xe0, 0xf9, 0x6e, 0x97, 0x06, 0x6d, 0xda, 0x0b, 0x57, 0x74, 0x38, 0x47, + 0x7b, 0x4b, 0xb3, 0x91, 0x59, 0x91, 0xe8, 0x37, 0xc6, 0x44, 0x18, 0x7f, 0x36, 0x05, 0xb0, 0xe1, + 0x5a, 0x54, 0xe5, 0x3f, 0xe7, 0x61, 0xcc, 0xb6, 0xd4, 0x68, 0x40, 0x75, 0x76, 0x6c, 0x7d, 0x15, + 0xc7, 0x6c, 0x4b, 0xab, 0xf8, 0xd8, 0x40, 0x15, 0x7f, 0x0d, 0xca, 0x96, 0xcd, 0xbc, 0x8e, 0xb9, + 0xbf, 0x91, 0xe1, 0xa1, 0xad, 0x46, 0x28, 0x8c, 0xd3, 0x91, 0x97, 0x95, 0xf1, 0x93, 0x56, 0xa6, + 0x92, 0x32, 0x7e, 0x45, 0xde, 0xbd, 0x98, 0x01, 0x7c, 0x1d, 0xa6, 0xc2, 0x8c, 0x9d, 0x90, 0x92, + 0x17, 0xad, 0x42, 0x93, 0x39, 0xb5, 0x1d, 0xc3, 0x61, 0x82, 0x32, 0x9d, 0x51, 0x2c, 0x3c, 0x95, + 0x8c, 0xe2, 0x2a, 0xcc, 0xb2, 0xc0, 0xf5, 0xa9, 0x15, 0x52, 0xac, 0xaf, 0x56, 0x48, 0x62, 0xa0, + 0xb3, 0xf5, 0x14, 0x1e, 0xfb, 0x5a, 0x90, 0x2d, 0x98, 0xdb, 0x4b, 0x5d, 0x45, 0x8a, 0xc1, 0x9f, + 0x15, 0x9c, 0x2e, 0x2a, 
0x4e, 0x73, 0xf7, 0x32, 0x68, 0x30, 0xb3, 0x25, 0xf9, 0x0c, 0x4c, 0x87, + 0xdd, 0x14, 0x16, 0xa8, 0x32, 0x27, 0x58, 0xe9, 0x18, 0x66, 0x3b, 0x8e, 0xc4, 0x24, 0x2d, 0xf9, + 0x04, 0xe4, 0xbd, 0xb6, 0xc9, 0xa8, 0x4a, 0x40, 0x86, 0xf9, 0xa3, 0xfc, 0x16, 0x07, 0x1e, 0x1d, + 0x54, 0x4b, 0x7c, 0xcd, 0xc4, 0x0f, 0x94, 0x84, 0xe4, 0x0a, 0xc0, 0x8e, 0xdb, 0x73, 0x2c, 0xd3, + 0xdf, 0x5f, 0x5f, 0x55, 0x77, 0x01, 0xda, 0x6f, 0xab, 0x69, 0x0c, 0xc6, 0xa8, 0xf8, 0x51, 0xd5, + 0x95, 0x46, 0x5b, 0xe5, 0x11, 0xf5, 0x51, 0xa5, 0x6d, 0xb9, 0xc2, 0x93, 0x77, 0xa0, 0x24, 0xee, + 0x4d, 0xa8, 0xb5, 0x1c, 0xa8, 0x64, 0xe2, 0xe3, 0xa4, 0xd8, 0xb5, 0x3f, 0x57, 0x0f, 0x99, 0x60, + 0xc4, 0x8f, 0x7c, 0x11, 0xa0, 0x69, 0x3b, 0x36, 0x6b, 0x0b, 0xee, 0xe5, 0xc7, 0xe6, 0xae, 0xc7, + 0xb9, 0xa6, 0xb9, 0x60, 0x8c, 0x23, 0xf9, 0x69, 0x0e, 0xce, 0xf8, 0x94, 0xb9, 0x3d, 0xbf, 0x41, + 0x99, 0xae, 0x2b, 0x38, 0x27, 0x36, 0xff, 0xdd, 0x21, 0x2b, 0x2b, 0xc3, 0x1d, 0xbd, 0x88, 0x69, + 0xc6, 0xd2, 0xb2, 0xd2, 0xf0, 0x4a, 0xac, 0x0f, 0x7f, 0x94, 0x05, 0xfc, 0xca, 0x8f, 0xaa, 0xd5, + 0xfe, 0x82, 0x5a, 0xcd, 0x9c, 0x6b, 0xd4, 0xef, 0xfd, 0xa8, 0x3a, 0x1b, 0xfe, 0xd6, 0x15, 0x10, + 0xfd, 0xe3, 0xe2, 0x26, 0xd1, 0x73, 0xad, 0xf5, 0xad, 0xca, 0x54, 0xd2, 0x24, 0x6e, 0x71, 0x20, + 0x4a, 0x1c, 0xb9, 0x0c, 0x45, 0xcb, 0xa4, 0x5d, 0xd7, 0xa1, 0x56, 0x65, 0x3a, 0x4a, 0x6d, 0xad, + 0x2a, 0x18, 0x6a, 0x2c, 0xf9, 0x12, 0x14, 0x6c, 0x11, 0x5a, 0x54, 0x4e, 0x8b, 0x85, 0xf9, 0xcc, + 0x70, 0xce, 0x87, 0x60, 0x51, 0x03, 0x7e, 0xd6, 0xc8, 0xff, 0x51, 0xb1, 0x25, 0x0d, 0x98, 0x74, + 0x7b, 0x81, 0x90, 0x30, 0x23, 0x24, 0x0c, 0x97, 0xd9, 0xdd, 0x94, 0x3c, 0x64, 0x84, 0xa4, 0x7e, + 0x60, 0xc8, 0x99, 0x8f, 0xb7, 0xd1, 0xb6, 0x3b, 0x96, 0x4f, 0x9d, 0xca, 0xac, 0xc8, 0x09, 0x88, + 0xf1, 0xae, 0x28, 0x18, 0x6a, 0x2c, 0xf9, 0xff, 0x30, 0xed, 0xf6, 0x02, 0xb1, 0x4b, 0xf8, 0x2a, + 0xb3, 0xca, 0x19, 0x41, 0x7e, 0x86, 0xef, 0xd9, 0xcd, 0x38, 0x02, 0x93, 0x74, 0xf3, 0xab, 0x70, + 0x3e, 0x5b, 0x17, 0x1e, 0x75, 0xfc, 0x8d, 0xc7, 0x8f, 0xbf, 
0xd3, 0x30, 0x15, 0x2f, 0xdb, 0x15, + 0xa9, 0xe0, 0x58, 0xb5, 0x17, 0x71, 0xa1, 0xe4, 0xd6, 0x4f, 0x22, 0x15, 0xbc, 0x59, 0xef, 0x4b, + 0x05, 0x6b, 0x10, 0x46, 0x32, 0x1e, 0x95, 0x0a, 0xfe, 0xf3, 0x31, 0x88, 0xda, 0x91, 0x97, 0xa1, + 0x48, 0x1d, 0xcb, 0x73, 0x6d, 0x27, 0x48, 0x17, 0x04, 0x5d, 0x55, 0x70, 0xd4, 0x14, 0xb1, 0xc4, + 0xf1, 0xd8, 0x43, 0x13, 0xc7, 0x6d, 0x98, 0x31, 0xc5, 0xb5, 0x6a, 0x94, 0xf1, 0x1b, 0x7f, 0xac, + 0x8c, 0x9f, 0x2e, 0xdb, 0x4a, 0x72, 0xc1, 0x34, 0x5b, 0x2e, 0x89, 0x45, 0xcd, 0x85, 0xa4, 0x89, + 0xa1, 0x24, 0xd5, 0x93, 0x5c, 0x30, 0xcd, 0xd6, 0xf8, 0xfa, 0x18, 0x84, 0x5a, 0xfa, 0x8b, 0x10, + 0xb3, 0x13, 0x03, 0x0a, 0x3e, 0x65, 0xbd, 0x4e, 0xa0, 0xbc, 0x16, 0x61, 0x09, 0x50, 0x40, 0x50, + 0x61, 0x8c, 0x3d, 0x98, 0xe6, 0xbd, 0xed, 0x74, 0x68, 0xa7, 0x1e, 0x50, 0x8f, 0x91, 0x26, 0xe4, + 0x19, 0xff, 0x47, 0xcd, 0xc9, 0x88, 0x65, 0x12, 0x01, 0xf5, 0x62, 0x51, 0x01, 0xe7, 0x8b, 0x92, + 0xbd, 0xf1, 0x8d, 0x31, 0x28, 0xe9, 0x79, 0x3a, 0x46, 0xa8, 0xf1, 0x02, 0x4c, 0x5a, 0xb4, 0x69, + 0xf2, 0xd1, 0xa8, 0x7d, 0xc1, 0x8d, 0xce, 0xaa, 0x04, 0x61, 0x88, 0x23, 0xd5, 0x64, 0x8a, 0xa7, + 0xd4, 0x97, 0xde, 0xd9, 0x85, 0x92, 0xf8, 0x67, 0x2d, 0xac, 0x13, 0x1f, 0x76, 0xdd, 0xef, 0x86, + 0x5c, 0x64, 0xaa, 0x53, 0xff, 0xc4, 0x88, 0x7f, 0xaa, 0xbe, 0x3b, 0x7f, 0x9c, 0xfa, 0x6e, 0x63, + 0x0d, 0xf8, 0xb1, 0x71, 0x6d, 0x85, 0xbc, 0x01, 0x45, 0xa6, 0x4c, 0x92, 0x9a, 0x97, 0x8f, 0xe9, + 0x52, 0x11, 0x05, 0x3f, 0x3a, 0xa8, 0x4e, 0x0b, 0xe2, 0x10, 0x80, 0xba, 0x89, 0xf1, 0xd5, 0x09, + 0x88, 0x39, 0xc8, 0xc7, 0x98, 0x61, 0x2b, 0x15, 0xf3, 0xbc, 0x3d, 0x6c, 0xcc, 0x13, 0x06, 0x12, + 0x52, 0xe1, 0x92, 0x61, 0x0e, 0xef, 0x47, 0x9b, 0x76, 0x3c, 0xb5, 0x3e, 0xba, 0x1f, 0xd7, 0x69, + 0xc7, 0x43, 0x81, 0xd1, 0xf7, 0xac, 0x13, 0x03, 0xef, 0x59, 0xdf, 0x81, 0x7c, 0xcb, 0xec, 0xb5, + 0xa8, 0xca, 0x61, 0x7e, 0x7a, 0xb8, 0x7b, 0x39, 0xce, 0x41, 0x2a, 0x88, 0xf8, 0x17, 0x25, 0x4f, + 0xae, 0x20, 0xed, 0x30, 0x63, 0xa8, 0xfc, 0xe9, 0xe1, 0x14, 0x44, 0xe7, 0x1d, 0xa5, 0x82, 0xe8, + 
0x9f, 0x18, 0xf1, 0xe7, 0x07, 0x71, 0x43, 0x56, 0xc2, 0xa9, 0x0b, 0x95, 0xcf, 0x0e, 0x79, 0x5d, + 0x2c, 0x78, 0xc8, 0x3d, 0xa1, 0x7e, 0x60, 0xc8, 0xd9, 0x58, 0x82, 0x72, 0xac, 0x32, 0x9a, 0xcf, + 0xaf, 0xae, 0xf3, 0x8a, 0xcd, 0xef, 0xaa, 0x19, 0x98, 0x28, 0x30, 0xc6, 0xd1, 0x18, 0x68, 0xb7, + 0x27, 0x7e, 0x67, 0x6c, 0x36, 0x62, 0x65, 0xa7, 0x89, 0x0a, 0x11, 0xd7, 0x41, 0x85, 0xe5, 0x4e, + 0x78, 0x97, 0xfa, 0x2d, 0x7d, 0x9c, 0xaa, 0xed, 0xaa, 0x9d, 0xf0, 0xdb, 0x71, 0x24, 0x26, 0x69, + 0xf9, 0x61, 0xd6, 0x35, 0x1d, 0xbb, 0x49, 0x59, 0x90, 0xbe, 0x1b, 0xb8, 0xad, 0xe0, 0xa8, 0x29, + 0xc8, 0x35, 0x38, 0xc3, 0x68, 0xb0, 0xb9, 0xe7, 0x50, 0x5f, 0x57, 0xae, 0xa8, 0x52, 0xa6, 0x67, + 0x42, 0x5f, 0xb0, 0x9e, 0x26, 0xc0, 0xfe, 0x36, 0x22, 0xa0, 0x91, 0x55, 0x44, 0x2b, 0xae, 0x63, + 0xd9, 0xfa, 0x51, 0x48, 0x3c, 0xa0, 0x49, 0xe1, 0xb1, 0xaf, 0x05, 0xe7, 0xd2, 0x34, 0xed, 0x4e, + 0xcf, 0xa7, 0x11, 0x97, 0x42, 0x92, 0xcb, 0x5a, 0x0a, 0x8f, 0x7d, 0x2d, 0x8c, 0x7f, 0xce, 0xc1, + 0x34, 0xd2, 0xc0, 0xdf, 0xd7, 0x93, 0x52, 0x85, 0x7c, 0x47, 0x14, 0x2d, 0xe5, 0x44, 0xd1, 0x92, + 0x50, 0x59, 0x59, 0xa3, 0x24, 0xe1, 0x64, 0x15, 0xca, 0x3e, 0x6f, 0xa1, 0x0a, 0xc4, 0xe4, 0x84, + 0x1b, 0x61, 0x8c, 0x8a, 0x11, 0xea, 0x28, 0xf9, 0x13, 0xe3, 0xcd, 0x88, 0x03, 0x93, 0x3b, 0xb2, + 0xc8, 0x58, 0x1d, 0xf5, 0xc3, 0xe9, 0xa2, 0x2a, 0x54, 0x16, 0xf7, 0x05, 0x61, 0xd5, 0xf2, 0x51, + 0xf4, 0x2f, 0x86, 0x42, 0x8c, 0x6f, 0xe6, 0x00, 0xa2, 0x77, 0x1a, 0x64, 0x17, 0x8a, 0xec, 0xd5, + 0x84, 0x93, 0x35, 0x64, 0x3d, 0x87, 0x62, 0x12, 0x2b, 0xac, 0x53, 0x10, 0xd4, 0x02, 0x1e, 0xe5, + 0x61, 0xfd, 0x64, 0x1c, 0x74, 0xab, 0x27, 0xe4, 0x60, 0xbd, 0xc8, 0x0f, 0xe7, 0x56, 0x54, 0x6c, + 0xad, 0xe9, 0x50, 0x40, 0x51, 0x61, 0xb9, 0x17, 0x1d, 0x5e, 0x68, 0x2a, 0xd5, 0x16, 0x5e, 0x74, + 0x78, 0xf7, 0x89, 0x1a, 0x9b, 0xe5, 0xb2, 0xe5, 0x9f, 0x9a, 0xcb, 0x56, 0x78, 0x22, 0x2e, 0x1b, + 0x0f, 0x97, 0x7d, 0xb7, 0x43, 0x97, 0x71, 0x43, 0x85, 0xe5, 0x3a, 0x5c, 0x46, 0x09, 0xc6, 0x10, + 0x4f, 0x5e, 0x83, 0x72, 0x8f, 0xd1, 
0xfa, 0xea, 0xcd, 0x15, 0x9f, 0x5a, 0x4c, 0xdd, 0x15, 0xeb, + 0x44, 0xcd, 0x9d, 0x08, 0x85, 0x71, 0x3a, 0xe3, 0x77, 0x72, 0x70, 0xba, 0xde, 0xf0, 0x6d, 0x2f, + 0xd0, 0x96, 0x6e, 0x43, 0xbc, 0xac, 0x08, 0x4c, 0x1e, 0xff, 0x2a, 0x55, 0x7c, 0x6e, 0xc0, 0x35, + 0x99, 0x24, 0x4a, 0x3c, 0xbc, 0x90, 0x20, 0x8c, 0x58, 0x88, 0x9c, 0xb3, 0xb0, 0xa5, 0x69, 0x95, + 0xa8, 0x0b, 0x28, 0x2a, 0xac, 0xf1, 0xad, 0x1c, 0x14, 0x75, 0xb5, 0xd1, 0xf3, 0x90, 0x17, 0xf6, + 0x3b, 0x5d, 0x2d, 0x21, 0xac, 0x3b, 0x4a, 0x9c, 0xc8, 0xbf, 0xf2, 0x90, 0xbe, 0x2f, 0xff, 0xca, + 0x81, 0x28, 0x71, 0x5c, 0xd7, 0xa9, 0x63, 0xa5, 0xf3, 0xaf, 0x57, 0x1d, 0x0b, 0x39, 0x5c, 0x14, + 0x6d, 0xbb, 0x7e, 0xd7, 0x0c, 0xd2, 0x19, 0xf1, 0x35, 0x01, 0x45, 0x85, 0x35, 0xde, 0x82, 0x19, + 0x55, 0x16, 0xaa, 0x27, 0xea, 0xb1, 0xde, 0x22, 0x18, 0xff, 0x9e, 0x83, 0xf2, 0xf6, 0xf6, 0x2d, + 0x6d, 0xd6, 0x10, 0xce, 0x33, 0x59, 0x07, 0xba, 0xdc, 0x0c, 0xa8, 0xbf, 0xe2, 0x76, 0xbd, 0x0e, + 0xd5, 0xbc, 0x54, 0x71, 0x66, 0x3d, 0x93, 0x02, 0x07, 0xb4, 0x24, 0xeb, 0x70, 0x36, 0x8e, 0x51, + 0x46, 0x5b, 0x3d, 0x7e, 0x90, 0x55, 0x08, 0xfd, 0x68, 0xcc, 0x6a, 0x93, 0x66, 0xa5, 0x2c, 0xb7, + 0x7a, 0x78, 0xd8, 0xc7, 0x4a, 0xa1, 0x31, 0xab, 0x8d, 0x31, 0x0d, 0xe5, 0xd8, 0xab, 0x53, 0xe3, + 0xdb, 0xcf, 0x80, 0xae, 0x46, 0xfc, 0x65, 0x4d, 0xe3, 0x50, 0x19, 0xc8, 0x86, 0xce, 0x90, 0xe4, + 0x47, 0xcf, 0x90, 0x68, 0x8d, 0x4f, 0x65, 0x49, 0x5a, 0x51, 0x96, 0xa4, 0x70, 0x02, 0x59, 0x12, + 0x6d, 0xba, 0xfa, 0x32, 0x25, 0xbf, 0x9b, 0x83, 0x29, 0xc7, 0xb5, 0x68, 0x68, 0x20, 0x2b, 0x93, + 0xc2, 0x01, 0xdf, 0x1c, 0x69, 0x12, 0x65, 0xc2, 0x4c, 0x71, 0x94, 0x09, 0x32, 0x9d, 0x50, 0x8e, + 0xa3, 0x30, 0x21, 0x9a, 0xac, 0x41, 0xd1, 0x6c, 0x36, 0x6d, 0xc7, 0x0e, 0xf6, 0x55, 0x59, 0xe5, + 0xc5, 0x2c, 0xdb, 0xb7, 0xac, 0x68, 0xe4, 0x69, 0x14, 0xfe, 0x42, 0xdd, 0x96, 0x1f, 0xe7, 0xfa, + 0x45, 0x41, 0x69, 0x84, 0xe3, 0x3c, 0xbc, 0x43, 0x89, 0x39, 0x82, 0x61, 0xf5, 0x73, 0xf4, 0xc0, + 0xc0, 0x80, 0x82, 0x4c, 0x9e, 0x89, 0x3c, 0x69, 0x51, 0x06, 0x1e, 0x32, 
0xb1, 0x86, 0x0a, 0x43, + 0x5a, 0x61, 0x60, 0x5b, 0x16, 0x93, 0x5b, 0x1b, 0x3a, 0xd8, 0xd7, 0xb1, 0x72, 0x76, 0x64, 0x4b, + 0x6e, 0xc4, 0x8f, 0x8f, 0xa9, 0xe3, 0x1c, 0x1f, 0xd3, 0x03, 0x8f, 0x8e, 0x16, 0x14, 0x98, 0x38, + 0x9c, 0x44, 0xc6, 0xb0, 0x7c, 0x65, 0x65, 0x38, 0x97, 0x28, 0x71, 0xbe, 0xc9, 0xd9, 0x91, 0x30, + 0x54, 0xec, 0x89, 0x0b, 0xc5, 0x30, 0xad, 0xa9, 0x92, 0x8e, 0xc3, 0xdd, 0x8d, 0xa7, 0xc3, 0x86, + 0xb0, 0x7c, 0x4f, 0x42, 0x51, 0x0b, 0x21, 0xef, 0xc0, 0xb8, 0x65, 0xb6, 0x54, 0xfa, 0xf1, 0xed, + 0xa1, 0x0b, 0x4b, 0x43, 0x31, 0xe2, 0xd1, 0xe4, 0xea, 0xf2, 0x35, 0xe4, 0x5c, 0xc9, 0x6e, 0xf4, + 0xb2, 0x61, 0x76, 0x84, 0xb7, 0x88, 0xa9, 0xf3, 0x4e, 0x86, 0x57, 0x7d, 0x6f, 0x23, 0xae, 0xc2, + 0xe4, 0x03, 0xb7, 0xd3, 0xeb, 0xaa, 0xbc, 0x65, 0xf9, 0xca, 0x7c, 0xd6, 0x6a, 0xdf, 0x15, 0x24, + 0x91, 0x11, 0x90, 0xbf, 0x19, 0x86, 0x6d, 0xc9, 0x57, 0x72, 0x70, 0x9a, 0x6f, 0x1d, 0xad, 0x07, + 0xac, 0x42, 0x46, 0xd0, 0xd4, 0x3b, 0x8c, 0x1f, 0x8c, 0xa1, 0x86, 0x9d, 0x57, 0x62, 0x4f, 0xaf, + 0x27, 0x24, 0x60, 0x4a, 0x22, 0xf1, 0xa0, 0xc8, 0x6c, 0x8b, 0x36, 0x4c, 0x9f, 0x55, 0xce, 0x9e, + 0x98, 0xf4, 0xc8, 0x13, 0x57, 0xbc, 0x51, 0x4b, 0x21, 0xbf, 0x25, 0xde, 0x8f, 0xaa, 0xb7, 0xdf, + 0xea, 0x3d, 0xfe, 0xdc, 0x49, 0xbe, 0xc7, 0x3f, 0x2b, 0x1f, 0x8f, 0x26, 0x24, 0x60, 0x5a, 0x24, + 0xd9, 0x84, 0x73, 0xf2, 0x85, 0x43, 0xfa, 0x79, 0xcb, 0x39, 0x51, 0xbd, 0xf0, 0xcc, 0xe1, 0x41, + 0xf5, 0xdc, 0x72, 0x16, 0x01, 0x66, 0xb7, 0x23, 0x1f, 0xc0, 0xb4, 0x1f, 0x8f, 0xe2, 0x2a, 0xe7, + 0x47, 0xa8, 0xc9, 0x4b, 0xc4, 0x83, 0x32, 0x2f, 0x9e, 0x00, 0x61, 0x52, 0x16, 0x79, 0x05, 0xca, + 0x9e, 0xb2, 0x54, 0x36, 0xeb, 0x56, 0x2e, 0x88, 0x31, 0x88, 0x13, 0x75, 0x2b, 0x02, 0x63, 0x9c, + 0x86, 0xdc, 0x81, 0x72, 0xe0, 0x76, 0xa8, 0xaf, 0x2e, 0xde, 0x2b, 0x62, 0xf1, 0x17, 0xb2, 0x34, + 0x79, 0x5b, 0x93, 0x45, 0xee, 0x75, 0x04, 0x63, 0x18, 0xe7, 0x43, 0x3e, 0x03, 0xd3, 0xe1, 0x6b, + 0x26, 0x5f, 0x24, 0xc1, 0x9e, 0x49, 0x66, 0x03, 0xea, 0x71, 0x24, 0x26, 0x69, 0x79, 0x7c, 0xef, + 0xf9, 0xb6, 
0xeb, 0xdb, 0xc1, 0xfe, 0x4a, 0xc7, 0x64, 0x4c, 0x30, 0x98, 0x17, 0x0c, 0x74, 0x7c, + 0xbf, 0x95, 0x26, 0xc0, 0xfe, 0x36, 0x3c, 0x88, 0x0a, 0x81, 0x95, 0x67, 0x85, 0x03, 0x27, 0xcc, + 0x52, 0xd8, 0x16, 0x35, 0x76, 0x40, 0x49, 0xf9, 0xc5, 0x61, 0x4a, 0xca, 0x89, 0x05, 0x17, 0xcd, + 0x5e, 0xe0, 0x76, 0x39, 0x20, 0xd9, 0x64, 0xdb, 0xdd, 0xa5, 0x4e, 0xe5, 0x92, 0x38, 0xab, 0x2e, + 0x1d, 0x1e, 0x54, 0x2f, 0x2e, 0x3f, 0x84, 0x0e, 0x1f, 0xca, 0x85, 0x74, 0xa1, 0x48, 0x55, 0x59, + 0x7c, 0xe5, 0x63, 0x23, 0x1c, 0x12, 0xc9, 0xda, 0x7a, 0x39, 0x41, 0x21, 0x0c, 0xb5, 0x08, 0xb2, + 0x0d, 0xe5, 0xb6, 0xcb, 0x82, 0xe5, 0x8e, 0x6d, 0x32, 0xca, 0x2a, 0xcf, 0x09, 0x3d, 0xc9, 0x3c, + 0xdf, 0xae, 0x87, 0x64, 0x91, 0x9a, 0x5c, 0x8f, 0x5a, 0x62, 0x9c, 0x0d, 0xa1, 0x22, 0xa2, 0xec, + 0x89, 0x55, 0x73, 0x9d, 0x80, 0xbe, 0x1f, 0x54, 0x16, 0xc4, 0x58, 0x5e, 0xcc, 0xe2, 0xbc, 0xe5, + 0x5a, 0xf5, 0x24, 0xb5, 0xdc, 0xe5, 0x29, 0x20, 0xa6, 0x79, 0x92, 0xd7, 0x61, 0xca, 0x73, 0xad, + 0xba, 0x47, 0x1b, 0x5b, 0x66, 0xd0, 0x68, 0x57, 0xaa, 0xc9, 0x7b, 0xf6, 0xad, 0x18, 0x0e, 0x13, + 0x94, 0x3c, 0x1a, 0xf0, 0x29, 0xeb, 0xed, 0x74, 0xed, 0x60, 0x8b, 0x3a, 0x96, 0xed, 0xb4, 0xb6, + 0x5c, 0x8b, 0x55, 0x0c, 0xb1, 0x84, 0x22, 0x1a, 0xc0, 0x7e, 0x34, 0x66, 0xb5, 0x21, 0x0d, 0x98, + 0xec, 0xca, 0xd2, 0x88, 0xca, 0xf3, 0x23, 0xb8, 0x95, 0xaa, 0xbc, 0x42, 0x1e, 0x4a, 0xea, 0x07, + 0x86, 0x9c, 0xe7, 0xdf, 0x82, 0x33, 0x7d, 0xfe, 0xdf, 0x63, 0xd5, 0x84, 0xfc, 0x98, 0x47, 0x6b, + 0x31, 0x8f, 0xfb, 0xa4, 0xe3, 0x94, 0x6b, 0x70, 0x46, 0x7d, 0xff, 0x87, 0x3b, 0x07, 0x9d, 0x9e, + 0x7e, 0x77, 0x1e, 0xcb, 0xdd, 0x61, 0x9a, 0x00, 0xfb, 0xdb, 0xf0, 0x35, 0x6d, 0xc8, 0x87, 0xcd, + 0xb2, 0x14, 0x49, 0x26, 0x49, 0xf4, 0x9a, 0xaa, 0x47, 0xcf, 0xf2, 0xca, 0x3f, 0x41, 0x69, 0xfc, + 0x49, 0x0e, 0xa6, 0x13, 0x07, 0xd5, 0x89, 0x47, 0xfe, 0x6b, 0x40, 0xba, 0xb6, 0xef, 0xbb, 0xbe, + 0x3c, 0xed, 0x6f, 0xf3, 0x5d, 0xcb, 0xd4, 0xbb, 0x06, 0x51, 0x4f, 0x7b, 0xbb, 0x0f, 0x8b, 0x19, + 0x2d, 0x8c, 0x7f, 0xca, 0x41, 0x74, 0xc1, 0xa0, 
0x8b, 0xc8, 0x73, 0x03, 0x8b, 0xc8, 0x5f, 0x86, + 0xe2, 0x7d, 0xe6, 0x3a, 0x5b, 0x51, 0xa9, 0xb9, 0x5e, 0x8a, 0x1b, 0xf5, 0xcd, 0x0d, 0x41, 0xa9, + 0x29, 0x04, 0xf5, 0x7b, 0x6b, 0x76, 0x27, 0xe8, 0x2f, 0xc8, 0xbe, 0xf1, 0x39, 0x09, 0x47, 0x4d, + 0x41, 0x96, 0xa0, 0xa4, 0xef, 0xb4, 0x54, 0xca, 0x40, 0x4f, 0x82, 0xbe, 0xd0, 0xc1, 0x88, 0x86, + 0xbc, 0x14, 0xdd, 0xdc, 0xe4, 0x93, 0x39, 0x9c, 0xf4, 0xed, 0x8d, 0xf1, 0x9d, 0x31, 0x28, 0x3e, + 0xc5, 0x47, 0xde, 0x8d, 0xc4, 0x23, 0xef, 0x13, 0x78, 0x11, 0x9c, 0xf5, 0xc0, 0x7b, 0x37, 0xf5, + 0xc0, 0x7b, 0x65, 0xc4, 0x1b, 0xb5, 0x87, 0x3e, 0xee, 0xfe, 0xbb, 0x1c, 0x9c, 0x09, 0x49, 0xa3, + 0xc4, 0xf4, 0xa7, 0x62, 0x95, 0x98, 0xa5, 0xda, 0x0b, 0xa9, 0x62, 0xa4, 0x73, 0x7d, 0x0d, 0x62, + 0x95, 0x49, 0x5f, 0xd4, 0xbd, 0x97, 0x7a, 0xb4, 0x96, 0x14, 0x7c, 0x74, 0x50, 0x3d, 0xd6, 0x57, + 0xc4, 0x16, 0x35, 0xef, 0x64, 0x87, 0xe3, 0xf5, 0x30, 0xe3, 0x0f, 0xaf, 0x87, 0x31, 0x7e, 0x90, + 0x83, 0xa9, 0xa7, 0xf8, 0x68, 0x7d, 0x27, 0xf9, 0x68, 0xfd, 0x8d, 0x91, 0x96, 0x6d, 0xc0, 0x83, + 0xf5, 0xbf, 0x3f, 0x0f, 0x89, 0xc7, 0xe2, 0xc4, 0x81, 0x52, 0x68, 0x21, 0xc3, 0x1b, 0xd8, 0x37, + 0x46, 0xca, 0x02, 0x44, 0x7b, 0x33, 0x84, 0x30, 0x8c, 0x44, 0x90, 0x2b, 0x00, 0x94, 0x1f, 0x0d, + 0x32, 0xbb, 0x3d, 0x96, 0xbc, 0xa0, 0xbc, 0xaa, 0x31, 0x18, 0xa3, 0x7a, 0xfa, 0x19, 0xa6, 0x6c, + 0x9f, 0x6c, 0xe2, 0x89, 0xf8, 0x64, 0x17, 0x4f, 0xdc, 0x27, 0x7b, 0xee, 0xc9, 0xfb, 0x64, 0xb1, + 0x08, 0x34, 0x3f, 0x42, 0x04, 0xfa, 0x01, 0xcc, 0xc9, 0x7f, 0x57, 0x3a, 0xa6, 0xdd, 0xd5, 0xfa, + 0xa2, 0x6a, 0xd7, 0x5f, 0xca, 0xf4, 0xc4, 0xa8, 0xcf, 0x6c, 0x16, 0x50, 0x27, 0xb8, 0x1b, 0xb5, + 0x8c, 0x6a, 0xf7, 0xee, 0x66, 0xb0, 0xc3, 0x4c, 0x21, 0xe9, 0x90, 0x65, 0xf2, 0x18, 0x21, 0xcb, + 0xb7, 0x72, 0x70, 0xce, 0xcc, 0xfa, 0x36, 0x92, 0x4a, 0x5c, 0xdd, 0x18, 0x29, 0x80, 0x4c, 0x70, + 0x54, 0x01, 0x60, 0x16, 0x0a, 0xb3, 0xfb, 0x40, 0x5e, 0x88, 0x72, 0x10, 0x25, 0xa1, 0x54, 0xd9, + 0xd9, 0x83, 0xaf, 0xa5, 0x73, 0x7f, 0x20, 0x66, 0xbb, 0x3e, 0xf2, 0x61, 0x74, 0x02, 
0xf9, 0xbf, + 0xf2, 0x08, 0xf9, 0xbf, 0x54, 0x3c, 0x39, 0x75, 0x42, 0xf1, 0xa4, 0x03, 0xb3, 0x76, 0xd7, 0x6c, + 0xd1, 0xad, 0x5e, 0xa7, 0x23, 0xef, 0x88, 0x58, 0x65, 0x5a, 0xf0, 0xce, 0x7c, 0xc9, 0xc4, 0xe3, + 0xfb, 0x4e, 0xfa, 0xdb, 0x06, 0xfa, 0x36, 0x76, 0x3d, 0xc5, 0x09, 0xfb, 0x78, 0x73, 0xb5, 0xe4, + 0x71, 0xca, 0x06, 0x0d, 0xf8, 0x6c, 0x8b, 0xd4, 0x98, 0xfa, 0x7a, 0xdd, 0xf5, 0x08, 0x8c, 0x71, + 0x1a, 0x72, 0x13, 0x4a, 0x96, 0xc3, 0xd4, 0x5d, 0xec, 0x8c, 0xb0, 0x52, 0x1f, 0xe7, 0xb6, 0x6d, + 0x75, 0xa3, 0xae, 0x6f, 0x61, 0x2f, 0x66, 0x14, 0x1e, 0x6a, 0x3c, 0x46, 0xed, 0xc9, 0x6d, 0xc1, + 0x4c, 0x3d, 0xeb, 0x93, 0xb9, 0xac, 0x4b, 0x03, 0x42, 0xa2, 0xd5, 0x8d, 0xf0, 0x15, 0xe2, 0xb4, + 0x12, 0xa7, 0x1e, 0xeb, 0x45, 0x1c, 0x62, 0x0f, 0xc6, 0xcf, 0x3c, 0xec, 0xc1, 0x38, 0xb9, 0x03, + 0x17, 0x82, 0xa0, 0x93, 0xb8, 0xe0, 0x50, 0xb5, 0x9d, 0xa2, 0xd0, 0x37, 0x2f, 0xbf, 0xf7, 0xb1, + 0xbd, 0x7d, 0x2b, 0x8b, 0x04, 0x07, 0xb5, 0x15, 0x77, 0x05, 0x41, 0x47, 0xa7, 0x44, 0x16, 0x46, + 0xb9, 0x2b, 0x88, 0x6e, 0x92, 0xd4, 0x5d, 0x41, 0x04, 0xc0, 0xb8, 0x94, 0xc1, 0xa9, 0x9d, 0xb3, + 0x43, 0xa6, 0x76, 0xe2, 0xd9, 0x84, 0xb9, 0x87, 0x66, 0x13, 0xfa, 0xb2, 0x1f, 0xe7, 0x1e, 0x23, + 0xfb, 0xf1, 0x8e, 0x28, 0x2a, 0xbd, 0xb6, 0xa2, 0x32, 0x47, 0xc3, 0x55, 0xb9, 0x88, 0xea, 0x20, + 0x59, 0x32, 0x20, 0xfe, 0x45, 0xc9, 0x93, 0x6c, 0xc1, 0x9c, 0xe7, 0x5a, 0x7d, 0xc9, 0x13, 0x91, + 0x2a, 0x8a, 0x15, 0x5f, 0x6f, 0x65, 0xd0, 0x60, 0x66, 0x4b, 0x61, 0xc0, 0x23, 0x78, 0xa5, 0x22, + 0x26, 0x46, 0x1a, 0xf0, 0x08, 0x8c, 0x71, 0x9a, 0x74, 0x2e, 0xe1, 0x99, 0x27, 0x96, 0x4b, 0x98, + 0x7f, 0x0a, 0xb9, 0x84, 0x67, 0x8f, 0x9d, 0x4b, 0xf8, 0x75, 0x38, 0xeb, 0xb9, 0xd6, 0xaa, 0xcd, + 0xfc, 0x9e, 0xf8, 0x72, 0x66, 0xad, 0x67, 0xb5, 0x68, 0x20, 0x92, 0x11, 0xe5, 0x2b, 0x57, 0xe2, + 0x9d, 0x94, 0x1f, 0xf0, 0x5d, 0x54, 0x1f, 0xf0, 0x15, 0x9b, 0x3c, 0xd5, 0x4a, 0x84, 0x1d, 0x22, + 0xff, 0x90, 0x81, 0xc4, 0x2c, 0x39, 0xf1, 0xfc, 0xc3, 0xa5, 0x27, 0x95, 0x7f, 0x20, 0x6f, 0x43, + 0x91, 0xb5, 0x7b, 0x81, 
0xe5, 0xee, 0x39, 0x22, 0x2b, 0x55, 0xd2, 0x5f, 0x4b, 0x2a, 0xd6, 0x15, + 0xfc, 0xe8, 0xa0, 0x3a, 0x1b, 0xfe, 0x1f, 0xab, 0x5f, 0x53, 0x90, 0xd1, 0x33, 0x18, 0xff, 0x36, + 0x05, 0xa7, 0x53, 0x9f, 0xc2, 0xd1, 0x35, 0xfe, 0xb9, 0xe3, 0xd6, 0xf8, 0x27, 0x8a, 0xf0, 0xc7, + 0x9e, 0x68, 0x11, 0xfe, 0xf8, 0x89, 0x17, 0xe1, 0xc7, 0x82, 0xab, 0x89, 0x47, 0x3c, 0x36, 0x58, + 0x86, 0x99, 0x86, 0xdb, 0xf5, 0xc4, 0x13, 0x6d, 0x55, 0x84, 0x2d, 0x83, 0x75, 0x5d, 0xab, 0xb1, + 0x92, 0x44, 0x63, 0x9a, 0x9e, 0xfc, 0x1a, 0xe4, 0x1d, 0xd1, 0xb0, 0x30, 0xc2, 0xa3, 0xad, 0xe4, + 0x82, 0x09, 0x1f, 0x46, 0xbd, 0x9b, 0x0a, 0xaf, 0x30, 0xf2, 0x02, 0x76, 0x14, 0xfe, 0x83, 0x52, + 0x28, 0x79, 0x17, 0x2a, 0x6e, 0xb3, 0xd9, 0x71, 0x4d, 0x2b, 0x7a, 0x28, 0x70, 0x97, 0x7b, 0xa7, + 0xea, 0x52, 0xb0, 0x54, 0xbb, 0xa4, 0x18, 0x54, 0x36, 0x07, 0xd0, 0xe1, 0x40, 0x0e, 0xdc, 0xd5, + 0x9c, 0x49, 0x3e, 0x60, 0x61, 0x95, 0x92, 0x18, 0xe6, 0xaf, 0x9c, 0xc4, 0x30, 0x93, 0xaf, 0x65, + 0xd4, 0x80, 0xa3, 0x2a, 0x99, 0x24, 0x16, 0xd3, 0x3d, 0x21, 0x3e, 0x9c, 0xf7, 0xb2, 0x1c, 0x71, + 0xa6, 0x2e, 0x92, 0x1f, 0x16, 0x0e, 0x2c, 0x28, 0x29, 0xe7, 0x33, 0x5d, 0x79, 0x86, 0x03, 0x38, + 0xc7, 0x9f, 0x10, 0x14, 0x9f, 0xd8, 0x13, 0x82, 0x2f, 0x8b, 0x0f, 0xea, 0xc8, 0xc4, 0x41, 0xe8, + 0xe7, 0xad, 0x8d, 0x34, 0xe1, 0x3a, 0x0f, 0x11, 0x6d, 0x1e, 0x0d, 0x62, 0x18, 0x93, 0x46, 0x7e, + 0x96, 0xf9, 0x82, 0x45, 0xfa, 0xb1, 0x5f, 0x38, 0x89, 0x45, 0xff, 0x79, 0x7b, 0xc5, 0x32, 0xbf, + 0x2f, 0x9f, 0xcd, 0x0d, 0x7c, 0x40, 0x78, 0x27, 0xf9, 0x84, 0xf8, 0xad, 0x11, 0x9f, 0xf1, 0xc4, + 0x1f, 0x2f, 0xfe, 0x66, 0x0e, 0xe6, 0xb2, 0x36, 0x41, 0x46, 0x2f, 0xea, 0xc9, 0x5e, 0x8c, 0x96, + 0x1e, 0x89, 0xf7, 0xe1, 0x64, 0x1e, 0x93, 0x7c, 0xbd, 0x10, 0x4b, 0xe9, 0x04, 0xd4, 0xfb, 0x65, + 0x81, 0xcf, 0x50, 0x05, 0x3e, 0x89, 0xcf, 0x8f, 0xe5, 0x9f, 0xe2, 0xe7, 0xc7, 0x0a, 0x43, 0x7c, + 0x7e, 0x6c, 0xf2, 0x69, 0x7e, 0x7e, 0xac, 0x78, 0xcc, 0xcf, 0x8f, 0x95, 0x7e, 0x7e, 0x3e, 0x3f, + 0xf6, 0x51, 0x0e, 0x66, 0xff, 0xa7, 0x7f, 0x1f, 0xf8, 0xc7, 
0x39, 0x98, 0xfb, 0x2f, 0xf8, 0x30, + 0xf0, 0xfd, 0x64, 0x96, 0xfa, 0xea, 0x89, 0x0c, 0x72, 0x40, 0xb6, 0xfa, 0x0f, 0x33, 0x86, 0x28, + 0xb2, 0xd6, 0x1f, 0x3c, 0xa9, 0x2f, 0xac, 0xce, 0x65, 0x7d, 0x61, 0x35, 0xf9, 0x45, 0xd5, 0xda, + 0xe2, 0xf7, 0x3e, 0x5a, 0x38, 0xf5, 0x83, 0x8f, 0x16, 0x4e, 0xfd, 0xf0, 0xa3, 0x85, 0x53, 0x1f, + 0x1e, 0x2e, 0xe4, 0xbe, 0x77, 0xb8, 0x90, 0xfb, 0xc1, 0xe1, 0x42, 0xee, 0x87, 0x87, 0x0b, 0xb9, + 0x1f, 0x1f, 0x2e, 0xe4, 0xfe, 0xe0, 0x1f, 0x17, 0x4e, 0x7d, 0xa1, 0x18, 0x0a, 0xf8, 0xcf, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x04, 0x30, 0x19, 0x70, 0xee, 0x65, 0x00, 0x00, } func (m *ArchiveStrategy) Marshal() (dAtA []byte, err error) { @@ -2774,6 +2836,96 @@ func (m *Backoff) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ClusterWorkflowTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterWorkflowTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterWorkflowTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterWorkflowTemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterWorkflowTemplateList) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterWorkflowTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ContinueOn) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3692,9 +3844,9 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = l if len(m.Buckets) > 0 { for iNdEx := len(m.Buckets) - 1; iNdEx >= 0; iNdEx-- { - f37 := math.Float64bits(float64(m.Buckets[iNdEx])) + f40 := math.Float64bits(float64(m.Buckets[iNdEx])) i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f37)) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f40)) i-- dAtA[i] = 0x21 } @@ -5555,6 +5707,14 @@ func (m *TemplateRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l i-- + if m.ClusterScope { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i-- if m.RuntimeResolution { dAtA[i] = 1 } else { @@ -6777,6 +6937,36 @@ func (m *Backoff) Size() (n int) { return n } +func (m *ClusterWorkflowTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterWorkflowTemplateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if 
len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ContinueOn) Size() (n int) { if m == nil { return 0 @@ -7790,6 +7980,7 @@ func (m *TemplateRef) Size() (n int) { l = len(m.Template) n += 1 + l + sovGenerated(uint64(l)) n += 2 + n += 2 return n } @@ -8267,6 +8458,33 @@ func (this *Backoff) String() string { }, "") return s } +func (this *ClusterWorkflowTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterWorkflowTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "WorkflowTemplateSpec", "WorkflowTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterWorkflowTemplateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterWorkflowTemplate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterWorkflowTemplate", "ClusterWorkflowTemplate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterWorkflowTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} func (this *ContinueOn) String() string { if this == nil { return "nil" @@ -8969,6 +9187,7 @@ func (this *TemplateRef) String() string { `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Template:` + fmt.Sprintf("%v", this.Template) + `,`, `RuntimeResolution:` + fmt.Sprintf("%v", this.RuntimeResolution) + `,`, + `ClusterScope:` + fmt.Sprintf("%v", this.ClusterScope) + `,`, `}`, }, "") return s @@ -10644,6 +10863,245 @@ func (m *Backoff) Unmarshal(dAtA []byte) error { } return nil } 
+func (m *ClusterWorkflowTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterWorkflowTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterWorkflowTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterWorkflowTemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterWorkflowTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterWorkflowTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + 
var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterWorkflowTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ContinueOn) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -19773,6 +20231,26 @@ func (m *TemplateRef) Unmarshal(dAtA []byte) error { } } m.RuntimeResolution = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClusterScope = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/pkg/apis/workflow/v1alpha1/generated.proto b/pkg/apis/workflow/v1alpha1/generated.proto index 57ca7dd8c16d..5481d4964078 100644 --- a/pkg/apis/workflow/v1alpha1/generated.proto +++ b/pkg/apis/workflow/v1alpha1/generated.proto @@ -127,6 +127,25 @@ message Backoff { optional string maxDuration = 3; } +// 
ClusterWorkflowTemplate is the definition of a workflow template resource in cluster scope +// +genclient +// +genclient:noStatus +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message ClusterWorkflowTemplate { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + optional WorkflowTemplateSpec spec = 2; +} + +// ClusterWorkflowTemplateList is list of ClusterWorkflowTemplate resources +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message ClusterWorkflowTemplateList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated ClusterWorkflowTemplate items = 2; +} + // ContinueOn defines if a workflow should continue even if a task or step fails/errors. // It can be specified if the workflow should continue when the pod errors, fails or both. message ContinueOn { @@ -920,6 +939,9 @@ message TemplateRef { // RuntimeResolution skips validation at creation time. // By enabling this option, you can create the referred workflow template before the actual runtime. optional bool runtimeResolution = 3; + + // ClusterScope indicates the referred template is cluster scoped (i.e., a ClusterWorkflowTemplate). + optional bool clusterscope = 4; } // UserContainer is a container specified by a user. 
diff --git a/pkg/apis/workflow/v1alpha1/register.go b/pkg/apis/workflow/v1alpha1/register.go index 588a7901072a..76b1a8cabe42 100644 --- a/pkg/apis/workflow/v1alpha1/register.go +++ b/pkg/apis/workflow/v1alpha1/register.go @@ -38,6 +38,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &WorkflowTemplateList{}, &CronWorkflow{}, &CronWorkflowList{}, + &ClusterWorkflowTemplate{}, + &ClusterWorkflowTemplateList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/pkg/apis/workflow/v1alpha1/workflow_template_types.go b/pkg/apis/workflow/v1alpha1/workflow_template_types.go index cc15a29d6e8c..671fd64dc0fd 100644 --- a/pkg/apis/workflow/v1alpha1/workflow_template_types.go +++ b/pkg/apis/workflow/v1alpha1/workflow_template_types.go @@ -57,5 +57,10 @@ func (wftmpl *WorkflowTemplate) GetTemplateByName(name string) *Template { // GetTemplateScope returns the template scope of workflow template. func (wftmpl *WorkflowTemplate) GetTemplateScope() string { - return wftmpl.Name + return "namespaced/" + wftmpl.Name +} + +// GetAllTemplates returns the list of templates of workflow template +func (wftmpl *WorkflowTemplate) GetAllTemplates() []Template { + return wftmpl.Spec.Templates } diff --git a/pkg/apis/workflow/v1alpha1/workflow_types.go b/pkg/apis/workflow/v1alpha1/workflow_types.go index f35198066cc4..a9c1dbc4825a 100644 --- a/pkg/apis/workflow/v1alpha1/workflow_types.go +++ b/pkg/apis/workflow/v1alpha1/workflow_types.go @@ -11,7 +11,6 @@ import ( apiv1 "k8s.io/api/core/v1" policyv1beta "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) // TemplateType is the type of a template @@ -67,28 +66,6 @@ const ( PodGCOnWorkflowSuccess PodGCStrategy = "OnWorkflowSuccess" ) -// TemplateGetter is an interface to get templates. 
-type TemplateGetter interface { - GetNamespace() string - GetName() string - GroupVersionKind() schema.GroupVersionKind - GetTemplateByName(name string) *Template - GetTemplateScope() string -} - -// TemplateHolder is an interface for holders of templates. -type TemplateHolder interface { - GetTemplateName() string - GetTemplateRef() *TemplateRef - IsResolvable() bool -} - -// TemplateStorage is an interface of template storage getter and setter. -type TemplateStorage interface { - GetStoredTemplate(templateScope string, holder TemplateHolder) *Template - SetStoredTemplate(templateScope string, holder TemplateHolder, tmpl *Template) (bool, error) -} - // Workflow is the definition of a workflow resource // +genclient // +genclient:noStatus @@ -822,6 +799,8 @@ type TemplateRef struct { // RuntimeResolution skips validation at creation time. // By enabling this option, you can create the referred workflow template before the actual runtime. RuntimeResolution bool `json:"runtimeResolution,omitempty" protobuf:"varint,3,opt,name=runtimeResolution"` + // ClusterScope indicates the referred template is cluster scoped (i.e., a ClusterWorkflowTemplate). + ClusterScope bool `json:"clusterscope,omitempty" protobuf:"varint,4,opt,name=clusterscope"` } type ArgumentsProvider interface { @@ -1691,6 +1670,11 @@ func (wf *Workflow) GetTemplateScope() string { return "" } +// GetAllTemplates returns the list of templates of workflow. 
+func (wf *Workflow) GetAllTemplates() []Template { + return wf.Spec.Templates +} + // NodeID creates a deterministic node ID based on a node name func (wf *Workflow) NodeID(name string) string { if name == wf.ObjectMeta.Name { diff --git a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go index 6ec76b80c14b..591bdd86b889 100644 --- a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go @@ -254,6 +254,88 @@ func (in *Backoff) DeepCopy() *Backoff { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterWorkflowTemplate) DeepCopyInto(out *ClusterWorkflowTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterWorkflowTemplate. +func (in *ClusterWorkflowTemplate) DeepCopy() *ClusterWorkflowTemplate { + if in == nil { + return nil + } + out := new(ClusterWorkflowTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterWorkflowTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterWorkflowTemplateList) DeepCopyInto(out *ClusterWorkflowTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make(ClusterWorkflowTemplates, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterWorkflowTemplateList. +func (in *ClusterWorkflowTemplateList) DeepCopy() *ClusterWorkflowTemplateList { + if in == nil { + return nil + } + out := new(ClusterWorkflowTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterWorkflowTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ClusterWorkflowTemplates) DeepCopyInto(out *ClusterWorkflowTemplates) { + { + in := &in + *out = make(ClusterWorkflowTemplates, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterWorkflowTemplates. +func (in ClusterWorkflowTemplates) DeepCopy() ClusterWorkflowTemplates { + if in == nil { + return nil + } + out := new(ClusterWorkflowTemplates) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ContinueOn) DeepCopyInto(out *ContinueOn) { *out = *in diff --git a/pkg/client/clientset/versioned/typed/workflow/v1alpha1/clusterworkflowtemplate.go b/pkg/client/clientset/versioned/typed/workflow/v1alpha1/clusterworkflowtemplate.go new file mode 100644 index 000000000000..88963aef9cb3 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/workflow/v1alpha1/clusterworkflowtemplate.go @@ -0,0 +1,148 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + scheme "github.com/argoproj/argo/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ClusterWorkflowTemplatesGetter has a method to return a ClusterWorkflowTemplateInterface. +// A group's client should implement this interface. +type ClusterWorkflowTemplatesGetter interface { + ClusterWorkflowTemplates() ClusterWorkflowTemplateInterface +} + +// ClusterWorkflowTemplateInterface has methods to work with ClusterWorkflowTemplate resources. 
+type ClusterWorkflowTemplateInterface interface { + Create(*v1alpha1.ClusterWorkflowTemplate) (*v1alpha1.ClusterWorkflowTemplate, error) + Update(*v1alpha1.ClusterWorkflowTemplate) (*v1alpha1.ClusterWorkflowTemplate, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.ClusterWorkflowTemplate, error) + List(opts v1.ListOptions) (*v1alpha1.ClusterWorkflowTemplateList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterWorkflowTemplate, err error) + ClusterWorkflowTemplateExpansion +} + +// clusterWorkflowTemplates implements ClusterWorkflowTemplateInterface +type clusterWorkflowTemplates struct { + client rest.Interface +} + +// newClusterWorkflowTemplates returns a ClusterWorkflowTemplates +func newClusterWorkflowTemplates(c *ArgoprojV1alpha1Client) *clusterWorkflowTemplates { + return &clusterWorkflowTemplates{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterWorkflowTemplate, and returns the corresponding clusterWorkflowTemplate object, and an error if there is any. +func (c *clusterWorkflowTemplates) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterWorkflowTemplate, err error) { + result = &v1alpha1.ClusterWorkflowTemplate{} + err = c.client.Get(). + Resource("clusterworkflowtemplates"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterWorkflowTemplates that match those selectors. 
+func (c *clusterWorkflowTemplates) List(opts v1.ListOptions) (result *v1alpha1.ClusterWorkflowTemplateList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ClusterWorkflowTemplateList{} + err = c.client.Get(). + Resource("clusterworkflowtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterWorkflowTemplates. +func (c *clusterWorkflowTemplates) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clusterworkflowtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a clusterWorkflowTemplate and creates it. Returns the server's representation of the clusterWorkflowTemplate, and an error, if there is any. +func (c *clusterWorkflowTemplates) Create(clusterWorkflowTemplate *v1alpha1.ClusterWorkflowTemplate) (result *v1alpha1.ClusterWorkflowTemplate, err error) { + result = &v1alpha1.ClusterWorkflowTemplate{} + err = c.client.Post(). + Resource("clusterworkflowtemplates"). + Body(clusterWorkflowTemplate). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterWorkflowTemplate and updates it. Returns the server's representation of the clusterWorkflowTemplate, and an error, if there is any. +func (c *clusterWorkflowTemplates) Update(clusterWorkflowTemplate *v1alpha1.ClusterWorkflowTemplate) (result *v1alpha1.ClusterWorkflowTemplate, err error) { + result = &v1alpha1.ClusterWorkflowTemplate{} + err = c.client.Put(). + Resource("clusterworkflowtemplates"). + Name(clusterWorkflowTemplate.Name). + Body(clusterWorkflowTemplate). + Do(). 
+ Into(result) + return +} + +// Delete takes name of the clusterWorkflowTemplate and deletes it. Returns an error if one occurs. +func (c *clusterWorkflowTemplates) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterworkflowtemplates"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterWorkflowTemplates) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clusterworkflowtemplates"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched clusterWorkflowTemplate. +func (c *clusterWorkflowTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterWorkflowTemplate, err error) { + result = &v1alpha1.ClusterWorkflowTemplate{} + err = c.client.Patch(pt). + Resource("clusterworkflowtemplates"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/workflow/v1alpha1/fake/fake_clusterworkflowtemplate.go b/pkg/client/clientset/versioned/typed/workflow/v1alpha1/fake/fake_clusterworkflowtemplate.go new file mode 100644 index 000000000000..c08f090efea8 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/workflow/v1alpha1/fake/fake_clusterworkflowtemplate.go @@ -0,0 +1,104 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterWorkflowTemplates implements ClusterWorkflowTemplateInterface +type FakeClusterWorkflowTemplates struct { + Fake *FakeArgoprojV1alpha1 +} + +var clusterworkflowtemplatesResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "clusterworkflowtemplates"} + +var clusterworkflowtemplatesKind = schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "ClusterWorkflowTemplate"} + +// Get takes name of the clusterWorkflowTemplate, and returns the corresponding clusterWorkflowTemplate object, and an error if there is any. +func (c *FakeClusterWorkflowTemplates) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterWorkflowTemplate, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clusterworkflowtemplatesResource, name), &v1alpha1.ClusterWorkflowTemplate{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterWorkflowTemplate), err +} + +// List takes label and field selectors, and returns the list of ClusterWorkflowTemplates that match those selectors. +func (c *FakeClusterWorkflowTemplates) List(opts v1.ListOptions) (result *v1alpha1.ClusterWorkflowTemplateList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(clusterworkflowtemplatesResource, clusterworkflowtemplatesKind, opts), &v1alpha1.ClusterWorkflowTemplateList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ClusterWorkflowTemplateList{ListMeta: obj.(*v1alpha1.ClusterWorkflowTemplateList).ListMeta} + for _, item := range obj.(*v1alpha1.ClusterWorkflowTemplateList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterWorkflowTemplates. +func (c *FakeClusterWorkflowTemplates) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clusterworkflowtemplatesResource, opts)) +} + +// Create takes the representation of a clusterWorkflowTemplate and creates it. Returns the server's representation of the clusterWorkflowTemplate, and an error, if there is any. +func (c *FakeClusterWorkflowTemplates) Create(clusterWorkflowTemplate *v1alpha1.ClusterWorkflowTemplate) (result *v1alpha1.ClusterWorkflowTemplate, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterworkflowtemplatesResource, clusterWorkflowTemplate), &v1alpha1.ClusterWorkflowTemplate{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterWorkflowTemplate), err +} + +// Update takes the representation of a clusterWorkflowTemplate and updates it. Returns the server's representation of the clusterWorkflowTemplate, and an error, if there is any. +func (c *FakeClusterWorkflowTemplates) Update(clusterWorkflowTemplate *v1alpha1.ClusterWorkflowTemplate) (result *v1alpha1.ClusterWorkflowTemplate, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateAction(clusterworkflowtemplatesResource, clusterWorkflowTemplate), &v1alpha1.ClusterWorkflowTemplate{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterWorkflowTemplate), err +} + +// Delete takes name of the clusterWorkflowTemplate and deletes it. Returns an error if one occurs. +func (c *FakeClusterWorkflowTemplates) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clusterworkflowtemplatesResource, name), &v1alpha1.ClusterWorkflowTemplate{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterWorkflowTemplates) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterworkflowtemplatesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.ClusterWorkflowTemplateList{}) + return err +} + +// Patch applies the patch and returns the patched clusterWorkflowTemplate. +func (c *FakeClusterWorkflowTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterWorkflowTemplate, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(clusterworkflowtemplatesResource, name, pt, data, subresources...), &v1alpha1.ClusterWorkflowTemplate{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterWorkflowTemplate), err +} diff --git a/pkg/client/clientset/versioned/typed/workflow/v1alpha1/fake/fake_workflow_client.go b/pkg/client/clientset/versioned/typed/workflow/v1alpha1/fake/fake_workflow_client.go index 376ca146d2d1..0c838b82b2df 100644 --- a/pkg/client/clientset/versioned/typed/workflow/v1alpha1/fake/fake_workflow_client.go +++ b/pkg/client/clientset/versioned/typed/workflow/v1alpha1/fake/fake_workflow_client.go @@ -12,6 +12,10 @@ type FakeArgoprojV1alpha1 struct { *testing.Fake } +func (c *FakeArgoprojV1alpha1) ClusterWorkflowTemplates() v1alpha1.ClusterWorkflowTemplateInterface { + return &FakeClusterWorkflowTemplates{c} +} + func (c *FakeArgoprojV1alpha1) CronWorkflows(namespace string) v1alpha1.CronWorkflowInterface { return &FakeCronWorkflows{c, namespace} } diff --git a/pkg/client/clientset/versioned/typed/workflow/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/workflow/v1alpha1/generated_expansion.go index a4837a52f2a3..bdc2433ab1bd 100644 --- a/pkg/client/clientset/versioned/typed/workflow/v1alpha1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/workflow/v1alpha1/generated_expansion.go @@ -2,6 +2,8 @@ package v1alpha1 +type ClusterWorkflowTemplateExpansion interface{} + type CronWorkflowExpansion interface{} type WorkflowExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow_client.go b/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow_client.go index 2a93aa9b0c4c..c22f7ed3a6c5 100644 --- a/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow_client.go +++ b/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow_client.go @@ -10,6 +10,7 @@ import ( type ArgoprojV1alpha1Interface interface { RESTClient() 
rest.Interface + ClusterWorkflowTemplatesGetter CronWorkflowsGetter WorkflowsGetter WorkflowTemplatesGetter @@ -20,6 +21,10 @@ type ArgoprojV1alpha1Client struct { restClient rest.Interface } +func (c *ArgoprojV1alpha1Client) ClusterWorkflowTemplates() ClusterWorkflowTemplateInterface { + return newClusterWorkflowTemplates(c) +} + func (c *ArgoprojV1alpha1Client) CronWorkflows(namespace string) CronWorkflowInterface { return newCronWorkflows(c, namespace) } diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index c870b30bb283..f6980cf7b881 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -37,6 +37,8 @@ func (f *genericInformer) Lister() cache.GenericLister { func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { // Group=argoproj.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("clusterworkflowtemplates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Argoproj().V1alpha1().ClusterWorkflowTemplates().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("cronworkflows"): return &genericInformer{resource: resource.GroupResource(), informer: f.Argoproj().V1alpha1().CronWorkflows().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("workflows"): diff --git a/pkg/client/informers/externalversions/workflow/v1alpha1/clusterworkflowtemplate.go b/pkg/client/informers/externalversions/workflow/v1alpha1/clusterworkflowtemplate.go new file mode 100644 index 000000000000..a7a4a12858e7 --- /dev/null +++ b/pkg/client/informers/externalversions/workflow/v1alpha1/clusterworkflowtemplate.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + time "time" + + workflowv1alpha1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + versioned "github.com/argoproj/argo/pkg/client/clientset/versioned" + internalinterfaces "github.com/argoproj/argo/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/argoproj/argo/pkg/client/listers/workflow/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterWorkflowTemplateInformer provides access to a shared informer and lister for +// ClusterWorkflowTemplates. +type ClusterWorkflowTemplateInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ClusterWorkflowTemplateLister +} + +type clusterWorkflowTemplateInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterWorkflowTemplateInformer constructs a new informer for ClusterWorkflowTemplate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterWorkflowTemplateInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterWorkflowTemplateInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterWorkflowTemplateInformer constructs a new informer for ClusterWorkflowTemplate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterWorkflowTemplateInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ArgoprojV1alpha1().ClusterWorkflowTemplates().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ArgoprojV1alpha1().ClusterWorkflowTemplates().Watch(options) + }, + }, + &workflowv1alpha1.ClusterWorkflowTemplate{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterWorkflowTemplateInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterWorkflowTemplateInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterWorkflowTemplateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&workflowv1alpha1.ClusterWorkflowTemplate{}, f.defaultInformer) +} + +func (f *clusterWorkflowTemplateInformer) Lister() v1alpha1.ClusterWorkflowTemplateLister { + return v1alpha1.NewClusterWorkflowTemplateLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/workflow/v1alpha1/interface.go b/pkg/client/informers/externalversions/workflow/v1alpha1/interface.go index db28d4b2b1f4..2e3eaf8b0085 100644 --- a/pkg/client/informers/externalversions/workflow/v1alpha1/interface.go +++ b/pkg/client/informers/externalversions/workflow/v1alpha1/interface.go @@ -8,6 +8,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // ClusterWorkflowTemplates returns a ClusterWorkflowTemplateInformer. 
+ ClusterWorkflowTemplates() ClusterWorkflowTemplateInformer // CronWorkflows returns a CronWorkflowInformer. CronWorkflows() CronWorkflowInformer // Workflows returns a WorkflowInformer. @@ -27,6 +29,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// ClusterWorkflowTemplates returns a ClusterWorkflowTemplateInformer. +func (v *version) ClusterWorkflowTemplates() ClusterWorkflowTemplateInformer { + return &clusterWorkflowTemplateInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // CronWorkflows returns a CronWorkflowInformer. func (v *version) CronWorkflows() CronWorkflowInformer { return &cronWorkflowInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/client/listers/workflow/v1alpha1/clusterworkflowtemplate.go b/pkg/client/listers/workflow/v1alpha1/clusterworkflowtemplate.go new file mode 100644 index 000000000000..1494d6108cf8 --- /dev/null +++ b/pkg/client/listers/workflow/v1alpha1/clusterworkflowtemplate.go @@ -0,0 +1,49 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterWorkflowTemplateLister helps list ClusterWorkflowTemplates. +type ClusterWorkflowTemplateLister interface { + // List lists all ClusterWorkflowTemplates in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.ClusterWorkflowTemplate, err error) + // Get retrieves the ClusterWorkflowTemplate from the index for a given name. + Get(name string) (*v1alpha1.ClusterWorkflowTemplate, error) + ClusterWorkflowTemplateListerExpansion +} + +// clusterWorkflowTemplateLister implements the ClusterWorkflowTemplateLister interface. 
+type clusterWorkflowTemplateLister struct { + indexer cache.Indexer +} + +// NewClusterWorkflowTemplateLister returns a new ClusterWorkflowTemplateLister. +func NewClusterWorkflowTemplateLister(indexer cache.Indexer) ClusterWorkflowTemplateLister { + return &clusterWorkflowTemplateLister{indexer: indexer} +} + +// List lists all ClusterWorkflowTemplates in the indexer. +func (s *clusterWorkflowTemplateLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterWorkflowTemplate, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ClusterWorkflowTemplate)) + }) + return ret, err +} + +// Get retrieves the ClusterWorkflowTemplate from the index for a given name. +func (s *clusterWorkflowTemplateLister) Get(name string) (*v1alpha1.ClusterWorkflowTemplate, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("clusterworkflowtemplate"), name) + } + return obj.(*v1alpha1.ClusterWorkflowTemplate), nil +} diff --git a/pkg/client/listers/workflow/v1alpha1/expansion_generated.go b/pkg/client/listers/workflow/v1alpha1/expansion_generated.go index 948b5db9290d..a57d415bdf9e 100644 --- a/pkg/client/listers/workflow/v1alpha1/expansion_generated.go +++ b/pkg/client/listers/workflow/v1alpha1/expansion_generated.go @@ -2,6 +2,10 @@ package v1alpha1 +// ClusterWorkflowTemplateListerExpansion allows custom methods to be added to +// ClusterWorkflowTemplateLister. +type ClusterWorkflowTemplateListerExpansion interface{} + // CronWorkflowListerExpansion allows custom methods to be added to // CronWorkflowLister. 
type CronWorkflowListerExpansion interface{} diff --git a/server/apiserver/argoserver.go b/server/apiserver/argoserver.go index 1dabbc29859d..3a76c112dd66 100644 --- a/server/apiserver/argoserver.go +++ b/server/apiserver/argoserver.go @@ -21,6 +21,7 @@ import ( "github.com/argoproj/argo/config" "github.com/argoproj/argo/errors" "github.com/argoproj/argo/persist/sqldb" + clusterwftemplatepkg "github.com/argoproj/argo/pkg/apiclient/clusterworkflowtemplate" cronworkflowpkg "github.com/argoproj/argo/pkg/apiclient/cronworkflow" infopkg "github.com/argoproj/argo/pkg/apiclient/info" workflowpkg "github.com/argoproj/argo/pkg/apiclient/workflow" @@ -30,6 +31,7 @@ import ( "github.com/argoproj/argo/pkg/client/clientset/versioned" "github.com/argoproj/argo/server/artifacts" "github.com/argoproj/argo/server/auth" + "github.com/argoproj/argo/server/clusterworkflowtemplate" "github.com/argoproj/argo/server/cronworkflow" "github.com/argoproj/argo/server/info" "github.com/argoproj/argo/server/static" @@ -197,7 +199,7 @@ func (as *argoServer) newGRPCServer(instanceID string, offloadNodeStatusRepo sql workflowtemplatepkg.RegisterWorkflowTemplateServiceServer(grpcServer, workflowtemplate.NewWorkflowTemplateServer()) cronworkflowpkg.RegisterCronWorkflowServiceServer(grpcServer, cronworkflow.NewCronWorkflowServer(instanceID)) workflowarchivepkg.RegisterArchivedWorkflowServiceServer(grpcServer, workflowarchive.NewWorkflowArchiveServer(wfArchive)) - + clusterwftemplatepkg.RegisterClusterWorkflowTemplateServiceServer(grpcServer, clusterworkflowtemplate.NewClusterWorkflowTemplateServer()) return grpcServer } @@ -231,6 +233,8 @@ func (as *argoServer) newHTTPServer(ctx context.Context, port int, artifactServe mustRegisterGWHandler(workflowtemplatepkg.RegisterWorkflowTemplateServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) mustRegisterGWHandler(cronworkflowpkg.RegisterCronWorkflowServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) 
mustRegisterGWHandler(workflowarchivepkg.RegisterArchivedWorkflowServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) + mustRegisterGWHandler(clusterwftemplatepkg.RegisterClusterWorkflowTemplateServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) + mux.Handle("/api/", gwmux) mux.HandleFunc("/artifacts/", artifactServer.GetArtifact) mux.HandleFunc("/artifacts-by-uid/", artifactServer.GetArtifactByUID) diff --git a/server/clusterworkflowtemplate/cluster_workflow_template_server.go b/server/clusterworkflowtemplate/cluster_workflow_template_server.go new file mode 100644 index 000000000000..a11a9e347723 --- /dev/null +++ b/server/clusterworkflowtemplate/cluster_workflow_template_server.go @@ -0,0 +1,107 @@ +package clusterworkflowtemplate + +import ( + "context" + "fmt" + "sort" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterwftmplpkg "github.com/argoproj/argo/pkg/apiclient/clusterworkflowtemplate" + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/server/auth" + "github.com/argoproj/argo/workflow/templateresolution" + "github.com/argoproj/argo/workflow/validate" +) + +type ClusterWorkflowTemplateServer struct { +} + +func NewClusterWorkflowTemplateServer() clusterwftmplpkg.ClusterWorkflowTemplateServiceServer { + return &ClusterWorkflowTemplateServer{} +} + +func (cwts *ClusterWorkflowTemplateServer) CreateClusterWorkflowTemplate(ctx context.Context, req *clusterwftmplpkg.ClusterWorkflowTemplateCreateRequest) (*v1alpha1.ClusterWorkflowTemplate, error) { + wfClient := auth.GetWfClient(ctx) + if req.Template == nil { + return nil, fmt.Errorf("cluster workflow template was not found in the request body") + } + + cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().ClusterWorkflowTemplates()) + + err := validate.ValidateWorkflowTemplate(nil, cwftmplGetter, req.Template) + if err != nil { + return nil, err + } + + return 
wfClient.ArgoprojV1alpha1().ClusterWorkflowTemplates().Create(req.Template) + +} + +func (cwts *ClusterWorkflowTemplateServer) GetClusterWorkflowTemplate(ctx context.Context, req *clusterwftmplpkg.ClusterWorkflowTemplateGetRequest) (*v1alpha1.ClusterWorkflowTemplate, error) { + wfClient := auth.GetWfClient(ctx) + + wfTmpl, err := wfClient.ArgoprojV1alpha1().ClusterWorkflowTemplates().Get(req.Name, v1.GetOptions{}) + + if err != nil { + return nil, err + } + + return wfTmpl, err +} + +func (cwts *ClusterWorkflowTemplateServer) ListClusterWorkflowTemplates(ctx context.Context, req *clusterwftmplpkg.ClusterWorkflowTemplateListRequest) (*v1alpha1.ClusterWorkflowTemplateList, error) { + wfClient := auth.GetWfClient(ctx) + options := v1.ListOptions{} + if req.ListOptions != nil { + options = *req.ListOptions + } + cwfList, err := wfClient.ArgoprojV1alpha1().ClusterWorkflowTemplates().List(options) + if err != nil { + return nil, err + } + + sort.Sort(cwfList.Items) + + return cwfList, nil +} + +func (cwts *ClusterWorkflowTemplateServer) DeleteClusterWorkflowTemplate(ctx context.Context, req *clusterwftmplpkg.ClusterWorkflowTemplateDeleteRequest) (*clusterwftmplpkg.ClusterWorkflowTemplateDeleteResponse, error) { + wfClient := auth.GetWfClient(ctx) + + err := wfClient.ArgoprojV1alpha1().ClusterWorkflowTemplates().Delete(req.Name, &v1.DeleteOptions{}) + if err != nil { + return nil, err + } + + return &clusterwftmplpkg.ClusterWorkflowTemplateDeleteResponse{}, nil +} + +func (cwts *ClusterWorkflowTemplateServer) LintClusterWorkflowTemplate(ctx context.Context, req *clusterwftmplpkg.ClusterWorkflowTemplateLintRequest) (*v1alpha1.ClusterWorkflowTemplate, error) { + wfClient := auth.GetWfClient(ctx) + + cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().ClusterWorkflowTemplates()) + + err := validate.ValidateWorkflowTemplate(nil, cwftmplGetter, req.Template) + if err != nil { + return nil, err + } + + return req.Template, nil +} 
+ +func (cwts *ClusterWorkflowTemplateServer) UpdateClusterWorkflowTemplate(ctx context.Context, req *clusterwftmplpkg.ClusterWorkflowTemplateUpdateRequest) (*v1alpha1.ClusterWorkflowTemplate, error) { + if req.Template == nil { + return nil, fmt.Errorf("ClusterWorkflowTemplate is not found in Request body") + } + wfClient := auth.GetWfClient(ctx) + cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().ClusterWorkflowTemplates()) + + err := validate.ValidateWorkflowTemplate(nil, cwftmplGetter, req.Template) + if err != nil { + return nil, err + } + + res, err := wfClient.ArgoprojV1alpha1().ClusterWorkflowTemplates().Update(req.Template) + return res, err +} diff --git a/server/clusterworkflowtemplate/cluster_workflow_template_server_test.go b/server/clusterworkflowtemplate/cluster_workflow_template_server_test.go new file mode 100644 index 000000000000..750a21c399d1 --- /dev/null +++ b/server/clusterworkflowtemplate/cluster_workflow_template_server_test.go @@ -0,0 +1,215 @@ +package clusterworkflowtemplate + +import ( + "context" + "encoding/json" + "testing" + + clusterwftmplpkg "github.com/argoproj/argo/pkg/apiclient/clusterworkflowtemplate" + "github.com/argoproj/argo/server/auth" + + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes/fake" + + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + wftFake "github.com/argoproj/argo/pkg/client/clientset/versioned/fake" +) + +const cwftStr1 = `{ + "template": { + "apiVersion": "argoproj.io/v1alpha1", + "kind": "ClusterWorkflowTemplate", + "metadata": { + "name": "cluster-workflow-template-whalesay-template" + }, + "spec": { + "arguments": { + "parameters": [ + { + "name": "message", + "value": "Hello Argo" + } + ] + }, + "templates": [ + { + "name": "whalesay-template", + "inputs": { + "parameters": [ + { + "name": "message" + } + ] + }, + "container": { + "image": "docker/whalesay", + "command": [ + "cowsay" + ], + "args": [ + 
"{{inputs.parameters.message}}" + ] + } + } + ] + } + } +}` + +const cwftStr2 = `{ + "apiVersion": "argoproj.io/v1alpha1", + "kind": "ClusterWorkflowTemplate", + "metadata": { + "name": "cluster-workflow-template-whalesay-template2" + }, + "spec": { + "arguments": { + "parameters": [ + { + "name": "message", + "value": "Hello Argo" + } + ] + }, + "templates": [ + { + "name": "whalesay-template", + "inputs": { + "parameters": [ + { + "name": "message", + "value": "Hello Argo" + } + ] + }, + "container": { + "image": "docker/whalesay", + "command": [ + "cowsay" + ], + "args": [ + "{{inputs.parameters.message}}" + ] + } + } + ] + } +}` + +const cwftStr3 = `{ + "apiVersion": "argoproj.io/v1alpha1", + "kind": "ClusterWorkflowTemplate", + "metadata": { + "name": "cluster-workflow-template-whalesay-template3" + }, + "spec": { + "arguments": { + "parameters": [ + { + "name": "message", + "value": "Hello Argo" + } + ] + }, + "templates": [ + { + "name": "whalesay-template", + "inputs": { + "parameters": [ + { + "name": "message" + } + ] + }, + "container": { + "image": "docker/whalesay", + "command": [ + "cowsay" + ], + "args": [ + "{{inputs.parameters.message}}" + ] + } + } + ] + } +}` + +func getClusterWorkflowTemplateServer() (clusterwftmplpkg.ClusterWorkflowTemplateServiceServer, context.Context) { + var cwftObj1, cwftObj2 v1alpha1.ClusterWorkflowTemplate + err := json.Unmarshal([]byte(cwftStr2), &cwftObj1) + if err != nil { + panic(err) + } + err = json.Unmarshal([]byte(cwftStr3), &cwftObj2) + if err != nil { + panic(err) + } + kubeClientSet := fake.NewSimpleClientset() + wfClientset := wftFake.NewSimpleClientset(&cwftObj1, &cwftObj2) + ctx := context.WithValue(context.WithValue(context.TODO(), auth.WfKey, wfClientset), auth.KubeKey, kubeClientSet) + return NewClusterWorkflowTemplateServer(), ctx +} + +func TestWorkflowTemplateServer_CreateClusterWorkflowTemplate(t *testing.T) { + server, ctx := getClusterWorkflowTemplateServer() + var cwftReq 
clusterwftmplpkg.ClusterWorkflowTemplateCreateRequest + err := json.Unmarshal([]byte(cwftStr1), &cwftReq) + if err != nil { + panic(err) + } + cwftRsp, err := server.CreateClusterWorkflowTemplate(ctx, &cwftReq) + if assert.NoError(t, err) { + assert.NotNil(t, cwftRsp) + } +} + +func TestWorkflowTemplateServer_GetClusterWorkflowTemplate(t *testing.T) { + server, ctx := getClusterWorkflowTemplateServer() + cwftReq := clusterwftmplpkg.ClusterWorkflowTemplateGetRequest{ + Name: "cluster-workflow-template-whalesay-template2", + } + cwftRsp, err := server.GetClusterWorkflowTemplate(ctx, &cwftReq) + if assert.NoError(t, err) { + assert.NotNil(t, cwftRsp) + assert.Equal(t, "cluster-workflow-template-whalesay-template2", cwftRsp.Name) + } +} + +func TestWorkflowTemplateServer_ListClusterWorkflowTemplates(t *testing.T) { + server, ctx := getClusterWorkflowTemplateServer() + cwftReq := clusterwftmplpkg.ClusterWorkflowTemplateListRequest{} + cwftRsp, err := server.ListClusterWorkflowTemplates(ctx, &cwftReq) + if assert.NoError(t, err) { + assert.Len(t, cwftRsp.Items, 2) + } +} + +func TestWorkflowTemplateServer_DeleteClusterWorkflowTemplate(t *testing.T) { + server, ctx := getClusterWorkflowTemplateServer() + cwftReq := clusterwftmplpkg.ClusterWorkflowTemplateDeleteRequest{ + Name: "cluster-workflow-template-whalesay-template2", + } + _, err := server.DeleteClusterWorkflowTemplate(ctx, &cwftReq) + assert.NoError(t, err) + +} + +func TestWorkflowTemplateServer_UpdateClusterWorkflowTemplate(t *testing.T) { + server, ctx := getClusterWorkflowTemplateServer() + var cwftObj1 v1alpha1.ClusterWorkflowTemplate + err := json.Unmarshal([]byte(cwftStr2), &cwftObj1) + if err != nil { + panic(err) + } + cwftObj1.Spec.Templates[0].Container.Image = "alpine:latest" + cwftReq := clusterwftmplpkg.ClusterWorkflowTemplateUpdateRequest{ + Name: "cluster-workflow-template-whalesay-template2", + Template: &cwftObj1, + } + cwftRsp, err := server.UpdateClusterWorkflowTemplate(ctx, &cwftReq) + + if 
assert.NoError(t, err) { + assert.Equal(t, "alpine:latest", cwftRsp.Spec.Templates[0].Container.Image) + } +} diff --git a/server/cronworkflow/cron_workflow_server.go b/server/cronworkflow/cron_workflow_server.go index 1c3568de2d03..aa4d52941c12 100644 --- a/server/cronworkflow/cron_workflow_server.go +++ b/server/cronworkflow/cron_workflow_server.go @@ -26,7 +26,8 @@ func NewCronWorkflowServer(instanceID string) cronworkflowpkg.CronWorkflowServic func (c *cronWorkflowServiceServer) LintCronWorkflow(ctx context.Context, req *cronworkflowpkg.LintCronWorkflowRequest) (*v1alpha1.CronWorkflow, error) { wfClient := auth.GetWfClient(ctx) wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(req.Namespace)) - err := validate.ValidateCronWorkflow(wftmplGetter, req.CronWorkflow) + cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().ClusterWorkflowTemplates()) + err := validate.ValidateCronWorkflow(wftmplGetter, cwftmplGetter, req.CronWorkflow) if err != nil { return nil, err } @@ -49,8 +50,9 @@ func (c *cronWorkflowServiceServer) CreateCronWorkflow(ctx context.Context, req } wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(req.Namespace)) + cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().ClusterWorkflowTemplates()) - err := validate.ValidateCronWorkflow(wftmplGetter, req.CronWorkflow) + err := validate.ValidateCronWorkflow(wftmplGetter, cwftmplGetter, req.CronWorkflow) if err != nil { return nil, err } diff --git a/server/workflow/workflow_server.go b/server/workflow/workflow_server.go index 5f22f7721540..2b30564eb160 100644 --- a/server/workflow/workflow_server.go +++ b/server/workflow/workflow_server.go @@ -58,8 +58,10 @@ func (s *workflowServer) CreateWorkflow(ctx context.Context, req *workflowpkg.Wo } wftmplGetter := 
templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(req.Namespace)) + cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().ClusterWorkflowTemplates()) + + _, err := validate.ValidateWorkflow(wftmplGetter, cwftmplGetter, req.Workflow, validate.ValidateOpts{}) - _, err := validate.ValidateWorkflow(wftmplGetter, req.Workflow, validate.ValidateOpts{}) if err != nil { return nil, err } @@ -325,8 +327,10 @@ func (s *workflowServer) LintWorkflow(ctx context.Context, req *workflowpkg.Work wfClient := auth.GetWfClient(ctx) wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(req.Namespace)) + cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().ClusterWorkflowTemplates()) + + _, err := validate.ValidateWorkflow(wftmplGetter, cwftmplGetter, req.Workflow, validate.ValidateOpts{Lint: true}) - _, err := validate.ValidateWorkflow(wftmplGetter, req.Workflow, validate.ValidateOpts{Lint: true}) if err != nil { return nil, err } diff --git a/server/workflowtemplate/workflow_template_server.go b/server/workflowtemplate/workflow_template_server.go index 6f11d42f06cf..ac334b5621a9 100644 --- a/server/workflowtemplate/workflow_template_server.go +++ b/server/workflowtemplate/workflow_template_server.go @@ -28,7 +28,9 @@ func (wts *WorkflowTemplateServer) CreateWorkflowTemplate(ctx context.Context, r } wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(req.Namespace)) - err := validate.ValidateWorkflowTemplate(wftmplGetter, req.Template) + cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().ClusterWorkflowTemplates()) + + err := validate.ValidateWorkflowTemplate(wftmplGetter, cwftmplGetter, req.Template) if err != nil { return nil, err } @@ -81,7 +83,9 @@ func (wts 
*WorkflowTemplateServer) LintWorkflowTemplate(ctx context.Context, req wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(req.Namespace)) - err := validate.ValidateWorkflowTemplate(wftmplGetter, req.Template) + cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().ClusterWorkflowTemplates()) + + err := validate.ValidateWorkflowTemplate(wftmplGetter, cwftmplGetter, req.Template) if err != nil { return nil, err } @@ -96,7 +100,9 @@ func (wts *WorkflowTemplateServer) UpdateWorkflowTemplate(ctx context.Context, r wfClient := auth.GetWfClient(ctx) wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(req.Namespace)) - err := validate.ValidateWorkflowTemplate(wftmplGetter, req.Template) + cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().ClusterWorkflowTemplates()) + + err := validate.ValidateWorkflowTemplate(wftmplGetter, cwftmplGetter, req.Template) if err != nil { return nil, err } diff --git a/test/e2e/cli_test.go b/test/e2e/cli_test.go index 9b58ee7fdaa0..a3707b022dfe 100644 --- a/test/e2e/cli_test.go +++ b/test/e2e/cli_test.go @@ -649,6 +649,41 @@ func (s *CLISuite) TestCron() { }) } +func (s *CLISuite) TestClusterTemplateCommands() { + s.Run("Create", func() { + s.Given(). + RunCli([]string{"cluster-template", "create", "smoke/cluster-workflow-template-whalesay-template.yaml"}, func(t *testing.T, output string, err error) { + if assert.NoError(t, err) { + assert.Contains(t, output, "cluster-workflow-template-whalesay-template") + } + }) + }) + s.Run("Get", func() { + s.Given(). 
+ RunCli([]string{"cluster-template", "get", "cluster-workflow-template-whalesay-template"}, func(t *testing.T, output string, err error) { + if assert.NoError(t, err) { + assert.Contains(t, output, "cluster-workflow-template-whalesay-template") + } + }) + }) + s.Run("list", func() { + s.Given(). + RunCli([]string{"cluster-template", "list"}, func(t *testing.T, output string, err error) { + if assert.NoError(t, err) { + assert.Contains(t, output, "cluster-workflow-template-whalesay-template") + } + }) + }) + s.Run("Delete", func() { + s.Given(). + RunCli([]string{"cluster-template", "delete", "cluster-workflow-template-whalesay-template"}, func(t *testing.T, output string, err error) { + if assert.NoError(t, err) { + assert.Contains(t, output, "cluster-workflow-template-whalesay-template") + } + }) + }) +} + func TestCLISuite(t *testing.T) { suite.Run(t, new(CLISuite)) } diff --git a/test/e2e/cluster_workflow_template_test.go b/test/e2e/cluster_workflow_template_test.go new file mode 100644 index 000000000000..e3e7d87f0019 --- /dev/null +++ b/test/e2e/cluster_workflow_template_test.go @@ -0,0 +1,69 @@ +package e2e + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/test/e2e/fixtures" +) + +type ClusterWorkflowTemplateSuite struct { + fixtures.E2ESuite +} + +func (s *ClusterWorkflowTemplateSuite) TestSubmitClusterWorkflowTemplate() { + s.Given(). + ClusterWorkflowTemplate("@smoke/cluster-workflow-template-whalesay-template.yaml"). + WorkflowName("my-wf"). + When(). + CreateClusterWorkflowTemplates(). + RunCli([]string{"submit", "--from", "clusterworkflowtemplate/cluster-workflow-template-whalesay-template", "--name", "my-wf"}, func(t *testing.T, output string, err error) { + assert.NoError(t, err) + }). + WaitForWorkflow(15 * time.Second). + Then(). 
+ ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, status.Phase, v1alpha1.NodeSucceeded) + }) +} + +func (s *ClusterWorkflowTemplateSuite) TestNestedClusterWorkflowTemplate() { + s.Given().WorkflowTemplate("@smoke/cluster-workflow-template-whalesay-template.yaml"). + WorkflowTemplate("@testdata/cluster-workflow-template-nested-template.yaml"). + Workflow(`apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: workflow-template-nested- + labels: + argo-e2e: true +spec: + entrypoint: whalesay + templates: + - name: whalesay + inputs: + parameters: + - name: message + value: hello from nested + templateRef: + name: cluster-workflow-template-nested-template + template: whalesay-template + clusterscope: true +`).When(). + CreateClusterWorkflowTemplates(). + SubmitWorkflow(). + WaitForWorkflow(30 * time.Second). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { + assert.Equal(t, status.Phase, v1alpha1.NodeSucceeded) + }) + +} + +func TestClusterWorkflowTemplateSuite(t *testing.T) { + suite.Run(t, new(ClusterWorkflowTemplateSuite)) +} diff --git a/test/e2e/fixtures/e2e_suite.go b/test/e2e/fixtures/e2e_suite.go index 87fb10fcb5f2..2f84945b793d 100644 --- a/test/e2e/fixtures/e2e_suite.go +++ b/test/e2e/fixtures/e2e_suite.go @@ -36,13 +36,14 @@ func init() { type E2ESuite struct { suite.Suite - Diagnostics *Diagnostics - Persistence *Persistence - RestConfig *rest.Config - wfClient v1alpha1.WorkflowInterface - wfTemplateClient v1alpha1.WorkflowTemplateInterface - cronClient v1alpha1.CronWorkflowInterface - KubeClient kubernetes.Interface + Diagnostics *Diagnostics + Persistence *Persistence + RestConfig *rest.Config + wfClient v1alpha1.WorkflowInterface + wfTemplateClient v1alpha1.WorkflowTemplateInterface + cwfTemplateClient v1alpha1.ClusterWorkflowTemplateInterface + cronClient v1alpha1.CronWorkflowInterface + KubeClient 
kubernetes.Interface } func (s *E2ESuite) SetupSuite() { @@ -60,6 +61,7 @@ func (s *E2ESuite) SetupSuite() { s.wfTemplateClient = versioned.NewForConfigOrDie(s.RestConfig).ArgoprojV1alpha1().WorkflowTemplates(Namespace) s.cronClient = versioned.NewForConfigOrDie(s.RestConfig).ArgoprojV1alpha1().CronWorkflows(Namespace) s.Persistence = newPersistence(s.KubeClient) + s.cwfTemplateClient = versioned.NewForConfigOrDie(s.RestConfig).ArgoprojV1alpha1().ClusterWorkflowTemplates() } func (s *E2ESuite) TearDownSuite() { @@ -198,6 +200,19 @@ func (s *E2ESuite) DeleteResources(label string) { } } + // delete all cluster workflow templates + cwfTmpl, err := s.cwfTemplateClient.List(metav1.ListOptions{LabelSelector: label}) + if err != nil { + panic(err) + } + for _, cwfTmpl := range cwfTmpl.Items { + log.WithField("template", cwfTmpl.Name).Info("Deleting cluster workflow template") + err = s.cwfTemplateClient.Delete(cwfTmpl.Name, nil) + if err != nil { + panic(err) + } + } + // Delete all resourcequotas rqList, err := s.KubeClient.CoreV1().ResourceQuotas(Namespace).List(metav1.ListOptions{LabelSelector: label}) if err != nil { @@ -336,6 +351,7 @@ func (s *E2ESuite) Given() *Given { diagnostics: s.Diagnostics, client: s.wfClient, wfTemplateClient: s.wfTemplateClient, + cwfTemplateClient: s.cwfTemplateClient, cronClient: s.cronClient, offloadNodeStatusRepo: s.Persistence.offloadNodeStatusRepo, kubeClient: s.KubeClient, diff --git a/test/e2e/fixtures/given.go b/test/e2e/fixtures/given.go index 46141fc3fd5c..f854cab15f4e 100644 --- a/test/e2e/fixtures/given.go +++ b/test/e2e/fixtures/given.go @@ -19,10 +19,12 @@ type Given struct { diagnostics *Diagnostics client v1alpha1.WorkflowInterface wfTemplateClient v1alpha1.WorkflowTemplateInterface + cwfTemplateClient v1alpha1.ClusterWorkflowTemplateInterface cronClient v1alpha1.CronWorkflowInterface offloadNodeStatusRepo sqldb.OffloadNodeStatusRepo wf *wfv1.Workflow wfTemplates []*wfv1.WorkflowTemplate + cwfTemplates 
[]*wfv1.ClusterWorkflowTemplate cronWf *wfv1.CronWorkflow workflowName string kubeClient kubernetes.Interface @@ -159,15 +161,53 @@ func (g *Given) RunCli(args []string, block func(t *testing.T, output string, er return g } +func (g *Given) ClusterWorkflowTemplate(text string) *Given { + var file string + if strings.HasPrefix(text, "@") { + file = strings.TrimPrefix(text, "@") + } else { + f, err := ioutil.TempFile("", "argo_e2e") + if err != nil { + g.t.Fatal(err) + } + _, err = f.Write([]byte(text)) + if err != nil { + g.t.Fatal(err) + } + err = f.Close() + if err != nil { + g.t.Fatal(err) + } + file = f.Name() + } + // read the file in + { + file, err := ioutil.ReadFile(file) + if err != nil { + g.t.Fatal(err) + } + cwfTemplate := &wfv1.ClusterWorkflowTemplate{} + err = yaml.Unmarshal(file, cwfTemplate) + if err != nil { + g.t.Fatal(err) + } + g.checkLabels(cwfTemplate.ObjectMeta) + g.cwfTemplates = append(g.cwfTemplates, cwfTemplate) + } + return g +} + func (g *Given) When() *When { return &When{ t: g.t, diagnostics: g.diagnostics, wf: g.wf, wfTemplates: g.wfTemplates, + cwfTemplates: g.cwfTemplates, cronWf: g.cronWf, client: g.client, wfTemplateClient: g.wfTemplateClient, + cwfTemplateClient: g.cwfTemplateClient, cronClient: g.cronClient, offloadNodeStatusRepo: g.offloadNodeStatusRepo, workflowName: g.workflowName, diff --git a/test/e2e/fixtures/when.go b/test/e2e/fixtures/when.go index 4cc862d51cd1..6c756f4fb9fe 100644 --- a/test/e2e/fixtures/when.go +++ b/test/e2e/fixtures/when.go @@ -25,9 +25,11 @@ type When struct { diagnostics *Diagnostics wf *wfv1.Workflow wfTemplates []*wfv1.WorkflowTemplate + cwfTemplates []*wfv1.ClusterWorkflowTemplate cronWf *wfv1.CronWorkflow client v1alpha1.WorkflowInterface wfTemplateClient v1alpha1.WorkflowTemplateInterface + cwfTemplateClient v1alpha1.ClusterWorkflowTemplateInterface cronClient v1alpha1.CronWorkflowInterface offloadNodeStatusRepo sqldb.OffloadNodeStatusRepo workflowName string @@ -69,6 +71,23 @@ func (w *When) 
CreateWorkflowTemplates() *When { return w } +func (w *When) CreateClusterWorkflowTemplates() *When { + if len(w.cwfTemplates) == 0 { + w.t.Fatal("No cluster workflow templates to create") + } + for _, cwfTmpl := range w.cwfTemplates { + log.WithField("template", cwfTmpl.Name).Info("Creating cluster workflow template") + wfTmpl, err := w.cwfTemplateClient.Create(cwfTmpl) + if err != nil { + w.t.Fatal(err) + } else { + w.wfTemplateNames = append(w.wfTemplateNames, wfTmpl.Name) + } + log.WithField("template", wfTmpl.Name).Info("Cluster Workflow template created") + } + return w +} + func (w *When) CreateCronWorkflow() *When { if w.cronWf == nil { w.t.Fatal("No cron workflow to create") @@ -214,6 +233,7 @@ func (w *When) Given() *Given { diagnostics: w.diagnostics, client: w.client, wfTemplateClient: w.wfTemplateClient, + cwfTemplateClient: w.cwfTemplateClient, cronClient: w.cronClient, offloadNodeStatusRepo: w.offloadNodeStatusRepo, wf: w.wf, diff --git a/test/e2e/manifests/mixins/cluster-rbac.yaml b/test/e2e/manifests/mixins/cluster-rbac.yaml new file mode 100644 index 000000000000..8e7823e26d9b --- /dev/null +++ b/test/e2e/manifests/mixins/cluster-rbac.yaml @@ -0,0 +1,40 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-clusterworkflowtemplate-role +rules: + - apiGroups: + - argoproj.io + resources: + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-server-clusterworkflowtemplate-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-clusterworkflowtemplate-role +subjects: + - kind: ServiceAccount + name: argo-server + namespace: argo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-clusterworkflowtemplate-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
argo-clusterworkflowtemplate-role +subjects: + - kind: ServiceAccount + name: argo + namespace: argo \ No newline at end of file diff --git a/test/e2e/manifests/mixins/workflow-controller-deployment.yaml b/test/e2e/manifests/mixins/workflow-controller-deployment.yaml index 31070d649c10..70faac5a7872 100644 --- a/test/e2e/manifests/mixins/workflow-controller-deployment.yaml +++ b/test/e2e/manifests/mixins/workflow-controller-deployment.yaml @@ -23,4 +23,4 @@ spec: - name: WORKFLOW_GC_PERIOD value: 30s - name: UPPERIO_DB_DEBUG - value: "1" \ No newline at end of file + value: "1" diff --git a/test/e2e/manifests/mysql.yaml b/test/e2e/manifests/mysql.yaml index 6604fb227f8f..29b4e7e9539f 100644 --- a/test/e2e/manifests/mysql.yaml +++ b/test/e2e/manifests/mysql.yaml @@ -1,6 +1,21 @@ # This is an auto-generated file. DO NOT EDIT apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition +metadata: + name: clusterworkflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: ClusterWorkflowTemplate + plural: clusterworkflowtemplates + shortNames: + - clusterwftmpl + - cwft + scope: Cluster + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: name: cronworkflows.argoproj.io spec: @@ -230,6 +245,21 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-clusterworkflowtemplate-role +rules: +- apiGroups: + - argoproj.io + resources: + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: argo-binding @@ -265,6 +295,32 @@ subjects: - kind: ServiceAccount name: default --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-clusterworkflowtemplate-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-clusterworkflowtemplate-role +subjects: +- 
kind: ServiceAccount + name: argo + namespace: argo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-server-clusterworkflowtemplate-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-clusterworkflowtemplate-role +subjects: +- kind: ServiceAccount + name: argo-server + namespace: argo +--- apiVersion: v1 data: artifactRepository: | diff --git a/test/e2e/manifests/mysql/kustomization.yaml b/test/e2e/manifests/mysql/kustomization.yaml index cc4b5453c6b3..9b16f0d0494b 100644 --- a/test/e2e/manifests/mysql/kustomization.yaml +++ b/test/e2e/manifests/mysql/kustomization.yaml @@ -3,7 +3,8 @@ kind: Kustomization bases: - ../../../../manifests/quick-start/mysql - +resources: + - ../mixins/cluster-rbac.yaml patchesStrategicMerge: - ../mixins/argo-server-deployment.yaml - ../mixins/workflow-controller-configmap.yaml diff --git a/test/e2e/manifests/no-db.yaml b/test/e2e/manifests/no-db.yaml index 14593383464a..292bdbba72c9 100644 --- a/test/e2e/manifests/no-db.yaml +++ b/test/e2e/manifests/no-db.yaml @@ -1,6 +1,21 @@ # This is an auto-generated file. 
DO NOT EDIT apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition +metadata: + name: clusterworkflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: ClusterWorkflowTemplate + plural: clusterworkflowtemplates + shortNames: + - clusterwftmpl + - cwft + scope: Cluster + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: name: cronworkflows.argoproj.io spec: @@ -230,6 +245,21 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-clusterworkflowtemplate-role +rules: +- apiGroups: + - argoproj.io + resources: + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: argo-binding @@ -265,6 +295,32 @@ subjects: - kind: ServiceAccount name: default --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-clusterworkflowtemplate-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-clusterworkflowtemplate-role +subjects: +- kind: ServiceAccount + name: argo + namespace: argo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-server-clusterworkflowtemplate-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-clusterworkflowtemplate-role +subjects: +- kind: ServiceAccount + name: argo-server + namespace: argo +--- apiVersion: v1 data: artifactRepository: | diff --git a/test/e2e/manifests/no-db/kustomization.yaml b/test/e2e/manifests/no-db/kustomization.yaml index 1c86142e6105..307475fd4e20 100644 --- a/test/e2e/manifests/no-db/kustomization.yaml +++ b/test/e2e/manifests/no-db/kustomization.yaml @@ -1,9 +1,9 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization - bases: - ../../../../manifests/quick-start/no-db - +resources: + - 
../mixins/cluster-rbac.yaml patchesStrategicMerge: - ../mixins/argo-server-deployment.yaml - ../mixins/workflow-controller-configmap.yaml diff --git a/test/e2e/manifests/postgres.yaml b/test/e2e/manifests/postgres.yaml index f21a82cf57e7..01d7033e1ebc 100644 --- a/test/e2e/manifests/postgres.yaml +++ b/test/e2e/manifests/postgres.yaml @@ -1,6 +1,21 @@ # This is an auto-generated file. DO NOT EDIT apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition +metadata: + name: clusterworkflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: ClusterWorkflowTemplate + plural: clusterworkflowtemplates + shortNames: + - clusterwftmpl + - cwft + scope: Cluster + version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: name: cronworkflows.argoproj.io spec: @@ -230,6 +245,21 @@ rules: - watch --- apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-clusterworkflowtemplate-role +rules: +- apiGroups: + - argoproj.io + resources: + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: argo-binding @@ -265,6 +295,32 @@ subjects: - kind: ServiceAccount name: default --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-clusterworkflowtemplate-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-clusterworkflowtemplate-role +subjects: +- kind: ServiceAccount + name: argo + namespace: argo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-server-clusterworkflowtemplate-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-clusterworkflowtemplate-role +subjects: +- kind: ServiceAccount + name: argo-server + namespace: argo +--- apiVersion: v1 data: artifactRepository: | diff --git 
a/test/e2e/manifests/postgres/kustomization.yaml b/test/e2e/manifests/postgres/kustomization.yaml index 963fea890750..53dba34e976e 100644 --- a/test/e2e/manifests/postgres/kustomization.yaml +++ b/test/e2e/manifests/postgres/kustomization.yaml @@ -3,7 +3,8 @@ kind: Kustomization bases: - ../../../../manifests/quick-start/postgres - +resources: + - ../mixins/cluster-rbac.yaml patchesStrategicMerge: - ../mixins/argo-server-deployment.yaml - ../mixins/workflow-controller-configmap.yaml diff --git a/test/e2e/smoke/cluster-workflow-template-whalesay-template.yaml b/test/e2e/smoke/cluster-workflow-template-whalesay-template.yaml new file mode 100644 index 000000000000..ee2219632f3c --- /dev/null +++ b/test/e2e/smoke/cluster-workflow-template-whalesay-template.yaml @@ -0,0 +1,22 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ClusterWorkflowTemplate +metadata: + name: cluster-workflow-template-whalesay-template + labels: + argo-e2e: true +spec: + entrypoint: whalesay-template + arguments: + parameters: + - name: message + value: hello world + templates: + - name: whalesay-template + inputs: + parameters: + - name: message + container: + image: cowsay:v1 + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + imagePullPolicy: IfNotPresent diff --git a/test/e2e/smoke_test.go b/test/e2e/smoke_test.go index 8e82a0c19280..fc5f4d9de4fb 100644 --- a/test/e2e/smoke_test.go +++ b/test/e2e/smoke_test.go @@ -35,7 +35,7 @@ func (s *SmokeSuite) TestArtifactPassing() { Workflow("@smoke/artifact-passing.yaml"). When(). SubmitWorkflow(). - WaitForWorkflow(30 * time.Second). + WaitForWorkflow(45 * time.Second). Then(). 
ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { assert.Equal(t, wfv1.NodeSucceeded, status.Phase) diff --git a/test/e2e/testdata/cluster-workflow-template-nested-template.yaml b/test/e2e/testdata/cluster-workflow-template-nested-template.yaml new file mode 100644 index 000000000000..532ed0d71b9a --- /dev/null +++ b/test/e2e/testdata/cluster-workflow-template-nested-template.yaml @@ -0,0 +1,20 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ClusterWorkflowTemplate +metadata: + name: cluster-workflow-template-nested-template + labels: + argo-e2e: true +spec: + templates: + - name: whalesay-inner-template + templateRef: + name: workflow-template-whalesay-template + template: whalesay-template + inputs: + parameters: + - name: message + - name: whalesay-template + template: whalesay-inner-template + inputs: + parameters: + - name: message diff --git a/workflow/common/common.go b/workflow/common/common.go index 62773dab489e..01449a2f7f9d 100644 --- a/workflow/common/common.go +++ b/workflow/common/common.go @@ -57,6 +57,8 @@ const ( LabelKeyCronWorkflow = workflow.WorkflowFullName + "/cron-workflow" // LabelKeyWorkflowTemplate is a label applied to Workflows that are submitted from Workflowtemplate LabelKeyWorkflowTemplate = workflow.WorkflowFullName + "/workflow-template" + // LabelKeyWorkflowTemplate is a label applied to Workflows that are submitted from ClusterWorkflowtemplate + LabelKeyClusterWorkflowTemplate = workflow.WorkflowFullName + "/cluster-workflow-template" // LabelKeyOnExit is a label applied to Pods that are run from onExit nodes, so that they are not shut down when stopping a Workflow LabelKeyOnExit = workflow.WorkflowFullName + "/on-exit" diff --git a/workflow/common/convert.go b/workflow/common/convert.go index c3275ae67696..68161507b51f 100644 --- a/workflow/common/convert.go +++ b/workflow/common/convert.go @@ -40,6 +40,17 @@ func ConvertWorkflowTemplateToWorkflow(template *wfv1.WorkflowTemplate) *wfv1.Wo return 
wf } +func ConvertClusterWorkflowTemplateToWorkflow(template *wfv1.ClusterWorkflowTemplate) *wfv1.Workflow { + wf := toWorkflow(template.TypeMeta, template.ObjectMeta, template.Spec.WorkflowSpec) + wfLabel := wf.ObjectMeta.GetLabels() + if wfLabel == nil { + wf.Labels = make(map[string]string) + } + wf.Labels[LabelKeyClusterWorkflowTemplate] = template.ObjectMeta.Name + + return wf +} + func toWorkflow(typeMeta metav1.TypeMeta, objectMeta metav1.ObjectMeta, spec wfv1.WorkflowSpec) *wfv1.Workflow { wf := &wfv1.Workflow{ TypeMeta: metav1.TypeMeta{ diff --git a/workflow/common/util.go b/workflow/common/util.go index 9f51b43a96b9..e2fea48ab695 100644 --- a/workflow/common/util.go +++ b/workflow/common/util.go @@ -668,3 +668,31 @@ func GetTemplateHolderString(tmplHolder wfv1.TemplateHolder) string { return fmt.Sprintf("%T (%s)", tmplHolder, tmplName) } } + +// SplitClusterWorkflowTemplateYAMLFile is a helper to split a body into multiple cluster workflow template objects +func SplitClusterWorkflowTemplateYAMLFile(body []byte, strict bool) ([]wfv1.ClusterWorkflowTemplate, error) { + manifestsStrings := yamlSeparator.Split(string(body), -1) + manifests := make([]wfv1.ClusterWorkflowTemplate, 0) + for _, manifestStr := range manifestsStrings { + if strings.TrimSpace(manifestStr) == "" { + continue + } + var cwftmpl wfv1.ClusterWorkflowTemplate + var opts []yaml.JSONOpt + if strict { + opts = append(opts, yaml.DisallowUnknownFields) // nolint + } + err := yaml.Unmarshal([]byte(manifestStr), &cwftmpl, opts...) + if cwftmpl.Kind != "" && cwftmpl.Kind != workflow.ClusterWorkflowTemplateKind { + log.Warnf("%s is not a cluster workflow template", cwftmpl.Kind) + // If we get here, it was a k8s manifest which was not of type 'WorkflowTemplate' + // We ignore these since we only care about WorkflowTemplate manifests. 
+ continue + } + if err != nil { + return nil, errors.New(errors.CodeBadRequest, err.Error()) + } + manifests = append(manifests, cwftmpl) + } + return manifests, nil +} diff --git a/workflow/controller/controller.go b/workflow/controller/controller.go index eb94f6a80860..e91412c4c494 100644 --- a/workflow/controller/controller.go +++ b/workflow/controller/controller.go @@ -70,6 +70,7 @@ type WorkflowController struct { // only complete (i.e. not running) workflows completedWfInformer cache.SharedIndexInformer wftmplInformer wfextvv1alpha1.WorkflowTemplateInformer + cwftmplInformer wfextvv1alpha1.ClusterWorkflowTemplateInformer podInformer cache.SharedIndexInformer wfQueue workqueue.RateLimitingInterface podQueue workqueue.RateLimitingInterface @@ -83,10 +84,11 @@ type WorkflowController struct { } const ( - workflowResyncPeriod = 20 * time.Minute - workflowTemplateResyncPeriod = 20 * time.Minute - workflowMetricsResyncPeriod = 1 * time.Minute - podResyncPeriod = 30 * time.Minute + workflowResyncPeriod = 20 * time.Minute + workflowTemplateResyncPeriod = 20 * time.Minute + workflowMetricsResyncPeriod = 1 * time.Minute + podResyncPeriod = 30 * time.Minute + clusterWorkflowTemplateResyncPeriod = 20 * time.Minute ) // NewWorkflowController instantiates a new WorkflowController @@ -169,6 +171,7 @@ func (wfc *WorkflowController) Run(ctx context.Context, wfWorkers, podWorkers in wfc.incompleteWfInformer = util.NewWorkflowInformer(wfc.restConfig, wfc.GetManagedNamespace(), workflowResyncPeriod, wfc.incompleteWorkflowTweakListOptions) wfc.completedWfInformer = util.NewWorkflowInformer(wfc.restConfig, wfc.GetManagedNamespace(), workflowResyncPeriod, wfc.completedWorkflowTweakListOptions) wfc.wftmplInformer = wfc.newWorkflowTemplateInformer() + wfc.cwftmplInformer = wfc.newClusterWorkflowTemplateInformer() wfc.addWorkflowInformerHandler() wfc.podInformer = wfc.newPodInformer() @@ -177,13 +180,14 @@ func (wfc *WorkflowController) Run(ctx context.Context, wfWorkers, podWorkers 
in go wfc.incompleteWfInformer.Run(ctx.Done()) go wfc.completedWfInformer.Run(ctx.Done()) go wfc.wftmplInformer.Informer().Run(ctx.Done()) + go wfc.cwftmplInformer.Informer().Run(ctx.Done()) go wfc.podInformer.Run(ctx.Done()) go wfc.podLabeler(ctx.Done()) go wfc.podGarbageCollector(ctx.Done()) go wfc.periodicWorkflowGarbageCollector(ctx.Done()) // Wait for all involved caches to be synced, before processing items from the queue is started - for _, informer := range []cache.SharedIndexInformer{wfc.incompleteWfInformer, wfc.wftmplInformer.Informer(), wfc.podInformer} { + for _, informer := range []cache.SharedIndexInformer{wfc.incompleteWfInformer, wfc.wftmplInformer.Informer(), wfc.cwftmplInformer.Informer(), wfc.podInformer} { if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) { log.Error("Timed out waiting for caches to sync") return @@ -649,6 +653,10 @@ func (wfc *WorkflowController) newWorkflowTemplateInformer() wfextvv1alpha1.Work return wfextv.NewSharedInformerFactoryWithOptions(wfc.wfclientset, workflowTemplateResyncPeriod, wfextv.WithNamespace(wfc.GetManagedNamespace())).Argoproj().V1alpha1().WorkflowTemplates() } +func (wfc *WorkflowController) newClusterWorkflowTemplateInformer() wfextvv1alpha1.ClusterWorkflowTemplateInformer { + return wfextv.NewSharedInformerFactoryWithOptions(wfc.wfclientset, clusterWorkflowTemplateResyncPeriod).Argoproj().V1alpha1().ClusterWorkflowTemplates() +} + func (wfc *WorkflowController) GetManagedNamespace() string { if wfc.managedNamespace != "" { return wfc.managedNamespace diff --git a/workflow/controller/controller_test.go b/workflow/controller/controller_test.go index 18fd4290d1a5..74afab0fb934 100644 --- a/workflow/controller/controller_test.go +++ b/workflow/controller/controller_test.go @@ -100,22 +100,28 @@ func newController() *WorkflowController { wfclientset := fakewfclientset.NewSimpleClientset() informerFactory := wfextv.NewSharedInformerFactory(wfclientset, 10*time.Minute) wftmplInformer := 
informerFactory.Argoproj().V1alpha1().WorkflowTemplates() + cwftmplInformer := informerFactory.Argoproj().V1alpha1().ClusterWorkflowTemplates() ctx := context.Background() go wftmplInformer.Informer().Run(ctx.Done()) + go cwftmplInformer.Informer().Run(ctx.Done()) if !cache.WaitForCacheSync(ctx.Done(), wftmplInformer.Informer().HasSynced) { panic("Timed out waiting for caches to sync") } + if !cache.WaitForCacheSync(ctx.Done(), cwftmplInformer.Informer().HasSynced) { + panic("Timed out waiting for caches to sync") + } return &WorkflowController{ Config: config.Config{ ExecutorImage: "executor:latest", }, - kubeclientset: fake.NewSimpleClientset(), - wfclientset: wfclientset, - completedPods: make(chan string, 512), - wftmplInformer: wftmplInformer, - wfQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), - wfArchive: sqldb.NullWorkflowArchive, - Metrics: make(map[string]prometheus.Metric), + kubeclientset: fake.NewSimpleClientset(), + wfclientset: wfclientset, + completedPods: make(chan string, 512), + wftmplInformer: wftmplInformer, + cwftmplInformer: cwftmplInformer, + wfQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + wfArchive: sqldb.NullWorkflowArchive, + Metrics: make(map[string]prometheus.Metric), } } @@ -211,6 +217,15 @@ func unmarshalWFTmpl(yamlStr string) *wfv1.WorkflowTemplate { return &wftmpl } +func unmarshalCWFTmpl(yamlStr string) *wfv1.ClusterWorkflowTemplate { + var cwftmpl wfv1.ClusterWorkflowTemplate + err := yaml.Unmarshal([]byte(yamlStr), &cwftmpl) + if err != nil { + panic(err) + } + return &cwftmpl +} + // makePodsPhase acts like a pod controller and simulates the transition of pods transitioning into a specified state func makePodsPhase(t *testing.T, phase apiv1.PodPhase, kubeclientset kubernetes.Interface, namespace string) { podcs := kubeclientset.CoreV1().Pods(namespace) diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 
5654eda8ec60..395ea0eb2219 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -192,7 +192,10 @@ func (woc *wfOperationCtx) operate() { woc.auditLogger.LogWorkflowEvent(woc.wf, argo.EventInfo{Type: apiv1.EventTypeNormal, Reason: argo.EventReasonWorkflowRunning}, "Workflow Running") validateOpts := validate.ValidateOpts{ContainerRuntimeExecutor: woc.controller.GetContainerRuntimeExecutor()} wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(woc.controller.wfclientset.ArgoprojV1alpha1().WorkflowTemplates(woc.wf.Namespace)) - wfConditions, err := validate.ValidateWorkflow(wftmplGetter, woc.wf, validateOpts) + cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(woc.controller.wfclientset.ArgoprojV1alpha1().ClusterWorkflowTemplates()) + + wfConditions, err := validate.ValidateWorkflow(wftmplGetter, cwftmplGetter, woc.wf, validateOpts) + if err != nil { msg := fmt.Sprintf("invalid spec: %s", err.Error()) woc.markWorkflowFailed(msg) @@ -2335,10 +2338,8 @@ func (woc *wfOperationCtx) substituteParamsInVolumes(params map[string]string) e // createTemplateContext creates a new template context. 
func (woc *wfOperationCtx) createTemplateContext(templateScope string) (*templateresolution.Context, error) { - ctx := templateresolution.NewContext(woc.controller.wftmplInformer.Lister().WorkflowTemplates(woc.wf.Namespace), woc.wf, woc) + ctx := templateresolution.NewContext(woc.controller.wftmplInformer.Lister().WorkflowTemplates(woc.wf.Namespace), woc.controller.cwftmplInformer.Lister(), woc.wf, woc) if templateScope != "" { - fmt.Printf("templateScope: %s\n", templateScope) - // ctx = ctx.WithLazyWorkflowTemplate(woc.wf.Namespace, templateScope) return ctx.WithWorkflowTemplate(templateScope) } return ctx, nil diff --git a/workflow/controller/operator_template_scope_test.go b/workflow/controller/operator_template_scope_test.go index f8d72cb6ebbb..1caaad3a779b 100644 --- a/workflow/controller/operator_template_scope_test.go +++ b/workflow/controller/operator_template_scope_test.go @@ -96,31 +96,31 @@ func TestTemplateScope(t *testing.T) { node = findNodeByName(wf.Status.Nodes, "test-template-scope[0]") if assert.NotNil(t, node, "Node %s not found", "test-templte-scope[0]") { assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type) - assert.Equal(t, "test-template-scope-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-1", node.TemplateScope) } node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].hello") if assert.NotNil(t, node, "Node %s not found", "test-templte-scope[0].hello") { assert.Equal(t, wfv1.NodeTypePod, node.Type) - assert.Equal(t, "test-template-scope-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-1", node.TemplateScope) } node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].other-wftmpl") if assert.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl") { assert.Equal(t, wfv1.NodeTypeSteps, node.Type) - assert.Equal(t, "test-template-scope-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-1", node.TemplateScope) } node = 
findNodeByName(wf.Status.Nodes, "test-template-scope[0].other-wftmpl[0]") if assert.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl[0]") { assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type) - assert.Equal(t, "test-template-scope-2", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-2", node.TemplateScope) } node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].other-wftmpl[0].hello") if assert.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl[0].hello") { assert.Equal(t, wfv1.NodeTypePod, node.Type) - assert.Equal(t, "test-template-scope-2", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-2", node.TemplateScope) } } @@ -192,25 +192,25 @@ func TestTemplateScopeWithParam(t *testing.T) { node = findNodeByName(wf.Status.Nodes, "test-template-scope-with-param[0]") if assert.NotNil(t, node, "Node %s not found", "test-template-scope-with-param[0]") { assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type) - assert.Equal(t, "test-template-scope-with-param-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-with-param-1", node.TemplateScope) } node = findNodeByName(wf.Status.Nodes, "test-template-scope-with-param[0].print-string(0:x)") if assert.NotNil(t, node, "Node %s not found", "test-template-scope-with-param[0].print-string(0:x)") { assert.Equal(t, wfv1.NodeTypePod, node.Type) - assert.Equal(t, "test-template-scope-with-param-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-with-param-1", node.TemplateScope) } node = findNodeByName(wf.Status.Nodes, "test-template-scope-with-param[0].print-string(1:y)") if assert.NotNil(t, node, "Node %s not found", "test-template-scope-with-param[0].print-string(1:y)") { assert.Equal(t, wfv1.NodeTypePod, node.Type) - assert.Equal(t, "test-template-scope-with-param-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-with-param-1", node.TemplateScope) } node = 
findNodeByName(wf.Status.Nodes, "test-template-scope-with-param[0].print-string(2:z)") if assert.NotNil(t, node, "Node %s not found", "test-template-scope-with-param[0].print-string(2:z)") { assert.Equal(t, wfv1.NodeTypePod, node.Type) - assert.Equal(t, "test-template-scope-with-param-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-with-param-1", node.TemplateScope) } } @@ -286,37 +286,37 @@ func TestTemplateScopeNestedStepsWithParams(t *testing.T) { node = findNodeByName(wf.Status.Nodes, "test-template-scope-nested-steps-with-params[0]") if assert.NotNil(t, node, "Node %s not found", "test-template-scope-with-param[0]") { assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type) - assert.Equal(t, "test-template-scope-nested-steps-with-params-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope) } node = findNodeByName(wf.Status.Nodes, "test-template-scope-nested-steps-with-params[0].main") if assert.NotNil(t, node, "Node %s not found", "test-template-scope-nested-steps-with-params[0].main") { assert.Equal(t, wfv1.NodeTypeSteps, node.Type) - assert.Equal(t, "test-template-scope-nested-steps-with-params-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope) } node = findNodeByName(wf.Status.Nodes, "test-template-scope-nested-steps-with-params[0].main[0]") if assert.NotNil(t, node, "Node %s not found", "test-template-scope-nested-steps-with-params[0].main[0]") { assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type) - assert.Equal(t, "test-template-scope-nested-steps-with-params-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope) } node = findNodeByName(wf.Status.Nodes, "test-template-scope-nested-steps-with-params[0].main[0].print-string(0:x)") if assert.NotNil(t, node, "Node %s not found", 
"test-template-scope-nested-steps-with-params[0].main[0].print-string(0:x)") { assert.Equal(t, wfv1.NodeTypePod, node.Type) - assert.Equal(t, "test-template-scope-nested-steps-with-params-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope) } node = findNodeByName(wf.Status.Nodes, "test-template-scope-nested-steps-with-params[0].main[0].print-string(1:y)") if assert.NotNil(t, node, "Node %s not found", "test-template-scope-nested-steps-with-params[0].main[0].print-string(1:y)") { assert.Equal(t, wfv1.NodeTypePod, node.Type) - assert.Equal(t, "test-template-scope-nested-steps-with-params-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope) } node = findNodeByName(wf.Status.Nodes, "test-template-scope-nested-steps-with-params[0].main[0].print-string(2:z)") if assert.NotNil(t, node, "Node %s not found", "test-template-scope-nested-steps-with-params[0].main[0].print-string(2:z)") { assert.Equal(t, wfv1.NodeTypePod, node.Type) - assert.Equal(t, "test-template-scope-nested-steps-with-params-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-nested-steps-with-params-1", node.TemplateScope) } } @@ -395,31 +395,31 @@ func TestTemplateScopeDAG(t *testing.T) { node = findNodeByName(wf.Status.Nodes, "test-template-scope-dag.A") if assert.NotNil(t, node, "Node %s not found", "test-template-scope-dag.A") { assert.Equal(t, wfv1.NodeTypePod, node.Type) - assert.Equal(t, "test-template-scope-dag-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-dag-1", node.TemplateScope) } node = findNodeByName(wf.Status.Nodes, "test-template-scope-dag.B") if assert.NotNil(t, node, "Node %s not found", "test-template-scope-dag.B") { assert.Equal(t, wfv1.NodeTypeTaskGroup, node.Type) - assert.Equal(t, "test-template-scope-dag-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-dag-1", 
node.TemplateScope) } node = findNodeByName(wf.Status.Nodes, "test-template-scope-dag.B(0:x)") if assert.NotNil(t, node, "Node %s not found", "test-template-scope-dag.B(0:x") { assert.Equal(t, wfv1.NodeTypePod, node.Type) - assert.Equal(t, "test-template-scope-dag-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-dag-1", node.TemplateScope) } node = findNodeByName(wf.Status.Nodes, "test-template-scope-dag.B(1:y)") if assert.NotNil(t, node, "Node %s not found", "test-template-scope-dag.B(0:x") { assert.Equal(t, wfv1.NodeTypePod, node.Type) - assert.Equal(t, "test-template-scope-dag-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-dag-1", node.TemplateScope) } node = findNodeByName(wf.Status.Nodes, "test-template-scope-dag.B(2:z)") if assert.NotNil(t, node, "Node %s not found", "test-template-scope-dag.B(0:x") { assert.Equal(t, wfv1.NodeTypePod, node.Type) - assert.Equal(t, "test-template-scope-dag-1", node.TemplateScope) + assert.Equal(t, "namespaced/test-template-scope-dag-1", node.TemplateScope) } } @@ -431,3 +431,101 @@ func findNodeByName(nodes map[string]wfv1.NodeStatus, name string) *wfv1.NodeSta } return nil } + +var testTemplateClusterScopeWorkflowYaml = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + name: test-template-scope + namespace: default +spec: + entrypoint: entry + templates: + - name: entry + templateRef: + name: test-template-scope-1 + template: steps + clusterscope: true +` + +var testTemplateClusterScopeWorkflowTemplateYaml1 = ` +apiVersion: argoproj.io/v1alpha1 +kind: ClusterWorkflowTemplate +metadata: + name: test-template-scope-1 +spec: + templates: + - name: steps + steps: + - - name: hello + template: hello + - name: other-wftmpl + templateRef: + name: test-template-scope-2 + template: steps + - name: hello + script: + image: python:alpine3.6 + command: [python] + source: | + print("hello world") +` + +func TestTemplateClusterScope(t *testing.T) { + //t.SkipNow() + 
controller := newController() + wfcset := controller.wfclientset.ArgoprojV1alpha1().Workflows("default") + wfctmplset := controller.wfclientset.ArgoprojV1alpha1().ClusterWorkflowTemplates() + wftmplset := controller.wfclientset.ArgoprojV1alpha1().WorkflowTemplates("default") + wf := unmarshalWF(testTemplateClusterScopeWorkflowYaml) + _, err := wfcset.Create(wf) + assert.NoError(t, err) + cwftmpl := unmarshalCWFTmpl(testTemplateClusterScopeWorkflowTemplateYaml1) + _, err = wfctmplset.Create(cwftmpl) + + assert.NoError(t, err) + wftmpl := unmarshalWFTmpl(testTemplateScopeWorkflowTemplateYaml2) + _, err = wftmplset.Create(wftmpl) + assert.NoError(t, err) + + woc := newWorkflowOperationCtx(wf, controller) + woc.operate() + wf, err = wfcset.Get(wf.Name, metav1.GetOptions{}) + assert.NoError(t, err) + + node := findNodeByName(wf.Status.Nodes, "test-template-scope") + if assert.NotNil(t, node, "Node %s not found", "test-templte-scope") { + assert.Equal(t, wfv1.NodeTypeSteps, node.Type) + assert.Equal(t, "", node.TemplateScope) + } + + node = findNodeByName(wf.Status.Nodes, "test-template-scope[0]") + if assert.NotNil(t, node, "Node %s not found", "test-templte-scope[0]") { + assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type) + assert.Equal(t, "cluster/test-template-scope-1", node.TemplateScope) + } + + node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].hello") + if assert.NotNil(t, node, "Node %s not found", "test-templte-scope[0].hello") { + assert.Equal(t, wfv1.NodeTypePod, node.Type) + assert.Equal(t, "cluster/test-template-scope-1", node.TemplateScope) + } + + node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].other-wftmpl") + if assert.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl") { + assert.Equal(t, wfv1.NodeTypeSteps, node.Type) + assert.Equal(t, "cluster/test-template-scope-1", node.TemplateScope) + } + + node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].other-wftmpl[0]") + if assert.NotNil(t, 
node, "Node %s not found", "test-template-scope[0].other-wftmpl[0]") { + assert.Equal(t, wfv1.NodeTypeStepGroup, node.Type) + assert.Equal(t, "namespaced/test-template-scope-2", node.TemplateScope) + } + + node = findNodeByName(wf.Status.Nodes, "test-template-scope[0].other-wftmpl[0].hello") + if assert.NotNil(t, node, "Node %s not found", "test-template-scope[0].other-wftmpl[0].hello") { + assert.Equal(t, wfv1.NodeTypePod, node.Type) + assert.Equal(t, "namespaced/test-template-scope-2", node.TemplateScope) + } +} diff --git a/workflow/templateresolution/context.go b/workflow/templateresolution/context.go index f999f07a6b73..3613d8ddf12a 100644 --- a/workflow/templateresolution/context.go +++ b/workflow/templateresolution/context.go @@ -1,6 +1,8 @@ package templateresolution import ( + "strings" + "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus" apierr "k8s.io/apimachinery/pkg/api/errors" @@ -36,10 +38,32 @@ type WorkflowTemplateNamespacedGetter interface { Get(name string) (*wfv1.WorkflowTemplate, error) } +// clusterWorkflowTemplateInterfaceWrapper is an internal struct to wrap clientset. +type clusterWorkflowTemplateInterfaceWrapper struct { + clientset typed.ClusterWorkflowTemplateInterface +} + +// WorkflowTemplateNamespaceLister helps get WorkflowTemplates. +type ClusterWorkflowTemplateGetter interface { + // Get retrieves the WorkflowTemplate from the indexer for a given name. + Get(name string) (*wfv1.ClusterWorkflowTemplate, error) +} + +func WrapClusterWorkflowTemplateInterface(clusterClientset v1alpha1.ClusterWorkflowTemplateInterface) ClusterWorkflowTemplateGetter { + return &clusterWorkflowTemplateInterfaceWrapper{clientset: clusterClientset} +} + +// Get retrieves the WorkflowTemplate of a given name. +func (wrapper *clusterWorkflowTemplateInterfaceWrapper) Get(name string) (*wfv1.ClusterWorkflowTemplate, error) { + return wrapper.clientset.Get(name, metav1.GetOptions{}) +} + // Context is a context of template search. 
type Context struct { // wftmplGetter is an interface to get WorkflowTemplates. wftmplGetter WorkflowTemplateNamespacedGetter + // cwftmplGetter is an interface to get ClusterWorkflowTemplates + cwftmplGetter ClusterWorkflowTemplateGetter // tmplBase is the base of local template search. tmplBase wfv1.TemplateGetter // storage is an implementation of TemplateStorage. @@ -49,22 +73,24 @@ type Context struct { } // NewContext returns new Context. -func NewContext(wftmplGetter WorkflowTemplateNamespacedGetter, tmplBase wfv1.TemplateGetter, storage wfv1.TemplateStorage) *Context { +func NewContext(wftmplGetter WorkflowTemplateNamespacedGetter, cwftmplGetter ClusterWorkflowTemplateGetter, tmplBase wfv1.TemplateGetter, storage wfv1.TemplateStorage) *Context { return &Context{ - wftmplGetter: wftmplGetter, - tmplBase: tmplBase, - storage: storage, - log: log.WithFields(logrus.Fields{}), + wftmplGetter: wftmplGetter, + cwftmplGetter: cwftmplGetter, + tmplBase: tmplBase, + storage: storage, + log: log.WithFields(logrus.Fields{}), } } // NewContext returns new Context. 
-func NewContextFromClientset(clientset typed.WorkflowTemplateInterface, tmplBase wfv1.TemplateGetter, storage wfv1.TemplateStorage) *Context { +func NewContextFromClientset(wftmplClientset typed.WorkflowTemplateInterface, clusterWftmplClient typed.ClusterWorkflowTemplateInterface, tmplBase wfv1.TemplateGetter, storage wfv1.TemplateStorage) *Context { return &Context{ - wftmplGetter: WrapWorkflowTemplateInterface(clientset), - tmplBase: tmplBase, - storage: storage, - log: log.WithFields(logrus.Fields{}), + wftmplGetter: WrapWorkflowTemplateInterface(wftmplClientset), + cwftmplGetter: WrapClusterWorkflowTemplateInterface(clusterWftmplClient), + tmplBase: tmplBase, + storage: storage, + log: log.WithFields(logrus.Fields{}), } } @@ -79,22 +105,38 @@ func (ctx *Context) GetTemplateByName(name string) (*wfv1.Template, error) { return tmpl.DeepCopy(), nil } +func (ctx *Context) GetTemplateGetterFromRef(tmplRef *wfv1.TemplateRef) (wfv1.TemplateGetter, error) { + if tmplRef.ClusterScope { + return ctx.cwftmplGetter.Get(tmplRef.Name) + } + return ctx.wftmplGetter.Get(tmplRef.Name) +} + // GetTemplateFromRef returns a template found by a given template ref. 
func (ctx *Context) GetTemplateFromRef(tmplRef *wfv1.TemplateRef) (*wfv1.Template, error) { ctx.log.Debug("Getting the template from ref") + var template *wfv1.Template + var wftmpl wfv1.WorkflowTemplateInterface + var err error + if tmplRef.ClusterScope { + wftmpl, err = ctx.cwftmplGetter.Get(tmplRef.Name) + } else { + wftmpl, err = ctx.wftmplGetter.Get(tmplRef.Name) + } - wftmpl, err := ctx.wftmplGetter.Get(tmplRef.Name) if err != nil { if apierr.IsNotFound(err) { return nil, errors.Errorf(errors.CodeNotFound, "workflow template %s not found", tmplRef.Name) } return nil, err } - tmpl := wftmpl.GetTemplateByName(tmplRef.Template) - if tmpl == nil { + + template = wftmpl.GetTemplateByName(tmplRef.Template) + + if template == nil { return nil, errors.Errorf(errors.CodeNotFound, "template %s not found in workflow template %s", tmplRef.Template, tmplRef.Name) } - return tmpl.DeepCopy(), nil + return template.DeepCopy(), nil } // GetTemplate returns a template found by template name or template ref. @@ -202,21 +244,41 @@ func (ctx *Context) resolveTemplateImpl(tmplHolder wfv1.TemplateHolder, depth in func (ctx *Context) WithTemplateHolder(tmplHolder wfv1.TemplateHolder) (*Context, error) { tmplRef := tmplHolder.GetTemplateRef() if tmplRef != nil { - return ctx.WithWorkflowTemplate(tmplRef.Name) + tmplName := tmplRef.Name + if tmplRef.ClusterScope { + tmplName = "cluster/" + tmplName + } else { + tmplName = "namespaced/" + tmplName + } + return ctx.WithWorkflowTemplate(tmplName) } return ctx.WithTemplateBase(ctx.tmplBase), nil } // WithTemplateBase creates new context with a wfv1.TemplateGetter. func (ctx *Context) WithTemplateBase(tmplBase wfv1.TemplateGetter) *Context { - return NewContext(ctx.wftmplGetter, tmplBase, ctx.storage) + return NewContext(ctx.wftmplGetter, ctx.cwftmplGetter, tmplBase, ctx.storage) } // WithWorkflowTemplate creates new context with a wfv1.TemplateGetter. 
func (ctx *Context) WithWorkflowTemplate(name string) (*Context, error) { - wftmpl, err := ctx.wftmplGetter.Get(name) - if err != nil { - return nil, err + wfTmplnames := strings.Split(name, "/") + if len(wfTmplnames) < 1 { + return nil, errors.Errorf(errors.CodeBadRequest, "Invalid template name. %s", name) + } + if wfTmplnames[0] == "cluster" { + cwftmpl, err := ctx.cwftmplGetter.Get(wfTmplnames[1]) + if err != nil { + return nil, err + } + return ctx.WithTemplateBase(cwftmpl), nil + } + if wfTmplnames[0] == "namespaced" { + wftmpl, err := ctx.wftmplGetter.Get(wfTmplnames[1]) + if err != nil { + return nil, err + } + return ctx.WithTemplateBase(wftmpl), nil } - return ctx.WithTemplateBase(wftmpl), nil + return ctx, nil } diff --git a/workflow/templateresolution/context_test.go b/workflow/templateresolution/context_test.go index 5b17774497c6..a49d6914fa08 100644 --- a/workflow/templateresolution/context_test.go +++ b/workflow/templateresolution/context_test.go @@ -94,7 +94,7 @@ spec: func TestGetTemplateByName(t *testing.T) { wfClientset := fakewfclientset.NewSimpleClientset() wftmpl := unmarshalWftmpl(baseWorkflowTemplateYaml) - ctx := NewContextFromClientset(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault), wftmpl, nil) + ctx := NewContextFromClientset(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault), wfClientset.ArgoprojV1alpha1().ClusterWorkflowTemplates(), wftmpl, nil) tmpl, err := ctx.GetTemplateByName("whalesay") if !assert.NoError(t, err) { @@ -118,7 +118,7 @@ func TestGetTemplateFromRef(t *testing.T) { t.Fatal(err) } wftmpl := unmarshalWftmpl(baseWorkflowTemplateYaml) - ctx := NewContextFromClientset(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault), wftmpl, nil) + ctx := NewContextFromClientset(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault), wfClientset.ArgoprojV1alpha1().ClusterWorkflowTemplates(), wftmpl, nil) // Get the template of existing template 
reference. tmplRef := wfv1.TemplateRef{Name: "some-workflow-template", Template: "whalesay"} @@ -151,7 +151,7 @@ func TestGetTemplate(t *testing.T) { t.Fatal(err) } wftmpl := unmarshalWftmpl(baseWorkflowTemplateYaml) - ctx := NewContextFromClientset(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault), wftmpl, nil) + ctx := NewContextFromClientset(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault), wfClientset.ArgoprojV1alpha1().ClusterWorkflowTemplates(), wftmpl, nil) // Get the template of existing template name. tmplHolder := wfv1.WorkflowStep{Template: "whalesay"} @@ -190,7 +190,7 @@ func TestGetTemplate(t *testing.T) { func TestGetCurrentTemplateBase(t *testing.T) { wfClientset := fakewfclientset.NewSimpleClientset() wftmpl := unmarshalWftmpl(baseWorkflowTemplateYaml) - ctx := NewContextFromClientset(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault), wftmpl, nil) + ctx := NewContextFromClientset(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault), wfClientset.ArgoprojV1alpha1().ClusterWorkflowTemplates(), wftmpl, nil) // Get the template base of existing template name. tmplBase := ctx.GetCurrentTemplateBase() @@ -212,7 +212,7 @@ func TestWithTemplateHolder(t *testing.T) { t.Fatal(err) } wftmpl := unmarshalWftmpl(baseWorkflowTemplateYaml) - ctx := NewContextFromClientset(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault), wftmpl, nil) + ctx := NewContextFromClientset(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault), wfClientset.ArgoprojV1alpha1().ClusterWorkflowTemplates(), wftmpl, nil) var tmplGetter wfv1.TemplateGetter // Get the template base of existing template name. 
@@ -262,7 +262,7 @@ func TestResolveTemplate(t *testing.T) { t.Fatal(err) } wftmpl := unmarshalWftmpl(baseWorkflowTemplateYaml) - ctx := NewContextFromClientset(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault), wftmpl, nil) + ctx := NewContextFromClientset(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault), wfClientset.ArgoprojV1alpha1().ClusterWorkflowTemplates(), wftmpl, nil) // Get the template of template name. tmplHolder := wfv1.WorkflowStep{Template: "whalesay"} @@ -368,7 +368,7 @@ func TestResolveTemplate(t *testing.T) { func TestWithTemplateBase(t *testing.T) { wfClientset := fakewfclientset.NewSimpleClientset() wftmpl := unmarshalWftmpl(baseWorkflowTemplateYaml) - ctx := NewContextFromClientset(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault), wftmpl, nil) + ctx := NewContextFromClientset(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault), wfClientset.ArgoprojV1alpha1().ClusterWorkflowTemplates(), wftmpl, nil) anotherWftmpl := unmarshalWftmpl(anotherWorkflowTemplateYaml) @@ -384,7 +384,7 @@ func TestWithTemplateBase(t *testing.T) { func TestOnWorkflowTemplate(t *testing.T) { wfClientset := fakewfclientset.NewSimpleClientset() wftmpl := unmarshalWftmpl(baseWorkflowTemplateYaml) - ctx := NewContextFromClientset(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault), wftmpl, nil) + ctx := NewContextFromClientset(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault), wfClientset.ArgoprojV1alpha1().ClusterWorkflowTemplates(), wftmpl, nil) err := createWorkflowTemplate(wfClientset, anotherWorkflowTemplateYaml) if err != nil { diff --git a/workflow/util/util.go b/workflow/util/util.go index 13639fcdb171..3640e5b9f62a 100644 --- a/workflow/util/util.go +++ b/workflow/util/util.go @@ -172,7 +172,10 @@ func SubmitWorkflow(wfIf v1alpha1.WorkflowInterface, wfClientset wfclientset.Int return nil, err } wftmplGetter := 
templateresolution.WrapWorkflowTemplateInterface(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(namespace)) - _, err = validate.ValidateWorkflow(wftmplGetter, wf, validate.ValidateOpts{}) + cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(wfClientset.ArgoprojV1alpha1().ClusterWorkflowTemplates()) + + _, err = validate.ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, validate.ValidateOpts{}) + if err != nil { return nil, err } diff --git a/workflow/validate/lint.go b/workflow/validate/lint.go index 413582105f32..48bb39cba136 100644 --- a/workflow/validate/lint.go +++ b/workflow/validate/lint.go @@ -80,7 +80,7 @@ func ParseWfTmplFromFile(filePath string, strict bool) ([]wfv1.WorkflowTemplate, } // LintWorkflowTemplateDir validates all workflow manifests in a directory. Ignores non-workflow template manifests -func LintWorkflowTemplateDir(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, dirPath string, strict bool) error { +func LintWorkflowTemplateDir(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, cwftmplGetter templateresolution.ClusterWorkflowTemplateGetter, dirPath string, strict bool) error { walkFunc := func(path string, info os.FileInfo, err error) error { if info == nil || info.IsDir() { return nil @@ -91,14 +91,14 @@ func LintWorkflowTemplateDir(wftmplGetter templateresolution.WorkflowTemplateNam default: return nil } - return LintWorkflowTemplateFile(wftmplGetter, path, strict) + return LintWorkflowTemplateFile(wftmplGetter, cwftmplGetter, path, strict) } return filepath.Walk(dirPath, walkFunc) } // LintWorkflowTemplateFile lints a json file, or multiple workflow template manifest in a single yaml file. 
Ignores // non-workflow template manifests -func LintWorkflowTemplateFile(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, filePath string, strict bool) error { +func LintWorkflowTemplateFile(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, cwftmplGetter templateresolution.ClusterWorkflowTemplateGetter, filePath string, strict bool) error { body, err := ioutil.ReadFile(filePath) if err != nil { return errors.Errorf(errors.CodeBadRequest, "Can't read from file: %s, err: %v", filePath, err) @@ -127,7 +127,7 @@ func LintWorkflowTemplateFile(wftmplGetter templateresolution.WorkflowTemplateNa return errors.Errorf(errors.CodeBadRequest, "%s failed to parse: %v", filePath, err) } for _, wftmpl := range workflowTemplates { - err = ValidateWorkflowTemplate(wftmplGetter, &wftmpl) + err = ValidateWorkflowTemplate(wftmplGetter, cwftmplGetter, &wftmpl) if err != nil { return errors.Errorf(errors.CodeBadRequest, "%s: %s", filePath, err.Error()) } @@ -165,3 +165,34 @@ func ParseCronWorkflowsFromFile(filePath string, strict bool) ([]wfv1.CronWorkfl } return cronWorkflows, nil } + +func ParseCWfTmplFromFile(filePath string, strict bool) ([]wfv1.ClusterWorkflowTemplate, error) { + body, err := ioutil.ReadFile(filePath) + if err != nil { + return nil, errors.Errorf(errors.CodeBadRequest, "Can't read from file: %s, err: %v", filePath, err) + } + var clusterWorkflowTmpls []wfv1.ClusterWorkflowTemplate + if json.IsJSON(body) { + var cwfTmpl wfv1.ClusterWorkflowTemplate + if strict { + err = json.UnmarshalStrict(body, &cwfTmpl) + } else { + err = json.Unmarshal(body, &cwfTmpl) + } + if err == nil { + clusterWorkflowTmpls = []wfv1.ClusterWorkflowTemplate{cwfTmpl} + } else { + if cwfTmpl.Kind != "" && cwfTmpl.Kind != workflow.ClusterWorkflowTemplateKind { + // If we get here, it was a k8s manifest which was not of type 'Workflow' + // We ignore these since we only care about validating Workflow manifests. 
+ return nil, nil + } + } + } else { + clusterWorkflowTmpls, err = common.SplitClusterWorkflowTemplateYAMLFile(body, strict) + } + if err != nil { + return nil, errors.Errorf(errors.CodeBadRequest, "%s failed to parse: %v", filePath, err) + } + return clusterWorkflowTmpls, nil +} diff --git a/workflow/validate/validate.go b/workflow/validate/validate.go index e54dd30ea900..98b25ad439ef 100644 --- a/workflow/validate/validate.go +++ b/workflow/validate/validate.go @@ -91,10 +91,10 @@ func (args *FakeArguments) GetArtifactByName(name string) *wfv1.Artifact { var _ wfv1.ArgumentsProvider = &FakeArguments{} // ValidateWorkflow accepts a workflow and performs validation against it. -func ValidateWorkflow(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, wf *wfv1.Workflow, opts ValidateOpts) (*wfv1.WorkflowConditions, error) { +func ValidateWorkflow(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, cwftmplGetter templateresolution.ClusterWorkflowTemplateGetter, wf *wfv1.Workflow, opts ValidateOpts) (*wfv1.WorkflowConditions, error) { wfConditions := &wfv1.WorkflowConditions{} ctx := newTemplateValidationCtx(wf, opts) - tmplCtx := templateresolution.NewContext(wftmplGetter, wf, wf) + tmplCtx := templateresolution.NewContext(wftmplGetter, cwftmplGetter, wf, wf) err := validateWorkflowFieldNames(wf.Spec.Templates) if err != nil { @@ -199,12 +199,12 @@ func ValidateWorkflow(wftmplGetter templateresolution.WorkflowTemplateNamespaced } // ValidateWorkflow accepts a workflow template and performs validation against it. 
-func ValidateWorkflowTemplate(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, wftmpl *wfv1.WorkflowTemplate) error { +func ValidateWorkflowTemplate(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, cwftmplGetter templateresolution.ClusterWorkflowTemplateGetter, wftmpl wfv1.TemplateGetter) error { ctx := newTemplateValidationCtx(nil, ValidateOpts{}) - tmplCtx := templateresolution.NewContext(wftmplGetter, wftmpl, nil) + tmplCtx := templateresolution.NewContext(wftmplGetter, cwftmplGetter, wftmpl, nil) // Check if all templates can be resolved. - for _, template := range wftmpl.Spec.Templates { + for _, template := range wftmpl.GetAllTemplates() { _, err := ctx.validateTemplateHolder(&wfv1.WorkflowStep{Template: template.Name}, tmplCtx, &FakeArguments{}, map[string]interface{}{}) if err != nil { return errors.Errorf(errors.CodeBadRequest, "templates.%s %s", template.Name, err.Error()) @@ -214,7 +214,7 @@ func ValidateWorkflowTemplate(wftmplGetter templateresolution.WorkflowTemplateNa } // ValidateCronWorkflow validates a CronWorkflow -func ValidateCronWorkflow(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, cronWf *wfv1.CronWorkflow) error { +func ValidateCronWorkflow(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, cwftmplGetter templateresolution.ClusterWorkflowTemplateGetter, cronWf *wfv1.CronWorkflow) error { if _, err := cron.ParseStandard(cronWf.Spec.Schedule); err != nil { return errors.Errorf(errors.CodeBadRequest, "cron schedule is malformed: %s", err) } @@ -232,7 +232,7 @@ func ValidateCronWorkflow(wftmplGetter templateresolution.WorkflowTemplateNamesp wf := common.ConvertCronWorkflowToWorkflow(cronWf) - _, err := ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) if err != nil { return errors.Errorf(errors.CodeBadRequest, "cannot validate Workflow: %s", err) } diff --git a/workflow/validate/validate_test.go 
b/workflow/validate/validate_test.go index 4ba4acd62dc2..ac49b050df5c 100644 --- a/workflow/validate/validate_test.go +++ b/workflow/validate/validate_test.go @@ -17,6 +17,7 @@ import ( var wfClientset = fakewfclientset.NewSimpleClientset() var wftmplGetter = templateresolution.WrapWorkflowTemplateInterface(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault)) +var cwftmplGetter = templateresolution.WrapClusterWorkflowTemplateInterface(wfClientset.ArgoprojV1alpha1().ClusterWorkflowTemplates()) func createWorkflowTemplate(yamlStr string) error { wftmpl := unmarshalWftmpl(yamlStr) @@ -31,14 +32,14 @@ func createWorkflowTemplate(yamlStr string) error { // its validation result. func validate(yamlStr string) (*wfv1.WorkflowConditions, error) { wf := unmarshalWf(yamlStr) - return ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + return ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) } // validateWorkflowTemplate is a test helper to accept WorkflowTemplate YAML as a string and return // its validation result. 
func validateWorkflowTemplate(yamlStr string) error { wftmpl := unmarshalWftmpl(yamlStr) - return ValidateWorkflowTemplate(wftmplGetter, wftmpl) + return ValidateWorkflowTemplate(wftmplGetter, cwftmplGetter, wftmpl) } func unmarshalWf(yamlStr string) *wfv1.Workflow { @@ -1009,13 +1010,17 @@ spec: func TestVolumeMountArtifactPathCollision(t *testing.T) { // ensure we detect and reject path collisions wf := unmarshalWf(volumeMountArtifactPathCollision) - _, err := ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) + if assert.NotNil(t, err) { assert.Contains(t, err.Error(), "already mounted") } // tweak the mount path and validation should now be successful wf.Spec.Templates[0].Container.VolumeMounts[0].MountPath = "/differentpath" - _, err = ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + + _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) + assert.NoError(t, err) } @@ -1275,7 +1280,8 @@ func TestPodNameVariable(t *testing.T) { } func TestGlobalParamWithVariable(t *testing.T) { - _, err := ValidateWorkflow(wftmplGetter, test.LoadE2EWorkflow("functional/global-outputs-variable.yaml"), ValidateOpts{}) + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, test.LoadE2EWorkflow("functional/global-outputs-variable.yaml"), ValidateOpts{}) + assert.NoError(t, err) } @@ -1300,9 +1306,11 @@ spec: // TestSpecArgumentNoValue we allow parameters to have no value at the spec level during linting func TestSpecArgumentNoValue(t *testing.T) { wf := unmarshalWf(specArgumentNoValue) - _, err := ValidateWorkflow(wftmplGetter, wf, ValidateOpts{Lint: true}) + + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{Lint: true}) assert.NoError(t, err) - _, err = ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) + assert.NotNil(t, err) } @@ -1337,7 +1345,9 @@ spec: // 
TestSpecArgumentSnakeCase we allow parameter and artifact names to be snake case func TestSpecArgumentSnakeCase(t *testing.T) { wf := unmarshalWf(specArgumentSnakeCase) - _, err := ValidateWorkflow(wftmplGetter, wf, ValidateOpts{Lint: true}) + + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{Lint: true}) + assert.NoError(t, err) } @@ -1372,7 +1382,9 @@ spec: // TestSpecBadSequenceCountAndEnd verifies both count and end cannot be defined func TestSpecBadSequenceCountAndEnd(t *testing.T) { wf := unmarshalWf(specBadSequenceCountAndEnd) - _, err := ValidateWorkflow(wftmplGetter, wf, ValidateOpts{Lint: true}) + + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{Lint: true}) + assert.Error(t, err) } @@ -1392,7 +1404,9 @@ spec: // TestCustomTemplatVariable verifies custom template variable func TestCustomTemplatVariable(t *testing.T) { wf := unmarshalWf(customVariableInput) - _, err := ValidateWorkflow(wftmplGetter, wf, ValidateOpts{Lint: true}) + + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{Lint: true}) + assert.Equal(t, err, nil) } @@ -1493,28 +1507,28 @@ func TestBaseImageOutputVerify(t *testing.T) { for _, executor := range []string{common.ContainerRuntimeExecutorK8sAPI, common.ContainerRuntimeExecutorKubelet, common.ContainerRuntimeExecutorPNS, common.ContainerRuntimeExecutorDocker, ""} { switch executor { case common.ContainerRuntimeExecutorK8sAPI, common.ContainerRuntimeExecutorKubelet: - _, err = ValidateWorkflow(wftmplGetter, wfBaseOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) + _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wfBaseOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) assert.Error(t, err) - _, err = ValidateWorkflow(wftmplGetter, wfBaseOutParam, ValidateOpts{ContainerRuntimeExecutor: executor}) + _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wfBaseOutParam, ValidateOpts{ContainerRuntimeExecutor: executor}) assert.Error(t, err) - _, err 
= ValidateWorkflow(wftmplGetter, wfBaseWithEmptyDirOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) + _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wfBaseWithEmptyDirOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) assert.Error(t, err) case common.ContainerRuntimeExecutorPNS: - _, err = ValidateWorkflow(wftmplGetter, wfBaseOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) + _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wfBaseOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) assert.NoError(t, err) - _, err = ValidateWorkflow(wftmplGetter, wfBaseOutParam, ValidateOpts{ContainerRuntimeExecutor: executor}) + _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wfBaseOutParam, ValidateOpts{ContainerRuntimeExecutor: executor}) assert.NoError(t, err) - _, err = ValidateWorkflow(wftmplGetter, wfBaseWithEmptyDirOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) + _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wfBaseWithEmptyDirOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) assert.Error(t, err) case common.ContainerRuntimeExecutorDocker, "": - _, err = ValidateWorkflow(wftmplGetter, wfBaseOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) + _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wfBaseOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) assert.NoError(t, err) - _, err = ValidateWorkflow(wftmplGetter, wfBaseOutParam, ValidateOpts{ContainerRuntimeExecutor: executor}) + _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wfBaseOutParam, ValidateOpts{ContainerRuntimeExecutor: executor}) assert.NoError(t, err) - _, err = ValidateWorkflow(wftmplGetter, wfBaseWithEmptyDirOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) + _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wfBaseWithEmptyDirOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) assert.NoError(t, err) } - _, err = ValidateWorkflow(wftmplGetter, wfEmptyDirOutArt, 
ValidateOpts{ContainerRuntimeExecutor: executor}) + _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wfEmptyDirOutArt, ValidateOpts{ContainerRuntimeExecutor: executor}) assert.NoError(t, err) } } @@ -1809,7 +1823,9 @@ spec: // TestValidResourceWorkflow verifies a workflow of a valid resource. func TestValidResourceWorkflow(t *testing.T) { wf := unmarshalWf(validResourceWorkflow) - _, err := ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) + assert.Equal(t, err, nil) } @@ -1852,11 +1868,11 @@ spec: // TestInvalidResourceWorkflow verifies an error against a workflow of an invalid resource. func TestInvalidResourceWorkflow(t *testing.T) { wf := unmarshalWf(invalidResourceWorkflow) - _, err := ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) assert.EqualError(t, err, "templates.whalesay.resource.manifest must be a valid yaml") wf = unmarshalWf(invalidActionResourceWorkflow) - _, err = ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) assert.EqualError(t, err, "templates.whalesay.resource.action must be one of: get, create, apply, delete, replace, patch") } @@ -1880,12 +1896,13 @@ spec: // TestUnknownPodGCStrategy verifies pod gc strategy is correct. 
func TestUnknownPodGCStrategy(t *testing.T) { wf := unmarshalWf(invalidPodGC) - _, err := ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) + assert.EqualError(t, err, "podGC.strategy unknown strategy 'Foo'") for _, strat := range []wfv1.PodGCStrategy{wfv1.PodGCOnPodCompletion, wfv1.PodGCOnPodSuccess, wfv1.PodGCOnWorkflowCompletion, wfv1.PodGCOnWorkflowSuccess} { wf.Spec.PodGC.Strategy = strat - _, err = ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) assert.NoError(t, err) } } @@ -1960,22 +1977,26 @@ spec: func TestAutomountServiceAccountTokenUse(t *testing.T) { { wf := unmarshalWf(validAutomountServiceAccountTokenUseWfLevel) - _, err := ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) + assert.NoError(t, err) } { wf := unmarshalWf(validAutomountServiceAccountTokenUseTmplLevel) - _, err := ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) + assert.NoError(t, err) } { wf := unmarshalWf(invalidAutomountServiceAccountTokenUseWfLevel) - _, err := ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) + assert.EqualError(t, err, "templates.whalesay.executor.serviceAccountName must not be empty if automountServiceAccountToken is false") } { wf := unmarshalWf(invalidAutomountServiceAccountTokenUseTmplLevel) - _, err := ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) + assert.EqualError(t, err, "templates.whalesay.executor.serviceAccountName must not be empty if automountServiceAccountToken is false") } } @@ -2017,7 +2038,8 @@ spec: func TestTemplateResolutionWithPlaceholderWorkflow(t *testing.T) { { wf := 
unmarshalWf(templateResolutionWithPlaceholderWorkflow) - _, err := ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) + assert.NoError(t, err) } } @@ -2120,7 +2142,8 @@ spec: func TestAllowPlaceholderInVariableTakenFromInputs(t *testing.T) { { wf := unmarshalWf(allowPlaceholderInVariableTakenFromInputs) - _, err := ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) + assert.NoError(t, err) } } @@ -2178,6 +2201,7 @@ spec: // TestInvalidResourceWorkflow verifies an error against a workflow of an invalid resource. func TestRuntimeResolutionOfVariableNames(t *testing.T) { wf := unmarshalWf(runtimeResolutionOfVariableNames) - _, err := ValidateWorkflow(wftmplGetter, wf, ValidateOpts{}) + _, err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) + assert.NoError(t, err) }