diff --git a/.github/workflows/pr-e2e-checker.yml b/.github/workflows/pr-e2e-checker.yml index 7457c230cf7..48851b07607 100644 --- a/.github/workflows/pr-e2e-checker.yml +++ b/.github/workflows/pr-e2e-checker.yml @@ -18,7 +18,7 @@ jobs: name: label checker runs-on: ubuntu-latest steps: - - uses: LouisBrunner/checks-action@v1.6.2 + - uses: LouisBrunner/checks-action@6b626ffbad7cc56fd58627f774b9067e6118af23 # v2 name: Enqueue e2e id: create with: @@ -27,7 +27,7 @@ jobs: name: ${{ env.E2E_CHECK_NAME }} status: queued - - uses: LouisBrunner/checks-action@v1.6.2 + - uses: LouisBrunner/checks-action@6b626ffbad7cc56fd58627f774b9067e6118af23 # v2 name: Skip e2e if: ${{ contains(github.event.pull_request.labels.*.name, env.SKIP_E2E_TAG )}} with: diff --git a/.github/workflows/pr-e2e-creator.yml b/.github/workflows/pr-e2e-creator.yml index 55c0119b7c0..f263683163e 100644 --- a/.github/workflows/pr-e2e-creator.yml +++ b/.github/workflows/pr-e2e-creator.yml @@ -18,7 +18,7 @@ jobs: name: check-creator runs-on: ubuntu-latest steps: - - uses: LouisBrunner/checks-action@v1.6.2 + - uses: LouisBrunner/checks-action@6b626ffbad7cc56fd58627f774b9067e6118af23 # v2 name: Enqueue e2e id: create with: @@ -27,7 +27,7 @@ jobs: name: ${{ env.E2E_CHECK_NAME }} status: queued - - uses: LouisBrunner/checks-action@v1.6.2 + - uses: LouisBrunner/checks-action@6b626ffbad7cc56fd58627f774b9067e6118af23 # v2 name: Skip e2e if: ${{ contains(github.event.pull_request.labels.*.name, env.SKIP_E2E_TAG )}} with: diff --git a/.github/workflows/pr-e2e.yml b/.github/workflows/pr-e2e.yml index 68c5987ed4c..3f8cefcf11b 100644 --- a/.github/workflows/pr-e2e.yml +++ b/.github/workflows/pr-e2e.yml @@ -27,7 +27,7 @@ jobs: - name: Update comment with the execution url if: ${{ startsWith(github.event.comment.body,'/run-e2e') && steps.checkUserMember.outputs.isTeamMember == 'true' }} - uses: peter-evans/create-or-update-comment@v2 + uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4 with: comment-id: ${{ github.event.comment.id }} body: | @@ -72,7 +72,7 @@ jobs: if: needs.triage.outputs.run-e2e == 'true' steps: - name: Set status in-progress - uses: LouisBrunner/checks-action@v1.6.2 + uses: LouisBrunner/checks-action@6b626ffbad7cc56fd58627f774b9067e6118af23 # v2 with: token: ${{ secrets.GITHUB_TOKEN }} sha: ${{ needs.triage.outputs.commit_sha }} @@ -116,7 +116,7 @@ jobs: if: needs.triage.outputs.run-e2e == 'true' steps: - name: Set status in-progress - uses: LouisBrunner/checks-action@v1.6.2 + uses: LouisBrunner/checks-action@6b626ffbad7cc56fd58627f774b9067e6118af23 # v2 with: token: ${{ secrets.GITHUB_TOKEN }} sha: ${{ needs.triage.outputs.commit_sha }} @@ -191,7 +191,7 @@ jobs: reaction: "+1" - name: Set status success - uses: LouisBrunner/checks-action@v1.6.2 + uses: LouisBrunner/checks-action@6b626ffbad7cc56fd58627f774b9067e6118af23 # v2 if: steps.test.outcome == 'success' with: token: ${{ secrets.GITHUB_TOKEN }} @@ -209,7 +209,7 @@ jobs: reaction: "-1" - name: Set status failure - uses: LouisBrunner/checks-action@v1.6.2 + uses: LouisBrunner/checks-action@6b626ffbad7cc56fd58627f774b9067e6118af23 # v2 if: steps.test.outcome != 'success' with: token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index a40476b3105..77e128b0cb6 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -90,7 +90,7 @@ jobs: - name: Register workspace path run: git config --global --add safe.directory "$GITHUB_WORKSPACE" - 
- uses: dorny/paths-filter@v2 + - uses: dorny/paths-filter@0bc4621a3135347011ad047f9ecf449bf72ce2bd # v3 id: filter with: filters: | @@ -118,7 +118,7 @@ jobs: - name: Register workspace path run: git config --global --add safe.directory "$GITHUB_WORKSPACE" - - uses: dorny/paths-filter@v2 + - uses: dorny/paths-filter@0bc4621a3135347011ad047f9ecf449bf72ce2bd # v3 id: filter with: filters: | diff --git a/.github/workflows/template-trivy-scan.yml b/.github/workflows/template-trivy-scan.yml index 7aba7d70e1d..8bab390a081 100644 --- a/.github/workflows/template-trivy-scan.yml +++ b/.github/workflows/template-trivy-scan.yml @@ -39,7 +39,7 @@ jobs: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 - name: Run Trivy - uses: aquasecurity/trivy-action@0.16.0 + uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca # v0.16.1 with: scan-type: ${{ inputs.scan-type }} image-ref: ${{ inputs.image-ref }} diff --git a/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml b/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml index 3f367731e56..2225f2b6ec8 100644 --- a/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml +++ b/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: cloudeventsources.eventing.keda.sh spec: group: eventing.keda.sh @@ -25,14 +25,19 @@ spec: sink properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object diff --git a/config/crd/bases/keda.sh_clustertriggerauthentications.yaml b/config/crd/bases/keda.sh_clustertriggerauthentications.yaml index da5895d71d9..05c21c07497 100644 --- a/config/crd/bases/keda.sh_clustertriggerauthentications.yaml +++ b/config/crd/bases/keda.sh_clustertriggerauthentications.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: clustertriggerauthentications.keda.sh spec: group: keda.sh @@ -45,14 +45,19 @@ spec: globally properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -129,8 +134,9 @@ spec: - accessSecretKey type: object podIdentity: - description: AuthPodIdentity allows users to select the platform - native identity mechanism + description: |- + AuthPodIdentity allows users to select the platform native identity + mechanism properties: identityId: type: string @@ -227,8 +233,9 @@ spec: - tenantId type: object podIdentity: - description: AuthPodIdentity allows users to select the platform - native identity mechanism + description: |- + AuthPodIdentity allows users to select the platform native identity + mechanism properties: identityId: type: string @@ -296,8 +303,9 @@ spec: type: array env: items: - description: AuthEnvironment is used to authenticate using environment - variables in the destination ScaleTarget spec + description: |- + AuthEnvironment is used to authenticate using environment variables + in the destination ScaleTarget spec properties: containerName: type: string @@ -338,8 +346,9 @@ spec: - clientSecret type: object podIdentity: - description: AuthPodIdentity allows users to select the platform - native identity mechanism + description: |- + AuthPodIdentity allows users to select the platform native identity + mechanism properties: identityId: type: string @@ -453,8 +462,9 @@ spec: - secrets type: object podIdentity: - description: AuthPodIdentity allows users to select the platform native - identity mechanism + description: |- + AuthPodIdentity allows users to select the platform native identity + mechanism properties: identityId: type: string diff --git a/config/crd/bases/keda.sh_scaledjobs.yaml b/config/crd/bases/keda.sh_scaledjobs.yaml index 3d57ca40179..2ff92b03b46 100644 --- a/config/crd/bases/keda.sh_scaledjobs.yaml +++ b/config/crd/bases/keda.sh_scaledjobs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: scaledjobs.keda.sh spec: group: keda.sh @@ -47,14 +47,19 @@ spec: description: ScaledJob is the Schema for the scaledjobs API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -70,169 +75,185 @@ spec: description: JobSpec describes how the job execution will look like. properties: activeDeadlineSeconds: - description: Specifies the duration in seconds relative to the - startTime that the job may be continuously active before the - system tries to terminate it; value must be positive integer. - If a Job is suspended (at creation or through an update), this - timer will effectively be stopped and reset when the Job is + description: |- + Specifies the duration in seconds relative to the startTime that the job + may be continuously active before the system tries to terminate it; value + must be positive integer. If a Job is suspended (at creation or through an + update), this timer will effectively be stopped and reset when the Job is resumed again. format: int64 type: integer backoffLimit: - description: Specifies the number of retries before marking this - job failed. Defaults to 6 + description: |- + Specifies the number of retries before marking this job failed. + Defaults to 6 format: int32 type: integer backoffLimitPerIndex: - description: Specifies the limit for the number of retries within - an index before marking this index as failed. When enabled the - number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count - annotation. It can only be set when Job's completionMode=Indexed, - and the Pod's restart policy is Never. The field is immutable. + description: |- + Specifies the limit for the number of retries within an + index before marking this index as failed. When enabled the number of + failures per index is kept in the pod's + batch.kubernetes.io/job-index-failure-count annotation. It can only + be set when Job's completionMode=Indexed, and the Pod's restart + policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). format: int32 type: integer completionMode: - description: "completionMode specifies how Pod completions are - tracked. It can be `NonIndexed` (default) or `Indexed`. \n `NonIndexed` - means that the Job is considered complete when there have been - .spec.completions successfully completed Pods. Each Pod completion - is homologous to each other. \n `Indexed` means that the Pods - of a Job get an associated completion index from 0 to (.spec.completions - - 1), available in the annotation batch.kubernetes.io/job-completion-index. - The Job is considered complete when there is one successfully - completed Pod for each index. When value is `Indexed`, .spec.completions - must be specified and `.spec.parallelism` must be less than - or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, - the Pod hostname takes the form `$(job-name)-$(index)`. \n More - completion modes can be added in the future. If the Job controller - observes a mode that it doesn't recognize, which is possible - during upgrades due to version skew, the controller skips updates - for the Job." + description: |- + completionMode specifies how Pod completions are tracked. It can be + `NonIndexed` (default) or `Indexed`. + + + `NonIndexed` means that the Job is considered complete when there have + been .spec.completions successfully completed Pods. Each Pod completion is + homologous to each other. 
+ + + `Indexed` means that the Pods of a + Job get an associated completion index from 0 to (.spec.completions - 1), + available in the annotation batch.kubernetes.io/job-completion-index. + The Job is considered complete when there is one successfully completed Pod + for each index. + When value is `Indexed`, .spec.completions must be specified and + `.spec.parallelism` must be less than or equal to 10^5. + In addition, The Pod name takes the form + `$(job-name)-$(index)-$(random-string)`, + the Pod hostname takes the form `$(job-name)-$(index)`. + + + More completion modes can be added in the future. + If the Job controller observes a mode that it doesn't recognize, which + is possible during upgrades due to version skew, the controller + skips updates for the Job. type: string completions: - description: 'Specifies the desired number of successfully finished - pods the job should be run with. Setting to null means that - the success of any pod signals the success of all pods, and - allows parallelism to have any positive value. Setting to 1 - means that parallelism is limited to 1 and the success of that - pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/' + description: |- + Specifies the desired number of successfully finished pods the + job should be run with. Setting to null means that the success of any + pod signals the success of all pods, and allows parallelism to have any positive + value. Setting to 1 means that parallelism is limited to 1 and the success of that + pod signals the success of the job. + More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ format: int32 type: integer manualSelector: - description: 'manualSelector controls generation of pod labels - and pod selectors. Leave `manualSelector` unset unless you are - certain what you are doing. When false or unset, the system - pick labels unique to this job and appends those labels to the - pod template. When true, the user is responsible for picking - unique labels and specifying the selector. Failure to pick - a unique label may cause this and other jobs to not function - correctly. However, You may see `manualSelector=true` in jobs - that were created with the old `extensions/v1beta1` API. More - info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector' + description: |- + manualSelector controls generation of pod labels and pod selectors. + Leave `manualSelector` unset unless you are certain what you are doing. + When false or unset, the system pick labels unique to this job + and appends those labels to the pod template. When true, + the user is responsible for picking unique labels and specifying + the selector. Failure to pick a unique label may cause this + and other jobs to not function correctly. However, You may see + `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + API. + More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector type: boolean maxFailedIndexes: - description: Specifies the maximal number of failed indexes before - marking the Job as failed, when backoffLimitPerIndex is set. - Once the number of failed indexes exceeds this number the entire - Job is marked as Failed and its execution is terminated. 
When - left as null the job continues execution of all of its indexes - and is marked with the `Complete` Job condition. It can only - be specified when backoffLimitPerIndex is set. It can be null - or up to completions. It is required and must be less than or - equal to 10^4 when is completions greater than 10^5. This field - is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + description: |- + Specifies the maximal number of failed indexes before marking the Job as + failed, when backoffLimitPerIndex is set. Once the number of failed + indexes exceeds this number the entire Job is marked as Failed and its + execution is terminated. When left as null the job continues execution of + all of its indexes and is marked with the `Complete` Job condition. + It can only be specified when backoffLimitPerIndex is set. + It can be null or up to completions. It is required and must be + less than or equal to 10^4 when is completions greater than 10^5. + This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). format: int32 type: integer parallelism: - description: 'Specifies the maximum desired number of pods the - job should run at any given time. The actual number of pods - running in steady state will be less than this number when ((.spec.completions - - .status.successful) < .spec.parallelism), i.e. when the work - left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/' + description: |- + Specifies the maximum desired number of pods the job should + run at any given time. The actual number of pods running in steady state will + be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + i.e. when the work left to do is less than max parallelism. + More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ format: int32 type: integer podFailurePolicy: - description: "Specifies the policy of handling failed pods. In - particular, it allows to specify the set of actions and conditions - which need to be satisfied to take the associated action. If - empty, the default behaviour applies - the counter of failed - pods, represented by the jobs's .status.failed field, is incremented - and it is checked against the backoffLimit. This field cannot - be used in combination with restartPolicy=OnFailure. \n This - field is beta-level. It can be used when the `JobPodFailurePolicy` - feature gate is enabled (enabled by default)." + description: |- + Specifies the policy of handling failed pods. In particular, it allows to + specify the set of actions and conditions which need to be + satisfied to take the associated action. + If empty, the default behaviour applies - the counter of failed pods, + represented by the jobs's .status.failed field, is incremented and it is + checked against the backoffLimit. This field cannot be used in combination + with restartPolicy=OnFailure. + + + This field is beta-level. It can be used when the `JobPodFailurePolicy` + feature gate is enabled (enabled by default). properties: rules: - description: A list of pod failure policy rules. The rules - are evaluated in order. Once a rule matches a Pod failure, - the remaining of the rules are ignored. When no rule matches - the Pod failure, the default handling applies - the counter - of pod failures is incremented and it is checked against + description: |- + A list of pod failure policy rules. 
The rules are evaluated in order. + Once a rule matches a Pod failure, the remaining of the rules are ignored. + When no rule matches the Pod failure, the default handling applies - the + counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed. items: - description: PodFailurePolicyRule describes how a pod failure - is handled when the requirements are met. One of onExitCodes - and onPodConditions, but not both, can be used in each - rule. + description: |- + PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. + One of onExitCodes and onPodConditions, but not both, can be used in each rule. properties: action: - description: "Specifies the action taken on a pod failure - when the requirements are satisfied. Possible values - are: \n - FailJob: indicates that the pod's job is - marked as Failed and all running pods are terminated. - - FailIndex: indicates that the pod's index is marked - as Failed and will not be restarted. This value is - alpha-level. It can be used when the `JobBackoffLimitPerIndex` - feature gate is enabled (disabled by default). - Ignore: - indicates that the counter towards the .backoffLimit - is not incremented and a replacement pod is created. - - Count: indicates that the pod is handled in the - default way - the counter towards the .backoffLimit - is incremented. Additional values are considered to - be added in the future. Clients should react to an - unknown action by skipping the rule." + description: |- + Specifies the action taken on a pod failure when the requirements are satisfied. + Possible values are: + + + - FailJob: indicates that the pod's job is marked as Failed and all + running pods are terminated. + - FailIndex: indicates that the pod's index is marked as Failed and will + not be restarted. + This value is alpha-level. It can be used when the + `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). + - Ignore: indicates that the counter towards the .backoffLimit is not + incremented and a replacement pod is created. + - Count: indicates that the pod is handled in the default way - the + counter towards the .backoffLimit is incremented. + Additional values are considered to be added in the future. Clients should + react to an unknown action by skipping the rule. type: string onExitCodes: description: Represents the requirement on the container exit codes. properties: containerName: - description: Restricts the check for exit codes - to the container with the specified name. When - null, the rule applies to all containers. When - specified, it should match one the container or - initContainer names in the pod template. + description: |- + Restricts the check for exit codes to the container with the + specified name. When null, the rule applies to all containers. + When specified, it should match one the container or initContainer + names in the pod template. type: string operator: - description: "Represents the relationship between - the container exit code(s) and the specified values. - Containers completed with success (exit code 0) - are excluded from the requirement check. Possible - values are: \n - In: the requirement is satisfied - if at least one container exit code (might be - multiple if there are multiple containers not - restricted by the 'containerName' field) is in - the set of specified values. 
- NotIn: the requirement - is satisfied if at least one container exit code - (might be multiple if there are multiple containers - not restricted by the 'containerName' field) is - not in the set of specified values. Additional - values are considered to be added in the future. - Clients should react to an unknown operator by - assuming the requirement is not satisfied." + description: |- + Represents the relationship between the container exit code(s) and the + specified values. Containers completed with success (exit code 0) are + excluded from the requirement check. Possible values are: + + + - In: the requirement is satisfied if at least one container exit code + (might be multiple if there are multiple containers not restricted + by the 'containerName' field) is in the set of specified values. + - NotIn: the requirement is satisfied if at least one container exit code + (might be multiple if there are multiple containers not restricted + by the 'containerName' field) is not in the set of specified values. + Additional values are considered to be added in the future. Clients should + react to an unknown operator by assuming the requirement is not satisfied. type: string values: - description: Specifies the set of values. Each returned - container exit code (might be multiple in case - of multiple containers) is checked against this - set of values with respect to the operator. The - list of values must be ordered and must not contain - duplicates. Value '0' cannot be used for the In - operator. At least one element is required. At - most 255 elements are allowed. + description: |- + Specifies the set of values. Each returned container exit code (might be + multiple in case of multiple containers) is checked against this set of + values with respect to the operator. The list of values must be ordered + and must not contain duplicates. Value '0' cannot be used for the In operator. + At least one element is required. At most 255 elements are allowed. items: format: int32 type: integer @@ -243,27 +264,25 @@ spec: - values type: object onPodConditions: - description: Represents the requirement on the pod conditions. - The requirement is represented as a list of pod condition - patterns. The requirement is satisfied if at least - one pattern matches an actual pod condition. At most - 20 elements are allowed. + description: |- + Represents the requirement on the pod conditions. The requirement is represented + as a list of pod condition patterns. The requirement is satisfied if at + least one pattern matches an actual pod condition. At most 20 elements are allowed. items: - description: PodFailurePolicyOnPodConditionsPattern - describes a pattern for matching an actual pod condition - type. + description: |- + PodFailurePolicyOnPodConditionsPattern describes a pattern for matching + an actual pod condition type. properties: status: - description: Specifies the required Pod condition - status. To match a pod condition it is required - that the specified status equals the pod condition - status. Defaults to True. + description: |- + Specifies the required Pod condition status. To match a pod condition + it is required that the specified status equals the pod condition status. + Defaults to True. type: string type: - description: Specifies the required Pod condition - type. To match a pod condition it is required - that specified type equals the pod condition - type. + description: |- + Specifies the required Pod condition type. 
To match a pod condition + it is required that specified type equals the pod condition type. type: string required: - status @@ -280,45 +299,48 @@ spec: - rules type: object podReplacementPolicy: - description: "podReplacementPolicy specifies when to create replacement - Pods. Possible values are: - TerminatingOrFailed means that - we recreate pods when they are terminating (has a metadata.deletionTimestamp) - or failed. - Failed means to wait until a previously created - Pod is fully terminated (has phase Failed or Succeeded) before - creating a replacement Pod. \n When using podFailurePolicy, - Failed is the the only allowed value. TerminatingOrFailed and - Failed are allowed values when podFailurePolicy is not in use. - This is an alpha field. Enable JobPodReplacementPolicy to be - able to use this field." + description: |- + podReplacementPolicy specifies when to create replacement Pods. + Possible values are: + - TerminatingOrFailed means that we recreate pods + when they are terminating (has a metadata.deletionTimestamp) or failed. + - Failed means to wait until a previously created Pod is fully terminated (has phase + Failed or Succeeded) before creating a replacement Pod. + + + When using podFailurePolicy, Failed is the the only allowed value. + TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. + This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field. type: string selector: - description: 'A label query over pods that should match the pod - count. Normally, the system sets this field for you. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors' + description: |- + A label query over pods that should match the pod count. + Normally, the system sets this field for you. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -330,42 +352,44 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. 
A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic suspend: - description: suspend specifies whether the Job controller should - create Pods or not. If a Job is created with suspend set to - true, no Pods are created by the Job controller. If a Job is - suspended after creation (i.e. the flag goes from false to true), - the Job controller will delete all active Pods associated with - this Job. Users must design their workload to gracefully handle - this. Suspending a Job will reset the StartTime field of the - Job, effectively resetting the ActiveDeadlineSeconds timer too. - Defaults to false. + description: |- + suspend specifies whether the Job controller should create Pods or not. If + a Job is created with suspend set to true, no Pods are created by the Job + controller. If a Job is suspended after creation (i.e. the flag goes from + false to true), the Job controller will delete all active Pods associated + with this Job. Users must design their workload to gracefully handle this. + Suspending a Job will reset the StartTime field of the Job, effectively + resetting the ActiveDeadlineSeconds timer too. Defaults to false. type: boolean template: - description: 'Describes the pod that will be created when executing - a job. The only allowed template.spec.restartPolicy values are - "Never" or "OnFailure". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/' + description: |- + Describes the pod that will be created when executing a job. + The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". + More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ properties: metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata type: object spec: - description: 'Specification of the desired behavior of the - pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status properties: activeDeadlineSeconds: - description: Optional duration in seconds the pod may - be active on the node relative to StartTime before the - system will actively try to mark it failed and kill - associated containers. Value must be a positive integer. + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. format: int64 type: integer affinity: @@ -376,24 +400,20 @@ spec: for the pod. 
properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) - with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling - term matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -403,35 +423,26 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array @@ -444,35 +455,26 @@ spec: description: A list of node selector requirements by node's fields. 
items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array @@ -495,57 +497,46 @@ spec: type: object type: array requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to an update), the system - may or may not try to eventually evict the pod - from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector - term matches no objects. The requirements - of them are ANDed. The TopologySelectorTerm - type implements a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array @@ -558,35 +549,26 @@ spec: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array @@ -609,20 +591,16 @@ spec: etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most - preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -641,11 +619,9 @@ spec: a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -653,23 +629,16 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -681,39 +650,29 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaceSelector: - description: A label query over the - set of namespaces that the term applies - to. The term is applied to the union - of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty - namespaces list means "this pod's - namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. 
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -721,23 +680,16 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -749,49 +701,37 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a - static list of namespace names that - the term applies to. The term is applied - to the union of the namespaces listed - in this field and the ones selected - by namespaceSelector. null or empty - namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
format: int32 type: integer required: @@ -800,26 +740,22 @@ spec: type: object type: array requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to a pod label update), - the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: description: A label query over a set of @@ -830,10 +766,9 @@ spec: of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -841,20 +776,16 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -866,36 +797,29 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. 
A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -903,20 +827,16 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -928,38 +848,29 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. 
The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey @@ -972,20 +883,16 @@ spec: zone, etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity - expressions specified by this field, but it - may choose a node that violates one or more - of the expressions. The node that is most preferred - is the one with the greatest sum of weights, - i.e. for each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - anti-affinity expressions, etc.), compute a - sum by iterating through the elements of this - field and adding "weight" to the sum if the - node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest - sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -1004,11 +911,9 @@ spec: a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1016,23 +921,16 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. 
+ description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -1044,39 +942,29 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaceSelector: - description: A label query over the - set of namespaces that the term applies - to. The term is applied to the union - of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty - namespaces list means "this pod's - namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1084,23 +972,16 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
items: type: string type: array @@ -1112,49 +993,37 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a - static list of namespace names that - the term applies to. The term is applied - to the union of the namespaces listed - in this field and the ones selected - by namespaceSelector. null or empty - namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -1163,26 +1032,22 @@ spec: type: object type: array requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements - specified by this field are not met at scheduling - time, the pod will not be scheduled onto the - node. If the anti-affinity requirements specified - by this field cease to be met at some point - during pod execution (e.g. due to a pod label - update), the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. 
+ When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: description: A label query over a set of @@ -1193,10 +1058,9 @@ spec: of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1204,20 +1068,16 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -1229,36 +1089,29 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. 
+ The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1266,20 +1119,16 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -1291,38 +1140,29 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. 
+ description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey @@ -1335,47 +1175,45 @@ spec: a service account token should be automatically mounted. type: boolean containers: - description: List of containers belonging to the pod. - Containers cannot currently be added or removed. There - must be at least one container in a Pod. Cannot be updated. + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. items: description: A single application container that you want to run within a pod. properties: args: - description: 'Arguments to the entrypoint. The container - image''s CMD is used if this is not provided. - Variable references $(VAR_NAME) are expanded using - the container''s environment. If a variable cannot - be resolved, the reference in the input string - will be unchanged. Double $$ are reduced to a - single $, which allows for escaping the $(VAR_NAME) - syntax: i.e. "$$(VAR_NAME)" will produce the string - literal "$(VAR_NAME)". Escaped references will - never be expanded, regardless of whether the variable - exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell items: type: string type: array command: - description: 'Entrypoint array. Not executed within - a shell. The container image''s ENTRYPOINT is - used if this is not provided. Variable references - $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, - the reference in the input string will be unchanged. - Double $$ are reduced to a single $, which allows - for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" - will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot - be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
"$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell items: type: string type: array env: - description: List of environment variables to set - in the container. Cannot be updated. + description: |- + List of environment variables to set in the container. + Cannot be updated. items: description: EnvVar represents an environment variable present in a Container. @@ -1385,18 +1223,16 @@ spec: Must be a C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) - are expanded using the previously defined - environment variables in the container and - any service environment variables. If a - variable cannot be resolved, the reference - in the input string will be unchanged. Double - $$ are reduced to a single $, which allows - for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal - "$(VAR_NAME)". Escaped references will never - be expanded, regardless of whether the variable - exists or not. Defaults to "".' + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's @@ -1409,10 +1245,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. + description: |- + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap @@ -1423,11 +1259,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: - supports metadata.name, metadata.namespace, - `metadata.labels['''']`, `metadata.annotations['''']`, - spec.nodeName, spec.serviceAccountName, - status.hostIP, status.podIP, status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema @@ -1443,12 +1277,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the - container: only resources limits and - requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, - requests.memory and requests.ephemeral-storage) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
properties: containerName: description: 'Container name: required @@ -1481,10 +1312,10 @@ spec: secret key. type: string name: - description: 'Name of the referent. + description: |- + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret @@ -1500,15 +1331,13 @@ spec: type: object type: array envFrom: - description: List of sources to populate environment - variables in the container. The keys defined within - a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container - is starting. When a key exists in multiple sources, - the value associated with the last source will - take precedence. Values defined by an Env with - a duplicate key will take precedence. Cannot be - updated. + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. items: description: EnvFromSource represents the source of a set of ConfigMaps @@ -1517,10 +1346,10 @@ spec: description: The ConfigMap to select from properties: name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap @@ -1537,10 +1366,10 @@ spec: description: The Secret to select from properties: name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret @@ -1551,46 +1380,43 @@ spec: type: object type: array image: - description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config - management to default or override container images - in workload controllers like Deployments and StatefulSets.' + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. type: string imagePullPolicy: - description: 'Image pull policy. One of Always, - Never, IfNotPresent. Defaults to Always if :latest - tag is specified, or IfNotPresent otherwise. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. 
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images type: string lifecycle: - description: Actions that the management system - should take in response to container lifecycle - events. Cannot be updated. + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. properties: postStart: - description: 'PostStart is called immediately - after a container is created. If the handler - fails, the container is terminated and restarted - according to its restart policy. Other management - of the container blocks until the hook completes. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command - line to execute inside the container, - the working directory for the command is - root ('/') in the container's filesystem. - The command is simply exec'd, it is - not run inside a shell, so traditional - shell instructions ('|', etc) won't - work. To use a shell, you need to - explicitly call out to that shell. - Exit status of 0 is treated as live/healthy - and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array @@ -1600,10 +1426,9 @@ spec: request to perform. properties: host: - description: Host name to connect to, - defaults to the pod IP. You probably - want to set "Host" in httpHeaders - instead. + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in @@ -1615,11 +1440,9 @@ spec: probes properties: name: - description: The header field - name. This will be canonicalized - upon output, so case-variant - names will be understood as - the same header. + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field @@ -1638,25 +1461,24 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number - must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
type: string required: - port type: object tcpSocket: - description: Deprecated. TCPSocket is NOT - supported as a LifecycleHandler and kept - for the backward compatibility. There - are no validation of this field and lifecycle - hooks will fail in runtime when tcp handler - is specified. + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: host: description: 'Optional: Host name to @@ -1666,47 +1488,38 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number - must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object type: object preStop: - description: 'PreStop is called immediately - before a container is terminated due to an - API request or management event such as liveness/startup - probe failure, preemption, resource contention, - etc. The handler is not called if the container - crashes or exits. The Pod''s termination grace - period countdown begins before the PreStop - hook is executed. Regardless of the outcome - of the handler, the container will eventually - terminate within the Pod''s termination grace - period (unless delayed by finalizers). Other - management of the container blocks until the - hook completes or until the termination grace - period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command - line to execute inside the container, - the working directory for the command is - root ('/') in the container's filesystem. - The command is simply exec'd, it is - not run inside a shell, so traditional - shell instructions ('|', etc) won't - work. To use a shell, you need to - explicitly call out to that shell. - Exit status of 0 is treated as live/healthy - and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array @@ -1716,10 +1529,9 @@ spec: request to perform. 
properties: host: - description: Host name to connect to, - defaults to the pod IP. You probably - want to set "Host" in httpHeaders - instead. + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in @@ -1731,11 +1543,9 @@ spec: probes properties: name: - description: The header field - name. This will be canonicalized - upon output, so case-variant - names will be understood as - the same header. + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field @@ -1754,25 +1564,24 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number - must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object tcpSocket: - description: Deprecated. TCPSocket is NOT - supported as a LifecycleHandler and kept - for the backward compatibility. There - are no validation of this field and lifecycle - hooks will fail in runtime when tcp handler - is specified. + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: host: description: 'Optional: Host name to @@ -1782,10 +1591,10 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number - must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port @@ -1793,33 +1602,30 @@ spec: type: object type: object livenessProbe: - description: 'Periodic probe of container liveness. + description: |- + Periodic probe of container liveness. Container will be restarted if the probe fails. - Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line - to execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. The command - is simply exec'd, it is not run inside - a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, - you need to explicitly call out to that - shell. Exit status of 0 is treated as - live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array type: object failureThreshold: - description: Minimum consecutive failures for - the probe to be considered failed after having - succeeded. Defaults to 3. Minimum value is - 1. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: @@ -1832,11 +1638,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the - service to place in the gRPC HealthCheckRequest + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default - behavior is defined by gRPC." + + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -1846,8 +1653,8 @@ spec: to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. type: string httpHeaders: @@ -1858,10 +1665,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name. - This will be canonicalized upon - output, so case-variant names will - be understood as the same header. + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -1879,35 +1685,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform - the probe. Default to 10 seconds. Minimum - value is 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for - the probe to be considered successful after - having failed. Defaults to 1. Must be 1 for - liveness and startup. Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. 
Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -1922,63 +1728,59 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the - pod needs to terminate gracefully upon probe - failure. The grace period is the duration - in seconds after the processes running in - the pod are sent a termination signal and - the time when the processes are forcibly halted - with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides - the value provided by the pod spec. Value - must be non-negative integer. The value zero - indicates stop immediately via the kill signal - (no opportunity to shut down). This is a beta - field and requires enabling ProbeTerminationGracePeriod - feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which - the probe times out. Defaults to 1 second. - Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object name: - description: Name of the container specified as - a DNS_LABEL. Each container in a pod must have - a unique name (DNS_LABEL). Cannot be updated. + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. type: string ports: - description: List of ports to expose from the container. - Not specifying a port here DOES NOT prevent that - port from being exposed. Any port which is listening - on the default "0.0.0.0" address inside a container - will be accessible from the network. Modifying - this array with strategic merge patch may corrupt - the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + description: |- + List of ports to expose from the container. 
Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. items: description: ContainerPort represents a network port in a single container. properties: containerPort: - description: Number of port to expose on the - pod's IP address. This must be a valid port - number, 0 < x < 65536. + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. format: int32 type: integer hostIP: @@ -1986,24 +1788,24 @@ spec: port to. type: string hostPort: - description: Number of port to expose on the - host. If specified, this must be a valid - port number, 0 < x < 65536. If HostNetwork - is specified, this must match ContainerPort. + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. format: int32 type: integer name: - description: If specified, this must be an - IANA_SVC_NAME and unique within the pod. - Each named port in a pod must have a unique - name. Name for the port that can be referred - to by services. + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. type: string protocol: default: TCP - description: Protocol for port. Must be UDP, - TCP, or SCTP. Defaults to "TCP". + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". type: string required: - containerPort @@ -2014,34 +1816,30 @@ spec: - protocol x-kubernetes-list-type: map readinessProbe: - description: 'Periodic probe of container service - readiness. Container will be removed from service - endpoints if the probe fails. Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line - to execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. The command - is simply exec'd, it is not run inside - a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, - you need to explicitly call out to that - shell. Exit status of 0 is treated as - live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
items: type: string type: array type: object failureThreshold: - description: Minimum consecutive failures for - the probe to be considered failed after having - succeeded. Defaults to 3. Minimum value is - 1. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: @@ -2054,11 +1852,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the - service to place in the gRPC HealthCheckRequest + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default - behavior is defined by gRPC." + + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -2068,8 +1867,8 @@ spec: to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. type: string httpHeaders: @@ -2080,10 +1879,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name. - This will be canonicalized upon - output, so case-variant names will - be understood as the same header. + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -2101,35 +1899,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform - the probe. Default to 10 seconds. Minimum - value is 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for - the probe to be considered successful after - having failed. Defaults to 1. Must be 1 for - liveness and startup. Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -2144,38 +1942,33 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number must - be in the range 1 to 65535. 
Name must - be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the - pod needs to terminate gracefully upon probe - failure. The grace period is the duration - in seconds after the processes running in - the pod are sent a termination signal and - the time when the processes are forcibly halted - with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides - the value provided by the pod spec. Value - must be non-negative integer. The value zero - indicates stop immediately via the kill signal - (no opportunity to shut down). This is a beta - field and requires enabling ProbeTerminationGracePeriod - feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which - the probe times out. Defaults to 1 second. - Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object @@ -2186,14 +1979,14 @@ spec: resource resize policy for the container. properties: resourceName: - description: 'Name of the resource to which - this resource resize policy applies. Supported - values: cpu, memory.' + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. type: string restartPolicy: - description: Restart policy to apply when - specified resource is resized. If not specified, - it defaults to NotRequired. + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. type: string required: - resourceName @@ -2202,26 +1995,31 @@ spec: type: array x-kubernetes-list-type: atomic resources: - description: 'Compute Resources required by this - container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Compute Resources required by this container. + Cannot be updated. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ properties: claims: - description: "Claims lists the names of resources, - defined in spec.resourceClaims, that are used - by this container. \n This is an alpha field - and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. - It can only be set for containers." + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name - of one entry in pod.spec.resourceClaims - of the Pod where this field is used. - It makes that resource available inside - a container. + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. type: string required: - name @@ -2237,8 +2035,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -2247,61 +2046,52 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum - amount of compute resources required. If Requests - is omitted for a container, it defaults to - Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests - cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object restartPolicy: - description: 'RestartPolicy defines the restart - behavior of individual containers in a pod. This - field may only be set for init containers, and - the only allowed value is "Always". For non-init - containers or when this field is not specified, - the restart behavior is defined by the Pod''s - restart policy and the container type. Setting - the RestartPolicy as "Always" for the init container - will have the following effect: this init container - will be continually restarted on exit until all - regular containers have terminated. Once all regular - containers have completed, all init containers - with restartPolicy "Always" will be shut down. - This lifecycle differs from normal init containers - and is often referred to as a "sidecar" container. 
- Although this init container still starts in the - init container sequence, it does not wait for - the container to complete before proceeding to - the next init container. Instead, the next init - container starts immediately after this init container - is started, or after any startupProbe has successfully - completed.' + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. type: string securityContext: - description: 'SecurityContext defines the security - options the container should be run with. If set, - the fields of SecurityContext override the equivalent - fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls - whether a process can gain more privileges - than its parent process. This bool directly - controls if the no_new_privs flag will be - set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run - as Privileged 2) has CAP_SYS_ADMIN Note that - this field cannot be set when spec.os.name - is windows.' + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. type: boolean capabilities: - description: The capabilities to add/drop when - running containers. Defaults to the default - set of capabilities granted by the container - runtime. Note that this field cannot be set - when spec.os.name is windows. + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. properties: add: description: Added capabilities @@ -2319,69 +2109,60 @@ spec: type: array type: object privileged: - description: Run container in privileged mode. - Processes in privileged containers are essentially - equivalent to root on the host. 
Defaults to - false. Note that this field cannot be set - when spec.os.name is windows. + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. type: boolean procMount: - description: procMount denotes the type of proc - mount to use for the containers. The default - is DefaultProcMount which uses the container - runtime defaults for readonly paths and masked - paths. This requires the ProcMountType feature - flag to be enabled. Note that this field cannot - be set when spec.os.name is windows. + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. type: string readOnlyRootFilesystem: - description: Whether this container has a read-only - root filesystem. Default is false. Note that - this field cannot be set when spec.os.name - is windows. + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. type: boolean runAsGroup: - description: The GID to run the entrypoint of - the container process. Uses runtime default - if unset. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. Note that this field cannot be - set when spec.os.name is windows. + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: - description: Indicates that the container must - run as a non-root user. If true, the Kubelet - will validate the image at runtime to ensure - that it does not run as UID 0 (root) and fail - to start the container if it does. If unset - or false, no such validation will be performed. - May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: boolean runAsUser: - description: The UID to run the entrypoint of - the container process. Defaults to user specified - in image metadata if unspecified. May also - be set in PodSecurityContext. If set in both - SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name - is windows. + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer seLinuxOptions: - description: The SELinux context to be applied - to the container. If unspecified, the container - runtime will allocate a random SELinux context - for each container. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. Note that this field cannot be - set when spec.os.name is windows. + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: level: description: Level is SELinux level label @@ -2401,112 +2182,93 @@ spec: type: string type: object seccompProfile: - description: The seccomp options to use by this - container. If seccomp options are provided - at both the pod & container level, the container - options override the pod options. Note that - this field cannot be set when spec.os.name - is windows. + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. properties: localhostProfile: - description: localhostProfile indicates - a profile defined in a file on the node - should be used. The profile must be preconfigured - on the node to work. Must be a descending - path, relative to the kubelet's configured - seccomp profile location. Must be set - if type is "Localhost". Must NOT be set - for any other type. + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string type: - description: "type indicates which kind - of seccomp profile will be applied. Valid - options are: \n Localhost - a profile - defined in a file on the node should be - used. RuntimeDefault - the container runtime - default profile should be used. Unconfined - - no profile should be applied." + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string required: - type type: object windowsOptions: - description: The Windows specific settings applied - to all containers. If unspecified, the options - from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. Note that this field cannot be - set when spec.os.name is linux. + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. 
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. properties: gmsaCredentialSpec: - description: GMSACredentialSpec is where - the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential - spec named by the GMSACredentialSpecName - field. + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. type: string gmsaCredentialSpecName: description: GMSACredentialSpecName is the name of the GMSA credential spec to use. type: string hostProcess: - description: HostProcess determines if a - container should be run as a 'Host Process' - container. All of a Pod's containers must - have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess - containers and non-HostProcess containers). - In addition, if HostProcess is true then - HostNetwork must also be set to true. + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean runAsUserName: - description: The UserName in Windows to - run the entrypoint of the container process. - Defaults to the user specified in image - metadata if unspecified. May also be set - in PodSecurityContext. If set in both - SecurityContext and PodSecurityContext, - the value specified in SecurityContext - takes precedence. + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: string type: object type: object startupProbe: - description: 'StartupProbe indicates that the Pod - has successfully initialized. If specified, no - other probes are executed until this completes - successfully. If this probe fails, the Pod will - be restarted, just as if the livenessProbe failed. - This can be used to provide different probe parameters - at the beginning of a Pod''s lifecycle, when it - might take a long time to load data or warm a - cache, than during steady-state operation. This - cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. 
properties: command: - description: Command is the command line - to execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. The command - is simply exec'd, it is not run inside - a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, - you need to explicitly call out to that - shell. Exit status of 0 is treated as - live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array type: object failureThreshold: - description: Minimum consecutive failures for - the probe to be considered failed after having - succeeded. Defaults to 3. Minimum value is - 1. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: @@ -2519,11 +2281,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the - service to place in the gRPC HealthCheckRequest + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default - behavior is defined by gRPC." + + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -2533,8 +2296,8 @@ spec: to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. type: string httpHeaders: @@ -2545,10 +2308,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name. - This will be canonicalized upon - output, so case-variant names will - be understood as the same header. + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -2566,35 +2328,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform - the probe. Default to 10 seconds. Minimum - value is 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for - the probe to be considered successful after - having failed. Defaults to 1. Must be 1 for - liveness and startup. Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -2609,87 +2371,76 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the - pod needs to terminate gracefully upon probe - failure. The grace period is the duration - in seconds after the processes running in - the pod are sent a termination signal and - the time when the processes are forcibly halted - with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides - the value provided by the pod spec. Value - must be non-negative integer. The value zero - indicates stop immediately via the kill signal - (no opportunity to shut down). This is a beta - field and requires enabling ProbeTerminationGracePeriod - feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which - the probe times out. Defaults to 1 second. - Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object stdin: - description: Whether this container should allocate - a buffer for stdin in the container runtime. 
If - this is not set, reads from stdin in the container - will always result in EOF. Default is false. + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. type: boolean stdinOnce: - description: Whether the container runtime should - close the stdin channel after it has been opened - by a single attach. When stdin is true the stdin - stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is - opened on container start, is empty until the - first client attaches to stdin, and then remains - open and accepts data until the client disconnects, - at which time stdin is closed and remains closed - until the container is restarted. If this flag - is false, a container processes that reads from - stdin will never receive an EOF. Default is false + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false type: boolean terminationMessagePath: - description: 'Optional: Path at which the file to - which the container''s termination message will - be written is mounted into the container''s filesystem. - Message written is intended to be brief final - status, such as an assertion failure message. - Will be truncated by the node if greater than - 4096 bytes. The total message length across all - containers will be limited to 12kb. Defaults to - /dev/termination-log. Cannot be updated.' + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. type: string terminationMessagePolicy: - description: Indicate how the termination message - should be populated. File will use the contents - of terminationMessagePath to populate the container - status message on both success and failure. FallbackToLogsOnError - will use the last chunk of container log output - if the termination message file is empty and the - container exited with an error. The log output - is limited to 2048 bytes or 80 lines, whichever - is smaller. Defaults to File. Cannot be updated. + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. 
type: string tty: - description: Whether this container should allocate - a TTY for itself, also requires 'stdin' to be - true. Default is false. + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. type: boolean volumeDevices: description: volumeDevices is the list of block @@ -2713,46 +2464,45 @@ spec: type: object type: array volumeMounts: - description: Pod volumes to mount into the container's - filesystem. Cannot be updated. + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. items: description: VolumeMount describes a mounting of a Volume within a container. properties: mountPath: - description: Path within the container at - which the volume should be mounted. Must + description: |- + Path within the container at which the volume should be mounted. Must not contain ':'. type: string mountPropagation: - description: mountPropagation determines how - mounts are propagated from the host to container - and the other way around. When not set, - MountPropagationNone is used. This field - is beta in 1.10. + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. type: string name: description: This must match the Name of a Volume. type: string readOnly: - description: Mounted read-only if true, read-write - otherwise (false or unspecified). Defaults - to false. + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. type: boolean subPath: - description: Path within the volume from which - the container's volume should be mounted. + description: |- + Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). type: string subPathExpr: - description: Expanded path within the volume - from which the container's volume should - be mounted. Behaves similarly to SubPath - but environment variable references $(VAR_NAME) - are expanded using the container's environment. - Defaults to "" (volume's root). SubPathExpr - and SubPath are mutually exclusive. + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. type: string required: - mountPath @@ -2760,33 +2510,36 @@ spec: type: object type: array workingDir: - description: Container's working directory. If not - specified, the container runtime's default will - be used, which might be configured in the container - image. Cannot be updated. + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. type: string required: - name type: object type: array dnsConfig: - description: Specifies the DNS parameters of a pod. Parameters - specified here will be merged to the generated DNS configuration - based on DNSPolicy. + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. properties: nameservers: - description: A list of DNS name server IP addresses. 
- This will be appended to the base nameservers generated - from DNSPolicy. Duplicated nameservers will be removed. + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. items: type: string type: array options: - description: A list of DNS resolver options. This - will be merged with the base options generated from - DNSPolicy. Duplicated entries will be removed. Resolution - options given in Options will override those that - appear in the base DNSPolicy. + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. items: description: PodDNSConfigOption defines DNS resolver options of a pod. @@ -2799,82 +2552,77 @@ spec: type: object type: array searches: - description: A list of DNS search domains for host-name - lookup. This will be appended to the base search - paths generated from DNSPolicy. Duplicated search - paths will be removed. + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. items: type: string type: array type: object dnsPolicy: - description: Set DNS policy for the pod. Defaults to "ClusterFirst". - Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', - 'Default' or 'None'. DNS parameters given in DNSConfig - will be merged with the policy selected with DNSPolicy. - To have DNS options set along with hostNetwork, you - have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. type: string enableServiceLinks: - description: 'EnableServiceLinks indicates whether information - about services should be injected into pod''s environment - variables, matching the syntax of Docker links. Optional: - Defaults to true.' + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. type: boolean ephemeralContainers: - description: List of ephemeral containers run in this - pod. Ephemeral containers may be run in an existing - pod to perform user-initiated actions such as debugging. - This list cannot be specified when creating a pod, and - it cannot be modified by updating the pod spec. In order - to add an ephemeral container to an existing pod, use - the pod's ephemeralcontainers subresource. + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. 
items: - description: "An EphemeralContainer is a temporary container - that you may add to an existing Pod for user-initiated - activities such as debugging. Ephemeral containers - have no resource or scheduling guarantees, and they - will not be restarted when they exit or when a Pod - is removed or restarted. The kubelet may evict a Pod - if an ephemeral container causes the Pod to exceed - its resource allocation. \n To add an ephemeral container, - use the ephemeralcontainers subresource of an existing - Pod. Ephemeral containers may not be removed or restarted." + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. properties: args: - description: 'Arguments to the entrypoint. The image''s - CMD is used if this is not provided. Variable - references $(VAR_NAME) are expanded using the - container''s environment. If a variable cannot - be resolved, the reference in the input string - will be unchanged. Double $$ are reduced to a - single $, which allows for escaping the $(VAR_NAME) - syntax: i.e. "$$(VAR_NAME)" will produce the string - literal "$(VAR_NAME)". Escaped references will - never be expanded, regardless of whether the variable - exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell items: type: string type: array command: - description: 'Entrypoint array. Not executed within - a shell. The image''s ENTRYPOINT is used if this - is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. - If a variable cannot be resolved, the reference - in the input string will be unchanged. Double - $$ are reduced to a single $, which allows for - escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" - will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot - be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell items: type: string type: array env: - description: List of environment variables to set - in the container. Cannot be updated. + description: |- + List of environment variables to set in the container. + Cannot be updated. items: description: EnvVar represents an environment variable present in a Container. @@ -2884,18 +2632,16 @@ spec: Must be a C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) - are expanded using the previously defined - environment variables in the container and - any service environment variables. If a - variable cannot be resolved, the reference - in the input string will be unchanged. Double - $$ are reduced to a single $, which allows - for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal - "$(VAR_NAME)". Escaped references will never - be expanded, regardless of whether the variable - exists or not. Defaults to "".' + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's @@ -2908,10 +2654,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. + description: |- + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap @@ -2922,11 +2668,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: - supports metadata.name, metadata.namespace, - `metadata.labels['''']`, `metadata.annotations['''']`, - spec.nodeName, spec.serviceAccountName, - status.hostIP, status.podIP, status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema @@ -2942,12 +2686,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the - container: only resources limits and - requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, - requests.memory and requests.ephemeral-storage) - are currently supported.' 
+ description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required @@ -2980,10 +2721,10 @@ spec: secret key. type: string name: - description: 'Name of the referent. + description: |- + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret @@ -2999,15 +2740,13 @@ spec: type: object type: array envFrom: - description: List of sources to populate environment - variables in the container. The keys defined within - a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container - is starting. When a key exists in multiple sources, - the value associated with the last source will - take precedence. Values defined by an Env with - a duplicate key will take precedence. Cannot be - updated. + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. items: description: EnvFromSource represents the source of a set of ConfigMaps @@ -3016,10 +2755,10 @@ spec: description: The ConfigMap to select from properties: name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap @@ -3036,10 +2775,10 @@ spec: description: The Secret to select from properties: name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret @@ -3050,42 +2789,40 @@ spec: type: object type: array image: - description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images' + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images type: string imagePullPolicy: - description: 'Image pull policy. One of Always, - Never, IfNotPresent. Defaults to Always if :latest - tag is specified, or IfNotPresent otherwise. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. 
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images type: string lifecycle: description: Lifecycle is not allowed for ephemeral containers. properties: postStart: - description: 'PostStart is called immediately - after a container is created. If the handler - fails, the container is terminated and restarted - according to its restart policy. Other management - of the container blocks until the hook completes. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command - line to execute inside the container, - the working directory for the command is - root ('/') in the container's filesystem. - The command is simply exec'd, it is - not run inside a shell, so traditional - shell instructions ('|', etc) won't - work. To use a shell, you need to - explicitly call out to that shell. - Exit status of 0 is treated as live/healthy - and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array @@ -3095,10 +2832,9 @@ spec: request to perform. properties: host: - description: Host name to connect to, - defaults to the pod IP. You probably - want to set "Host" in httpHeaders - instead. + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in @@ -3110,11 +2846,9 @@ spec: probes properties: name: - description: The header field - name. This will be canonicalized - upon output, so case-variant - names will be understood as - the same header. + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field @@ -3133,25 +2867,24 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number - must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object tcpSocket: - description: Deprecated. TCPSocket is NOT - supported as a LifecycleHandler and kept - for the backward compatibility. There - are no validation of this field and lifecycle - hooks will fail in runtime when tcp handler - is specified. + description: |- + Deprecated. 
TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: host: description: 'Optional: Host name to @@ -3161,47 +2894,38 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number - must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object type: object preStop: - description: 'PreStop is called immediately - before a container is terminated due to an - API request or management event such as liveness/startup - probe failure, preemption, resource contention, - etc. The handler is not called if the container - crashes or exits. The Pod''s termination grace - period countdown begins before the PreStop - hook is executed. Regardless of the outcome - of the handler, the container will eventually - terminate within the Pod''s termination grace - period (unless delayed by finalizers). Other - management of the container blocks until the - hook completes or until the termination grace - period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command - line to execute inside the container, - the working directory for the command is - root ('/') in the container's filesystem. - The command is simply exec'd, it is - not run inside a shell, so traditional - shell instructions ('|', etc) won't - work. To use a shell, you need to - explicitly call out to that shell. - Exit status of 0 is treated as live/healthy - and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array @@ -3211,10 +2935,9 @@ spec: request to perform. properties: host: - description: Host name to connect to, - defaults to the pod IP. You probably - want to set "Host" in httpHeaders - instead. + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
type: string httpHeaders: description: Custom headers to set in @@ -3226,11 +2949,9 @@ spec: probes properties: name: - description: The header field - name. This will be canonicalized - upon output, so case-variant - names will be understood as - the same header. + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field @@ -3249,25 +2970,24 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number - must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object tcpSocket: - description: Deprecated. TCPSocket is NOT - supported as a LifecycleHandler and kept - for the backward compatibility. There - are no validation of this field and lifecycle - hooks will fail in runtime when tcp handler - is specified. + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: host: description: 'Optional: Host name to @@ -3277,10 +2997,10 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number - must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port @@ -3295,25 +3015,20 @@ spec: description: Exec specifies the action to take. properties: command: - description: Command is the command line - to execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. The command - is simply exec'd, it is not run inside - a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, - you need to explicitly call out to that - shell. Exit status of 0 is treated as - live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array type: object failureThreshold: - description: Minimum consecutive failures for - the probe to be considered failed after having - succeeded. Defaults to 3. Minimum value is - 1. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. 
format: int32 type: integer grpc: @@ -3326,11 +3041,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the - service to place in the gRPC HealthCheckRequest + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default - behavior is defined by gRPC." + + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -3340,8 +3056,8 @@ spec: to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. type: string httpHeaders: @@ -3352,10 +3068,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name. - This will be canonicalized upon - output, so case-variant names will - be understood as the same header. + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -3373,35 +3088,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform - the probe. Default to 10 seconds. Minimum - value is 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for - the probe to be considered successful after - having failed. Defaults to 1. Must be 1 for - liveness and startup. Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -3416,46 +3131,40 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the - pod needs to terminate gracefully upon probe - failure. The grace period is the duration - in seconds after the processes running in - the pod are sent a termination signal and - the time when the processes are forcibly halted - with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides - the value provided by the pod spec. Value - must be non-negative integer. The value zero - indicates stop immediately via the kill signal - (no opportunity to shut down). This is a beta - field and requires enabling ProbeTerminationGracePeriod - feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which - the probe times out. Defaults to 1 second. - Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object name: - description: Name of the ephemeral container specified - as a DNS_LABEL. This name must be unique among - all containers, init containers and ephemeral - containers. + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. type: string ports: description: Ports are not allowed for ephemeral @@ -3465,9 +3174,9 @@ spec: port in a single container. properties: containerPort: - description: Number of port to expose on the - pod's IP address. This must be a valid port - number, 0 < x < 65536. + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. format: int32 type: integer hostIP: @@ -3475,24 +3184,24 @@ spec: port to. type: string hostPort: - description: Number of port to expose on the - host. If specified, this must be a valid - port number, 0 < x < 65536. If HostNetwork - is specified, this must match ContainerPort. + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. 
format: int32 type: integer name: - description: If specified, this must be an - IANA_SVC_NAME and unique within the pod. - Each named port in a pod must have a unique - name. Name for the port that can be referred - to by services. + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. type: string protocol: default: TCP - description: Protocol for port. Must be UDP, - TCP, or SCTP. Defaults to "TCP". + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". type: string required: - containerPort @@ -3510,25 +3219,20 @@ spec: description: Exec specifies the action to take. properties: command: - description: Command is the command line - to execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. The command - is simply exec'd, it is not run inside - a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, - you need to explicitly call out to that - shell. Exit status of 0 is treated as - live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array type: object failureThreshold: - description: Minimum consecutive failures for - the probe to be considered failed after having - succeeded. Defaults to 3. Minimum value is - 1. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: @@ -3541,11 +3245,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the - service to place in the gRPC HealthCheckRequest + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default - behavior is defined by gRPC." + + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -3555,8 +3260,8 @@ spec: to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. type: string httpHeaders: @@ -3567,10 +3272,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name. - This will be canonicalized upon - output, so case-variant names will - be understood as the same header. + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -3588,35 +3292,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. 
+ Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform - the probe. Default to 10 seconds. Minimum - value is 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for - the probe to be considered successful after - having failed. Defaults to 1. Must be 1 for - liveness and startup. Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -3631,38 +3335,33 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the - pod needs to terminate gracefully upon probe - failure. The grace period is the duration - in seconds after the processes running in - the pod are sent a termination signal and - the time when the processes are forcibly halted - with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides - the value provided by the pod spec. Value - must be non-negative integer. The value zero - indicates stop immediately via the kill signal - (no opportunity to shut down). This is a beta - field and requires enabling ProbeTerminationGracePeriod - feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which - the probe times out. Defaults to 1 second. - Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object @@ -3673,14 +3372,14 @@ spec: resource resize policy for the container. properties: resourceName: - description: 'Name of the resource to which - this resource resize policy applies. Supported - values: cpu, memory.' + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. type: string restartPolicy: - description: Restart policy to apply when - specified resource is resized. If not specified, - it defaults to NotRequired. + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. type: string required: - resourceName @@ -3689,27 +3388,30 @@ spec: type: array x-kubernetes-list-type: atomic resources: - description: Resources are not allowed for ephemeral - containers. Ephemeral containers use spare resources + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. properties: claims: - description: "Claims lists the names of resources, - defined in spec.resourceClaims, that are used - by this container. \n This is an alpha field - and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. - It can only be set for containers." + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name - of one entry in pod.spec.resourceClaims - of the Pod where this field is used. - It makes that resource available inside - a container. + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. type: string required: - name @@ -3725,8 +3427,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -3735,43 +3438,40 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum - amount of compute resources required. 
If Requests - is omitted for a container, it defaults to - Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests - cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object restartPolicy: - description: Restart policy for the container to - manage the restart behavior of each container - within a pod. This may only be set for init containers. - You cannot set this field on ephemeral containers. + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. type: string securityContext: - description: 'Optional: SecurityContext defines - the security options the ephemeral container should - be run with. If set, the fields of SecurityContext - override the equivalent fields of PodSecurityContext.' + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. properties: allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls - whether a process can gain more privileges - than its parent process. This bool directly - controls if the no_new_privs flag will be - set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run - as Privileged 2) has CAP_SYS_ADMIN Note that - this field cannot be set when spec.os.name - is windows.' + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. type: boolean capabilities: - description: The capabilities to add/drop when - running containers. Defaults to the default - set of capabilities granted by the container - runtime. Note that this field cannot be set - when spec.os.name is windows. + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. properties: add: description: Added capabilities @@ -3789,69 +3489,60 @@ spec: type: array type: object privileged: - description: Run container in privileged mode. - Processes in privileged containers are essentially - equivalent to root on the host. Defaults to - false. Note that this field cannot be set - when spec.os.name is windows. + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. type: boolean procMount: - description: procMount denotes the type of proc - mount to use for the containers. 
The default - is DefaultProcMount which uses the container - runtime defaults for readonly paths and masked - paths. This requires the ProcMountType feature - flag to be enabled. Note that this field cannot - be set when spec.os.name is windows. + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. type: string readOnlyRootFilesystem: - description: Whether this container has a read-only - root filesystem. Default is false. Note that - this field cannot be set when spec.os.name - is windows. + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. type: boolean runAsGroup: - description: The GID to run the entrypoint of - the container process. Uses runtime default - if unset. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. Note that this field cannot be - set when spec.os.name is windows. + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: - description: Indicates that the container must - run as a non-root user. If true, the Kubelet - will validate the image at runtime to ensure - that it does not run as UID 0 (root) and fail - to start the container if it does. If unset - or false, no such validation will be performed. - May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: boolean runAsUser: - description: The UID to run the entrypoint of - the container process. Defaults to user specified - in image metadata if unspecified. May also - be set in PodSecurityContext. If set in both - SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name - is windows. + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer seLinuxOptions: - description: The SELinux context to be applied - to the container. If unspecified, the container - runtime will allocate a random SELinux context - for each container. 
May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. Note that this field cannot be - set when spec.os.name is windows. + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: level: description: Level is SELinux level label @@ -3871,74 +3562,62 @@ spec: type: string type: object seccompProfile: - description: The seccomp options to use by this - container. If seccomp options are provided - at both the pod & container level, the container - options override the pod options. Note that - this field cannot be set when spec.os.name - is windows. + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. properties: localhostProfile: - description: localhostProfile indicates - a profile defined in a file on the node - should be used. The profile must be preconfigured - on the node to work. Must be a descending - path, relative to the kubelet's configured - seccomp profile location. Must be set - if type is "Localhost". Must NOT be set - for any other type. + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string type: - description: "type indicates which kind - of seccomp profile will be applied. Valid - options are: \n Localhost - a profile - defined in a file on the node should be - used. RuntimeDefault - the container runtime - default profile should be used. Unconfined - - no profile should be applied." + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string required: - type type: object windowsOptions: - description: The Windows specific settings applied - to all containers. If unspecified, the options - from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. Note that this field cannot be - set when spec.os.name is linux. + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. properties: gmsaCredentialSpec: - description: GMSACredentialSpec is where - the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential - spec named by the GMSACredentialSpecName - field. 
+ description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. type: string gmsaCredentialSpecName: description: GMSACredentialSpecName is the name of the GMSA credential spec to use. type: string hostProcess: - description: HostProcess determines if a - container should be run as a 'Host Process' - container. All of a Pod's containers must - have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess - containers and non-HostProcess containers). - In addition, if HostProcess is true then - HostNetwork must also be set to true. + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean runAsUserName: - description: The UserName in Windows to - run the entrypoint of the container process. - Defaults to the user specified in image - metadata if unspecified. May also be set - in PodSecurityContext. If set in both - SecurityContext and PodSecurityContext, - the value specified in SecurityContext - takes precedence. + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: string type: object type: object @@ -3950,25 +3629,20 @@ spec: description: Exec specifies the action to take. properties: command: - description: Command is the command line - to execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. The command - is simply exec'd, it is not run inside - a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, - you need to explicitly call out to that - shell. Exit status of 0 is treated as - live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array type: object failureThreshold: - description: Minimum consecutive failures for - the probe to be considered failed after having - succeeded. Defaults to 3. Minimum value is - 1. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: @@ -3981,11 +3655,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the - service to place in the gRPC HealthCheckRequest + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default - behavior is defined by gRPC." 
+ + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -3995,8 +3670,8 @@ spec: to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. type: string httpHeaders: @@ -4007,10 +3682,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name. - This will be canonicalized upon - output, so case-variant names will - be understood as the same header. + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -4028,35 +3702,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform - the probe. Default to 10 seconds. Minimum - value is 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for - the probe to be considered successful after - having failed. Defaults to 1. Must be 1 for - liveness and startup. Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -4071,98 +3745,86 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the - pod needs to terminate gracefully upon probe - failure. The grace period is the duration - in seconds after the processes running in - the pod are sent a termination signal and - the time when the processes are forcibly halted - with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. 
Otherwise, this value overrides - the value provided by the pod spec. Value - must be non-negative integer. The value zero - indicates stop immediately via the kill signal - (no opportunity to shut down). This is a beta - field and requires enabling ProbeTerminationGracePeriod - feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which - the probe times out. Defaults to 1 second. - Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object stdin: - description: Whether this container should allocate - a buffer for stdin in the container runtime. If - this is not set, reads from stdin in the container - will always result in EOF. Default is false. + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. type: boolean stdinOnce: - description: Whether the container runtime should - close the stdin channel after it has been opened - by a single attach. When stdin is true the stdin - stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is - opened on container start, is empty until the - first client attaches to stdin, and then remains - open and accepts data until the client disconnects, - at which time stdin is closed and remains closed - until the container is restarted. If this flag - is false, a container processes that reads from - stdin will never receive an EOF. Default is false + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false type: boolean targetContainerName: - description: "If set, the name of the container - from PodSpec that this ephemeral container targets. 
- The ephemeral container will be run in the namespaces - (IPC, PID, etc) of this container. If not set - then the ephemeral container uses the namespaces - configured in the Pod spec. \n The container runtime - must implement support for this feature. If the - runtime does not support namespace targeting then - the result of setting this field is undefined." + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. type: string terminationMessagePath: - description: 'Optional: Path at which the file to - which the container''s termination message will - be written is mounted into the container''s filesystem. - Message written is intended to be brief final - status, such as an assertion failure message. - Will be truncated by the node if greater than - 4096 bytes. The total message length across all - containers will be limited to 12kb. Defaults to - /dev/termination-log. Cannot be updated.' + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. type: string terminationMessagePolicy: - description: Indicate how the termination message - should be populated. File will use the contents - of terminationMessagePath to populate the container - status message on both success and failure. FallbackToLogsOnError - will use the last chunk of container log output - if the termination message file is empty and the - container exited with an error. The log output - is limited to 2048 bytes or 80 lines, whichever - is smaller. Defaults to File. Cannot be updated. + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. type: string tty: - description: Whether this container should allocate - a TTY for itself, also requires 'stdin' to be - true. Default is false. + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. type: boolean volumeDevices: description: volumeDevices is the list of block @@ -4186,47 +3848,45 @@ spec: type: object type: array volumeMounts: - description: Pod volumes to mount into the container's - filesystem. Subpath mounts are not allowed for - ephemeral containers. Cannot be updated. + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. items: description: VolumeMount describes a mounting of a Volume within a container. 
properties: mountPath: - description: Path within the container at - which the volume should be mounted. Must + description: |- + Path within the container at which the volume should be mounted. Must not contain ':'. type: string mountPropagation: - description: mountPropagation determines how - mounts are propagated from the host to container - and the other way around. When not set, - MountPropagationNone is used. This field - is beta in 1.10. + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. type: string name: description: This must match the Name of a Volume. type: string readOnly: - description: Mounted read-only if true, read-write - otherwise (false or unspecified). Defaults - to false. + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. type: boolean subPath: - description: Path within the volume from which - the container's volume should be mounted. + description: |- + Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). type: string subPathExpr: - description: Expanded path within the volume - from which the container's volume should - be mounted. Behaves similarly to SubPath - but environment variable references $(VAR_NAME) - are expanded using the container's environment. - Defaults to "" (volume's root). SubPathExpr - and SubPath are mutually exclusive. + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. type: string required: - mountPath @@ -4234,24 +3894,24 @@ spec: type: object type: array workingDir: - description: Container's working directory. If not - specified, the container runtime's default will - be used, which might be configured in the container - image. Cannot be updated. + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. type: string required: - name type: object type: array hostAliases: - description: HostAliases is an optional list of hosts - and IPs that will be injected into the pod's hosts file - if specified. This is only valid for non-hostNetwork - pods. + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. This is only valid for non-hostNetwork pods. items: - description: HostAlias holds the mapping between IP - and hostnames that will be injected as an entry in - the pod's hosts file. + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. properties: hostnames: description: Hostnames for the above IP address. @@ -4264,111 +3924,106 @@ spec: type: object type: array hostIPC: - description: 'Use the host''s ipc namespace. Optional: - Default to false.' + description: |- + Use the host's ipc namespace. + Optional: Default to false. type: boolean hostNetwork: - description: Host networking requested for this pod. Use - the host's network namespace. If this option is set, - the ports that will be used must be specified. Default - to false. 
+ description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. type: boolean hostPID: - description: 'Use the host''s pid namespace. Optional: - Default to false.' + description: |- + Use the host's pid namespace. + Optional: Default to false. type: boolean hostUsers: - description: 'Use the host''s user namespace. Optional: - Default to true. If set to true or not present, the - pod will be run in the host user namespace, useful for - when the pod needs a feature only available to the host - user namespace, such as loading a kernel module with - CAP_SYS_MODULE. When set to false, a new userns is created - for the pod. Setting false is useful for mitigating - container breakout vulnerabilities even allowing users - to run their containers as root without actually having - root privileges on the host. This field is alpha-level - and is only honored by servers that enable the UserNamespacesSupport - feature.' + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. type: boolean hostname: - description: Specifies the hostname of the Pod If not - specified, the pod's hostname will be set to a system-defined - value. + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. type: string imagePullSecrets: - description: 'ImagePullSecrets is an optional list of - references to secrets in the same namespace to use for - pulling any of the images used by this PodSpec. If specified, - these secrets will be passed to individual puller implementations - for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod' + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod items: - description: LocalObjectReference contains enough information - to let you locate the referenced object inside the - same namespace. + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
type: string type: object x-kubernetes-map-type: atomic type: array initContainers: - description: 'List of initialization containers belonging - to the pod. Init containers are executed in order prior - to containers being started. If any init container fails, - the pod is considered to have failed and is handled - according to its restartPolicy. The name for an init - container or normal container must be unique among all - containers. Init containers may not have Lifecycle actions, - Readiness probes, Liveness probes, or Startup probes. - The resourceRequirements of an init container are taken - into account during scheduling by finding the highest - request/limit for each resource type, and then using - the max of of that value or the sum of the normal containers. - Limits are applied to init containers in a similar fashion. + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + of that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. Init containers cannot currently be added or removed. - Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/' + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ items: description: A single application container that you want to run within a pod. properties: args: - description: 'Arguments to the entrypoint. The container - image''s CMD is used if this is not provided. - Variable references $(VAR_NAME) are expanded using - the container''s environment. If a variable cannot - be resolved, the reference in the input string - will be unchanged. Double $$ are reduced to a - single $, which allows for escaping the $(VAR_NAME) - syntax: i.e. "$$(VAR_NAME)" will produce the string - literal "$(VAR_NAME)". Escaped references will - never be expanded, regardless of whether the variable - exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell items: type: string type: array command: - description: 'Entrypoint array. Not executed within - a shell. The container image''s ENTRYPOINT is - used if this is not provided. 
Variable references - $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, - the reference in the input string will be unchanged. - Double $$ are reduced to a single $, which allows - for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" - will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot - be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell items: type: string type: array env: - description: List of environment variables to set - in the container. Cannot be updated. + description: |- + List of environment variables to set in the container. + Cannot be updated. items: description: EnvVar represents an environment variable present in a Container. @@ -4378,18 +4033,16 @@ spec: Must be a C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) - are expanded using the previously defined - environment variables in the container and - any service environment variables. If a - variable cannot be resolved, the reference - in the input string will be unchanged. Double - $$ are reduced to a single $, which allows - for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal - "$(VAR_NAME)". Escaped references will never - be expanded, regardless of whether the variable - exists or not. Defaults to "".' + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's @@ -4402,10 +4055,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. + description: |- + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + TODO: Add other useful fields. apiVersion, kind, uid? 
type: string optional: description: Specify whether the ConfigMap @@ -4416,11 +4069,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: - supports metadata.name, metadata.namespace, - `metadata.labels['''']`, `metadata.annotations['''']`, - spec.nodeName, spec.serviceAccountName, - status.hostIP, status.podIP, status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema @@ -4436,12 +4087,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the - container: only resources limits and - requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, - requests.memory and requests.ephemeral-storage) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required @@ -4474,10 +4122,10 @@ spec: secret key. type: string name: - description: 'Name of the referent. + description: |- + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret @@ -4493,15 +4141,13 @@ spec: type: object type: array envFrom: - description: List of sources to populate environment - variables in the container. The keys defined within - a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container - is starting. When a key exists in multiple sources, - the value associated with the last source will - take precedence. Values defined by an Env with - a duplicate key will take precedence. Cannot be - updated. + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. items: description: EnvFromSource represents the source of a set of ConfigMaps @@ -4510,10 +4156,10 @@ spec: description: The ConfigMap to select from properties: name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap @@ -4530,10 +4176,10 @@ spec: description: The Secret to select from properties: name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret @@ -4544,46 +4190,43 @@ spec: type: object type: array image: - description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config - management to default or override container images - in workload controllers like Deployments and StatefulSets.' + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. type: string imagePullPolicy: - description: 'Image pull policy. One of Always, - Never, IfNotPresent. Defaults to Always if :latest - tag is specified, or IfNotPresent otherwise. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images type: string lifecycle: - description: Actions that the management system - should take in response to container lifecycle - events. Cannot be updated. + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. properties: postStart: - description: 'PostStart is called immediately - after a container is created. If the handler - fails, the container is terminated and restarted - according to its restart policy. Other management - of the container blocks until the hook completes. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command - line to execute inside the container, - the working directory for the command is - root ('/') in the container's filesystem. - The command is simply exec'd, it is - not run inside a shell, so traditional - shell instructions ('|', etc) won't - work. To use a shell, you need to - explicitly call out to that shell. - Exit status of 0 is treated as live/healthy - and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array @@ -4593,10 +4236,9 @@ spec: request to perform. properties: host: - description: Host name to connect to, - defaults to the pod IP. You probably - want to set "Host" in httpHeaders - instead. 
+ description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in @@ -4608,11 +4250,9 @@ spec: probes properties: name: - description: The header field - name. This will be canonicalized - upon output, so case-variant - names will be understood as - the same header. + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field @@ -4631,25 +4271,24 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number - must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object tcpSocket: - description: Deprecated. TCPSocket is NOT - supported as a LifecycleHandler and kept - for the backward compatibility. There - are no validation of this field and lifecycle - hooks will fail in runtime when tcp handler - is specified. + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: host: description: 'Optional: Host name to @@ -4659,47 +4298,38 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number - must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object type: object preStop: - description: 'PreStop is called immediately - before a container is terminated due to an - API request or management event such as liveness/startup - probe failure, preemption, resource contention, - etc. The handler is not called if the container - crashes or exits. The Pod''s termination grace - period countdown begins before the PreStop - hook is executed. Regardless of the outcome - of the handler, the container will eventually - terminate within the Pod''s termination grace - period (unless delayed by finalizers). Other - management of the container blocks until the - hook completes or until the termination grace - period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). 
Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command - line to execute inside the container, - the working directory for the command is - root ('/') in the container's filesystem. - The command is simply exec'd, it is - not run inside a shell, so traditional - shell instructions ('|', etc) won't - work. To use a shell, you need to - explicitly call out to that shell. - Exit status of 0 is treated as live/healthy - and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array @@ -4709,10 +4339,9 @@ spec: request to perform. properties: host: - description: Host name to connect to, - defaults to the pod IP. You probably - want to set "Host" in httpHeaders - instead. + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in @@ -4724,11 +4353,9 @@ spec: probes properties: name: - description: The header field - name. This will be canonicalized - upon output, so case-variant - names will be understood as - the same header. + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field @@ -4747,25 +4374,24 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number - must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object tcpSocket: - description: Deprecated. TCPSocket is NOT - supported as a LifecycleHandler and kept - for the backward compatibility. There - are no validation of this field and lifecycle - hooks will fail in runtime when tcp handler - is specified. + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: host: description: 'Optional: Host name to @@ -4775,10 +4401,10 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number - must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
x-kubernetes-int-or-string: true required: - port @@ -4786,33 +4412,30 @@ spec: type: object type: object livenessProbe: - description: 'Periodic probe of container liveness. + description: |- + Periodic probe of container liveness. Container will be restarted if the probe fails. - Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line - to execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. The command - is simply exec'd, it is not run inside - a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, - you need to explicitly call out to that - shell. Exit status of 0 is treated as - live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array type: object failureThreshold: - description: Minimum consecutive failures for - the probe to be considered failed after having - succeeded. Defaults to 3. Minimum value is - 1. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: @@ -4825,11 +4448,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the - service to place in the gRPC HealthCheckRequest + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default - behavior is defined by gRPC." + + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -4839,8 +4463,8 @@ spec: to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. type: string httpHeaders: @@ -4851,10 +4475,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name. - This will be canonicalized upon - output, so case-variant names will - be understood as the same header. + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -4872,35 +4495,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. 
Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform - the probe. Default to 10 seconds. Minimum - value is 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for - the probe to be considered successful after - having failed. Defaults to 1. Must be 1 for - liveness and startup. Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -4915,63 +4538,59 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the - pod needs to terminate gracefully upon probe - failure. The grace period is the duration - in seconds after the processes running in - the pod are sent a termination signal and - the time when the processes are forcibly halted - with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides - the value provided by the pod spec. Value - must be non-negative integer. The value zero - indicates stop immediately via the kill signal - (no opportunity to shut down). This is a beta - field and requires enabling ProbeTerminationGracePeriod - feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which - the probe times out. Defaults to 1 second. - Minimum value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object name: - description: Name of the container specified as - a DNS_LABEL. Each container in a pod must have - a unique name (DNS_LABEL). Cannot be updated. + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. type: string ports: - description: List of ports to expose from the container. - Not specifying a port here DOES NOT prevent that - port from being exposed. Any port which is listening - on the default "0.0.0.0" address inside a container - will be accessible from the network. Modifying - this array with strategic merge patch may corrupt - the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. items: description: ContainerPort represents a network port in a single container. properties: containerPort: - description: Number of port to expose on the - pod's IP address. This must be a valid port - number, 0 < x < 65536. + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. format: int32 type: integer hostIP: @@ -4979,24 +4598,24 @@ spec: port to. type: string hostPort: - description: Number of port to expose on the - host. If specified, this must be a valid - port number, 0 < x < 65536. If HostNetwork - is specified, this must match ContainerPort. + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. format: int32 type: integer name: - description: If specified, this must be an - IANA_SVC_NAME and unique within the pod. - Each named port in a pod must have a unique - name. Name for the port that can be referred - to by services. + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. type: string protocol: default: TCP - description: Protocol for port. Must be UDP, - TCP, or SCTP. Defaults to "TCP". + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". type: string required: - containerPort @@ -5007,34 +4626,30 @@ spec: - protocol x-kubernetes-list-type: map readinessProbe: - description: 'Periodic probe of container service - readiness. Container will be removed from service - endpoints if the probe fails. Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line - to execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. The command - is simply exec'd, it is not run inside - a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, - you need to explicitly call out to that - shell. Exit status of 0 is treated as - live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array type: object failureThreshold: - description: Minimum consecutive failures for - the probe to be considered failed after having - succeeded. Defaults to 3. Minimum value is - 1. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: @@ -5047,11 +4662,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the - service to place in the gRPC HealthCheckRequest + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default - behavior is defined by gRPC." + + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -5061,8 +4677,8 @@ spec: to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. type: string httpHeaders: @@ -5073,10 +4689,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name. - This will be canonicalized upon - output, so case-variant names will - be understood as the same header. + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -5094,35 +4709,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. 
- More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform - the probe. Default to 10 seconds. Minimum - value is 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for - the probe to be considered successful after - having failed. Defaults to 1. Must be 1 for - liveness and startup. Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -5137,38 +4752,33 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the - pod needs to terminate gracefully upon probe - failure. The grace period is the duration - in seconds after the processes running in - the pod are sent a termination signal and - the time when the processes are forcibly halted - with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides - the value provided by the pod spec. Value - must be non-negative integer. The value zero - indicates stop immediately via the kill signal - (no opportunity to shut down). This is a beta - field and requires enabling ProbeTerminationGracePeriod - feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which - the probe times out. Defaults to 1 second. - Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object @@ -5179,14 +4789,14 @@ spec: resource resize policy for the container. properties: resourceName: - description: 'Name of the resource to which - this resource resize policy applies. Supported - values: cpu, memory.' + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. type: string restartPolicy: - description: Restart policy to apply when - specified resource is resized. If not specified, - it defaults to NotRequired. + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. type: string required: - resourceName @@ -5195,26 +4805,31 @@ spec: type: array x-kubernetes-list-type: atomic resources: - description: 'Compute Resources required by this - container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ properties: claims: - description: "Claims lists the names of resources, - defined in spec.resourceClaims, that are used - by this container. \n This is an alpha field - and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. - It can only be set for containers." + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name - of one entry in pod.spec.resourceClaims - of the Pod where this field is used. - It makes that resource available inside - a container. + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. type: string required: - name @@ -5230,8 +4845,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -5240,61 +4856,52 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum - amount of compute resources required. If Requests - is omitted for a container, it defaults to - Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests - cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object restartPolicy: - description: 'RestartPolicy defines the restart - behavior of individual containers in a pod. This - field may only be set for init containers, and - the only allowed value is "Always". For non-init - containers or when this field is not specified, - the restart behavior is defined by the Pod''s - restart policy and the container type. Setting - the RestartPolicy as "Always" for the init container - will have the following effect: this init container - will be continually restarted on exit until all - regular containers have terminated. Once all regular - containers have completed, all init containers - with restartPolicy "Always" will be shut down. - This lifecycle differs from normal init containers - and is often referred to as a "sidecar" container. - Although this init container still starts in the - init container sequence, it does not wait for - the container to complete before proceeding to - the next init container. Instead, the next init - container starts immediately after this init container - is started, or after any startupProbe has successfully - completed.' + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. type: string securityContext: - description: 'SecurityContext defines the security - options the container should be run with. If set, - the fields of SecurityContext override the equivalent - fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls - whether a process can gain more privileges - than its parent process. This bool directly - controls if the no_new_privs flag will be - set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run - as Privileged 2) has CAP_SYS_ADMIN Note that - this field cannot be set when spec.os.name - is windows.' 
+ description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. type: boolean capabilities: - description: The capabilities to add/drop when - running containers. Defaults to the default - set of capabilities granted by the container - runtime. Note that this field cannot be set - when spec.os.name is windows. + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. properties: add: description: Added capabilities @@ -5312,69 +4919,60 @@ spec: type: array type: object privileged: - description: Run container in privileged mode. - Processes in privileged containers are essentially - equivalent to root on the host. Defaults to - false. Note that this field cannot be set - when spec.os.name is windows. + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. type: boolean procMount: - description: procMount denotes the type of proc - mount to use for the containers. The default - is DefaultProcMount which uses the container - runtime defaults for readonly paths and masked - paths. This requires the ProcMountType feature - flag to be enabled. Note that this field cannot - be set when spec.os.name is windows. + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. type: string readOnlyRootFilesystem: - description: Whether this container has a read-only - root filesystem. Default is false. Note that - this field cannot be set when spec.os.name - is windows. + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. type: boolean runAsGroup: - description: The GID to run the entrypoint of - the container process. Uses runtime default - if unset. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. Note that this field cannot be - set when spec.os.name is windows. + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: - description: Indicates that the container must - run as a non-root user. If true, the Kubelet - will validate the image at runtime to ensure - that it does not run as UID 0 (root) and fail - to start the container if it does. If unset - or false, no such validation will be performed. - May also be set in PodSecurityContext. 
If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: boolean runAsUser: - description: The UID to run the entrypoint of - the container process. Defaults to user specified - in image metadata if unspecified. May also - be set in PodSecurityContext. If set in both - SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name - is windows. + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer seLinuxOptions: - description: The SELinux context to be applied - to the container. If unspecified, the container - runtime will allocate a random SELinux context - for each container. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. Note that this field cannot be - set when spec.os.name is windows. + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: level: description: Level is SELinux level label @@ -5394,112 +4992,93 @@ spec: type: string type: object seccompProfile: - description: The seccomp options to use by this - container. If seccomp options are provided - at both the pod & container level, the container - options override the pod options. Note that - this field cannot be set when spec.os.name - is windows. + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. properties: localhostProfile: - description: localhostProfile indicates - a profile defined in a file on the node - should be used. The profile must be preconfigured - on the node to work. Must be a descending - path, relative to the kubelet's configured - seccomp profile location. Must be set - if type is "Localhost". Must NOT be set - for any other type. + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. 
type: string type: - description: "type indicates which kind - of seccomp profile will be applied. Valid - options are: \n Localhost - a profile - defined in a file on the node should be - used. RuntimeDefault - the container runtime - default profile should be used. Unconfined - - no profile should be applied." + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string required: - type type: object windowsOptions: - description: The Windows specific settings applied - to all containers. If unspecified, the options - from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. Note that this field cannot be - set when spec.os.name is linux. + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. properties: gmsaCredentialSpec: - description: GMSACredentialSpec is where - the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential - spec named by the GMSACredentialSpecName - field. + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. type: string gmsaCredentialSpecName: description: GMSACredentialSpecName is the name of the GMSA credential spec to use. type: string hostProcess: - description: HostProcess determines if a - container should be run as a 'Host Process' - container. All of a Pod's containers must - have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess - containers and non-HostProcess containers). - In addition, if HostProcess is true then - HostNetwork must also be set to true. + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean runAsUserName: - description: The UserName in Windows to - run the entrypoint of the container process. - Defaults to the user specified in image - metadata if unspecified. May also be set - in PodSecurityContext. If set in both - SecurityContext and PodSecurityContext, - the value specified in SecurityContext - takes precedence. + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: string type: object type: object startupProbe: - description: 'StartupProbe indicates that the Pod - has successfully initialized. If specified, no - other probes are executed until this completes - successfully. 
If this probe fails, the Pod will - be restarted, just as if the livenessProbe failed. - This can be used to provide different probe parameters - at the beginning of a Pod''s lifecycle, when it - might take a long time to load data or warm a - cache, than during steady-state operation. This - cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line - to execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. The command - is simply exec'd, it is not run inside - a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, - you need to explicitly call out to that - shell. Exit status of 0 is treated as - live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array type: object failureThreshold: - description: Minimum consecutive failures for - the probe to be considered failed after having - succeeded. Defaults to 3. Minimum value is - 1. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: @@ -5512,11 +5091,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the - service to place in the gRPC HealthCheckRequest + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default - behavior is defined by gRPC." + + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -5526,8 +5106,8 @@ spec: to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. type: string httpHeaders: @@ -5538,10 +5118,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name. - This will be canonicalized upon - output, so case-variant names will - be understood as the same header. + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
type: string value: description: The header field value @@ -5559,35 +5138,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform - the probe. Default to 10 seconds. Minimum - value is 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for - the probe to be considered successful after - having failed. Defaults to 1. Must be 1 for - liveness and startup. Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -5602,87 +5181,76 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the - pod needs to terminate gracefully upon probe - failure. The grace period is the duration - in seconds after the processes running in - the pod are sent a termination signal and - the time when the processes are forcibly halted - with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides - the value provided by the pod spec. Value - must be non-negative integer. The value zero - indicates stop immediately via the kill signal - (no opportunity to shut down). This is a beta - field and requires enabling ProbeTerminationGracePeriod - feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. 
Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which - the probe times out. Defaults to 1 second. - Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object stdin: - description: Whether this container should allocate - a buffer for stdin in the container runtime. If - this is not set, reads from stdin in the container - will always result in EOF. Default is false. + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. type: boolean stdinOnce: - description: Whether the container runtime should - close the stdin channel after it has been opened - by a single attach. When stdin is true the stdin - stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is - opened on container start, is empty until the - first client attaches to stdin, and then remains - open and accepts data until the client disconnects, - at which time stdin is closed and remains closed - until the container is restarted. If this flag - is false, a container processes that reads from - stdin will never receive an EOF. Default is false + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false type: boolean terminationMessagePath: - description: 'Optional: Path at which the file to - which the container''s termination message will - be written is mounted into the container''s filesystem. - Message written is intended to be brief final - status, such as an assertion failure message. - Will be truncated by the node if greater than - 4096 bytes. The total message length across all - containers will be limited to 12kb. Defaults to - /dev/termination-log. Cannot be updated.' + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. 
type: string terminationMessagePolicy: - description: Indicate how the termination message - should be populated. File will use the contents - of terminationMessagePath to populate the container - status message on both success and failure. FallbackToLogsOnError - will use the last chunk of container log output - if the termination message file is empty and the - container exited with an error. The log output - is limited to 2048 bytes or 80 lines, whichever - is smaller. Defaults to File. Cannot be updated. + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. type: string tty: - description: Whether this container should allocate - a TTY for itself, also requires 'stdin' to be - true. Default is false. + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. type: boolean volumeDevices: description: volumeDevices is the list of block @@ -5706,46 +5274,45 @@ spec: type: object type: array volumeMounts: - description: Pod volumes to mount into the container's - filesystem. Cannot be updated. + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. items: description: VolumeMount describes a mounting of a Volume within a container. properties: mountPath: - description: Path within the container at - which the volume should be mounted. Must + description: |- + Path within the container at which the volume should be mounted. Must not contain ':'. type: string mountPropagation: - description: mountPropagation determines how - mounts are propagated from the host to container - and the other way around. When not set, - MountPropagationNone is used. This field - is beta in 1.10. + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. type: string name: description: This must match the Name of a Volume. type: string readOnly: - description: Mounted read-only if true, read-write - otherwise (false or unspecified). Defaults - to false. + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. type: boolean subPath: - description: Path within the volume from which - the container's volume should be mounted. + description: |- + Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). type: string subPathExpr: - description: Expanded path within the volume - from which the container's volume should - be mounted. Behaves similarly to SubPath - but environment variable references $(VAR_NAME) - are expanded using the container's environment. - Defaults to "" (volume's root). SubPathExpr - and SubPath are mutually exclusive. + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). 
+ SubPathExpr and SubPath are mutually exclusive. type: string required: - mountPath @@ -5753,57 +5320,70 @@ spec: type: object type: array workingDir: - description: Container's working directory. If not - specified, the container runtime's default will - be used, which might be configured in the container - image. Cannot be updated. + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. type: string required: - name type: object type: array nodeName: - description: NodeName is a request to schedule this pod - onto a specific node. If it is non-empty, the scheduler - simply schedules this pod onto that node, assuming that - it fits resource requirements. + description: |- + NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + the scheduler simply schedules this pod onto that node, assuming that it fits resource + requirements. type: string nodeSelector: additionalProperties: type: string - description: 'NodeSelector is a selector which must be - true for the pod to fit on a node. Selector which must - match a node''s labels for the pod to be scheduled on - that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ type: object x-kubernetes-map-type: atomic os: - description: "Specifies the OS of the containers in the - pod. Some pod and container fields are restricted if - this is set. \n If the OS field is set to linux, the - following fields must be unset: -securityContext.windowsOptions - \n If the OS field is set to windows, following fields - must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - - spec.securityContext.sysctls - spec.shareProcessNamespace - - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. 
+ + + If the OS field is set to linux, the following fields must be unset: + -securityContext.windowsOptions + + + If the OS field is set to windows, following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - - spec.containers[*].securityContext.capabilities - - spec.containers[*].securityContext.readOnlyRootFilesystem - - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - - spec.containers[*].securityContext.runAsGroup" + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup properties: name: - description: 'Name is the name of the operating system. - The currently supported values are linux and windows. - Additional value may be defined in future and can - be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration - Clients should expect to handle additional values - and treat unrecognized values in this field as os: - null' + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. + Additional value may be defined in future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null type: string required: - name @@ -5815,48 +5395,45 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Overhead represents the resource overhead - associated with running a pod for a given RuntimeClass. - This field will be autopopulated at admission time by - the RuntimeClass admission controller. If the RuntimeClass - admission controller is enabled, overhead must not be - set in Pod create requests. The RuntimeClass admission - controller will reject Pod create requests which have - the overhead already set. If RuntimeClass is configured - and selected in the PodSpec, Overhead will be set to - the value defined in the corresponding RuntimeClass, - otherwise it will remain unset and treated as zero. - More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md' + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. 
+ The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md type: object preemptionPolicy: - description: PreemptionPolicy is the Policy for preempting - pods with lower priority. One of Never, PreemptLowerPriority. + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. type: string priority: - description: The priority value. Various system components - use this field to find the priority of the pod. When - Priority Admission Controller is enabled, it prevents - users from setting this field. The admission controller - populates this field from PriorityClassName. The higher - the value, the higher the priority. + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. format: int32 type: integer priorityClassName: - description: If specified, indicates the pod's priority. - "system-node-critical" and "system-cluster-critical" - are two special keywords which indicate the highest - priorities with the former being the highest priority. - Any other name must be defined by creating a PriorityClass - object with that name. If not specified, the pod priority - will be default or zero if there is no default. + description: |- + If specified, indicates the pod's priority. "system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. type: string readinessGates: - description: 'If specified, all readiness gates will be - evaluated for pod readiness. A pod is ready when all - its containers are ready AND all conditions specified - in the readiness gates have status equal to "True" More - info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates' + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates items: description: PodReadinessGate contains the reference to a pod condition @@ -5870,46 +5447,54 @@ spec: type: object type: array resourceClaims: - description: "ResourceClaims defines which ResourceClaims - must be allocated and reserved before the Pod is allowed - to start. The resources will be made available to those - containers which consume them by name. \n This is an - alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable." + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. 
The resources + will be made available to those containers which consume them + by name. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. items: - description: PodResourceClaim references exactly one - ResourceClaim through a ClaimSource. It adds a name - to it that uniquely identifies the ResourceClaim inside - the Pod. Containers that need access to the ResourceClaim - reference it with this name. + description: |- + PodResourceClaim references exactly one ResourceClaim through a ClaimSource. + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. properties: name: - description: Name uniquely identifies this resource - claim inside the pod. This must be a DNS_LABEL. + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. type: string source: description: Source describes where to find the ResourceClaim. properties: resourceClaimName: - description: ResourceClaimName is the name of - a ResourceClaim object in the same namespace - as this pod. + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. type: string resourceClaimTemplateName: - description: "ResourceClaimTemplateName is the - name of a ResourceClaimTemplate object in - the same namespace as this pod. \n The template - will be used to create a new ResourceClaim, - which will be bound to this pod. When this - pod is deleted, the ResourceClaim will also - be deleted. The pod name and resource name, - along with a generated component, will be - used to form a unique name for the ResourceClaim, - which will be recorded in pod.status.resourceClaimStatuses. - \n This field is immutable and no changes - will be made to the corresponding ResourceClaim - by the control plane after creating the ResourceClaim." + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. type: string type: object required: @@ -5920,41 +5505,44 @@ spec: - name x-kubernetes-list-type: map restartPolicy: - description: 'Restart policy for all containers within - the pod. One of Always, OnFailure, Never. In some contexts, - only a subset of those values may be permitted. Default - to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy type: string runtimeClassName: - description: 'RuntimeClassName refers to a RuntimeClass - object in the node.k8s.io group, which should be used - to run this pod. If no RuntimeClass resource matches - the named class, the pod will not be run. 
If unset or - empty, the "legacy" RuntimeClass will be used, which - is an implicit class with an empty definition that uses - the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class' + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class type: string schedulerName: - description: If specified, the pod will be dispatched - by specified scheduler. If not specified, the pod will - be dispatched by default scheduler. + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. type: string schedulingGates: - description: "SchedulingGates is an opaque list of values - that if specified will block scheduling the pod. If - schedulingGates is not empty, the pod will stay in the - SchedulingGated state and the scheduler will not attempt - to schedule the pod. \n SchedulingGates can only be - set at pod creation time, and be removed only afterwards. - \n This is a beta feature enabled by the PodSchedulingReadiness - feature gate." + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + + + This is a beta feature enabled by the PodSchedulingReadiness feature gate. items: description: PodSchedulingGate is associated to a Pod to guard its scheduling. properties: name: - description: Name of the scheduling gate. Each scheduling - gate must have a unique name field. + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. type: string required: - name @@ -5964,75 +5552,73 @@ spec: - name x-kubernetes-list-type: map securityContext: - description: 'SecurityContext holds pod-level security - attributes and common container settings. Optional: - Defaults to empty. See type description for default - values of each field.' + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. properties: fsGroup: - description: "A special supplemental group that applies - to all containers in a pod. Some volume types allow - the Kubelet to change the ownership of that volume - to be owned by the pod: \n 1. The owning GID will - be the FSGroup 2. The setgid bit is set (new files - created in the volume will be owned by FSGroup) - 3. The permission bits are OR'd with rw-rw---- \n - If unset, the Kubelet will not modify the ownership - and permissions of any volume. Note that this field - cannot be set when spec.os.name is windows." + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. 
The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer fsGroupChangePolicy: - description: 'fsGroupChangePolicy defines behavior - of changing ownership and permission of the volume - before being exposed inside Pod. This field will - only apply to volume types which support fsGroup - based ownership(and permissions). It will have no - effect on ephemeral volume types such as: secret, - configmaps and emptydir. Valid values are "OnRootMismatch" - and "Always". If not specified, "Always" is used. - Note that this field cannot be set when spec.os.name - is windows.' + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. type: string runAsGroup: - description: The GID to run the entrypoint of the - container process. Uses runtime default if unset. - May also be set in SecurityContext. If set in both - SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence for - that container. Note that this field cannot be set - when spec.os.name is windows. + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: - description: Indicates that the container must run - as a non-root user. If true, the Kubelet will validate - the image at runtime to ensure that it does not - run as UID 0 (root) and fail to start the container - if it does. If unset or false, no such validation - will be performed. May also be set in SecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: boolean runAsUser: - description: The UID to run the entrypoint of the - container process. Defaults to user specified in - image metadata if unspecified. May also be set in - SecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. Note that this - field cannot be set when spec.os.name is windows. + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer seLinuxOptions: - description: The SELinux context to be applied to - all containers. If unspecified, the container runtime - will allocate a random SELinux context for each - container. May also be set in SecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence - for that container. Note that this field cannot - be set when spec.os.name is windows. + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. properties: level: description: Level is SELinux level label that @@ -6052,52 +5638,48 @@ spec: type: string type: object seccompProfile: - description: The seccomp options to use by the containers - in this pod. Note that this field cannot be set - when spec.os.name is windows. + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. properties: localhostProfile: - description: localhostProfile indicates a profile - defined in a file on the node should be used. - The profile must be preconfigured on the node - to work. Must be a descending path, relative - to the kubelet's configured seccomp profile - location. Must be set if type is "Localhost". - Must NOT be set for any other type. + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string type: - description: "type indicates which kind of seccomp - profile will be applied. Valid options are: - \n Localhost - a profile defined in a file on - the node should be used. RuntimeDefault - the - container runtime default profile should be - used. Unconfined - no profile should be applied." + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string required: - type type: object supplementalGroups: - description: A list of groups applied to the first - process run in each container, in addition to the - container's primary GID, the fsGroup (if specified), - and group memberships defined in the container image - for the uid of the container process. If unspecified, - no additional groups are added to any container. - Note that group memberships defined in the container - image for the uid of the container process are still - effective, even if they are not included in this - list. Note that this field cannot be set when spec.os.name - is windows. 
+ description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer type: array sysctls: - description: Sysctls hold a list of namespaced sysctls - used for the pod. Pods with unsupported sysctls - (by the container runtime) might fail to launch. - Note that this field cannot be set when spec.os.name - is windows. + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. items: description: Sysctl defines a kernel parameter to be set @@ -6114,180 +5696,158 @@ spec: type: object type: array windowsOptions: - description: The Windows specific settings applied - to all containers. If unspecified, the options within - a container's SecurityContext will be used. If set - in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name - is linux. + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. properties: gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA - admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential - spec named by the GMSACredentialSpecName field. + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. type: string gmsaCredentialSpecName: description: GMSACredentialSpecName is the name of the GMSA credential spec to use. type: string hostProcess: - description: HostProcess determines if a container - should be run as a 'Host Process' container. - All of a Pod's containers must have the same - effective HostProcess value (it is not allowed - to have a mix of HostProcess containers and - non-HostProcess containers). In addition, if - HostProcess is true then HostNetwork must also - be set to true. + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean runAsUserName: - description: The UserName in Windows to run the - entrypoint of the container process. Defaults - to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set - in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. 
+ description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: string type: object type: object serviceAccount: - description: 'DeprecatedServiceAccount is a depreciated - alias for ServiceAccountName. Deprecated: Use serviceAccountName - instead.' + description: |- + DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. type: string serviceAccountName: - description: 'ServiceAccountName is the name of the ServiceAccount - to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/' + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ type: string setHostnameAsFQDN: - description: If true the pod's hostname will be configured - as the pod's FQDN, rather than the leaf name (the default). - In Linux containers, this means setting the FQDN in - the hostname field of the kernel (the nodename field - of struct utsname). In Windows containers, this means - setting the registry value of hostname for the registry - key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters - to FQDN. If a pod does not have FQDN, this has no effect. + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. Default to false. type: boolean shareProcessNamespace: - description: 'Share a single process namespace between - all of the containers in a pod. When this is set containers - will be able to view and signal processes from other - containers in the same pod, and the first process in - each container will not be assigned PID 1. HostPID and - ShareProcessNamespace cannot both be set. Optional: - Default to false.' + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Default to false. type: boolean subdomain: - description: If specified, the fully qualified Pod hostname - will be "...svc.". If not specified, the pod will not have a - domainname at all. + description: |- + If specified, the fully qualified Pod hostname will be "...svc.". + If not specified, the pod will not have a domainname at all. type: string terminationGracePeriodSeconds: - description: Optional duration in seconds the pod needs - to terminate gracefully. May be decreased in delete - request. Value must be non-negative integer. The value - zero indicates stop immediately via the kill signal - (no opportunity to shut down). If this value is nil, - the default grace period will be used instead. 
The grace - period is the duration in seconds after the processes - running in the pod are sent a termination signal and - the time when the processes are forcibly halted with - a kill signal. Set this value longer than the expected - cleanup time for your process. Defaults to 30 seconds. + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. format: int64 type: integer tolerations: description: If specified, the pod's tolerations. items: - description: The pod this Toleration is attached to - tolerates any taint that matches the triple - using the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to - match. Empty means match all taint effects. When - specified, allowed values are NoSchedule, PreferNoSchedule - and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; - this combination means to match all values and - all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints - of a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect - NoExecute, otherwise this field is ignored) tolerates - the taint. By default, it is not set, which means - tolerate the taint forever (do not evict). Zero - and negative values will be treated as 0 (evict - immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value - should be empty, otherwise just a regular string. 
+ description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array topologySpreadConstraints: - description: TopologySpreadConstraints describes how a - group of pods ought to spread across topology domains. - Scheduler will schedule pods in a way which abides by - the constraints. All topologySpreadConstraints are ANDed. + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. items: description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. properties: labelSelector: - description: LabelSelector is used to find matching - pods. Pods that match this label selector are - counted to determine the number of pods in their - corresponding topology domain. + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string @@ -6300,147 +5860,134 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic matchLabelKeys: - description: "MatchLabelKeys is a set of pod label - keys to select the pods over which spreading will - be calculated. 
The keys are used to lookup values - from the incoming pod labels, those key-value - labels are ANDed with labelSelector to select - the group of existing pods over which spreading - will be calculated for the incoming pod. The same - key is forbidden to exist in both MatchLabelKeys - and LabelSelector. MatchLabelKeys cannot be set - when LabelSelector isn't set. Keys that don't - exist in the incoming pod labels will be ignored. - A null or empty list means only match against - labelSelector. \n This is a beta field and requires - the MatchLabelKeysInPodTopologySpread feature - gate to be enabled (enabled by default)." + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). items: type: string type: array x-kubernetes-list-type: atomic maxSkew: - description: 'MaxSkew describes the degree to which - pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, - it is the maximum permitted difference between - the number of matching pods in the target topology - and the global minimum. The global minimum is - the minimum number of matching pods in an eligible - domain or zero if the number of eligible domains - is less than MinDomains. For example, in a 3-zone - cluster, MaxSkew is set to 1, and pods with the - same labelSelector spread as 2/2/1: In this case, - the global minimum is 1. | zone1 | zone2 | zone3 - | | P P | P P | P | - if MaxSkew is 1, - incoming pod can only be scheduled to zone3 to - become 2/2/2; scheduling it onto zone1(zone2) - would make the ActualSkew(3-1) on zone1(zone2) - violate MaxSkew(1). - if MaxSkew is 2, incoming - pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, - it is used to give higher precedence to topologies - that satisfy it. It''s a required field. Default - value is 1 and 0 is not allowed.' + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. 
format: int32 type: integer minDomains: - description: "MinDomains indicates a minimum number - of eligible domains. When the number of eligible - domains with matching topology keys is less than - minDomains, Pod Topology Spread treats \"global - minimum\" as 0, and then the calculation of Skew - is performed. And when the number of eligible - domains with matching topology keys equals or - greater than minDomains, this value has no effect - on scheduling. As a result, when the number of - eligible domains is less than minDomains, scheduler - won't schedule more than maxSkew Pods to those - domains. If value is nil, the constraint behaves - as if MinDomains is equal to 1. Valid values are - integers greater than 0. When value is not nil, - WhenUnsatisfiable must be DoNotSchedule. \n For - example, in a 3-zone cluster, MaxSkew is set to - 2, MinDomains is set to 5 and pods with the same - labelSelector spread as 2/2/2: | zone1 | zone2 - | zone3 | | P P | P P | P P | The number - of domains is less than 5(MinDomains), so \"global - minimum\" is treated as 0. In this situation, - new pod with the same labelSelector cannot be - scheduled, because computed skew will be 3(3 - - 0) if new Pod is scheduled to any of the three - zones, it will violate MaxSkew. \n This is a beta - field and requires the MinDomainsInPodTopologySpread - feature gate to be enabled (enabled by default)." + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we - will treat Pod's nodeAffinity/nodeSelector when - calculating pod topology spread skew. Options - are: - Honor: only nodes matching nodeAffinity/nodeSelector - are included in the calculations. - Ignore: nodeAffinity/nodeSelector - are ignored. All nodes are included in the calculations. - \n If this value is nil, the behavior is equivalent - to the Honor policy. This is a beta-level feature - default enabled by the NodeInclusionPolicyInPodTopologySpread - feature flag." + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. 
Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we - will treat node taints when calculating pod topology - spread skew. Options are: - Honor: nodes without - taints, along with tainted nodes for which the - incoming pod has a toleration, are included. - - Ignore: node taints are ignored. All nodes are - included. \n If this value is nil, the behavior - is equivalent to the Ignore policy. This is a - beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread - feature flag." + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: - description: TopologyKey is the key of node labels. - Nodes that have a label with this key and identical - values are considered to be in the same topology. - We consider each as a "bucket", and - try to put balanced number of pods into each bucket. - We define a domain as a particular instance of - a topology. Also, we define an eligible domain - as a domain whose nodes meet the requirements - of nodeAffinityPolicy and nodeTaintsPolicy. e.g. - If TopologyKey is "kubernetes.io/hostname", each - Node is a domain of that topology. And, if TopologyKey - is "topology.kubernetes.io/zone", each zone is - a domain of that topology. It's a required field. + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. type: string whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to - deal with a pod if it doesn''t satisfy the spread - constraint. - DoNotSchedule (default) tells the - scheduler not to schedule it. - ScheduleAnyway - tells the scheduler to schedule the pod in any - location, but giving higher precedence to topologies - that would help reduce the skew. A constraint - is considered "Unsatisfiable" for an incoming - pod if and only if every possible node assignment - for that pod would violate "MaxSkew" on some topology. 
- For example, in a 3-zone cluster, MaxSkew is set - to 1, and pods with the same labelSelector spread - as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, - incoming pod can only be scheduled to zone2(zone3) - to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) - satisfies MaxSkew(1). In other words, the cluster - can still be imbalanced, but scheduler won''t - make it *more* imbalanced. It''s a required field.' + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. type: string required: - maxSkew @@ -6453,47 +6000,44 @@ spec: - whenUnsatisfiable x-kubernetes-list-type: map volumes: - description: 'List of volumes that can be mounted by containers - belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes' + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes items: description: Volume represents a named volume in a pod that may be accessed by any container in the pod. properties: awsElasticBlockStore: - description: 'awsElasticBlockStore represents an - AWS Disk resource that is attached to a kubelet''s - host machine and then exposed to the pod. More - info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: - description: 'fsType is the filesystem type - of the volume that you want to mount. Tip: - Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - TODO: how do we prevent errors in the filesystem - from compromising the machine' + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising the machine type: string partition: - description: 'partition is the partition in - the volume that you want to mount. If omitted, - the default is to mount by volume name. 
Examples: - For volume /dev/sda1, you specify the partition - as "1". Similarly, the volume partition for - /dev/sda is "0" (or you can leave the property - empty).' + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). format: int32 type: integer readOnly: - description: 'readOnly value true will force - the readOnly setting in VolumeMounts. More - info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore type: boolean volumeID: - description: 'volumeID is unique ID of the persistent - disk resource in AWS (Amazon EBS volume). - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore type: string required: - volumeID @@ -6515,11 +6059,10 @@ spec: in the blob storage type: string fsType: - description: fsType is Filesystem type to mount. - Must be a filesystem type supported by the - host operating system. Ex. "ext4", "xfs", - "ntfs". Implicitly inferred to be "ext4" if - unspecified. + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. type: string kind: description: 'kind expected values are Shared: @@ -6529,9 +6072,9 @@ spec: set). defaults to shared' type: string readOnly: - description: readOnly Defaults to false (read/write). - ReadOnly here will force the ReadOnly setting - in VolumeMounts. + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. type: boolean required: - diskName @@ -6543,9 +6086,9 @@ spec: pod. properties: readOnly: - description: readOnly defaults to false (read/write). - ReadOnly here will force the ReadOnly setting - in VolumeMounts. + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. type: boolean secretName: description: secretName is the name of secret @@ -6564,9 +6107,9 @@ spec: the host that shares a pod's lifetime properties: monitors: - description: 'monitors is Required: Monitors - is a collection of Ceph monitors More info: - https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it items: type: string type: array @@ -6576,70 +6119,72 @@ spec: default is /' type: string readOnly: - description: 'readOnly is Optional: Defaults - to false (read/write). ReadOnly here will - force the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it type: boolean secretFile: - description: 'secretFile is Optional: SecretFile - is the path to key ring for User, default - is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it type: string secretRef: - description: 'secretRef is Optional: SecretRef - is reference to the authentication secret - for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it properties: name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic user: - description: 'user is optional: User is the - rados user name, default is admin More info: - https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it type: string required: - monitors type: object cinder: - description: 'cinder represents a cinder volume - attached and mounted on kubelets host machine. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: - description: 'fsType is the filesystem type - to mount. Must be a filesystem type supported - by the host operating system. Examples: "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md type: string readOnly: - description: 'readOnly defaults to false (read/write). - ReadOnly here will force the ReadOnly setting - in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md type: boolean secretRef: - description: 'secretRef is optional: points - to a secret object containing parameters used - to connect to OpenStack.' + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. properties: name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' 
+ description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic volumeID: - description: 'volumeID used to identify the - volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md type: string required: - volumeID @@ -6649,31 +6194,25 @@ spec: should populate this volume properties: defaultMode: - description: 'defaultMode is optional: mode - bits used to set permissions on created files - by default. Must be an octal value between - 0000 and 0777 or a decimal value between 0 - and 511. YAML accepts both octal and decimal - values, JSON requires decimal values for mode - bits. Defaults to 0644. Directories within - the path are not affected by this setting. - This might be in conflict with other options - that affect the file mode, like fsGroup, and - the result can be other mode bits set.' + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the ConfigMap, the - volume setup will error unless it is marked - optional. Paths must be relative and may not - contain the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -6682,26 +6221,21 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 - and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and - decimal values, JSON requires decimal - values for mode bits. If not specified, - the volume defaultMode will be used. - This might be in conflict with other - options that affect the file mode, like - fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path - of the file to map the key to. May not - be an absolute path. May not contain - the path element '..'. May not start - with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key @@ -6709,10 +6243,10 @@ spec: type: object type: array name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: optional specify whether the ConfigMap @@ -6726,47 +6260,43 @@ spec: CSI drivers (Beta feature). properties: driver: - description: driver is the name of the CSI driver - that handles this volume. Consult with your - admin for the correct name as registered in - the cluster. + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. type: string fsType: - description: fsType to mount. Ex. "ext4", "xfs", - "ntfs". If not provided, the empty value is - passed to the associated CSI driver which - will determine the default filesystem to apply. + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. type: string nodePublishSecretRef: - description: nodePublishSecretRef is a reference - to the secret object containing sensitive - information to pass to the CSI driver to complete - the CSI NodePublishVolume and NodeUnpublishVolume - calls. This field is optional, and may be - empty if no secret is required. If the secret - object contains more than one secret, all - secret references are passed. + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. properties: name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic readOnly: - description: readOnly specifies a read-only - configuration for the volume. Defaults to - false (read/write). + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). 
type: boolean volumeAttributes: additionalProperties: type: string - description: volumeAttributes stores driver-specific - properties that are passed to the CSI driver. - Consult your driver's documentation for supported - values. + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. type: object required: - driver @@ -6776,18 +6306,15 @@ spec: about the pod that should populate this volume properties: defaultMode: - description: 'Optional: mode bits to use on - created files by default. Must be a Optional: - mode bits used to set permissions on created - files by default. Must be an octal value between - 0000 and 0777 or a decimal value between 0 - and 511. YAML accepts both octal and decimal - values, JSON requires decimal values for mode - bits. Defaults to 0644. Directories within - the path are not affected by this setting. - This might be in conflict with other options - that affect the file mode, like fsGroup, and - the result can be other mode bits set.' + description: |- + Optional: mode bits to use on created files by default. Must be a + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer items: @@ -6817,18 +6344,13 @@ spec: type: object x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used - to set permissions on this file, must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. - YAML accepts both octal and decimal - values, JSON requires decimal values - for mode bits. If not specified, the - volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits - set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -6840,11 +6362,9 @@ spec: must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the - container: only resources limits and - requests (limits.cpu, limits.memory, - requests.cpu and requests.memory) are - currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. properties: containerName: description: 'Container name: required @@ -6873,128 +6393,125 @@ spec: type: array type: object emptyDir: - description: 'emptyDir represents a temporary directory - that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir properties: medium: - description: 'medium represents what type of - storage medium should back this directory. - The default is "" which means to use the node''s - default medium. Must be an empty string (default) - or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir type: string sizeLimit: anyOf: - type: integer - type: string - description: 'sizeLimit is the total amount - of local storage required for this EmptyDir - volume. The size limit is also applicable - for memory medium. The maximum usage on memory - medium EmptyDir would be the minimum value - between the SizeLimit specified here and the - sum of memory limits of all containers in - a pod. The default is nil which means that - the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object ephemeral: - description: "ephemeral represents a volume that - is handled by a cluster storage driver. The volume's - lifecycle is tied to the pod that defines it - - it will be created before the pod starts, and - deleted when the pod is removed. \n Use this if: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + + Use this if: a) the volume is only needed while the pod runs, - b) features of normal volumes like restoring from - snapshot or capacity tracking are needed, c) the - storage driver is specified through a storage - class, and d) the storage driver supports dynamic - volume provisioning through a PersistentVolumeClaim - (see EphemeralVolumeSource for more information - on the connection between this volume type and - PersistentVolumeClaim). \n Use PersistentVolumeClaim - or one of the vendor-specific APIs for volumes - that persist for longer than the lifecycle of - an individual pod. \n Use CSI for light-weight - local ephemeral volumes if the CSI driver is meant - to be used that way - see the documentation of - the driver for more information. \n A pod can - use both types of ephemeral volumes and persistent - volumes at the same time." + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). 
+ + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. properties: volumeClaimTemplate: - description: "Will be used to create a stand-alone - PVC to provision the volume. The pod in which - this EphemeralVolumeSource is embedded will - be the owner of the PVC, i.e. the PVC will - be deleted together with the pod. The name - of the PVC will be `-` - where `` is the name from the - `PodSpec.Volumes` array entry. Pod validation - will reject the pod if the concatenated name + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). - \n An existing PVC with that name that is - not owned by the pod will *not* be used for - the pod to avoid using an unrelated volume - by mistake. Starting the pod is then blocked - until the unrelated PVC is removed. If such - a pre-created PVC is meant to be used by the - pod, the PVC has to updated with an owner - reference to the pod once the pod exists. - Normally this should not be necessary, but - it may be useful when manually reconstructing - a broken cluster. \n This field is read-only - and no changes will be made by Kubernetes - to the PVC after it has been created. \n Required, - must not be nil." + + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + + Required, must not be nil. properties: metadata: - description: May contain labels and annotations - that will be copied into the PVC when - creating it. No other fields are allowed - and will be rejected during validation. + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. type: object spec: - description: The specification for the PersistentVolumeClaim. - The entire content is copied unchanged - into the PVC that gets created from this + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here. properties: accessModes: - description: 'accessModes contains the - desired access modes the volume should - have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: type: string type: array dataSource: - description: 'dataSource field can be - used to specify either: * An existing - VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external - controller can support the specified - data source, it will create a new - volume based on the contents of the - specified data source. When the AnyVolumeDataSource - feature gate is enabled, dataSource - contents will be copied to dataSourceRef, - and dataSourceRef contents will be - copied to dataSource when dataSourceRef.namespace - is not specified. If the namespace - is specified, then dataSourceRef will - not be copied to dataSource.' + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. properties: apiGroup: - description: APIGroup is the group - for the resource being referenced. - If APIGroup is not specified, - the specified Kind must be in - the core API group. For any other - third-party types, APIGroup is - required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of @@ -7010,57 +6527,36 @@ spec: type: object x-kubernetes-map-type: atomic dataSourceRef: - description: 'dataSourceRef specifies - the object from which to populate - the volume with data, if a non-empty - volume is desired. This may be any - object from a non-empty API group - (non core object) or a PersistentVolumeClaim - object. When this field is specified, - volume binding will only succeed if - the type of the specified object matches - some installed volume populator or - dynamic provisioner. This field will - replace the functionality of the dataSource - field and as such if both fields are - non-empty, they must have the same - value. For backwards compatibility, - when namespace isn''t specified in - dataSourceRef, both fields (dataSource - and dataSourceRef) will be set to - the same value automatically if one - of them is empty and the other is - non-empty. When namespace is specified - in dataSourceRef, dataSource isn''t - set to the same value and must be - empty. There are three important differences - between dataSource and dataSourceRef: - * While dataSource only allows two - specific types of objects, dataSourceRef - allows any non-core object, as well - as PersistentVolumeClaim objects. - * While dataSource ignores disallowed - values (dropping them), dataSourceRef - preserves all values, and generates - an error if a disallowed value is - specified. 
* While dataSource only - allows local objects, dataSourceRef - allows objects in any namespaces. - (Beta) Using this field requires the - AnyVolumeDataSource feature gate to - be enabled. (Alpha) Using the namespace - field of dataSourceRef requires the - CrossNamespaceVolumeDataSource feature - gate to be enabled.' + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. properties: apiGroup: - description: APIGroup is the group - for the resource being referenced. - If APIGroup is not specified, - the specified Kind must be in - the core API group. For any other - third-party types, APIGroup is - required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of @@ -7071,53 +6567,43 @@ spec: resource being referenced type: string namespace: - description: Namespace is the namespace - of resource being referenced Note - that when a namespace is specified, - a gateway.networking.k8s.io/ReferenceGrant - object is required in the referent - namespace to allow that namespace's - owner to accept the reference. - See the ReferenceGrant documentation - for details. (Alpha) This field - requires the CrossNamespaceVolumeDataSource - feature gate to be enabled. + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. type: string required: - kind - name type: object resources: - description: 'resources represents the - minimum resources the volume should - have. 
If RecoverVolumeExpansionFailure - feature is enabled users are allowed - to specify resource requirements that - are lower than previous value but - must still be higher than capacity - recorded in the status field of the - claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: claims: - description: "Claims lists the names - of resources, defined in spec.resourceClaims, + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - \n This is an alpha field and - requires enabling the DynamicResourceAllocation - feature gate. \n This field is - immutable. It can only be set - for containers." + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match - the name of one entry in - pod.spec.resourceClaims - of the Pod where this field - is used. It makes that resource - available inside a container. + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. type: string required: - name @@ -7133,9 +6619,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the - maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -7144,15 +6630,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes - the minimum amount of compute - resources required. If Requests - is omitted for a container, it - defaults to Limits if that is - explicitly specified, otherwise - to an implementation-defined value. - Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object selector: @@ -7164,11 +6646,9 @@ spec: a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -7176,23 +6656,16 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -7204,28 +6677,22 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the - name of the StorageClass required - by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 type: string volumeMode: - description: volumeMode defines what - type of volume is required by the - claim. Value of Filesystem is implied - when not included in claim spec. + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. type: string volumeName: description: volumeName is the binding @@ -7243,12 +6710,11 @@ spec: then exposed to the pod. properties: fsType: - description: 'fsType is the filesystem type - to mount. Must be a filesystem type supported - by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. TODO: how do we prevent errors - in the filesystem from compromising the machine' + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ TODO: how do we prevent errors in the filesystem from compromising the machine type: string lun: description: 'lun is Optional: FC target lun @@ -7256,9 +6722,9 @@ spec: format: int32 type: integer readOnly: - description: 'readOnly is Optional: Defaults - to false (read/write). ReadOnly here will - force the ReadOnly setting in VolumeMounts.' + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. type: boolean targetWWNs: description: 'targetWWNs is Optional: FC target @@ -7267,29 +6733,27 @@ spec: type: string type: array wwids: - description: 'wwids Optional: FC volume world - wide identifiers (wwids) Either wwids or combination - of targetWWNs and lun must be set, but not - both simultaneously.' + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. items: type: string type: array type: object flexVolume: - description: flexVolume represents a generic volume - resource that is provisioned/attached using an - exec based plugin. + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. properties: driver: description: driver is the name of the driver to use for this volume. type: string fsType: - description: fsType is the filesystem type to - mount. Must be a filesystem type supported - by the host operating system. Ex. "ext4", - "xfs", "ntfs". The default filesystem depends - on FlexVolume script. + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. type: string options: additionalProperties: @@ -7298,24 +6762,23 @@ spec: holds extra command options if any.' type: object readOnly: - description: 'readOnly is Optional: defaults - to false (read/write). ReadOnly here will - force the ReadOnly setting in VolumeMounts.' + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. type: boolean secretRef: - description: 'secretRef is Optional: secretRef - is reference to the secret object containing - sensitive information to pass to the plugin - scripts. This may be empty if no secret object - is specified. If the secret object contains - more than one secret, all secrets are passed - to the plugin scripts.' + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. properties: name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
type: string type: object x-kubernetes-map-type: atomic @@ -7328,9 +6791,9 @@ spec: on the Flocker control service being running properties: datasetName: - description: datasetName is Name of the dataset - stored as metadata -> name on the dataset - for Flocker should be considered as deprecated + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated type: string datasetUUID: description: datasetUUID is the UUID of the @@ -7339,59 +6802,55 @@ spec: type: string type: object gcePersistentDisk: - description: 'gcePersistentDisk represents a GCE - Disk resource that is attached to a kubelet''s - host machine and then exposed to the pod. More - info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: - description: 'fsType is filesystem type of the - volume that you want to mount. Tip: Ensure - that the filesystem type is supported by the - host operating system. Examples: "ext4", "xfs", - "ntfs". Implicitly inferred to be "ext4" if - unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem - from compromising the machine' + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising the machine type: string partition: - description: 'partition is the partition in - the volume that you want to mount. If omitted, - the default is to mount by volume name. Examples: - For volume /dev/sda1, you specify the partition - as "1". Similarly, the volume partition for - /dev/sda is "0" (or you can leave the property - empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk format: int32 type: integer pdName: - description: 'pdName is unique name of the PD - resource in GCE. Used to identify the disk - in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk type: string readOnly: - description: 'readOnly here will force the ReadOnly - setting in VolumeMounts. Defaults to false. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk type: boolean required: - pdName type: object gitRepo: - description: 'gitRepo represents a git repository - at a particular revision. DEPRECATED: GitRepo - is deprecated. To provision a container with a - git repo, mount an EmptyDir into an InitContainer - that clones the repo using git, then mount the - EmptyDir into the Pod''s container.' + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. properties: directory: - description: directory is the target directory - name. Must not contain or start with '..'. If - '.' is supplied, the volume directory will - be the git repository. Otherwise, if specified, - the volume will contain the git repository - in the subdirectory with the given name. + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. type: string repository: description: repository is the URL @@ -7404,55 +6863,61 @@ spec: - repository type: object glusterfs: - description: 'glusterfs represents a Glusterfs mount - on the host that shares a pod''s lifetime. More - info: https://examples.k8s.io/volumes/glusterfs/README.md' + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: 'endpoints is the endpoint name - that details Glusterfs topology. More info: - https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod type: string path: - description: 'path is the Glusterfs volume path. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod type: string readOnly: - description: 'readOnly here will force the Glusterfs - volume to be mounted with read-only permissions. - Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod type: boolean required: - endpoints - path type: object hostPath: - description: 'hostPath represents a pre-existing - file or directory on the host machine that is - directly exposed to the container. This is generally - used for system agents or other privileged things - that are allowed to see the host machine. Most - containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- TODO(jonesdl) We need to restrict who can - use host directory mounts and who can/can not - mount host directories as read/write.' + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. 
This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- + TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + mount host directories as read/write. properties: path: - description: 'path of the directory on the host. - If the path is a symlink, it will follow the - link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath type: string type: - description: 'type for HostPath Volume Defaults - to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath type: string required: - path type: object iscsi: - description: 'iscsi represents an ISCSI Disk resource - that is attached to a kubelet''s host machine - and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether @@ -7463,30 +6928,27 @@ spec: support iSCSI Session CHAP authentication type: boolean fsType: - description: 'fsType is the filesystem type - of the volume that you want to mount. Tip: - Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem - from compromising the machine' + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising the machine type: string initiatorName: - description: initiatorName is the custom iSCSI - Initiator Name. If initiatorName is specified - with iscsiInterface simultaneously, new iSCSI - interface : will - be created for the connection. + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. type: string iqn: description: iqn is the target iSCSI Qualified Name. type: string iscsiInterface: - description: iscsiInterface is the interface - Name that uses an iSCSI transport. Defaults - to 'default' (tcp). + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). type: string lun: description: lun represents iSCSI Target Lun @@ -7494,34 +6956,33 @@ spec: format: int32 type: integer portals: - description: portals is the iSCSI Target Portal - List. The portal is either an IP or ip_addr:port - if the port is other than default (typically - TCP ports 860 and 3260). 
+ description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). items: type: string type: array readOnly: - description: readOnly here will force the ReadOnly - setting in VolumeMounts. Defaults to false. + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. type: boolean secretRef: description: secretRef is the CHAP Secret for iSCSI target and initiator authentication properties: name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic targetPortal: - description: targetPortal is iSCSI Target Portal. - The Portal is either an IP or ip_addr:port - if the port is other than default (typically - TCP ports 860 and 3260). + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). type: string required: - iqn @@ -7529,44 +6990,51 @@ spec: - targetPortal type: object name: - description: 'name of the volume. Must be a DNS_LABEL - and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string nfs: - description: 'nfs represents an NFS mount on the - host that shares a pod''s lifetime More info: - https://kubernetes.io/docs/concepts/storage/volumes#nfs' + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs properties: path: - description: 'path that is exported by the NFS - server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs type: string readOnly: - description: 'readOnly here will force the NFS - export to be mounted with read-only permissions. - Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs type: boolean server: - description: 'server is the hostname or IP address - of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs type: string required: - path - server type: object persistentVolumeClaim: - description: 'persistentVolumeClaimVolumeSource - represents a reference to a PersistentVolumeClaim - in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims properties: claimName: - description: 'claimName is the name of a PersistentVolumeClaim - in the same namespace as the pod using this - volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims type: string readOnly: - description: readOnly Will force the ReadOnly - setting in VolumeMounts. Default false. + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. type: boolean required: - claimName @@ -7577,11 +7045,10 @@ spec: host machine properties: fsType: - description: fsType is the filesystem type to - mount. Must be a filesystem type supported - by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. type: string pdID: description: pdID is the ID that identifies @@ -7595,16 +7062,15 @@ spec: volume attached and mounted on kubelets host machine properties: fsType: - description: fSType represents the filesystem - type to mount Must be a filesystem type supported - by the host operating system. Ex. "ext4", - "xfs". Implicitly inferred to be "ext4" if - unspecified. + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. type: string readOnly: - description: readOnly defaults to false (read/write). - ReadOnly here will force the ReadOnly setting - in VolumeMounts. + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. type: boolean volumeID: description: volumeID uniquely identifies a @@ -7618,16 +7084,13 @@ spec: secrets, configmaps, and downward API properties: defaultMode: - description: defaultMode are the mode bits used - to set permissions on created files by default. - Must be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON - requires decimal values for mode bits. Directories - within the path are not affected by this setting. - This might be in conflict with other options - that affect the file mode, like fsGroup, and - the result can be other mode bits set. + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
format: int32 type: integer sources: @@ -7641,21 +7104,14 @@ spec: the configMap data to project properties: items: - description: items if unspecified, - each key-value pair in the Data - field of the referenced ConfigMap - will be projected into the volume - as a file whose name is the key - and content is the value. If specified, - the listed keys will be projected - into the specified paths, and unlisted - keys will not be present. If a key - is specified which is not present - in the ConfigMap, the volume setup - will error unless it is marked optional. - Paths must be relative and may not - contain the '..' path or start with - '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -7665,30 +7121,21 @@ spec: to project. type: string mode: - description: 'mode is Optional: - mode bits used to set permissions - on this file. Must be an octal - value between 0000 and 0777 - or a decimal value between - 0 and 511. YAML accepts both - octal and decimal values, - JSON requires decimal values - for mode bits. If not specified, - the volume defaultMode will - be used. This might be in - conflict with other options - that affect the file mode, - like fsGroup, and the result - can be other mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative - path of the file to map the - key to. May not be an absolute - path. May not contain the - path element '..'. May not - start with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key @@ -7696,10 +7143,10 @@ spec: type: object type: array name: - description: 'Name of the referent. + description: |- + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: optional specify whether @@ -7742,21 +7189,13 @@ spec: type: object x-kubernetes-map-type: atomic mode: - description: 'Optional: mode - bits used to set permissions - on this file, must be an octal - value between 0000 and 0777 - or a decimal value between - 0 and 511. YAML accepts both - octal and decimal values, - JSON requires decimal values - for mode bits. If not specified, - the volume defaultMode will - be used. 
This might be in - conflict with other options - that affect the file mode, - like fsGroup, and the result - can be other mode bits set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -7770,12 +7209,9 @@ spec: start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource - of the container: only resources - limits and requests (limits.cpu, - limits.memory, requests.cpu - and requests.memory) are currently - supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. properties: containerName: description: 'Container @@ -7810,21 +7246,14 @@ spec: the secret data to project properties: items: - description: items if unspecified, - each key-value pair in the Data - field of the referenced Secret will - be projected into the volume as - a file whose name is the key and - content is the value. If specified, - the listed keys will be projected - into the specified paths, and unlisted - keys will not be present. If a key - is specified which is not present - in the Secret, the volume setup - will error unless it is marked optional. - Paths must be relative and may not - contain the '..' path or start with - '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -7834,30 +7263,21 @@ spec: to project. type: string mode: - description: 'mode is Optional: - mode bits used to set permissions - on this file. Must be an octal - value between 0000 and 0777 - or a decimal value between - 0 and 511. YAML accepts both - octal and decimal values, - JSON requires decimal values - for mode bits. If not specified, - the volume defaultMode will - be used. This might be in - conflict with other options - that affect the file mode, - like fsGroup, and the result - can be other mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative - path of the file to map the - key to. May not be an absolute - path. May not contain the - path element '..'. May not - start with the string '..'. 
+ description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key @@ -7865,10 +7285,10 @@ spec: type: object type: array name: - description: 'Name of the referent. + description: |- + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: optional field specify @@ -7883,34 +7303,26 @@ spec: project properties: audience: - description: audience is the intended - audience of the token. A recipient - of a token must identify itself - with an identifier specified in - the audience of the token, and otherwise - should reject the token. The audience - defaults to the identifier of the - apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: expirationSeconds is - the requested duration of validity - of the service account token. As - the token approaches expiration, - the kubelet volume plugin will proactively - rotate the service account token. - The kubelet will start trying to - rotate the token if the token is - older than 80 percent of its time - to live or if the token is older - than 24 hours.Defaults to 1 hour + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative - to the mount point of the file to - project the token into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path @@ -7923,30 +7335,30 @@ spec: on the host that shares a pod's lifetime properties: group: - description: group to map volume access to Default - is no group + description: |- + group to map volume access to + Default is no group type: string readOnly: - description: readOnly here will force the Quobyte - volume to be mounted with read-only permissions. + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. 
type: boolean registry: - description: registry represents a single or - multiple Quobyte Registry services specified - as a string as host:port pair (multiple entries - are separated with commas) which acts as the - central registry for volumes + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes type: string tenant: - description: tenant owning the given Quobyte - volume in the Backend Used with dynamically - provisioned Quobyte volumes, value is set - by the plugin + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin type: string user: - description: user to map volume access to Defaults - to serivceaccount user + description: |- + user to map volume access to + Defaults to serivceaccount user type: string volume: description: volume is a string that references @@ -7957,60 +7369,68 @@ spec: - volume type: object rbd: - description: 'rbd represents a Rados Block Device - mount on the host that shares a pod''s lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md' + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: - description: 'fsType is the filesystem type - of the volume that you want to mount. Tip: - Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem - from compromising the machine' + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising the machine type: string image: - description: 'image is the rados image name. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it type: string keyring: - description: 'keyring is the path to key ring - for RBDUser. Default is /etc/ceph/keyring. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it type: string monitors: - description: 'monitors is a collection of Ceph - monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it items: type: string type: array pool: - description: 'pool is the rados pool name. Default - is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + description: |- + pool is the rados pool name. + Default is rbd. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it type: string readOnly: - description: 'readOnly here will force the ReadOnly - setting in VolumeMounts. Defaults to false. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it type: boolean secretRef: - description: 'secretRef is name of the authentication - secret for RBDUser. If provided overrides - keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it properties: name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic user: - description: 'user is the rados user name. Default - is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it type: string required: - image @@ -8021,10 +7441,11 @@ spec: volume attached and mounted on Kubernetes nodes. properties: fsType: - description: fsType is the filesystem type to - mount. Must be a filesystem type supported - by the host operating system. Ex. "ext4", - "xfs", "ntfs". Default is "xfs". + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". type: string gateway: description: gateway is the host address of @@ -8036,21 +7457,20 @@ spec: storage. type: string readOnly: - description: readOnly Defaults to false (read/write). - ReadOnly here will force the ReadOnly setting - in VolumeMounts. + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. type: boolean secretRef: - description: secretRef references to the secret - for ScaleIO user and other sensitive information. - If this is not provided, Login operation will - fail. + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. properties: name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic @@ -8059,9 +7479,9 @@ spec: SSL communication with Gateway, default false type: boolean storageMode: - description: storageMode indicates whether the - storage for a volume should be ThickProvisioned - or ThinProvisioned. Default is ThinProvisioned. 
+ description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. type: string storagePool: description: storagePool is the ScaleIO Storage @@ -8072,9 +7492,9 @@ spec: system as configured in ScaleIO. type: string volumeName: - description: volumeName is the name of a volume - already created in the ScaleIO system that - is associated with this volume source. + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. type: string required: - gateway @@ -8082,35 +7502,30 @@ spec: - system type: object secret: - description: 'secret represents a secret that should - populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret properties: defaultMode: - description: 'defaultMode is Optional: mode - bits used to set permissions on created files - by default. Must be an octal value between - 0000 and 0777 or a decimal value between 0 - and 511. YAML accepts both octal and decimal - values, JSON requires decimal values for mode - bits. Defaults to 0644. Directories within - the path are not affected by this setting. - This might be in conflict with other options - that affect the file mode, like fsGroup, and - the result can be other mode bits set.' + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer items: - description: items If unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the Secret, the volume - setup will error unless it is marked optional. - Paths must be relative and may not contain - the '..' path or start with '..'. + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -8119,26 +7534,21 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 - and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and - decimal values, JSON requires decimal - values for mode bits. 
If not specified, - the volume defaultMode will be used. - This might be in conflict with other - options that affect the file mode, like - fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path - of the file to map the key to. May not - be an absolute path. May not contain - the path element '..'. May not start - with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key @@ -8150,9 +7560,9 @@ spec: the Secret or its keys must be defined type: boolean secretName: - description: 'secretName is the name of the - secret in the pod''s namespace to use. More - info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret type: string type: object storageos: @@ -8160,46 +7570,42 @@ spec: attached and mounted on Kubernetes nodes. properties: fsType: - description: fsType is the filesystem type to - mount. Must be a filesystem type supported - by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. type: string readOnly: - description: readOnly defaults to false (read/write). - ReadOnly here will force the ReadOnly setting - in VolumeMounts. + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. type: boolean secretRef: - description: secretRef specifies the secret - to use for obtaining the StorageOS API credentials. If - not specified, default values will be attempted. + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. properties: name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, - kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string type: object x-kubernetes-map-type: atomic volumeName: - description: volumeName is the human-readable - name of the StorageOS volume. Volume names - are only unique within a namespace. + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. type: string volumeNamespace: - description: volumeNamespace specifies the scope - of the volume within StorageOS. 
If no namespace - is specified then the Pod's namespace will - be used. This allows the Kubernetes name - scoping to be mirrored within StorageOS for - tighter integration. Set VolumeName to any - name to override the default behaviour. Set - to "default" if you are not using namespaces - within StorageOS. Namespaces that do not pre-exist - within StorageOS will be created. + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. type: string type: object vsphereVolume: @@ -8207,11 +7613,10 @@ spec: volume attached and mounted on kubelets host machine properties: fsType: - description: fsType is filesystem type to mount. - Must be a filesystem type supported by the - host operating system. Ex. "ext4", "xfs", - "ntfs". Implicitly inferred to be "ext4" if - unspecified. + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. type: string storagePolicyID: description: storagePolicyID is the storage @@ -8238,14 +7643,14 @@ spec: type: object type: object ttlSecondsAfterFinished: - description: ttlSecondsAfterFinished limits the lifetime of a - Job that has finished execution (either Complete or Failed). - If this field is set, ttlSecondsAfterFinished after the Job - finishes, it is eligible to be automatically deleted. When the - Job is being deleted, its lifecycle guarantees (e.g. finalizers) - will be honored. If this field is unset, the Job won't be automatically - deleted. If this field is set to zero, the Job becomes eligible - to be deleted immediately after it finishes. + description: |- + ttlSecondsAfterFinished limits the lifetime of a Job that has finished + execution (either Complete or Failed). If this field is set, + ttlSecondsAfterFinished after the Job finishes, it is eligible to be + automatically deleted. When the Job is being deleted, its lifecycle + guarantees (e.g. finalizers) will be honored. If this field is unset, + the Job won't be automatically deleted. If this field is set to zero, + the Job becomes eligible to be deleted immediately after it finishes. format: int32 type: integer required: @@ -8295,9 +7700,9 @@ spec: description: ScaleTriggers reference the scaler that will be used properties: authenticationRef: - description: AuthenticationRef points to the TriggerAuthentication - or ClusterTriggerAuthentication object that is used to authenticate - the scaler with the environment + description: |- + AuthenticationRef points to the TriggerAuthentication or ClusterTriggerAuthentication object that + is used to authenticate the scaler with the environment properties: kind: description: Kind of the resource being referred to. 
Defaults @@ -8313,9 +7718,9 @@ spec: type: string type: object metricType: - description: MetricTargetType specifies the type of metric being - targeted, and should be either "Value", "AverageValue", or - "Utilization" + description: |- + MetricTargetType specifies the type of metric being targeted, and should be either + "Value", "AverageValue", or "Utilization" type: string name: type: string diff --git a/config/crd/bases/keda.sh_scaledobjects.yaml b/config/crd/bases/keda.sh_scaledobjects.yaml index c35bb68a1e2..f103979c8b0 100644 --- a/config/crd/bases/keda.sh_scaledobjects.yaml +++ b/config/crd/bases/keda.sh_scaledobjects.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: scaledobjects.keda.sh spec: group: keda.sh @@ -56,14 +56,19 @@ spec: description: ScaledObject is a specification for a ScaledObject resource properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -78,31 +83,29 @@ spec: scale config properties: behavior: - description: HorizontalPodAutoscalerBehavior configures the - scaling behavior of the target in both Up and Down directions - (scaleUp and scaleDown fields respectively). + description: |- + HorizontalPodAutoscalerBehavior configures the scaling behavior of the target + in both Up and Down directions (scaleUp and scaleDown fields respectively). properties: scaleDown: - description: scaleDown is scaling policy for scaling Down. - If not set, the default value is to allow to scale down - to minReplicas pods, with a 300 second stabilization - window (i.e., the highest recommendation for the last - 300sec is used). + description: |- + scaleDown is scaling policy for scaling Down. + If not set, the default value is to allow to scale down to minReplicas pods, with a + 300 second stabilization window (i.e., the highest recommendation for + the last 300sec is used). properties: policies: - description: policies is a list of potential scaling - polices which can be used during scaling. 
At least - one policy must be specified, otherwise the HPAScalingRules - will be discarded as invalid + description: |- + policies is a list of potential scaling polices which can be used during scaling. + At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid items: description: HPAScalingPolicy is a single policy which must hold true for a specified past interval. properties: periodSeconds: - description: periodSeconds specifies the window - of time for which the policy should hold true. - PeriodSeconds must be greater than zero and - less than or equal to 1800 (30 min). + description: |- + periodSeconds specifies the window of time for which the policy should hold true. + PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). format: int32 type: integer type: @@ -110,9 +113,9 @@ spec: policy. type: string value: - description: value contains the amount of change - which is permitted by the policy. It must - be greater than zero + description: |- + value contains the amount of change which is permitted by the policy. + It must be greater than zero format: int32 type: integer required: @@ -123,43 +126,41 @@ spec: type: array x-kubernetes-list-type: atomic selectPolicy: - description: selectPolicy is used to specify which - policy should be used. If not set, the default value - Max is used. + description: |- + selectPolicy is used to specify which policy should be used. + If not set, the default value Max is used. type: string stabilizationWindowSeconds: - description: 'stabilizationWindowSeconds is the number - of seconds for which past recommendations should - be considered while scaling up or scaling down. - StabilizationWindowSeconds must be greater than - or equal to zero and less than or equal to 3600 - (one hour). If not set, use the default values: + description: |- + stabilizationWindowSeconds is the number of seconds for which past recommendations should be + considered while scaling up or scaling down. + StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). + If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - - For scale down: 300 (i.e. the stabilization window - is 300 seconds long).' + - For scale down: 300 (i.e. the stabilization window is 300 seconds long). format: int32 type: integer type: object scaleUp: - description: 'scaleUp is scaling policy for scaling Up. - If not set, the default value is the higher of: * increase - no more than 4 pods per 60 seconds * double the number - of pods per 60 seconds No stabilization is used.' + description: |- + scaleUp is scaling policy for scaling Up. + If not set, the default value is the higher of: + * increase no more than 4 pods per 60 seconds + * double the number of pods per 60 seconds + No stabilization is used. properties: policies: - description: policies is a list of potential scaling - polices which can be used during scaling. At least - one policy must be specified, otherwise the HPAScalingRules - will be discarded as invalid + description: |- + policies is a list of potential scaling polices which can be used during scaling. + At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid items: description: HPAScalingPolicy is a single policy which must hold true for a specified past interval. properties: periodSeconds: - description: periodSeconds specifies the window - of time for which the policy should hold true. 
- PeriodSeconds must be greater than zero and - less than or equal to 1800 (30 min). + description: |- + periodSeconds specifies the window of time for which the policy should hold true. + PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). format: int32 type: integer type: @@ -167,9 +168,9 @@ spec: policy. type: string value: - description: value contains the amount of change - which is permitted by the policy. It must - be greater than zero + description: |- + value contains the amount of change which is permitted by the policy. + It must be greater than zero format: int32 type: integer required: @@ -180,20 +181,18 @@ spec: type: array x-kubernetes-list-type: atomic selectPolicy: - description: selectPolicy is used to specify which - policy should be used. If not set, the default value - Max is used. + description: |- + selectPolicy is used to specify which policy should be used. + If not set, the default value Max is used. type: string stabilizationWindowSeconds: - description: 'stabilizationWindowSeconds is the number - of seconds for which past recommendations should - be considered while scaling up or scaling down. - StabilizationWindowSeconds must be greater than - or equal to zero and less than or equal to 3600 - (one hour). If not set, use the default values: + description: |- + stabilizationWindowSeconds is the number of seconds for which past recommendations should be + considered while scaling up or scaling down. + StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). + If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - - For scale down: 300 (i.e. the stabilization window - is 300 seconds long).' + - For scale down: 300 (i.e. the stabilization window is 300 seconds long). format: int32 type: integer type: object @@ -212,9 +211,9 @@ spec: formula: type: string metricType: - description: MetricTargetType specifies the type of metric - being targeted, and should be either "Value", "AverageValue", - or "Utilization" + description: |- + MetricTargetType specifies the type of metric being targeted, and should be either + "Value", "AverageValue", or "Utilization" type: string target: type: string @@ -267,9 +266,9 @@ spec: description: ScaleTriggers reference the scaler that will be used properties: authenticationRef: - description: AuthenticationRef points to the TriggerAuthentication - or ClusterTriggerAuthentication object that is used to authenticate - the scaler with the environment + description: |- + AuthenticationRef points to the TriggerAuthentication or ClusterTriggerAuthentication object that + is used to authenticate the scaler with the environment properties: kind: description: Kind of the resource being referred to. 
Defaults @@ -285,9 +284,9 @@ spec: type: string type: object metricType: - description: MetricTargetType specifies the type of metric being - targeted, and should be either "Value", "AverageValue", or - "Utilization" + description: |- + MetricTargetType specifies the type of metric being targeted, and should be either + "Value", "AverageValue", or "Utilization" type: string name: type: string diff --git a/config/crd/bases/keda.sh_triggerauthentications.yaml b/config/crd/bases/keda.sh_triggerauthentications.yaml index d1fcdaf7226..6bb8748c429 100644 --- a/config/crd/bases/keda.sh_triggerauthentications.yaml +++ b/config/crd/bases/keda.sh_triggerauthentications.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: triggerauthentications.keda.sh spec: group: keda.sh @@ -44,14 +44,19 @@ spec: description: TriggerAuthentication defines how a trigger can authenticate properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -128,8 +133,9 @@ spec: - accessSecretKey type: object podIdentity: - description: AuthPodIdentity allows users to select the platform - native identity mechanism + description: |- + AuthPodIdentity allows users to select the platform native identity + mechanism properties: identityId: type: string @@ -226,8 +232,9 @@ spec: - tenantId type: object podIdentity: - description: AuthPodIdentity allows users to select the platform - native identity mechanism + description: |- + AuthPodIdentity allows users to select the platform native identity + mechanism properties: identityId: type: string @@ -295,8 +302,9 @@ spec: type: array env: items: - description: AuthEnvironment is used to authenticate using environment - variables in the destination ScaleTarget spec + description: |- + AuthEnvironment is used to authenticate using environment variables + in the destination ScaleTarget spec properties: containerName: type: string @@ -337,8 +345,9 @@ spec: - clientSecret type: object podIdentity: - description: AuthPodIdentity allows users to select the platform - native identity mechanism + description: |- + AuthPodIdentity allows users to select the platform native identity + mechanism properties: identityId: type: string @@ -452,8 +461,9 @@ spec: - secrets type: object podIdentity: - description: AuthPodIdentity allows users to select the platform native - identity mechanism + description: |- + AuthPodIdentity allows users to select the platform native identity + mechanism properties: identityId: type: string diff --git a/go.mod b/go.mod index dcbade891b3..0b955cce7a2 100644 --- a/go.mod +++ b/go.mod @@ -4,9 +4,9 @@ go 1.21 require ( cloud.google.com/go/compute/metadata v0.2.3 - cloud.google.com/go/monitoring v1.17.0 - cloud.google.com/go/secretmanager v1.11.4 - cloud.google.com/go/storage v1.36.0 + cloud.google.com/go/monitoring v1.17.1 + cloud.google.com/go/secretmanager v1.11.5 + cloud.google.com/go/storage v1.37.0 dario.cat/mergo v1.0.0 github.com/Azure/azure-amqp-common-go/v4 v4.2.0 github.com/Azure/azure-event-hubs-go/v3 v3.6.2 @@ -25,23 +25,23 @@ require ( github.com/IBM/sarama v1.42.1 github.com/arangodb/go-driver v1.6.1 github.com/aws/aws-sdk-go-v2 v1.24.1 - github.com/aws/aws-sdk-go-v2/config v1.26.4 - github.com/aws/aws-sdk-go-v2/credentials v1.16.15 + github.com/aws/aws-sdk-go-v2/config v1.26.6 + github.com/aws/aws-sdk-go-v2/credentials v1.16.16 github.com/aws/aws-sdk-go-v2/service/amp v1.22.1 github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.32.2 - github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.9 + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.27.0 github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.18.7 github.com/aws/aws-sdk-go-v2/service/kinesis v1.24.7 github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.26.2 github.com/aws/aws-sdk-go-v2/service/sqs v1.29.7 github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 github.com/bradleyfalzon/ghinstallation/v2 v2.9.0 - github.com/cloudevents/sdk-go/v2 v2.14.0 + github.com/cloudevents/sdk-go/v2 v2.15.0 github.com/denisenkom/go-mssqldb v0.12.3 github.com/dysnix/predictkube-libs v0.0.4-0.20230109175007-5a82fccd31c7 github.com/dysnix/predictkube-proto v0.0.0-20220713123213-7135dce1e9c9 github.com/elastic/go-elasticsearch/v7 v7.17.10 - github.com/expr-lang/expr v1.15.8 + github.com/expr-lang/expr v1.16.0 github.com/go-kivik/couchdb/v3 v3.4.1 github.com/go-kivik/kivik/v3 v3.2.4 
github.com/go-logr/logr v1.4.1
@@ -52,9 +52,9 @@ require (
 github.com/golang/mock v1.6.0
 github.com/google/go-cmp v0.6.0
 github.com/google/go-github/v50 v50.2.0
- github.com/google/uuid v1.5.0
+ github.com/google/uuid v1.6.0
 github.com/gophercloud/gophercloud v1.8.0
- github.com/hashicorp/vault/api v1.10.0
+ github.com/hashicorp/vault/api v1.11.0
 github.com/influxdata/influxdb-client-go/v2 v2.13.0
 github.com/jackc/pgx/v5 v5.5.2
 github.com/joho/godotenv v1.5.1
@@ -63,7 +63,7 @@ require (
 github.com/mitchellh/hashstructure v1.1.0
 github.com/newrelic/newrelic-client-go v1.1.0
 github.com/onsi/ginkgo/v2 v2.15.0
- github.com/onsi/gomega v1.31.0
+ github.com/onsi/gomega v1.31.1
 github.com/open-policy-agent/cert-controller v0.10.1
 github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5
 github.com/pkg/errors v0.9.1
@@ -83,31 +83,31 @@ require (
 github.com/xdg/scram v1.0.5
 github.com/xhit/go-str2duration/v2 v2.1.0
 github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a
- go.etcd.io/etcd/client/v3 v3.5.11
+ go.etcd.io/etcd/client/v3 v3.5.12
 go.mongodb.org/mongo-driver v1.13.1
 go.opentelemetry.io/otel v1.22.0
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.45.0
 go.opentelemetry.io/otel/metric v1.22.0
 golang.org/x/oauth2 v0.16.0
 golang.org/x/sync v0.6.0
- google.golang.org/api v0.156.0
- google.golang.org/grpc v1.60.1
+ google.golang.org/api v0.161.0
+ google.golang.org/grpc v1.61.0
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0
 google.golang.org/protobuf v1.32.0
- k8s.io/api v0.29.0
- k8s.io/apimachinery v0.29.0
- k8s.io/apiserver v0.29.0
- k8s.io/client-go v0.29.0
- k8s.io/code-generator v0.29.0
- k8s.io/component-base v0.29.0
- k8s.io/klog/v2 v2.110.1
- k8s.io/kube-openapi v0.0.0-20240103051144-eec4567ac022
- k8s.io/metrics v0.29.0
+ k8s.io/api v0.29.1
+ k8s.io/apimachinery v0.29.1
+ k8s.io/apiserver v0.29.1
+ k8s.io/client-go v0.29.1
+ k8s.io/code-generator v0.29.1
+ k8s.io/component-base v0.29.1
+ k8s.io/klog/v2 v2.120.1
+ k8s.io/kube-openapi v0.0.0-20240126223410-2919ad4fcfec
+ k8s.io/metrics v0.29.1
 k8s.io/utils v0.0.0-20240102154912-e7106e64919e
- knative.dev/pkg v0.0.0-20240116073220-b488e7be5902
- sigs.k8s.io/controller-runtime v0.16.3
- sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240102165319-7f316f1309b1
- sigs.k8s.io/controller-tools v0.13.0
+ knative.dev/pkg v0.0.0-20240201013110-e85c3cf6d5f1
+ sigs.k8s.io/controller-runtime v0.17.0
+ sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240201105228-4000e996a202
+ sigs.k8s.io/controller-tools v0.14.0
 sigs.k8s.io/custom-metrics-apiserver v1.28.1-0.20240103150633-c0d09c9b6dd1
 sigs.k8s.io/kustomize/kustomize/v5 v5.3.0
 )
@@ -178,11 +178,11 @@ require (
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect
- github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 // indirect
 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect
 github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.11 // indirect
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.18.6 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect
 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect
 github.com/aws/smithy-go v1.19.0 // indirect
 github.com/beorn7/perks
v1.0.1 // indirect @@ -202,7 +202,7 @@ require ( github.com/eapache/queue v1.1.0 // indirect github.com/emicklei/go-restful/v3 v3.11.2 // indirect github.com/evanphx/json-patch v5.8.1+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.8.1 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fatih/color v1.16.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect @@ -313,11 +313,11 @@ require ( github.com/xdg-go/stringprep v1.0.4 // indirect github.com/xdg/stringprep v1.0.3 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.etcd.io/etcd/api/v3 v3.5.11 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.11 // indirect + go.etcd.io/etcd/api/v3 v3.5.12 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.12 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect go.opentelemetry.io/otel/sdk v1.22.0 // indirect @@ -342,8 +342,8 @@ require ( gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe // indirect gopkg.in/evanphx/json-patch.v5 v5.8.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect diff --git a/go.sum b/go.sum index a8211e3da90..139cec8d2b4 100644 --- a/go.sum +++ b/go.sum @@ -500,8 +500,8 @@ cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtD cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= cloud.google.com/go/monitoring v1.16.0/go.mod h1:Ptp15HgAyM1fNICAojDMoNc/wUmn67mLHQfyqbw+poY= -cloud.google.com/go/monitoring v1.17.0 h1:blrdvF0MkPPivSO041ihul7rFMhXdVp8Uq7F59DKXTU= -cloud.google.com/go/monitoring v1.17.0/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw= +cloud.google.com/go/monitoring v1.17.1 h1:xqcNr+JXmFMCPXnent/i1r0De6zrcqzgcMy5X1xa5vg= +cloud.google.com/go/monitoring v1.17.1/go.mod h1:SJzPMakCF0GHOuKEH/r4hxVKF04zl+cRPQyc3d/fqII= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= @@ -644,8 +644,8 @@ cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENin cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= cloud.google.com/go/secretmanager v1.10.0/go.mod 
h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= -cloud.google.com/go/secretmanager v1.11.4 h1:krnX9qpG2kR2fJ+u+uNyNo+ACVhplIAS4Pu7u+4gd+k= -cloud.google.com/go/secretmanager v1.11.4/go.mod h1:wreJlbS9Zdq21lMzWmJ0XhWW2ZxgPeahsqeV/vZoJ3w= +cloud.google.com/go/secretmanager v1.11.5 h1:82fpF5vBBvu9XW4qj0FU2C6qVMtj1RM/XHwKXUEAfYY= +cloud.google.com/go/secretmanager v1.11.5/go.mod h1:eAGv+DaCHkeVyQi0BeXgAHOU0RdrMeZIASKc+S7VqH4= cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= @@ -711,8 +711,8 @@ cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= -cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8= -cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= +cloud.google.com/go/storage v1.37.0 h1:WI8CsaFO8Q9KjPVtsZ5Cmi0dXV25zMoX0FklT7c3Jm4= +cloud.google.com/go/storage v1.37.0/go.mod h1:i34TiT2IhiNDmcj65PqwCjcoUX7Z5pLzS8DEmoiFq1k= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= @@ -919,11 +919,11 @@ github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0o github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo= github.com/aws/aws-sdk-go-v2/config v1.17.2/go.mod h1:jumS/AMwul4WaG8vyXsF6kUndG9zndR+yfYBwl4i9ds= -github.com/aws/aws-sdk-go-v2/config v1.26.4 h1:Juj7LhtxNudNUlfX22K5AnLafO+v4eq9PA3VWSCIQs4= -github.com/aws/aws-sdk-go-v2/config v1.26.4/go.mod h1:tioqQ7wvxMYnTDpoTTLHhV3Zh+z261i/f2oz+ds8eNI= +github.com/aws/aws-sdk-go-v2/config v1.26.6 h1:Z/7w9bUqlRI0FFQpetVuFYEsjzE3h7fpU6HuGmfPL/o= +github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4= github.com/aws/aws-sdk-go-v2/credentials v1.12.15/go.mod h1:41zTC6U/78fUD7ZCa5NymTJANDjfqySg5YEAYVFl2Ic= -github.com/aws/aws-sdk-go-v2/credentials v1.16.15 h1:P0/m1LU08MF2kRzx4P//+7lNjiJod1z4xI2WpWhdpTQ= -github.com/aws/aws-sdk-go-v2/credentials v1.16.15/go.mod h1:pgtMCf7Dx4GWw5EpHOTc2Sy17LIP0A0N2C9nQ83pQ/0= +github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= +github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.13/go.mod h1:y0eXmsNBFIVjUE8ZBjES8myOHlMsXDz7qGT93+MVdjk= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= @@ -934,14 +934,14 @@ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 
v2.4.13/go.mod h1:lB12mkZqCSo github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.20/go.mod h1:bfTcsThj5a9P5pIGRy0QudJ8k4+issxXX+O6Djnd5Cs= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 h1:n3GDfwqF2tzEkXlv5cuy4iy7LpKDtqDMcNLfZDu9rls= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= github.com/aws/aws-sdk-go-v2/service/amp v1.22.1 h1:09O7NJKub+PsLAi1S+j/melSkjQROVV2RsDGqt3i34k= github.com/aws/aws-sdk-go-v2/service/amp v1.22.1/go.mod h1:zXysWREb7sWv3Mr80IBeQmbbWtBD4OvA5r/W+E+aSyA= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.32.2 h1:vQfCIHSDouEvbE4EuDrlCGKcrtABEqF3cMt61nGEV4g= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.32.2/go.mod h1:3ToKMEhVj+Q+HzZ8Hqin6LdAKtsi3zVXVNUPpQMd+Xk= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.9 h1:LQy/ItO8N4sd2beDIFuXnr7y02mHJGebFrYnrNZH5E4= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.9/go.mod h1:N5tqZcYMM0N1PN7UQYJNWuGyO886OfnMhf/3MAbqMcI= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.27.0 h1:e/HPLjLas04wKnmCUSSXD44cYdVjT/Dcd9CkmlYNyNU= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.27.0/go.mod h1:N5tqZcYMM0N1PN7UQYJNWuGyO886OfnMhf/3MAbqMcI= github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.18.7 h1:srShyROqxzC7p18Ws8mqM2sqxJO/8L3Kpiqf+NboJLg= github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.18.7/go.mod h1:9efZgg4nJCGRp91MuHhkwd2kvyp7PWLRYYk5WjEQ5ts= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= @@ -958,8 +958,8 @@ github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.26.2/go.mod h1:qutL00aW8G github.com/aws/aws-sdk-go-v2/service/sqs v1.29.7 h1:tRNrFDGRm81e6nTX5Q4CFblea99eAfm0dxXazGpLceU= github.com/aws/aws-sdk-go-v2/service/sqs v1.29.7/go.mod h1:8GWUDux5Z2h6z2efAtr54RdHXtLm8sq7Rg85ZNY/CZM= github.com/aws/aws-sdk-go-v2/service/sso v1.11.18/go.mod h1:ytmEi5+qwcSNcV2pVA8PIb1DnKT/0Bu/K4nfJHwoM6c= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.6 h1:dGrs+Q/WzhsiUKh82SfTVN66QzyulXuMDTV/G8ZxOac= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.6/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.1/go.mod h1:NY+G+8PW0ISyJ7/6t5mgOe6qpJiwZa9Jix05WPscJjg= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= @@ -1006,8 +1006,8 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cloudevents/sdk-go/v2 v2.14.0 
h1:Nrob4FwVgi5L4tV9lhjzZcjYqFVyJzsA56CwPaPfv6s= -github.com/cloudevents/sdk-go/v2 v2.14.0/go.mod h1:xDmKfzNjM8gBvjaF8ijFjM1VYOVUEeUfapHMUX1T5To= +github.com/cloudevents/sdk-go/v2 v2.15.0 h1:aKnhLQhyoJXqEECQdOIZnbZ9VupqlidE6hedugDGr+I= +github.com/cloudevents/sdk-go/v2 v2.15.0/go.mod h1:lL7kSWAE/V8VI4Wh0jbL2v/jvqsm6tjmaQBSvxcv4uE= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= @@ -1083,10 +1083,10 @@ github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBF github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v5.8.1+incompatible h1:2toJaoe7/rNa1zpeQx0UnVEjqk6z2ecyA20V/zg8vTU= github.com/evanphx/json-patch v5.8.1+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.8.1 h1:iPEdwg0XayoS+E7Mth9JxwUtOgyVxnDTXHtKhZPlZxA= -github.com/evanphx/json-patch/v5 v5.8.1/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= -github.com/expr-lang/expr v1.15.8 h1:FL8+d3rSSP4tmK9o+vKfSMqqpGL8n15pEPiHcnBpxoI= -github.com/expr-lang/expr v1.15.8/go.mod h1:uCkhfG+x7fcZ5A5sXHKuQ07jGZRl6J0FCAaf2k4PtVQ= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/expr-lang/expr v1.16.0 h1:BQabx+PbjsL2PEQwkJ4GIn3CcuUh8flduHhJ0lHjWwE= +github.com/expr-lang/expr v1.16.0/go.mod h1:uCkhfG+x7fcZ5A5sXHKuQ07jGZRl6J0FCAaf2k4PtVQ= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= @@ -1306,8 +1306,8 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= @@ -1402,8 +1402,8 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/vault/api v1.10.0 h1:/US7sIjWN6Imp4o/Rj1Ce2Nr5bki/AXi9vAW3p2tOJQ= -github.com/hashicorp/vault/api v1.10.0/go.mod 
h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/hashicorp/vault/api v1.11.0 h1:AChWByeHf4/P9sX3Y1B7vFsQhZO2BgQiCMQ2SA1P1UY= +github.com/hashicorp/vault/api v1.11.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -1582,8 +1582,8 @@ github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042 github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= -github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= +github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= +github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= github.com/open-policy-agent/cert-controller v0.10.1 h1:RXSYoyn8FdCenWecRP//UV5nbVfmstNpj4kHQFkvPK4= github.com/open-policy-agent/cert-controller v0.10.1/go.mod h1:4uRbBLY5DsPOog+a9pqk3JLxuuhrWsbUedQW65HcLTI= github.com/open-policy-agent/frameworks/constraint v0.0.0-20230822235116-f0b62fe1e4c4 h1:5dum5SLEz+95JDLkMls7Z7IDPjvSq3UhJSFe4f5einQ= @@ -1795,14 +1795,14 @@ gitlab.com/flimzy/testy v0.11.0/go.mod h1:tcu652e6AyD5wS8q2JRUI+j5SlwIYsl3yq3ulH go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.etcd.io/etcd/api/v3 v3.5.11 h1:B54KwXbWDHyD3XYAwprxNzTe7vlhR69LuBgZnMVvS7E= -go.etcd.io/etcd/api/v3 v3.5.11/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4= -go.etcd.io/etcd/client/pkg/v3 v3.5.11 h1:bT2xVspdiCj2910T0V+/KHcVKjkUrCZVtk8J2JF2z1A= -go.etcd.io/etcd/client/pkg/v3 v3.5.11/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= +go.etcd.io/etcd/api/v3 v3.5.12 h1:W4sw5ZoU2Juc9gBWuLk5U6fHfNVyY1WC5g9uiXZio/c= +go.etcd.io/etcd/api/v3 v3.5.12/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4= +go.etcd.io/etcd/client/pkg/v3 v3.5.12 h1:EYDL6pWwyOsylrQyLp2w+HkQ46ATiOvoEdMarindU2A= +go.etcd.io/etcd/client/pkg/v3 v3.5.12/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA= -go.etcd.io/etcd/client/v3 v3.5.11 h1:ajWtgoNSZJ1gmS8k+icvPtqsqEav+iUorF7b0qozgUU= -go.etcd.io/etcd/client/v3 v3.5.11/go.mod h1:a6xQUEqFJ8vztO1agJh/KQKOMfFI8og52ZconzcDJwE= +go.etcd.io/etcd/client/v3 v3.5.12 h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg= +go.etcd.io/etcd/client/v3 v3.5.12/go.mod h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw= go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM= go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs= go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= @@ -1820,10 +1820,10 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= 
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.45.0 h1:+RbSCde0ERway5FwKvXR3aRJIFeDu9rtwC6E7BC6uoM= @@ -2241,8 +2241,8 @@ google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2 google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= -google.golang.org/api v0.156.0 h1:yloYcGbBtVYjLKQe4enCunxvwn3s2w/XPrrhVf6MsvQ= -google.golang.org/api v0.156.0/go.mod h1:bUSmn4KFO0Q+69zo9CNIDp4Psi6BqM0np0CbzKRSiSY= +google.golang.org/api v0.161.0 h1:oYzk/bs26WN10AV7iU7MVJVXBH8oCPS2hHyBiEeFoSU= +google.golang.org/api v0.161.0/go.mod h1:0mu0TpK33qnydLvWqbImq2b1eQ5FHRSDCBzAxX9ZHyw= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -2409,8 +2409,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go. 
google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:RdyHbowztCGQySiCvQPgWQWgWhGnouTdCflKoDBt32U= google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= -google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac h1:OZkkudMUu9LVQMCoRUbI/1p5VCo9BOrlvkqMvWtqa6s= -google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:B5xPO//w8qmBDjGReYLpR6UJPnkldGkCSMoH/2vxJeg= +google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe h1:0poefMBYvYbs7g5UkjS6HcxBPaTRAmznle9jnxYoAI8= +google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= @@ -2423,8 +2423,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go. google.golang.org/genproto/googleapis/rpc v0.0.0-20230920183334-c177e329c48b/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe h1:bQnxqljG/wqi4NTXu2+DJ3n7APcEA882QZ1JvhQAq9o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -2515,8 +2515,8 @@ k8s.io/metrics v0.28.5 h1:Yh29Yi/x3ojK9ofYuY76Ny0IJdXeTZ9TiVjqAd9MssY= k8s.io/metrics v0.28.5/go.mod h1:QR2NHc/RWIZz9DqrV4Yt1eArZ6uexORBHRocpv5MQ2Q= k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -knative.dev/pkg v0.0.0-20240116073220-b488e7be5902 h1:H6+JJN23fhwYWCHY1339sY6uhIyoUwDy1a8dN233fdk= -knative.dev/pkg v0.0.0-20240116073220-b488e7be5902/go.mod h1:NYk8mMYoLkO7CQWnNkti4YGGnvLxN6MIDbUvtgeo0C0= +knative.dev/pkg v0.0.0-20240201013110-e85c3cf6d5f1 h1:xGmWQyA+hwyFT1BN5RWi8wx0DxwZQZni8SPN/FZ02kI= +knative.dev/pkg v0.0.0-20240201013110-e85c3cf6d5f1/go.mod h1:cGCJe6wkr0vQMAXTaUHi0XA/12JbxSTK15TnyBmn7ms= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.2.0/go.mod 
h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= @@ -2576,12 +2576,12 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= -sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= -sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= -sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240102165319-7f316f1309b1 h1:1/GQWB9rabeYd3oANeTQH7OHrtShvVgH0FmqHWBpR6I= -sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240102165319-7f316f1309b1/go.mod h1:TF/lVLWS+JNNaVqJuDDictY2hZSXSsIHCx4FClMvqFg= -sigs.k8s.io/controller-tools v0.13.0 h1:NfrvuZ4bxyolhDBt/rCZhDnx3M2hzlhgo5n3Iv2RykI= -sigs.k8s.io/controller-tools v0.13.0/go.mod h1:5vw3En2NazbejQGCeWKRrE7q4P+CW8/klfVqP8QZkgA= +sigs.k8s.io/controller-runtime v0.17.0 h1:fjJQf8Ukya+VjogLO6/bNX9HE6Y2xpsO5+fyS26ur/s= +sigs.k8s.io/controller-runtime v0.17.0/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= +sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240201105228-4000e996a202 h1:iTQSA9XwAEiPjoO/z3tZ4vAlbn6UXShvbRKWnN6P52w= +sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240201105228-4000e996a202/go.mod h1:TF/lVLWS+JNNaVqJuDDictY2hZSXSsIHCx4FClMvqFg= +sigs.k8s.io/controller-tools v0.14.0 h1:rnNoCC5wSXlrNoBKKzL70LNJKIQKEzT6lloG6/LF73A= +sigs.k8s.io/controller-tools v0.14.0/go.mod h1:TV7uOtNNnnR72SpzhStvPkoS/U5ir0nMudrkrC4M9Sc= sigs.k8s.io/custom-metrics-apiserver v1.28.1-0.20240103150633-c0d09c9b6dd1 h1:HC57TVRzncE2giocqSMfw/ZSVSO9RxBDd5P6olipw1A= sigs.k8s.io/custom-metrics-apiserver v1.28.1-0.20240103150633-c0d09c9b6dd1/go.mod h1:Ior2OoZaYHIYR4J/OETrkLln7LzbMd4906WT6Shvpdw= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go index d82a3be7a59..be257cb277f 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -48,7 +48,9 @@ type AlertPolicyCallOptions struct { func defaultAlertPolicyGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go index a1224c63f6f..4de74e773e1 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go index 50d7edfc45f..e8c40364753 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go index 26dcd67547a..e18457af4ab 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -50,7 +50,9 @@ type GroupCallOptions struct { func defaultGroupGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go index 0cc29ae4960..f38f7239839 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
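[Editorial note, not part of the diff] The client-option hunks above add internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443") and internaloption.WithDefaultUniverseDomain("googleapis.com") to each vendored monitoring client constructor. These are defaults only; a caller can still pin an explicit endpoint at construction time, and newer releases of google.golang.org/api also expose option.WithUniverseDomain for non-default universes (availability depends on the pinned api module version). The sketch below is illustrative only; the choice of AlertPolicyClient and the endpoint string are just the library's standard defaults.

// Sketch only: overriding the endpoint that the new vendored defaults would
// otherwise derive from the universe-domain template.
package sketch

import (
	"context"
	"fmt"

	monitoring "cloud.google.com/go/monitoring/apiv3/v2"
	"google.golang.org/api/option"
)

func newAlertPolicyClient(ctx context.Context) (*monitoring.AlertPolicyClient, error) {
	// An explicit endpoint passed by the caller takes precedence over the
	// WithDefaultEndpointTemplate / WithDefaultUniverseDomain defaults added
	// in the hunks above.
	c, err := monitoring.NewAlertPolicyClient(ctx,
		option.WithEndpoint("monitoring.googleapis.com:443"))
	if err != nil {
		return nil, fmt.Errorf("NewAlertPolicyClient: %w", err)
	}
	return c, nil
}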
@@ -54,7 +54,9 @@ type MetricCallOptions struct { func defaultMetricGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go index 2b4c429742a..a5995ab1474 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/alert.proto @@ -237,7 +237,7 @@ type AlertPolicy struct { // Required if the policy exists. The resource name for this policy. The // format is: // - // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] // // `[ALERT_POLICY_ID]` is assigned by Cloud Monitoring when the policy // is created. When calling the @@ -310,7 +310,7 @@ type AlertPolicy struct { // [google.monitoring.v3.NotificationChannelService.ListNotificationChannels] // method. The format of the entries in this field is: // - // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] NotificationChannels []string `protobuf:"bytes,14,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"` // A read-only record of the creation of the alerting policy. If provided // in a call to create or update, this field will be ignored. @@ -546,7 +546,7 @@ type AlertPolicy_Condition struct { // Required if the condition exists. The unique resource name for this // condition. Its format is: // - // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] // // `[CONDITION_ID]` is assigned by Cloud Monitoring when the // condition is created as part of a new or updated alerting policy. @@ -576,6 +576,7 @@ type AlertPolicy_Condition struct { // Only one of the following condition types will be specified. // // Types that are assignable to Condition: + // // *AlertPolicy_Condition_ConditionThreshold // *AlertPolicy_Condition_ConditionAbsent // *AlertPolicy_Condition_ConditionMatchedLog @@ -795,6 +796,7 @@ type AlertPolicy_Condition_Trigger struct { // A type of trigger. // // Types that are assignable to Type: + // // *AlertPolicy_Condition_Trigger_Count // *AlertPolicy_Condition_Trigger_Percent Type isAlertPolicy_Condition_Trigger_Type `protobuf_oneof:"type"` @@ -1623,7 +1625,7 @@ type AlertPolicy_AlertStrategy_NotificationChannelStrategy struct { // notification_channels field of this AlertPolicy. 
// The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] NotificationChannelNames []string `protobuf:"bytes,1,rep,name=notification_channel_names,json=notificationChannelNames,proto3" json:"notification_channel_names,omitempty"` // The frequency at which to send reminder notifications for open // incidents. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go index 5045498c807..d4785261cc8 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/alert_service.proto @@ -52,7 +52,7 @@ type CreateAlertPolicyRequest struct { // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which // to create the alerting policy. The format is: // - // projects/[PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] // // Note that this field names the parent container in which the alerting // policy will be written, not the name of the created policy. |name| must be @@ -122,7 +122,7 @@ type GetAlertPolicyRequest struct { // Required. The alerting policy to retrieve. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` } @@ -175,7 +175,7 @@ type ListAlertPoliciesRequest struct { // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose // alert policies are to be listed. The format is: // - // projects/[PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] // // Note that this field names the parent container in which the alerting // policies to be listed are stored. To retrieve a single alerting policy @@ -361,10 +361,10 @@ type UpdateAlertPolicyRequest struct { // existing policy. It is the same as deleting the existing policy and // adding the supplied policy, except for the following: // - // + The new policy will have the same `[ALERT_POLICY_ID]` as the former + // - The new policy will have the same `[ALERT_POLICY_ID]` as the former // policy. This gives you continuity with the former policy in your // notifications and incidents. - // + Conditions in the new policy will keep their former `[CONDITION_ID]` if + // - Conditions in the new policy will keep their former `[CONDITION_ID]` if // the supplied condition includes the `name` field with that // `[CONDITION_ID]`. If the supplied condition omits the `name` field, // then a new `[CONDITION_ID]` is created. @@ -430,7 +430,7 @@ type DeleteAlertPolicyRequest struct { // Required. The alerting policy to delete. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] // // For more information, see [AlertPolicy][google.monitoring.v3.AlertPolicy]. 
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go index 739509158f2..7ef01677a67 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/common.proto @@ -547,6 +547,7 @@ type TypedValue struct { // The typed value field. // // Types that are assignable to Value: + // // *TypedValue_BoolValue // *TypedValue_Int64Value // *TypedValue_DoubleValue diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go index ecfd38d17da..fe910f8b43c 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/dropped_labels.proto diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go index c36317ada25..fa3a54702a9 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/group.proto @@ -69,7 +69,7 @@ type Group struct { // Output only. The name of this group. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] // // When creating a group, this field is ignored and a new name is created // consisting of the project specified in the call to `CreateGroup` @@ -79,7 +79,7 @@ type Group struct { DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` // The name of the group's parent, if it has one. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] // // For groups with no parent, `parent_name` is the empty string, `""`. ParentName string `protobuf:"bytes,3,opt,name=parent_name,json=parentName,proto3" json:"parent_name,omitempty"` diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go index 74e4b4e6f44..6cd61b0d9f4 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/group_service.proto @@ -51,13 +51,14 @@ type ListGroupsRequest struct { // Required. 
The [project](https://cloud.google.com/monitoring/api/v3#project_name) // whose groups are to be listed. The format is: // - // projects/[PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` // An optional filter consisting of a single group name. The filters limit // the groups returned based on their parent-child relationship with the // specified group. If no filter is specified, all groups are returned. // // Types that are assignable to Filter: + // // *ListGroupsRequest_ChildrenOfGroup // *ListGroupsRequest_AncestorsOfGroup // *ListGroupsRequest_DescendantsOfGroup @@ -158,7 +159,7 @@ type isListGroupsRequest_Filter interface { type ListGroupsRequest_ChildrenOfGroup struct { // A group name. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] // // Returns groups whose `parent_name` field contains the group // name. If no groups have this parent, the results are empty. @@ -168,7 +169,7 @@ type ListGroupsRequest_ChildrenOfGroup struct { type ListGroupsRequest_AncestorsOfGroup struct { // A group name. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] // // Returns groups that are ancestors of the specified group. // The groups are returned in order, starting with the immediate parent and @@ -180,7 +181,7 @@ type ListGroupsRequest_AncestorsOfGroup struct { type ListGroupsRequest_DescendantsOfGroup struct { // A group name. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] // // Returns the descendants of the specified group. This is a superset of // the results returned by the `children_of_group` filter, and includes @@ -262,7 +263,7 @@ type GetGroupRequest struct { // Required. The group to retrieve. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` } @@ -314,7 +315,7 @@ type CreateGroupRequest struct { // Required. The [project](https://cloud.google.com/monitoring/api/v3#project_name) in // which to create the group. The format is: // - // projects/[PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` // Required. A group definition. It is an error to define the `name` field because // the system assigns the name. @@ -444,7 +445,7 @@ type DeleteGroupRequest struct { // Required. The group to delete. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` // If this field is true, then the request means to delete a group with all // its descendants. Otherwise, the request means to delete a group only when @@ -506,7 +507,7 @@ type ListGroupMembersRequest struct { // Required. The group whose members are listed. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` // A positive number that is the maximum number of results to return. 
PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` @@ -521,7 +522,7 @@ type ListGroupMembersRequest struct { // example, to return only resources representing Compute Engine VM instances, // use this filter: // - // `resource.type = "gce_instance"` + // `resource.type = "gce_instance"` Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` // An optional time interval for which results should be returned. Only // members that were part of the group during the specified interval are diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go index 2c0759951ff..0fb4b914f19 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/metric.proto @@ -367,6 +367,7 @@ type LabelValue struct { // The label value can be a bool, int64, or string. // // Types that are assignable to Value: + // // *LabelValue_BoolValue // *LabelValue_Int64Value // *LabelValue_StringValue diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go index 61c57cc7fe6..c852599639f 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/metric_service.proto @@ -104,14 +104,14 @@ type ListMonitoredResourceDescriptorsRequest struct { // Required. The [project](https://cloud.google.com/monitoring/api/v3#project_name) on // which to execute the request. The format is: // - // projects/[PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` // An optional [filter](https://cloud.google.com/monitoring/api/v3/filters) // describing the descriptors to be returned. The filter can reference the // descriptor's type and labels. For example, the following filter returns // only Google Compute Engine descriptors that have an `id` label: // - // resource.type = starts_with("gce_") AND resource.label:id + // resource.type = starts_with("gce_") AND resource.label:id Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` // A positive number that is the maximum number of results to return. PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` @@ -250,7 +250,7 @@ type GetMonitoredResourceDescriptorRequest struct { // Required. The monitored resource descriptor to get. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE] + // projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE] // // The `[RESOURCE_TYPE]` is a predefined type, such as // `cloudsql_database`. @@ -305,7 +305,7 @@ type ListMetricDescriptorsRequest struct { // Required. The [project](https://cloud.google.com/monitoring/api/v3#project_name) on // which to execute the request. 
The format is: // - // projects/[PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` // If this field is empty, all custom and // system-defined metric descriptors are returned. @@ -314,7 +314,7 @@ type ListMetricDescriptorsRequest struct { // returned. For example, the following filter matches all // [custom metrics](https://cloud.google.com/monitoring/custom-metrics): // - // metric.type = starts_with("custom.googleapis.com/") + // metric.type = starts_with("custom.googleapis.com/") Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` // A positive number that is the maximum number of results to return. PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` @@ -453,7 +453,7 @@ type GetMetricDescriptorRequest struct { // Required. The metric descriptor on which to execute the request. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] + // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] // // An example value of `[METRIC_ID]` is // `"compute.googleapis.com/instance/disk/read_bytes_count"`. @@ -508,7 +508,8 @@ type CreateMetricDescriptorRequest struct { // Required. The [project](https://cloud.google.com/monitoring/api/v3#project_name) on // which to execute the request. The format is: // 4 - // projects/[PROJECT_ID_OR_NUMBER] + // + // projects/[PROJECT_ID_OR_NUMBER] Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` // Required. The new [custom metric](https://cloud.google.com/monitoring/custom-metrics) // descriptor. @@ -569,7 +570,7 @@ type DeleteMetricDescriptorRequest struct { // Required. The metric descriptor on which to execute the request. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] + // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] // // An example of `[METRIC_ID]` is: // `"custom.googleapis.com/my_test_metric"`. @@ -624,17 +625,17 @@ type ListTimeSeriesRequest struct { // Required. The [project](https://cloud.google.com/monitoring/api/v3#project_name), // organization or folder on which to execute the request. The format is: // - // projects/[PROJECT_ID_OR_NUMBER] - // organizations/[ORGANIZATION_ID] - // folders/[FOLDER_ID] + // projects/[PROJECT_ID_OR_NUMBER] + // organizations/[ORGANIZATION_ID] + // folders/[FOLDER_ID] Name string `protobuf:"bytes,10,opt,name=name,proto3" json:"name,omitempty"` // Required. A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) // that specifies which time series should be returned. The filter must // specify a single metric type, and can additionally specify metric labels // and other information. For example: // - // metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND - // metric.labels.instance_name = "my-instance-name" + // metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND + // metric.labels.instance_name = "my-instance-name" Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` // Required. The time interval for which results should be returned. Only time series // that contain data points in the specified interval are included @@ -854,7 +855,7 @@ type CreateTimeSeriesRequest struct { // Required. The [project](https://cloud.google.com/monitoring/api/v3#project_name) on // which to execute the request. 
The format is: // - // projects/[PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` // Required. The new data to be added to a list of time series. // Adds at most one data point to each of several time series. The new data @@ -1052,7 +1053,7 @@ type QueryTimeSeriesRequest struct { // Required. The [project](https://cloud.google.com/monitoring/api/v3#project_name) on // which to execute the request. The format is: // - // projects/[PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Required. The query in the [Monitoring Query // Language](https://cloud.google.com/monitoring/mql/reference) format. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go index 3515c01688b..6432dcdc3ec 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/mutation_record.proto diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go index 0e9ed407b8e..7e926dc8f92 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/notification.proto @@ -114,7 +114,7 @@ type NotificationChannelDescriptor struct { // The full REST resource name for this descriptor. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[TYPE] + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[TYPE] // // In the above, `[TYPE]` is the value of the `type` field. Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` @@ -243,7 +243,7 @@ type NotificationChannel struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // The full REST resource name for this channel. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] // // The `[CHANNEL_ID]` is automatically assigned by the server on creation. Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go index 1de07534476..bbafb89803c 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/notification_service.proto @@ -52,7 +52,7 @@ type ListNotificationChannelDescriptorsRequest struct { // Required. The REST resource name of the parent from which to retrieve // the notification channel descriptors. The expected syntax is: // - // projects/[PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] // // Note that this // [names](https://cloud.google.com/monitoring/api/v3#project_name) the parent @@ -194,7 +194,7 @@ type GetNotificationChannelDescriptorRequest struct { // Required. The channel type for which to execute the request. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE] + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE] Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` } @@ -247,7 +247,7 @@ type CreateNotificationChannelRequest struct { // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which // to execute the request. The format is: // - // projects/[PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] // // This names the container into which the channel will be // written, this does not name the newly created channel. The resulting @@ -314,7 +314,7 @@ type ListNotificationChannelsRequest struct { // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which // to execute the request. The format is: // - // projects/[PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] // // This names the container // in which to look for the notification channels; it does not name a @@ -492,7 +492,7 @@ type GetNotificationChannelRequest struct { // Required. The channel for which to execute the request. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` } @@ -604,7 +604,7 @@ type DeleteNotificationChannelRequest struct { // Required. The channel for which to execute the request. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` // If true, the notification channel will be deleted regardless of its // use in alert policies (the policies will be updated to remove the diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go index 0b21998dc06..f3f4029cf1f 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/query_service.proto diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go index 2a521a84ff1..b4f87bd6ec8 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/service.proto @@ -110,13 +110,14 @@ type Service struct { // Resource name for this Service. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Name used for UI elements listing this Service. DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` // REQUIRED. Service-identifying atoms specifying the underlying service. // // Types that are assignable to Identifier: + // // *Service_Custom_ // *Service_AppEngine_ // *Service_CloudEndpoints_ @@ -305,7 +306,7 @@ type ServiceLevelObjective struct { // Resource name for this `ServiceLevelObjective`. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Name used for UI elements listing this SLO. DisplayName string `protobuf:"bytes,11,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` @@ -319,6 +320,7 @@ type ServiceLevelObjective struct { // The time period over which the objective will be evaluated. // // Types that are assignable to Period: + // // *ServiceLevelObjective_RollingPeriod // *ServiceLevelObjective_CalendarPeriod Period isServiceLevelObjective_Period `protobuf_oneof:"period"` @@ -463,6 +465,7 @@ type ServiceLevelIndicator struct { // time windows // // Types that are assignable to Type: + // // *ServiceLevelIndicator_BasicSli // *ServiceLevelIndicator_RequestBased // *ServiceLevelIndicator_WindowsBased @@ -586,6 +589,7 @@ type BasicSli struct { // This SLI can be evaluated on the basis of availability or latency. // // Types that are assignable to SliCriteria: + // // *BasicSli_Availability // *BasicSli_Latency SliCriteria isBasicSli_SliCriteria `protobuf_oneof:"sli_criteria"` @@ -753,6 +757,7 @@ type RequestBasedSli struct { // The means to compute a ratio of `good_service` to `total_service`. // // Types that are assignable to Method: + // // *RequestBasedSli_GoodTotalRatio // *RequestBasedSli_DistributionCut Method isRequestBasedSli_Method `protobuf_oneof:"method"` @@ -991,6 +996,7 @@ type WindowsBasedSli struct { // The criterion to use for evaluating window goodness. // // Types that are assignable to WindowCriterion: + // // *WindowsBasedSli_GoodBadMetricFilter // *WindowsBasedSli_GoodTotalRatioThreshold // *WindowsBasedSli_MetricMeanInRange @@ -1634,6 +1640,7 @@ type WindowsBasedSli_PerformanceThreshold struct { // performance over a window. 
// // Types that are assignable to Type: + // // *WindowsBasedSli_PerformanceThreshold_Performance // *WindowsBasedSli_PerformanceThreshold_BasicSliPerformance Type isWindowsBasedSli_PerformanceThreshold_Type `protobuf_oneof:"type"` diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go index 15da64f3d2a..63ef6606081 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/service_service.proto @@ -51,7 +51,7 @@ type CreateServiceRequest struct { // Required. Resource [name](https://cloud.google.com/monitoring/api/v3#project_name) of // the parent workspace. The format is: // - // projects/[PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Optional. The Service id to use for this Service. If omitted, an id will be // generated instead. Must match the pattern `[a-z0-9\-]+` @@ -121,7 +121,7 @@ type GetServiceRequest struct { // Required. Resource name of the `Service`. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -174,22 +174,22 @@ type ListServicesRequest struct { // [project](https://cloud.google.com/monitoring/api/v3#project_name) or a // Monitoring Workspace. The formats are: // - // projects/[PROJECT_ID_OR_NUMBER] - // workspaces/[HOST_PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] + // workspaces/[HOST_PROJECT_ID_OR_NUMBER] Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // A filter specifying what `Service`s to return. The filter currently // supports the following fields: // - // - `identifier_case` - // - `app_engine.module_id` - // - `cloud_endpoints.service` (reserved for future use) - // - `mesh_istio.mesh_uid` - // - `mesh_istio.service_namespace` - // - `mesh_istio.service_name` - // - `cluster_istio.location` (deprecated) - // - `cluster_istio.cluster_name` (deprecated) - // - `cluster_istio.service_namespace` (deprecated) - // - `cluster_istio.service_name` (deprecated) + // - `identifier_case` + // - `app_engine.module_id` + // - `cloud_endpoints.service` (reserved for future use) + // - `mesh_istio.mesh_uid` + // - `mesh_istio.service_namespace` + // - `mesh_istio.service_name` + // - `cluster_istio.location` (deprecated) + // - `cluster_istio.cluster_name` (deprecated) + // - `cluster_istio.service_namespace` (deprecated) + // - `cluster_istio.service_name` (deprecated) // // `identifier_case` refers to which option in the identifier oneof is // populated. For example, the filter `identifier_case = "CUSTOM"` would match @@ -393,7 +393,7 @@ type DeleteServiceRequest struct { // Required. Resource name of the `Service` to delete. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -444,7 +444,7 @@ type CreateServiceLevelObjectiveRequest struct { // Required. 
Resource name of the parent `Service`. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Optional. The ServiceLevelObjective id to use for this // ServiceLevelObjective. If omitted, an id will be generated instead. Must @@ -517,7 +517,7 @@ type GetServiceLevelObjectiveRequest struct { // Required. Resource name of the `ServiceLevelObjective` to get. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // View of the `ServiceLevelObjective` to return. If `DEFAULT`, return the // `ServiceLevelObjective` as originally defined. If `EXPLICIT` and the @@ -581,8 +581,8 @@ type ListServiceLevelObjectivesRequest struct { // Required. Resource name of the parent containing the listed SLOs, either a // project or a Monitoring Workspace. The formats are: // - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] - // workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/- + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + // workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // A filter specifying what `ServiceLevelObjective`s to return. Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` @@ -794,7 +794,7 @@ type DeleteServiceLevelObjectiveRequest struct { // Required. Resource name of the `ServiceLevelObjective` to delete. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go index 39e5d08f310..502a0dc985f 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/snooze.proto @@ -47,7 +47,7 @@ type Snooze struct { // Required. The name of the `Snooze`. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] + // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] // // The ID of the `Snooze` will be generated by the system. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -136,7 +136,7 @@ type Snooze_Criteria struct { // The specific `AlertPolicy` names for the alert that should be snoozed. // The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID] + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID] // // There is a limit of 16 policies per snooze. This limit is checked during // snooze creation. 
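[Editorial note, not part of the diff] The monitoringpb hunks above are protoc-gen-go v1.31.0 to v1.32.0 regenerations: they reindent the resource-name examples, switch "+" bullets to "-", and insert a blank comment line before each "Types that are assignable to ..." list, with no behavioral change. For readers unfamiliar with that comment pattern, the sketch below shows how such a oneof is typically consumed, using the AlertPolicy_Condition wrapper types named in the regenerated comments; the GetCondition accessor follows the usual protoc-gen-go convention and is an assumption here, not something shown in the diff.

// Sketch of handling the Condition oneof documented in the regenerated
// comments above.
package sketch

import (
	monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
)

// describeCondition reports which oneof variant a condition carries.
func describeCondition(cond *monitoringpb.AlertPolicy_Condition) string {
	switch cond.GetCondition().(type) {
	case *monitoringpb.AlertPolicy_Condition_ConditionThreshold:
		return "metric threshold"
	case *monitoringpb.AlertPolicy_Condition_ConditionAbsent:
		return "metric absence"
	case *monitoringpb.AlertPolicy_Condition_ConditionMatchedLog:
		return "matched log"
	default:
		return "other condition type"
	}
}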
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go index 82e491b81fd..ba658638e9a 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/snooze_service.proto @@ -52,7 +52,7 @@ type CreateSnoozeRequest struct { // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which // a `Snooze` should be created. The format is: // - // projects/[PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Required. The `Snooze` to create. Omit the `name` field, as it will be // filled in by the API. @@ -116,20 +116,20 @@ type ListSnoozesRequest struct { // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose // `Snooze`s should be listed. The format is: // - // projects/[PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Optional. Optional filter to restrict results to the given criteria. The // following fields are supported. // - // * `interval.start_time` - // * `interval.end_time` + // - `interval.start_time` + // - `interval.end_time` // // For example: // - // ``` - // interval.start_time > "2022-03-11T00:00:00-08:00" AND - // interval.end_time < "2022-03-12T00:00:00-08:00" - // ``` + // ``` + // interval.start_time > "2022-03-11T00:00:00-08:00" AND + // interval.end_time < "2022-03-12T00:00:00-08:00" + // ``` Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` // Optional. The maximum number of results to return for a single query. The // server may further constrain the maximum number of results returned in a @@ -271,7 +271,7 @@ type GetSnoozeRequest struct { // Required. The ID of the `Snooze` to retrieve. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] + // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -338,19 +338,19 @@ type UpdateSnoozeRequest struct { // // For each field listed in `update_mask`: // - // * If the `Snooze` object supplied in the `UpdateSnoozeRequest` has a + // - If the `Snooze` object supplied in the `UpdateSnoozeRequest` has a // value for that field, the value of the field in the existing `Snooze` // will be set to the value of the field in the supplied `Snooze`. - // * If the field does not have a value in the supplied `Snooze`, the field + // - If the field does not have a value in the supplied `Snooze`, the field // in the existing `Snooze` is set to its default value. // // Fields not listed retain their existing value. // // The following are the field names that are accepted in `update_mask`: // - // * `display_name` - // * `interval.start_time` - // * `interval.end_time` + // - `display_name` + // - `interval.start_time` + // - `interval.end_time` // // That said, the start time and end time of the `Snooze` determines which // fields can legally be updated. 
Before attempting an update, users should diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go index 9327a5c057d..f36b8b08c95 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/span_context.proto @@ -49,7 +49,7 @@ type SpanContext struct { // The resource name of the span. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] + // projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] // // `[TRACE_ID]` is a unique identifier for a trace within a project; // it is a 32-character hexadecimal encoding of a 16-byte array. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go index 8a8991464a7..ecba7a85ff6 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/uptime.proto @@ -626,7 +626,7 @@ type InternalChecker struct { // A unique resource name for this InternalChecker. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/internalCheckers/[INTERNAL_CHECKER_ID] + // projects/[PROJECT_ID_OR_NUMBER]/internalCheckers/[INTERNAL_CHECKER_ID] // // `[PROJECT_ID_OR_NUMBER]` is the Cloud Monitoring Metrics Scope project for // the Uptime check config associated with the internal checker. @@ -731,7 +731,7 @@ type UptimeCheckConfig struct { // A unique resource name for this Uptime check configuration. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] // // `[PROJECT_ID_OR_NUMBER]` is the Workspace host project associated with the // Uptime check. @@ -747,12 +747,14 @@ type UptimeCheckConfig struct { // The resource the check is checking. Required. // // Types that are assignable to Resource: + // // *UptimeCheckConfig_MonitoredResource // *UptimeCheckConfig_ResourceGroup_ Resource isUptimeCheckConfig_Resource `protobuf_oneof:"resource"` // The type of Uptime check request. // // Types that are assignable to CheckRequestType: + // // *UptimeCheckConfig_HttpCheck_ // *UptimeCheckConfig_TcpCheck_ CheckRequestType isUptimeCheckConfig_CheckRequestType `protobuf_oneof:"check_request_type"` @@ -957,14 +959,15 @@ type UptimeCheckConfig_MonitoredResource struct { // resource](https://cloud.google.com/monitoring/api/resources) associated // with the configuration. 
// The following monitored resource types are valid for this field: - // `uptime_url`, - // `gce_instance`, - // `gae_app`, - // `aws_ec2_instance`, - // `aws_elb_load_balancer` - // `k8s_service` - // `servicedirectory_service` - // `cloud_run_revision` + // + // `uptime_url`, + // `gce_instance`, + // `gae_app`, + // `aws_ec2_instance`, + // `aws_elb_load_balancer` + // `k8s_service` + // `servicedirectory_service` + // `cloud_run_revision` MonitoredResource *monitoredres.MonitoredResource `protobuf:"bytes,3,opt,name=monitored_resource,json=monitoredResource,proto3,oneof"` } @@ -1465,6 +1468,7 @@ type UptimeCheckConfig_ContentMatcher struct { // `JsonPathMatcher`; not used for other options. // // Types that are assignable to AdditionalMatcherInfo: + // // *UptimeCheckConfig_ContentMatcher_JsonPathMatcher_ AdditionalMatcherInfo isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo `protobuf_oneof:"additional_matcher_info"` } @@ -1612,6 +1616,7 @@ type UptimeCheckConfig_HttpCheck_ResponseStatusCode struct { // Either a specific value or a class of status codes. // // Types that are assignable to StatusCode: + // // *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue // *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_ StatusCode isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode `protobuf_oneof:"status_code"` diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go index c4a66477770..665c8483d68 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/monitoring/v3/uptime_service.proto @@ -52,7 +52,7 @@ type ListUptimeCheckConfigsRequest struct { // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose // Uptime check configurations are listed. The format is: // - // projects/[PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // If provided, this field specifies the criteria that must be met by // uptime checks to be included in the response. @@ -211,7 +211,7 @@ type GetUptimeCheckConfigRequest struct { // Required. The Uptime check configuration to retrieve. The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -264,7 +264,7 @@ type CreateUptimeCheckConfigRequest struct { // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which // to create the Uptime check. The format is: // - // projects/[PROJECT_ID_OR_NUMBER] + // projects/[PROJECT_ID_OR_NUMBER] Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Required. The new Uptime check configuration. UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,2,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"` @@ -395,7 +395,7 @@ type DeleteUptimeCheckConfigRequest struct { // Required. The Uptime check configuration to delete. 
The format is: // - // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go index 8425674d52b..b1e6939033b 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -53,7 +53,9 @@ type NotificationChannelCallOptions struct { func defaultNotificationChannelGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go index f6a91cb2327..bd8abc9b274 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -42,7 +42,9 @@ type QueryCallOptions struct { func defaultQueryGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go index bfd0902c4b9..d1177307866 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -53,7 +53,9 @@ type ServiceMonitoringCallOptions struct { func defaultServiceMonitoringGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go index 943292bf436..958406f3421 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -47,7 +47,9 @@ type SnoozeCallOptions struct { func defaultSnoozeGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go index 9501382828a..05fc20350d0 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -49,7 +49,9 @@ type UptimeCheckCallOptions struct { func defaultUptimeCheckGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), diff --git a/vendor/cloud.google.com/go/monitoring/internal/version.go b/vendor/cloud.google.com/go/monitoring/internal/version.go index 6607c695d5c..2d7d8774486 100644 --- a/vendor/cloud.google.com/go/monitoring/internal/version.go +++ b/vendor/cloud.google.com/go/monitoring/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "1.17.0" +const Version = "1.17.1" diff --git a/vendor/cloud.google.com/go/secretmanager/apiv1/auxiliary.go b/vendor/cloud.google.com/go/secretmanager/apiv1/auxiliary.go new file mode 100644 index 00000000000..1e842c9fd65 --- /dev/null +++ b/vendor/cloud.google.com/go/secretmanager/apiv1/auxiliary.go @@ -0,0 +1,116 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package secretmanager + +import ( + secretmanagerpb "cloud.google.com/go/secretmanager/apiv1/secretmanagerpb" + "google.golang.org/api/iterator" +) + +// SecretIterator manages a stream of *secretmanagerpb.Secret. +type SecretIterator struct { + items []*secretmanagerpb.Secret + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*secretmanagerpb.Secret, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SecretIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *SecretIterator) Next() (*secretmanagerpb.Secret, error) { + var item *secretmanagerpb.Secret + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SecretIterator) bufLen() int { + return len(it.items) +} + +func (it *SecretIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// SecretVersionIterator manages a stream of *secretmanagerpb.SecretVersion. +type SecretVersionIterator struct { + items []*secretmanagerpb.SecretVersion + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. 
+ InternalFetch func(pageSize int, pageToken string) (results []*secretmanagerpb.SecretVersion, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SecretVersionIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *SecretVersionIterator) Next() (*secretmanagerpb.SecretVersion, error) { + var item *secretmanagerpb.SecretVersion + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SecretVersionIterator) bufLen() int { + return len(it.items) +} + +func (it *SecretVersionIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/secretmanager/apiv1/doc.go b/vendor/cloud.google.com/go/secretmanager/apiv1/doc.go index 04582cf8920..4255deed394 100644 --- a/vendor/cloud.google.com/go/secretmanager/apiv1/doc.go +++ b/vendor/cloud.google.com/go/secretmanager/apiv1/doc.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -68,22 +68,16 @@ // } // defer c.Close() // -// req := &secretmanagerpb.ListSecretsRequest{ +// req := &secretmanagerpb.AccessSecretVersionRequest{ // // TODO: Fill request struct fields. -// // See https://pkg.go.dev/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb#ListSecretsRequest. +// // See https://pkg.go.dev/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb#AccessSecretVersionRequest. // } -// it := c.ListSecrets(ctx, req) -// for { -// resp, err := it.Next() -// if err == iterator.Done { -// break -// } -// if err != nil { -// // TODO: Handle error. -// } -// // TODO: Use resp. -// _ = resp +// resp, err := c.AccessSecretVersion(ctx, req) +// if err != nil { +// // TODO: Handle error. // } +// // TODO: Use resp. +// _ = resp // // # Use of Context // diff --git a/vendor/cloud.google.com/go/secretmanager/apiv1/secret_manager_client.go b/vendor/cloud.google.com/go/secretmanager/apiv1/secret_manager_client.go index d65d36f1241..4e866e66d35 100644 --- a/vendor/cloud.google.com/go/secretmanager/apiv1/secret_manager_client.go +++ b/vendor/cloud.google.com/go/secretmanager/apiv1/secret_manager_client.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
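The doc.go hunk above swaps the package example from paging through ListSecrets to a single AccessSecretVersion call (the iterators now live in auxiliary.go with unchanged behavior). A minimal sketch of that call; the project, secret, and version names are illustrative and not taken from this diff:

package main

import (
	"context"
	"fmt"
	"log"

	secretmanager "cloud.google.com/go/secretmanager/apiv1"
	"cloud.google.com/go/secretmanager/apiv1/secretmanagerpb"
)

func main() {
	ctx := context.Background()
	c, err := secretmanager.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// "latest" resolves to the newest enabled version of the secret.
	resp, err := c.AccessSecretVersion(ctx, &secretmanagerpb.AccessSecretVersionRequest{
		Name: "projects/my-project/secrets/my-secret/versions/latest",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("secret payload is %d bytes\n", len(resp.Payload.Data))
}
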
@@ -65,7 +65,9 @@ type CallOptions struct { func defaultGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("secretmanager.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("secretmanager.UNIVERSE_DOMAIN:443"), internaloption.WithDefaultMTLSEndpoint("secretmanager.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://secretmanager.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), @@ -486,7 +488,9 @@ func NewRESTClient(ctx context.Context, opts ...option.ClientOption) (*Client, e func defaultRESTClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("https://secretmanager.googleapis.com"), + internaloption.WithDefaultEndpointTemplate("https://secretmanager.UNIVERSE_DOMAIN"), internaloption.WithDefaultMTLSEndpoint("https://secretmanager.mtls.googleapis.com"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://secretmanager.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), } @@ -1875,97 +1879,3 @@ func (c *restClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP } return resp, nil } - -// SecretIterator manages a stream of *secretmanagerpb.Secret. -type SecretIterator struct { - items []*secretmanagerpb.Secret - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*secretmanagerpb.Secret, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *SecretIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *SecretIterator) Next() (*secretmanagerpb.Secret, error) { - var item *secretmanagerpb.Secret - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *SecretIterator) bufLen() int { - return len(it.items) -} - -func (it *SecretIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// SecretVersionIterator manages a stream of *secretmanagerpb.SecretVersion. -type SecretVersionIterator struct { - items []*secretmanagerpb.SecretVersion - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. 
- // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*secretmanagerpb.SecretVersion, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *SecretVersionIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *SecretVersionIterator) Next() (*secretmanagerpb.SecretVersion, error) { - var item *secretmanagerpb.SecretVersion - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *SecretVersionIterator) bufLen() int { - return len(it.items) -} - -func (it *SecretVersionIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} diff --git a/vendor/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb/resources.pb.go b/vendor/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb/resources.pb.go index 05e82823750..1d874cd7150 100644 --- a/vendor/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb/resources.pb.go +++ b/vendor/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb/resources.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/cloud/secretmanager/v1/resources.proto @@ -155,6 +155,7 @@ type Secret struct { // can be reversed. // // Types that are assignable to Expiration: + // // *Secret_ExpireTime // *Secret_Ttl Expiration isSecret_Expiration `protobuf_oneof:"expiration"` @@ -460,6 +461,7 @@ type Replication struct { // The replication policy for this secret. // // Types that are assignable to Replication: + // // *Replication_Automatic_ // *Replication_UserManaged_ Replication isReplication_Replication `protobuf_oneof:"replication"` @@ -611,6 +613,7 @@ type ReplicationStatus struct { // [SecretVersion][google.cloud.secretmanager.v1.SecretVersion]. // // Types that are assignable to ReplicationStatus: + // // *ReplicationStatus_Automatic // *ReplicationStatus_UserManaged ReplicationStatus isReplicationStatus_ReplicationStatus `protobuf_oneof:"replication_status"` diff --git a/vendor/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb/service.pb.go b/vendor/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb/service.pb.go index ac0cf8128ea..9fd8664019d 100644 --- a/vendor/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb/service.pb.go +++ b/vendor/cloud.google.com/go/secretmanager/apiv1/secretmanagerpb/service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 +// protoc-gen-go v1.32.0 // protoc v4.23.2 // source: google/cloud/secretmanager/v1/service.proto diff --git a/vendor/cloud.google.com/go/secretmanager/internal/version.go b/vendor/cloud.google.com/go/secretmanager/internal/version.go index 1a7f5cb6cd4..db61bfa8f38 100644 --- a/vendor/cloud.google.com/go/secretmanager/internal/version.go +++ b/vendor/cloud.google.com/go/secretmanager/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "1.11.4" +const Version = "1.11.5" diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index 8b3fa6fc483..90007144de9 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,6 +1,19 @@ # Changes +## [1.37.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.36.0...storage/v1.37.0) (2024-01-24) + + +### Features + +* **storage:** Add maxAttempts RetryOption ([#9215](https://github.com/googleapis/google-cloud-go/issues/9215)) ([e348cc5](https://github.com/googleapis/google-cloud-go/commit/e348cc5340e127b530e8ee4664fd995e6f038b2c)) +* **storage:** Support IncludeFoldersAsPrefixes ([#9211](https://github.com/googleapis/google-cloud-go/issues/9211)) ([98c9d71](https://github.com/googleapis/google-cloud-go/commit/98c9d7157306de5134547a67c084c248484c9a51)) + + +### Bug Fixes + +* **storage:** Migrate deprecated proto dep ([#9232](https://github.com/googleapis/google-cloud-go/issues/9232)) ([ebbb610](https://github.com/googleapis/google-cloud-go/commit/ebbb610e0f58035fd01ad7893971382d8bbd092f)), refs [#9189](https://github.com/googleapis/google-cloud-go/issues/9189) + ## [1.36.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.1...storage/v1.36.0) (2023-12-14) diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go index a51cf9c086b..e9e95993011 100644 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -420,6 +420,11 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q ctx = setUserProjectMetadata(ctx, s.userProject) } fetch := func(pageSize int, pageToken string) (token string, err error) { + // IncludeFoldersAsPrefixes is not supported for gRPC + // TODO: remove this when support is added in the proto. + if it.query.IncludeFoldersAsPrefixes { + return "", status.Errorf(codes.Unimplemented, "storage: IncludeFoldersAsPrefixes is not supported in gRPC") + } var objects []*storagepb.Object var gitr *gapic.ObjectIterator err = run(it.ctx, func(ctx context.Context) error { diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go index 0e157e4ba99..fe081b60b0a 100644 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -348,6 +348,7 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q req.Versions(it.query.Versions) req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter) req.MatchGlob(it.query.MatchGlob) + req.IncludeFoldersAsPrefixes(it.query.IncludeFoldersAsPrefixes) if selection := it.query.toFieldSelection(); selection != "" { req.Fields("nextPageToken", googleapi.Field(selection)) } diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go index c6fd4b341ff..415b2b585b5 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
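Putting the two storage v1.37.0 features above together: WithMaxAttempts caps the total number of attempts for retryable calls, and IncludeFoldersAsPrefixes surfaces Folders and Managed Folders as prefixes when Delimiter is "/" (HTTP/JSON transport only for now; the gRPC hunk above returns Unimplemented). A minimal sketch with an illustrative bucket name:

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // default HTTP/JSON transport
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// At most 5 attempts in total (the initial call plus 4 retries).
	bkt := client.Bucket("my-bucket").Retryer(storage.WithMaxAttempts(5))

	// IncludeFoldersAsPrefixes only applies when Delimiter is "/".
	it := bkt.Objects(ctx, &storage.Query{
		Delimiter:                "/",
		IncludeFoldersAsPrefixes: true,
	})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		if attrs.Prefix != "" {
			fmt.Println("prefix:", attrs.Prefix)
		}
	}
}
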
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go index 8159589ea9b..ed5089ac7fd 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go index 64819950600..25b122a74fa 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go index 2d5cf890e06..e49d8a53288 100644 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.36.0" +const Version = "1.37.0" diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go index dc79fd88bbc..1b52eb5d2c6 100644 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -70,6 +70,9 @@ func run(ctx context.Context, call func(ctx context.Context) error, retry *retry return internal.Retry(ctx, bo, func() (stop bool, err error) { ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts) err = call(ctxWithHeaders) + if retry.maxAttempts != nil && attempts >= *retry.maxAttempts { + return true, err + } attempts++ return !errorFunc(err), err }) diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index 78ecbf0e892..f047ef9cd4a 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -1620,6 +1620,11 @@ type Query struct { // for syntax details. When Delimiter is set in conjunction with MatchGlob, // it must be set to /. MatchGlob string + + // IncludeFoldersAsPrefixes includes Folders and Managed Folders in the set of + // prefixes returned by the query. Only applicable if Delimiter is set to /. + // IncludeFoldersAsPrefixes is not yet implemented in the gRPC API. + IncludeFoldersAsPrefixes bool } // attrToFieldMap maps the field names of ObjectAttrs to the underlying field @@ -2076,6 +2081,26 @@ func (wb *withBackoff) apply(config *retryConfig) { config.backoff = &wb.backoff } +// WithMaxAttempts configures the maximum number of times an API call can be made +// in the case of retryable errors. +// For example, if you set WithMaxAttempts(5), the operation will be attempted up to 5 +// times total (initial call plus 4 retries). +// Without this setting, operations will continue retrying indefinitely +// until either the context is canceled or a deadline is reached. 
+func WithMaxAttempts(maxAttempts int) RetryOption { + return &withMaxAttempts{ + maxAttempts: maxAttempts, + } +} + +type withMaxAttempts struct { + maxAttempts int +} + +func (wb *withMaxAttempts) apply(config *retryConfig) { + config.maxAttempts = &wb.maxAttempts +} + // RetryPolicy describes the available policies for which operations should be // retried. The default is `RetryIdempotent`. type RetryPolicy int @@ -2148,6 +2173,7 @@ type retryConfig struct { backoff *gax.Backoff policy RetryPolicy shouldRetry func(err error) bool + maxAttempts *int } func (r *retryConfig) clone() *retryConfig { @@ -2168,6 +2194,7 @@ func (r *retryConfig) clone() *retryConfig { backoff: bo, policy: r.policy, shouldRetry: r.shouldRetry, + maxAttempts: r.maxAttempts, } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index 0cd31f00ecc..31feb8b1c4d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,12 @@ +# v1.26.6 (2024-01-22) + +* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.5 (2024-01-18) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.26.4 (2024-01-16) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index 6de4538a4d5..51f6e332018 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.26.4" +const goModuleVersion = "1.26.6" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 6671a41c92a..989c4ea39be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.16.16 (2024-01-18) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.16.15 (2024-01-16) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index c8bcd2dceb5..fe92184dc52 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.15" +const goModuleVersion = "1.16.16" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md index adbbf4adcf4..9a0b5e03776 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.7.3 (2024-01-22) + +* **Bug Fix**: Remove invalid escaping of shared config values. 
All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons. + # v1.7.2 (2023-12-08) * **Bug Fix**: Correct loading of [services *] sections into shared config. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go index f0673f3a024..1d68f8346d4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -3,4 +3,4 @@ package ini // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.7.2" +const goModuleVersion = "1.7.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go index 661588c2276..ed77d083517 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go @@ -67,12 +67,8 @@ func unquote(s string) string { // applies various legacy conversions to property values: // - remote wrapping single/doublequotes -// - expand escaped quote and newline sequences func legacyStrconv(s string) string { s = unquote(s) - s = strings.ReplaceAll(s, `\"`, `"`) - s = strings.ReplaceAll(s, `\'`, `'`) - s = strings.ReplaceAll(s, `\n`, "\n") return s } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md index 221f0524e8f..abf2876a63a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.27.0 (2024-01-19) + +* **Feature**: This release adds support for including ApproximateCreationDateTimePrecision configurations in EnableKinesisStreamingDestination API, adds the same as an optional field in the response of DescribeKinesisStreamingDestination, and adds support for a new UpdateKinesisStreamingDestination API. + # v1.26.9 (2024-01-17) * **Documentation**: Updating note for enabling streams for UpdateTable. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go index 81b43d04dfe..61f0972ee73 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go @@ -42,6 +42,9 @@ type DisableKinesisStreamingDestinationInput struct { // This member is required. TableName *string + // The source for the Kinesis streaming information that is being enabled. + EnableKinesisStreamingConfiguration *types.EnableKinesisStreamingConfiguration + noSmithyDocumentSerde } @@ -50,6 +53,9 @@ type DisableKinesisStreamingDestinationOutput struct { // The current status of the replication. DestinationStatus types.DestinationStatus + // The destination for the Kinesis streaming information that is being enabled. + EnableKinesisStreamingConfiguration *types.EnableKinesisStreamingConfiguration + // The ARN for the specific Kinesis data stream. 
StreamArn *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go index 6eee7606b65..9532e9e42a7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go @@ -44,6 +44,9 @@ type EnableKinesisStreamingDestinationInput struct { // This member is required. TableName *string + // The source for the Kinesis streaming information that is being enabled. + EnableKinesisStreamingConfiguration *types.EnableKinesisStreamingConfiguration + noSmithyDocumentSerde } @@ -52,6 +55,9 @@ type EnableKinesisStreamingDestinationOutput struct { // The current status of the replication. DestinationStatus types.DestinationStatus + // The destination for the Kinesis streaming information that is being enabled. + EnableKinesisStreamingConfiguration *types.EnableKinesisStreamingConfiguration + // The ARN for the specific Kinesis data stream. StreamArn *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go new file mode 100644 index 00000000000..4c727233ec6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go @@ -0,0 +1,207 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// The command to update the Kinesis stream destination. +func (c *Client) UpdateKinesisStreamingDestination(ctx context.Context, params *UpdateKinesisStreamingDestinationInput, optFns ...func(*Options)) (*UpdateKinesisStreamingDestinationOutput, error) { + if params == nil { + params = &UpdateKinesisStreamingDestinationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateKinesisStreamingDestination", params, optFns, c.addOperationUpdateKinesisStreamingDestinationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateKinesisStreamingDestinationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateKinesisStreamingDestinationInput struct { + + // The ARN for the Kinesis stream input. + // + // This member is required. + StreamArn *string + + // The table name for the Kinesis streaming destination input. + // + // This member is required. + TableName *string + + // The command to update the Kinesis stream configuration. + UpdateKinesisStreamingConfiguration *types.UpdateKinesisStreamingConfiguration + + noSmithyDocumentSerde +} + +type UpdateKinesisStreamingDestinationOutput struct { + + // The status of the attempt to update the Kinesis streaming destination output. + DestinationStatus types.DestinationStatus + + // The ARN for the Kinesis stream input. + StreamArn *string + + // The table name for the Kinesis streaming destination output. + TableName *string + + // The command to update the Kinesis streaming destination configuration. 
+ UpdateKinesisStreamingConfiguration *types.UpdateKinesisStreamingConfiguration + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateKinesisStreamingDestinationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateKinesisStreamingDestination{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateKinesisStreamingDestination{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateKinesisStreamingDestination"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateKinesisStreamingDestinationDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateKinesisStreamingDestinationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateKinesisStreamingDestination(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func addOpUpdateKinesisStreamingDestinationDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpUpdateKinesisStreamingDestinationDiscoverEndpoint, + 
EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpUpdateKinesisStreamingDestinationDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*UpdateKinesisStreamingDestinationInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opUpdateKinesisStreamingDestination(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateKinesisStreamingDestination", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go index 190bd78d1b9..5ff1bd9e917 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go @@ -94,8 +94,9 @@ type UpdateTableInput struct { SSESpecification *types.SSESpecification // Represents the DynamoDB Streams configuration for the table. You receive a - // ValidationException if you try to enable a stream on a table that already has a - // stream, or if you try to disable a stream on a table that doesn't have a stream. + // ResourceInUseException if you try to enable a stream on a table that already has + // a stream, or if you try to disable a stream on a table that doesn't have a + // stream. StreamSpecification *types.StreamSpecification // The table class of the table to be updated. 
Valid values are STANDARD and diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go index ccb22609a36..002ce8b9d6c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go @@ -6023,6 +6023,129 @@ func awsAwsjson10_deserializeOpErrorUpdateItem(response *smithyhttp.Response, me } } +type awsAwsjson10_deserializeOpUpdateKinesisStreamingDestination struct { +} + +func (*awsAwsjson10_deserializeOpUpdateKinesisStreamingDestination) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpUpdateKinesisStreamingDestination) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorUpdateKinesisStreamingDestination(response, &metadata) + } + output := &UpdateKinesisStreamingDestinationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentUpdateKinesisStreamingDestinationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorUpdateKinesisStreamingDestination(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = 
restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsAwsjson10_deserializeOpUpdateTable struct { } @@ -9661,6 +9784,46 @@ func awsAwsjson10_deserializeDocumentDuplicateItemException(v **types.DuplicateI return nil } +func awsAwsjson10_deserializeDocumentEnableKinesisStreamingConfiguration(v **types.EnableKinesisStreamingConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.EnableKinesisStreamingConfiguration + if *v == nil { + sv = &types.EnableKinesisStreamingConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ApproximateCreationDateTimePrecision": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ApproximateCreationDateTimePrecision to be of type string, got %T instead", value) + } + sv.ApproximateCreationDateTimePrecision = types.ApproximateCreationDateTimePrecision(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeDocumentEndpoint(v **types.Endpoint, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -12184,6 +12347,15 @@ func awsAwsjson10_deserializeDocumentKinesisDataStreamDestination(v **types.Kine for key, value := range shape { switch key { + case "ApproximateCreationDateTimePrecision": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ApproximateCreationDateTimePrecision to be of type string, got %T instead", value) + } + sv.ApproximateCreationDateTimePrecision = types.ApproximateCreationDateTimePrecision(jtv) + } + case "DestinationStatus": if value != nil { jtv, ok := value.(string) @@ -15487,6 +15659,46 @@ func awsAwsjson10_deserializeDocumentTransactionInProgressException(v **types.Tr return nil } +func awsAwsjson10_deserializeDocumentUpdateKinesisStreamingConfiguration(v **types.UpdateKinesisStreamingConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UpdateKinesisStreamingConfiguration + if *v == nil { + sv = &types.UpdateKinesisStreamingConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ApproximateCreationDateTimePrecision": 
+ if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ApproximateCreationDateTimePrecision to be of type string, got %T instead", value) + } + sv.ApproximateCreationDateTimePrecision = types.ApproximateCreationDateTimePrecision(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeDocumentWriteRequest(v **types.WriteRequest, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -16533,6 +16745,11 @@ func awsAwsjson10_deserializeOpDocumentDisableKinesisStreamingDestinationOutput( sv.DestinationStatus = types.DestinationStatus(jtv) } + case "EnableKinesisStreamingConfiguration": + if err := awsAwsjson10_deserializeDocumentEnableKinesisStreamingConfiguration(&sv.EnableKinesisStreamingConfiguration, value); err != nil { + return err + } + case "StreamArn": if value != nil { jtv, ok := value.(string) @@ -16591,6 +16808,11 @@ func awsAwsjson10_deserializeOpDocumentEnableKinesisStreamingDestinationOutput(v sv.DestinationStatus = types.DestinationStatus(jtv) } + case "EnableKinesisStreamingConfiguration": + if err := awsAwsjson10_deserializeDocumentEnableKinesisStreamingConfiguration(&sv.EnableKinesisStreamingConfiguration, value); err != nil { + return err + } + case "StreamArn": if value != nil { jtv, ok := value.(string) @@ -17707,6 +17929,69 @@ func awsAwsjson10_deserializeOpDocumentUpdateItemOutput(v **UpdateItemOutput, va return nil } +func awsAwsjson10_deserializeOpDocumentUpdateKinesisStreamingDestinationOutput(v **UpdateKinesisStreamingDestinationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateKinesisStreamingDestinationOutput + if *v == nil { + sv = &UpdateKinesisStreamingDestinationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DestinationStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DestinationStatus to be of type string, got %T instead", value) + } + sv.DestinationStatus = types.DestinationStatus(jtv) + } + + case "StreamArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StreamArn to be of type string, got %T instead", value) + } + sv.StreamArn = ptr.String(jtv) + } + + case "TableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.TableName = ptr.String(jtv) + } + + case "UpdateKinesisStreamingConfiguration": + if err := awsAwsjson10_deserializeDocumentUpdateKinesisStreamingConfiguration(&sv.UpdateKinesisStreamingConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeOpDocumentUpdateTableOutput(v **UpdateTableOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/generated.json index 7043e391d65..215c30f52ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/generated.json @@ -62,6 +62,7 @@ "api_op_UpdateGlobalTable.go", 
"api_op_UpdateGlobalTableSettings.go", "api_op_UpdateItem.go", + "api_op_UpdateKinesisStreamingDestination.go", "api_op_UpdateTable.go", "api_op_UpdateTableReplicaAutoScaling.go", "api_op_UpdateTimeToLive.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go index 77f9f4ec698..d06bf67a385 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go @@ -3,4 +3,4 @@ package dynamodb // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.26.9" +const goModuleVersion = "1.27.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go index 0317bd60ff7..833253c9199 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go @@ -2767,6 +2767,61 @@ func (m *awsAwsjson10_serializeOpUpdateItem) HandleSerialize(ctx context.Context return next.HandleSerialize(ctx, in) } +type awsAwsjson10_serializeOpUpdateKinesisStreamingDestination struct { +} + +func (*awsAwsjson10_serializeOpUpdateKinesisStreamingDestination) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpUpdateKinesisStreamingDestination) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateKinesisStreamingDestinationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateKinesisStreamingDestination") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentUpdateKinesisStreamingDestinationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson10_serializeOpUpdateTable struct { } @@ -3515,6 +3570,18 @@ func awsAwsjson10_serializeDocumentDeleteRequest(v 
*types.DeleteRequest, value s return nil } +func awsAwsjson10_serializeDocumentEnableKinesisStreamingConfiguration(v *types.EnableKinesisStreamingConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.ApproximateCreationDateTimePrecision) > 0 { + ok := object.Key("ApproximateCreationDateTimePrecision") + ok.String(string(v.ApproximateCreationDateTimePrecision)) + } + + return nil +} + func awsAwsjson10_serializeDocumentExpectedAttributeMap(v map[string]types.ExpectedAttributeValue, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -4843,6 +4910,18 @@ func awsAwsjson10_serializeDocumentUpdateGlobalSecondaryIndexAction(v *types.Upd return nil } +func awsAwsjson10_serializeDocumentUpdateKinesisStreamingConfiguration(v *types.UpdateKinesisStreamingConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.ApproximateCreationDateTimePrecision) > 0 { + ok := object.Key("ApproximateCreationDateTimePrecision") + ok.String(string(v.ApproximateCreationDateTimePrecision)) + } + + return nil +} + func awsAwsjson10_serializeDocumentUpdateReplicationGroupMemberAction(v *types.UpdateReplicationGroupMemberAction, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -5343,6 +5422,13 @@ func awsAwsjson10_serializeOpDocumentDisableKinesisStreamingDestinationInput(v * object := value.Object() defer object.Close() + if v.EnableKinesisStreamingConfiguration != nil { + ok := object.Key("EnableKinesisStreamingConfiguration") + if err := awsAwsjson10_serializeDocumentEnableKinesisStreamingConfiguration(v.EnableKinesisStreamingConfiguration, ok); err != nil { + return err + } + } + if v.StreamArn != nil { ok := object.Key("StreamArn") ok.String(*v.StreamArn) @@ -5360,6 +5446,13 @@ func awsAwsjson10_serializeOpDocumentEnableKinesisStreamingDestinationInput(v *E object := value.Object() defer object.Close() + if v.EnableKinesisStreamingConfiguration != nil { + ok := object.Key("EnableKinesisStreamingConfiguration") + if err := awsAwsjson10_serializeDocumentEnableKinesisStreamingConfiguration(v.EnableKinesisStreamingConfiguration, ok); err != nil { + return err + } + } + if v.StreamArn != nil { ok := object.Key("StreamArn") ok.String(*v.StreamArn) @@ -6412,6 +6505,30 @@ func awsAwsjson10_serializeOpDocumentUpdateItemInput(v *UpdateItemInput, value s return nil } +func awsAwsjson10_serializeOpDocumentUpdateKinesisStreamingDestinationInput(v *UpdateKinesisStreamingDestinationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.StreamArn != nil { + ok := object.Key("StreamArn") + ok.String(*v.StreamArn) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + if v.UpdateKinesisStreamingConfiguration != nil { + ok := object.Key("UpdateKinesisStreamingConfiguration") + if err := awsAwsjson10_serializeDocumentUpdateKinesisStreamingConfiguration(v.UpdateKinesisStreamingConfiguration, ok); err != nil { + return err + } + } + + return nil +} + func awsAwsjson10_serializeOpDocumentUpdateTableInput(v *UpdateTableInput, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go index a603b3406ef..ec617f936bd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go @@ -2,6 +2,25 @@ package types +type ApproximateCreationDateTimePrecision string + +// Enum values for ApproximateCreationDateTimePrecision +const ( + ApproximateCreationDateTimePrecisionMillisecond ApproximateCreationDateTimePrecision = "MILLISECOND" + ApproximateCreationDateTimePrecisionMicrosecond ApproximateCreationDateTimePrecision = "MICROSECOND" +) + +// Values returns all known values for ApproximateCreationDateTimePrecision. Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (ApproximateCreationDateTimePrecision) Values() []ApproximateCreationDateTimePrecision { + return []ApproximateCreationDateTimePrecision{ + "MILLISECOND", + "MICROSECOND", + } +} + type AttributeAction string // Enum values for AttributeAction @@ -265,6 +284,7 @@ const ( DestinationStatusDisabling DestinationStatus = "DISABLING" DestinationStatusDisabled DestinationStatus = "DISABLED" DestinationStatusEnableFailed DestinationStatus = "ENABLE_FAILED" + DestinationStatusUpdating DestinationStatus = "UPDATING" ) // Values returns all known values for DestinationStatus. Note that this can be @@ -277,6 +297,7 @@ func (DestinationStatus) Values() []DestinationStatus { "DISABLING", "DISABLED", "ENABLE_FAILED", + "UPDATING", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go index 037de7ce4c0..ce38bd1f41d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go @@ -975,6 +975,16 @@ type DeleteRequest struct { noSmithyDocumentSerde } +// Enables setting the configuration for Kinesis Streaming. +type EnableKinesisStreamingConfiguration struct { + + // Toggle for the precision of Kinesis data stream timestamp. The values are + // either MILLISECOND or MICROSECOND . + ApproximateCreationDateTimePrecision ApproximateCreationDateTimePrecision + + noSmithyDocumentSerde +} + // An endpoint information details. type Endpoint struct { @@ -1775,6 +1785,10 @@ type KeySchemaElement struct { // Describes a Kinesis data stream destination. type KinesisDataStreamDestination struct { + // The precision of the Kinesis data stream timestamp. The values are either + // MILLISECOND or MICROSECOND . + ApproximateCreationDateTimePrecision ApproximateCreationDateTimePrecision + // The current status of replication. DestinationStatus DestinationStatus @@ -1886,10 +1900,10 @@ type LocalSecondaryIndexInfo struct { noSmithyDocumentSerde } -// Represents a PartiQL statment that uses parameters. +// Represents a PartiQL statement that uses parameters. type ParameterizedStatement struct { - // A PartiQL statment that uses parameters. + // A PartiQL statement that uses parameters. // // This member is required. Statement *string @@ -3016,6 +3030,15 @@ type UpdateGlobalSecondaryIndexAction struct { noSmithyDocumentSerde } +// Enables updating the configuration for Kinesis Streaming. +type UpdateKinesisStreamingConfiguration struct { + + // Enables updating the precision of Kinesis data stream timestamp. + ApproximateCreationDateTimePrecision ApproximateCreationDateTimePrecision + + noSmithyDocumentSerde +} + // Represents a replica to be modified. 
type UpdateReplicationGroupMemberAction struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go index 1c76f46d976..4762e130283 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go @@ -850,6 +850,26 @@ func (m *validateOpUpdateItem) HandleInitialize(ctx context.Context, in middlewa return next.HandleInitialize(ctx, in) } +type validateOpUpdateKinesisStreamingDestination struct { +} + +func (*validateOpUpdateKinesisStreamingDestination) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateKinesisStreamingDestination) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateKinesisStreamingDestinationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateKinesisStreamingDestinationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpUpdateTable struct { } @@ -1078,6 +1098,10 @@ func addOpUpdateItemValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpUpdateItem{}, middleware.After) } +func addOpUpdateKinesisStreamingDestinationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateKinesisStreamingDestination{}, middleware.After) +} + func addOpUpdateTableValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpUpdateTable{}, middleware.After) } @@ -3231,6 +3255,24 @@ func validateOpUpdateItemInput(v *UpdateItemInput) error { } } +func validateOpUpdateKinesisStreamingDestinationInput(v *UpdateKinesisStreamingDestinationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateKinesisStreamingDestinationInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.StreamArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("StreamArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpUpdateTableInput(v *UpdateTableInput) error { if v == nil { return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index 9d5847a052f..46dea1b5068 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.18.7 (2024-01-18) + +* No change notes available for this release. 
+ # v1.18.6 (2024-01-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index d2e5a8ab8d7..857a6af7ee0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.6" +const goModuleVersion = "1.18.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go index f044afde47c..c8f7c09e46d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go @@ -283,6 +283,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "il-central-1", }, }, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.me-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-central-1", + }, + }, endpoints.EndpointKey{ Region: "me-south-1", }: endpoints.Endpoint{ diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go index 8fa999789f9..ff92f68368e 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go @@ -4,7 +4,6 @@ */ /* - Package binding defines interfaces for protocol bindings. NOTE: Most applications that emit or consume events should use the ../client @@ -16,11 +15,11 @@ Receiver and a Sender belonging to different bindings. This is useful for intermediary applications that route or forward events, but not necessary for most "endpoint" applications that emit or consume events. -Protocol Bindings +# Protocol Bindings A protocol binding usually implements a Message, a Sender and Receiver, a StructuredWriter and a BinaryWriter (depending on the supported encodings of the protocol) and an Write[ProtocolMessage] method. -Read and write events +# Read and write events The core of this package is the binding.Message interface. Through binding.MessageReader It defines how to read a protocol specific message for an @@ -49,7 +48,7 @@ The binding.Write method tries to preserve the structured/binary encoding, in or Messages can be eventually wrapped to change their behaviours and binding their lifecycle, like the binding.FinishMessage. Every Message wrapper implements the MessageWrapper interface -Sender and Receiver +# Sender and Receiver A Receiver receives protocol specific messages and wraps them to into binding.Message implementations. @@ -60,9 +59,8 @@ Message and ExactlyOnceMessage provide methods to allow acknowledgments to propagate when a reliable messages is forwarded from a Receiver to a Sender. QoS 0 (unreliable), 1 (at-least-once) and 2 (exactly-once) are supported. -Transport +# Transport A binding implementation providing Sender and Receiver implementations can be used as a Transport through the BindingTransport adapter. 
- */ package binding diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go index 5070b7295a1..bb8f914248c 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go @@ -11,9 +11,9 @@ import "errors" type Encoding int const ( - // Binary encoding as specified in https://github.com/cloudevents/spec/blob/master/spec.md#message + // Binary encoding as specified in https://github.com/cloudevents/spec/blob/main/cloudevents/spec.md#message EncodingBinary Encoding = iota - // Structured encoding as specified in https://github.com/cloudevents/spec/blob/master/spec.md#message + // Structured encoding as specified in https://github.com/cloudevents/spec/blob/main/cloudevents/spec.md#message EncodingStructured // Message is an instance of EventMessage or it contains EventMessage nested (through MessageWrapper) EncodingEvent diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go index f82c729c445..83d613af416 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go @@ -22,7 +22,9 @@ const ( // EventMessage type-converts a event.Event object to implement Message. // This allows local event.Event objects to be sent directly via Sender.Send() -// s.Send(ctx, binding.EventMessage(e)) +// +// s.Send(ctx, binding.EventMessage(e)) +// // When an event is wrapped into a EventMessage, the original event could be // potentially mutated. If you need to use the Event again, after wrapping it into // an Event message, you should copy it before diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go index e30e150c02a..2fb136c62db 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go @@ -66,7 +66,7 @@ type MessageMetadataReader interface { // Message is the interface to a binding-specific message containing an event. // -// Reliable Delivery +// # Reliable Delivery // // There are 3 reliable qualities of service for messages: // diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go index 44c0b3145bc..da5bc9f8547 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go @@ -8,6 +8,5 @@ Package spec provides spec-version metadata. For use by code that maps events using (prefixed) attribute name strings. Supports handling multiple spec versions uniformly. 
- */ package spec diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go index ea8fbfbb4db..452304ffdf9 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/client.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go @@ -98,6 +98,7 @@ type ceClient struct { eventDefaulterFns []EventDefaulter pollGoroutines int blockingCallback bool + ackMalformedEvent bool } func (c *ceClient) applyOptions(opts ...Option) error { @@ -202,7 +203,13 @@ func (c *ceClient) StartReceiver(ctx context.Context, fn interface{}) error { return fmt.Errorf("client already has a receiver") } - invoker, err := newReceiveInvoker(fn, c.observabilityService, c.inboundContextDecorators, c.eventDefaulterFns...) + invoker, err := newReceiveInvoker( + fn, + c.observabilityService, + c.inboundContextDecorators, + c.eventDefaulterFns, + c.ackMalformedEvent, + ) if err != nil { return err } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go index 94a4b4e65e4..672581b5805 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go @@ -14,7 +14,7 @@ import ( ) func NewHTTPReceiveHandler(ctx context.Context, p *thttp.Protocol, fn interface{}) (*EventReceiver, error) { - invoker, err := newReceiveInvoker(fn, noopObservabilityService{}, nil) //TODO(slinkydeveloper) maybe not nil? + invoker, err := newReceiveInvoker(fn, noopObservabilityService{}, nil, nil, false) //TODO(slinkydeveloper) maybe not nil? if err != nil { return nil, err } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go index 403fb0f5598..a3080b007a5 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go @@ -23,11 +23,18 @@ type Invoker interface { var _ Invoker = (*receiveInvoker)(nil) -func newReceiveInvoker(fn interface{}, observabilityService ObservabilityService, inboundContextDecorators []func(context.Context, binding.Message) context.Context, fns ...EventDefaulter) (Invoker, error) { +func newReceiveInvoker( + fn interface{}, + observabilityService ObservabilityService, + inboundContextDecorators []func(context.Context, binding.Message) context.Context, + fns []EventDefaulter, + ackMalformedEvent bool, +) (Invoker, error) { r := &receiveInvoker{ eventDefaulterFns: fns, observabilityService: observabilityService, inboundContextDecorators: inboundContextDecorators, + ackMalformedEvent: ackMalformedEvent, } if fn, err := receiver(fn); err != nil { @@ -44,6 +51,7 @@ type receiveInvoker struct { observabilityService ObservabilityService eventDefaulterFns []EventDefaulter inboundContextDecorators []func(context.Context, binding.Message) context.Context + ackMalformedEvent bool } func (r *receiveInvoker) Invoke(ctx context.Context, m binding.Message, respFn protocol.ResponseFn) (err error) { @@ -58,13 +66,13 @@ func (r *receiveInvoker) Invoke(ctx context.Context, m binding.Message, respFn p switch { case eventErr != nil && r.fn.hasEventIn: r.observabilityService.RecordReceivedMalformedEvent(ctx, eventErr) - return respFn(ctx, nil, protocol.NewReceipt(false, "failed to convert Message to Event: %w", eventErr)) + return respFn(ctx, nil, protocol.NewReceipt(r.ackMalformedEvent, "failed to convert Message to Event: %w", eventErr)) case r.fn != nil: 
// Check if event is valid before invoking the receiver function if e != nil { if validationErr := e.Validate(); validationErr != nil { r.observabilityService.RecordReceivedMalformedEvent(ctx, validationErr) - return respFn(ctx, nil, protocol.NewReceipt(false, "validation error in incoming event: %w", validationErr)) + return respFn(ctx, nil, protocol.NewReceipt(r.ackMalformedEvent, "validation error in incoming event: %w", validationErr)) } } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/options.go b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go index 938478162b0..44394be349c 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/options.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go @@ -126,3 +126,16 @@ func WithBlockingCallback() Option { return nil } } + +// WithAckMalformedevents causes malformed events received within StartReceiver to be acknowledged +// rather than being permanently not-acknowledged. This can be useful when a protocol does not +// provide a responder implementation and would otherwise cause the receiver to be partially or +// fully stuck. +func WithAckMalformedEvent() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.ackMalformedEvent = true + } + return nil + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go index b1ab532d792..2cc0e649745 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go @@ -57,7 +57,6 @@ var ( // * func(event.Event) (*event.Event, protocol.Result) // * func(context.Context, event.Event) *event.Event // * func(context.Context, event.Event) (*event.Event, protocol.Result) -// func receiver(fn interface{}) (*receiverFn, error) { fnType := reflect.TypeOf(fn) if fnType.Kind() != reflect.Func { diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event.go index 94b5aa0ada3..52495f9a392 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event.go @@ -55,13 +55,12 @@ func New(version ...string) Event { // Use functions in the types package to convert extension values. // For example replace this: // -// var i int -// err := e.ExtensionAs("foo", &i) +// var i int +// err := e.ExtensionAs("foo", &i) // // With this: // -// i, err := types.ToInteger(e.Extensions["foo"]) -// +// i, err := types.ToInteger(e.Extensions["foo"]) func (e Event) ExtensionAs(name string, obj interface{}) error { return e.Context.ExtensionAs(name, obj) } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go index c511c81c458..3f0505547c1 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go @@ -179,7 +179,8 @@ func (ec EventContextV03) AsV1() *EventContextV1 { } // Validate returns errors based on requirements from the CloudEvents spec. 
-// For more details, see https://github.com/cloudevents/spec/blob/master/spec.md +// For more details, see +// https://github.com/cloudevents/spec/blob/main/cloudevents/spec.md // As of Feb 26, 2019, commit 17c32ea26baf7714ad027d9917d03d2fff79fc7e // + https://github.com/cloudevents/spec/pull/387 -> datacontentencoding // + https://github.com/cloudevents/spec/pull/406 -> subject diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go index f826a1841db..3c771fc5c49 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go @@ -21,6 +21,5 @@ Available protocols: * Nats * Nats Streaming (stan) * Google PubSub - */ package protocol diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go index 0eec396a1e6..e973738c6cd 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go @@ -24,7 +24,7 @@ type RequestData struct { } // WithRequestDataAtContext uses the http.Request to add RequestData -// information to the Context. +// information to the Context. func WithRequestDataAtContext(ctx context.Context, r *nethttp.Request) context.Context { if r == nil { return ctx diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go index 5e400905a70..6582af3eafc 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go @@ -158,7 +158,6 @@ func WithMethod(method string) Option { } } -// // Middleware is a function that takes an existing http.Handler and wraps it in middleware, // returning the wrapped http.Handler. type Middleware func(next nethttp.Handler) nethttp.Handler diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go index 71e7346f304..21fc7e9b3b2 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go @@ -10,9 +10,7 @@ import ( "context" "errors" "io" - "io/ioutil" "net/http" - "net/url" "time" "go.uber.org/zap" @@ -52,7 +50,7 @@ func (p *Protocol) doOnce(req *http.Request) (binding.Message, protocol.Result) } func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParams, req *http.Request) (binding.Message, error) { - then := time.Now() + start := time.Now() retry := 0 results := make([]protocol.Result, 0) @@ -67,7 +65,7 @@ func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParam cecontext.LoggerFrom(ctx).Warnw("could not close request body", zap.Error(err)) } }() - body, err = ioutil.ReadAll(req.Body) + body, err = io.ReadAll(req.Body) if err != nil { panic(err) } @@ -79,51 +77,34 @@ func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParam // Fast track common case. if protocol.IsACK(result) { - return msg, NewRetriesResult(result, retry, then, results) + return msg, NewRetriesResult(result, retry, start, results) } - // Try again? - // - // Make sure the error was something we should retry. 
- - { - var uErr *url.Error - if errors.As(result, &uErr) { - goto DoBackoff + var httpResult *Result + if errors.As(result, &httpResult) { + sc := httpResult.StatusCode + if !p.isRetriableFunc(sc) { + cecontext.LoggerFrom(ctx).Debugw("status code not retryable, will not try again", + zap.Error(httpResult), + zap.Int("statusCode", sc)) + return msg, NewRetriesResult(result, retry, start, results) } } - { - var httpResult *Result - if errors.As(result, &httpResult) { - sc := httpResult.StatusCode - if p.isRetriableFunc(sc) { - // retry! - goto DoBackoff - } else { - // Permanent error - cecontext.LoggerFrom(ctx).Debugw("status code not retryable, will not try again", - zap.Error(httpResult), - zap.Int("statusCode", sc)) - return msg, NewRetriesResult(result, retry, then, results) - } - } - } - - DoBackoff: - resetBody(req, body) - - // Wait for the correct amount of backoff time. - // total tries = retry + 1 - if err := params.Backoff(ctx, retry+1); err != nil { + if err = params.Backoff(ctx, retry+1); err != nil { // do not try again. cecontext.LoggerFrom(ctx).Debugw("backoff error, will not try again", zap.Error(err)) - return msg, NewRetriesResult(result, retry, then, results) + return msg, NewRetriesResult(result, retry, start, results) } retry++ + resetBody(req, body) results = append(results, result) + if msg != nil { + // avoid leak, forget message, ignore error + _ = msg.Finish(nil) + } } } @@ -134,12 +115,12 @@ func resetBody(req *http.Request, body []byte) { return } - req.Body = ioutil.NopCloser(bytes.NewReader(body)) + req.Body = io.NopCloser(bytes.NewReader(body)) // do not modify existing GetBody function if req.GetBody == nil { req.GetBody = func() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(body)), nil + return io.NopCloser(bytes.NewReader(body)), nil } } } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go index 43ad36180c1..f22259a3af2 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go @@ -9,7 +9,6 @@ import ( "bytes" "context" "io" - "io/ioutil" "net/http" "strings" @@ -58,7 +57,7 @@ func (b *httpRequestWriter) SetData(data io.Reader) error { func (b *httpRequestWriter) setBody(body io.Reader) error { rc, ok := body.(io.ReadCloser) if !ok && body != nil { - rc = ioutil.NopCloser(body) + rc = io.NopCloser(body) } b.Body = rc if body != nil { @@ -68,21 +67,21 @@ func (b *httpRequestWriter) setBody(body io.Reader) error { buf := v.Bytes() b.GetBody = func() (io.ReadCloser, error) { r := bytes.NewReader(buf) - return ioutil.NopCloser(r), nil + return io.NopCloser(r), nil } case *bytes.Reader: b.ContentLength = int64(v.Len()) snapshot := *v b.GetBody = func() (io.ReadCloser, error) { r := snapshot - return ioutil.NopCloser(&r), nil + return io.NopCloser(&r), nil } case *strings.Reader: b.ContentLength = int64(v.Len()) snapshot := *v b.GetBody = func() (io.ReadCloser, error) { r := snapshot - return ioutil.NopCloser(&r), nil + return io.NopCloser(&r), nil } default: // This is where we'd set it to -1 (at least @@ -137,5 +136,7 @@ func (b *httpRequestWriter) SetExtension(name string, value interface{}) error { return nil } -var _ binding.StructuredWriter = (*httpRequestWriter)(nil) // Test it conforms to the interface -var _ binding.BinaryWriter = (*httpRequestWriter)(nil) // Test it conforms to the interface +var ( + _ binding.StructuredWriter 
= (*httpRequestWriter)(nil) // Test it conforms to the interface + _ binding.BinaryWriter = (*httpRequestWriter)(nil) // Test it conforms to the interface +) diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go index cf7a94f35c0..3a0a595a1d8 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go @@ -11,25 +11,25 @@ type has a corresponding native Go type and a canonical string encoding. The native Go types used to represent the CloudEvents types are: bool, int32, string, []byte, *url.URL, time.Time - +----------------+----------------+-----------------------------------+ - |CloudEvents Type|Native Type |Convertible From | - +================+================+===================================+ - |Bool |bool |bool | - +----------------+----------------+-----------------------------------+ - |Integer |int32 |Any numeric type with value in | - | | |range of int32 | - +----------------+----------------+-----------------------------------+ - |String |string |string | - +----------------+----------------+-----------------------------------+ - |Binary |[]byte |[]byte | - +----------------+----------------+-----------------------------------+ - |URI-Reference |*url.URL |url.URL, types.URIRef, types.URI | - +----------------+----------------+-----------------------------------+ - |URI |*url.URL |url.URL, types.URIRef, types.URI | - | | |Must be an absolute URI. | - +----------------+----------------+-----------------------------------+ - |Timestamp |time.Time |time.Time, types.Timestamp | - +----------------+----------------+-----------------------------------+ + +----------------+----------------+-----------------------------------+ + |CloudEvents Type|Native Type |Convertible From | + +================+================+===================================+ + |Bool |bool |bool | + +----------------+----------------+-----------------------------------+ + |Integer |int32 |Any numeric type with value in | + | | |range of int32 | + +----------------+----------------+-----------------------------------+ + |String |string |string | + +----------------+----------------+-----------------------------------+ + |Binary |[]byte |[]byte | + +----------------+----------------+-----------------------------------+ + |URI-Reference |*url.URL |url.URL, types.URIRef, types.URI | + +----------------+----------------+-----------------------------------+ + |URI |*url.URL |url.URL, types.URIRef, types.URI | + | | |Must be an absolute URI. | + +----------------+----------------+-----------------------------------+ + |Timestamp |time.Time |time.Time, types.Timestamp | + +----------------+----------------+-----------------------------------+ Extension attributes may be stored as a native type or a canonical string. The To functions will convert to the desired from any convertible type @@ -41,6 +41,5 @@ canonical strings. Note are no Parse or Format functions for URL or string. For URL use the standard url.Parse() and url.URL.String(). The canonical string format of a string is the string itself. 
- */ package types diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/value.go b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go index f643d0aa512..14004d3e18d 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/types/value.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go @@ -86,7 +86,7 @@ func Format(v interface{}) (string, error) { } // Validate v is a valid CloudEvents attribute value, convert it to one of: -// bool, int32, string, []byte, types.URI, types.URIRef, types.Timestamp +// bool, int32, string, []byte, types.URI, types.URIRef, types.Timestamp func Validate(v interface{}) (interface{}, error) { switch v := v.(type) { case bool, int32, string, []byte: @@ -151,7 +151,9 @@ func Validate(v interface{}) (interface{}, error) { } // Clone v clones a CloudEvents attribute value, which is one of the valid types: -// bool, int32, string, []byte, types.URI, types.URIRef, types.Timestamp +// +// bool, int32, string, []byte, types.URI, types.URIRef, types.Timestamp +// // Returns the same type // Panics if the type is not valid func Clone(v interface{}) interface{} { diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go index a1819b16ace..2e6eca44878 100644 --- a/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go +++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go @@ -167,6 +167,19 @@ func Marshal(v any) ([]byte, error) { return buf, nil } +func MarshalEscaped(v any, escape bool) ([]byte, error) { + e := newEncodeState() + defer encodeStatePool.Put(e) + + err := e.marshal(v, encOpts{escapeHTML: escape}) + if err != nil { + return nil, err + } + buf := append([]byte(nil), e.Bytes()...) + + return buf, nil +} + // MarshalIndent is like Marshal but applies Indent to format the output. // Each JSON element in the output will begin on a new line beginning with prefix // followed by one or more copies of indent according to the indentation nesting. 
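
The MarshalEscaped helper introduced above makes HTML escaping opt-in per call instead of always-on. For readers unfamiliar with the distinction, here is a minimal, self-contained sketch of the same behaviour using only the standard library's encoding/json (an analogy, not a call into the vendored internal package): json.Marshal always escapes <, >, and &, while an Encoder with SetEscapeHTML(false) emits them verbatim.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	doc := map[string]string{"html": "<b>5 & 6</b>"}

	// Default marshalling escapes HTML-significant characters.
	escaped, _ := json.Marshal(doc)
	fmt.Println(string(escaped)) // {"html":"\u003cb\u003e5 \u0026 6\u003c/b\u003e"}

	// An encoder with escaping disabled leaves them untouched, which is
	// the behaviour MarshalEscaped(v, false) exposes in the hunk above.
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetEscapeHTML(false)
	_ = enc.Encode(doc)
	fmt.Print(buf.String()) // {"html":"<b>5 & 6</b>"}
}
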
diff --git a/vendor/github.com/evanphx/json-patch/v5/merge.go b/vendor/github.com/evanphx/json-patch/v5/merge.go index bbe9f85f28e..f79caf3135a 100644 --- a/vendor/github.com/evanphx/json-patch/v5/merge.go +++ b/vendor/github.com/evanphx/json-patch/v5/merge.go @@ -10,26 +10,26 @@ import ( "github.com/evanphx/json-patch/v5/internal/json" ) -func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { - curDoc, err := cur.intoDoc() +func merge(cur, patch *lazyNode, mergeMerge bool, options *ApplyOptions) *lazyNode { + curDoc, err := cur.intoDoc(options) if err != nil { - pruneNulls(patch) + pruneNulls(patch, options) return patch } - patchDoc, err := patch.intoDoc() + patchDoc, err := patch.intoDoc(options) if err != nil { return patch } - mergeDocs(curDoc, patchDoc, mergeMerge) + mergeDocs(curDoc, patchDoc, mergeMerge, options) return cur } -func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { +func mergeDocs(doc, patch *partialDoc, mergeMerge bool, options *ApplyOptions) { for k, v := range patch.obj { if v == nil { if mergeMerge { @@ -45,55 +45,55 @@ func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { } doc.obj[k] = nil } else { - _ = doc.remove(k, &ApplyOptions{}) + _ = doc.remove(k, options) } } else { cur, ok := doc.obj[k] if !ok || cur == nil { if !mergeMerge { - pruneNulls(v) + pruneNulls(v, options) } - _ = doc.set(k, v, &ApplyOptions{}) + _ = doc.set(k, v, options) } else { - _ = doc.set(k, merge(cur, v, mergeMerge), &ApplyOptions{}) + _ = doc.set(k, merge(cur, v, mergeMerge, options), options) } } } } -func pruneNulls(n *lazyNode) { - sub, err := n.intoDoc() +func pruneNulls(n *lazyNode, options *ApplyOptions) { + sub, err := n.intoDoc(options) if err == nil { - pruneDocNulls(sub) + pruneDocNulls(sub, options) } else { ary, err := n.intoAry() if err == nil { - pruneAryNulls(ary) + pruneAryNulls(ary, options) } } } -func pruneDocNulls(doc *partialDoc) *partialDoc { +func pruneDocNulls(doc *partialDoc, options *ApplyOptions) *partialDoc { for k, v := range doc.obj { if v == nil { _ = doc.remove(k, &ApplyOptions{}) } else { - pruneNulls(v) + pruneNulls(v, options) } } return doc } -func pruneAryNulls(ary *partialArray) *partialArray { +func pruneAryNulls(ary *partialArray, options *ApplyOptions) *partialArray { newAry := []*lazyNode{} for _, v := range ary.nodes { if v != nil { - pruneNulls(v) + pruneNulls(v, options) } newAry = append(newAry, v) } @@ -128,11 +128,17 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { return nil, errBadJSONPatch } - doc := &partialDoc{} + options := NewApplyOptions() + + doc := &partialDoc{ + opts: options, + } docErr := doc.UnmarshalJSON(docData) - patch := &partialDoc{} + patch := &partialDoc{ + opts: options, + } patchErr := patch.UnmarshalJSON(patchData) @@ -158,7 +164,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { if mergeMerge { doc = patch } else { - doc = pruneDocNulls(patch) + doc = pruneDocNulls(patch, options) } } else { patchAry := &partialArray{} @@ -172,7 +178,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { return nil, errBadJSONPatch } - pruneAryNulls(patchAry) + pruneAryNulls(patchAry, options) out, patchErr := json.Marshal(patchAry.nodes) @@ -183,7 +189,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { return out, nil } } else { - mergeDocs(doc, patch, mergeMerge) + mergeDocs(doc, patch, mergeMerge, options) } return json.Marshal(doc) diff --git 
a/vendor/github.com/evanphx/json-patch/v5/patch.go b/vendor/github.com/evanphx/json-patch/v5/patch.go index a3bbf1c73b8..7a7f71c8b66 100644 --- a/vendor/github.com/evanphx/json-patch/v5/patch.go +++ b/vendor/github.com/evanphx/json-patch/v5/patch.go @@ -38,6 +38,8 @@ var ( ErrInvalid = errors.New("invalid state detected") ErrInvalidIndex = errors.New("invalid index referenced") + ErrExpectedObject = errors.New("invalid value, expected object") + rawJSONArray = []byte("[]") rawJSONObject = []byte("{}") rawJSONNull = []byte("null") @@ -60,6 +62,8 @@ type partialDoc struct { self *lazyNode keys []string obj map[string]*lazyNode + + opts *ApplyOptions } type partialArray struct { @@ -90,6 +94,8 @@ type ApplyOptions struct { // EnsurePathExistsOnAdd instructs json-patch to recursively create the missing parts of path on "add" operation. // Default to false. EnsurePathExistsOnAdd bool + + EscapeHTML bool } // NewApplyOptions creates a default set of options for calls to ApplyWithOptions. @@ -99,6 +105,7 @@ func NewApplyOptions() *ApplyOptions { AccumulatedCopySizeLimit: AccumulatedCopySizeLimit, AllowMissingPathOnRemove: false, EnsurePathExistsOnAdd: false, + EscapeHTML: true, } } @@ -134,16 +141,28 @@ func (n *lazyNode) UnmarshalJSON(data []byte) error { } func (n *partialDoc) TrustMarshalJSON(buf *bytes.Buffer) error { + if n.obj == nil { + return ErrExpectedObject + } + if err := buf.WriteByte('{'); err != nil { return err } + escaped := true + + // n.opts should always be set, but in case we missed a case, + // guard. + if n.opts != nil { + escaped = n.opts.EscapeHTML + } + for i, k := range n.keys { if i > 0 { if err := buf.WriteByte(','); err != nil { return err } } - key, err := json.Marshal(k) + key, err := json.MarshalEscaped(k, escaped) if err != nil { return err } @@ -153,7 +172,7 @@ func (n *partialDoc) TrustMarshalJSON(buf *bytes.Buffer) error { if err := buf.WriteByte(':'); err != nil { return err } - value, err := json.Marshal(n.obj[k]) + value, err := json.MarshalEscaped(n.obj[k], escaped) if err != nil { return err } @@ -194,11 +213,11 @@ func (n *partialArray) RedirectMarshalJSON() (interface{}, error) { return n.nodes, nil } -func deepCopy(src *lazyNode) (*lazyNode, int, error) { +func deepCopy(src *lazyNode, options *ApplyOptions) (*lazyNode, int, error) { if src == nil { return nil, 0, nil } - a, err := json.Marshal(src) + a, err := json.MarshalEscaped(src, options.EscapeHTML) if err != nil { return nil, 0, err } @@ -216,7 +235,7 @@ func (n *lazyNode) nextByte() byte { return s[0] } -func (n *lazyNode) intoDoc() (*partialDoc, error) { +func (n *lazyNode) intoDoc(options *ApplyOptions) (*partialDoc, error) { if n.which == eDoc { return n.doc, nil } @@ -235,6 +254,7 @@ func (n *lazyNode) intoDoc() (*partialDoc, error) { return nil, ErrInvalid } + n.doc.opts = options if err != nil { return nil, err } @@ -545,7 +565,7 @@ func findObject(pd *container, path string, options *ApplyOptions) (container, s return nil, "" } } else { - doc, err = next.intoDoc() + doc, err = next.intoDoc(options) if err != nil { return nil, "" @@ -557,6 +577,10 @@ func findObject(pd *container, path string, options *ApplyOptions) (container, s } func (d *partialDoc) set(key string, val *lazyNode, options *ApplyOptions) error { + if d.obj == nil { + return ErrExpectedObject + } + found := false for _, k := range d.keys { if k == key { @@ -579,6 +603,11 @@ func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) { if key == "" { return d.self, nil } + + if d.obj == nil { + return 
nil, ErrExpectedObject + } + v, ok := d.obj[key] if !ok { return v, errors.Wrapf(ErrMissing, "unable to get nonexistent key: %s", key) @@ -587,6 +616,10 @@ func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) { } func (d *partialDoc) remove(key string, options *ApplyOptions) error { + if d.obj == nil { + return ErrExpectedObject + } + _, ok := d.obj[key] if !ok { if options.AllowMissingPathOnRemove { @@ -750,6 +783,7 @@ func (p Patch) add(doc *container, op Operation, options *ApplyOptions) error { } else { pd = &partialDoc{ self: val, + opts: options, } } @@ -855,7 +889,7 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error { newNode := newLazyNode(newRawMessage(rawJSONObject)) doc.add(part, newNode, options) - doc, err = newNode.intoDoc() + doc, err = newNode.intoDoc(options) if err != nil { return err } @@ -868,7 +902,7 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error { return err } } else { - doc, err = target.intoDoc() + doc, err = target.intoDoc(options) if err != nil { return err @@ -954,6 +988,8 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro if !val.tryAry() { return errors.Wrapf(err, "replace operation value must be object or array") } + } else { + val.doc.opts = options } } @@ -1115,7 +1151,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, op return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) } - valCopy, sz, err := deepCopy(val) + valCopy, sz, err := deepCopy(val, options) if err != nil { return errors.Wrapf(err, "error while performing deep copy") } @@ -1202,6 +1238,7 @@ func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyO } else { pd = &partialDoc{ self: self, + opts: options, } } @@ -1238,11 +1275,18 @@ func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyO } } - if indent != "" { - return json.MarshalIndent(pd, "", indent) + data, err := json.MarshalEscaped(pd, options.EscapeHTML) + if err != nil { + return nil, err + } + + if indent == "" { + return data, nil } - return json.Marshal(pd) + var buf bytes.Buffer + json.Indent(&buf, data, "", indent) + return buf.Bytes(), nil } // From http://tools.ietf.org/html/rfc6901#section-4 : diff --git a/vendor/github.com/expr-lang/expr/.gitignore b/vendor/github.com/expr-lang/expr/.gitignore index b089dce8a82..da1a8182fa2 100644 --- a/vendor/github.com/expr-lang/expr/.gitignore +++ b/vendor/github.com/expr-lang/expr/.gitignore @@ -7,3 +7,4 @@ *.out *.html custom_tests.json +pro/ diff --git a/vendor/github.com/expr-lang/expr/ast/node.go b/vendor/github.com/expr-lang/expr/ast/node.go index 68c9e7bef8e..03e8cf62223 100644 --- a/vendor/github.com/expr-lang/expr/ast/node.go +++ b/vendor/github.com/expr-lang/expr/ast/node.go @@ -133,6 +133,7 @@ type MemberNode struct { Node Node // Node of the member access. Like "foo" in "foo.bar". Property Node // Property of the member access. For property access it is a StringNode. Optional bool // If true then the member access is optional. Like "foo?.bar". + Method bool } // SliceNode represents access to a slice of an array. 
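
The json-patch hunks above thread *ApplyOptions through merging, deep-copying, and marshalling so that the new EscapeHTML field (defaulting to true, i.e. the previous behaviour) controls how the result is written. A minimal usage sketch, assuming the package's usual DecodePatch and ApplyWithOptions entry points and an invented document/patch pair:

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	original := []byte(`{"link":"old"}`)
	patchJSON := []byte(`[{"op":"replace","path":"/link","value":"<a href='x'>go & see</a>"}]`)

	patch, err := jsonpatch.DecodePatch(patchJSON)
	if err != nil {
		panic(err)
	}

	opts := jsonpatch.NewApplyOptions() // EscapeHTML defaults to true, matching prior releases
	opts.EscapeHTML = false             // new knob from the hunk above: keep <, >, & verbatim

	modified, err := patch.ApplyWithOptions(original, opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(modified))
}
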
diff --git a/vendor/github.com/expr-lang/expr/ast/print.go b/vendor/github.com/expr-lang/expr/ast/print.go index 9a7d123912a..7ecb1c04cc3 100644 --- a/vendor/github.com/expr-lang/expr/ast/print.go +++ b/vendor/github.com/expr-lang/expr/ast/print.go @@ -58,17 +58,43 @@ func (n *UnaryNode) String() string { } func (n *BinaryNode) String() string { + if n.Operator == ".." { + return fmt.Sprintf("%s..%s", n.Left, n.Right) + } + var lhs, rhs string + var lwrap, rwrap bool lb, ok := n.Left.(*BinaryNode) - if ok && (operator.Less(lb.Operator, n.Operator) || lb.Operator == "??") { + if ok { + if operator.Less(lb.Operator, n.Operator) { + lwrap = true + } + if lb.Operator == "??" { + lwrap = true + } + if operator.IsBoolean(lb.Operator) && n.Operator != lb.Operator { + lwrap = true + } + } + + rb, ok := n.Right.(*BinaryNode) + if ok { + if operator.Less(rb.Operator, n.Operator) { + rwrap = true + } + if operator.IsBoolean(rb.Operator) && n.Operator != rb.Operator { + rwrap = true + } + } + + if lwrap { lhs = fmt.Sprintf("(%s)", n.Left.String()) } else { lhs = n.Left.String() } - rb, ok := n.Right.(*BinaryNode) - if ok && operator.Less(rb.Operator, n.Operator) { + if rwrap { rhs = fmt.Sprintf("(%s)", n.Right.String()) } else { rhs = n.Right.String() @@ -132,7 +158,7 @@ func (n *ClosureNode) String() string { } func (n *PointerNode) String() string { - return "#" + return fmt.Sprintf("#%s", n.Name) } func (n *VariableDeclaratorNode) String() string { diff --git a/vendor/github.com/expr-lang/expr/builtin/builtin.go b/vendor/github.com/expr-lang/expr/builtin/builtin.go index cd943bd0baf..14fc8a4b7b0 100644 --- a/vendor/github.com/expr-lang/expr/builtin/builtin.go +++ b/vendor/github.com/expr-lang/expr/builtin/builtin.go @@ -12,16 +12,6 @@ import ( "github.com/expr-lang/expr/vm/runtime" ) -type Function struct { - Name string - Func func(args ...any) (any, error) - Fast func(arg any) any - ValidateArgs func(args ...any) (any, error) - Types []reflect.Type - Validate func(args []reflect.Type) (reflect.Type, error) - Predicate bool -} - var ( Index map[string]int Names []string diff --git a/vendor/github.com/expr-lang/expr/builtin/function.go b/vendor/github.com/expr-lang/expr/builtin/function.go new file mode 100644 index 00000000000..e6d882340ad --- /dev/null +++ b/vendor/github.com/expr-lang/expr/builtin/function.go @@ -0,0 +1,22 @@ +package builtin + +import ( + "reflect" +) + +type Function struct { + Name string + Func func(args ...any) (any, error) + Fast func(arg any) any + ValidateArgs func(args ...any) (any, error) + Types []reflect.Type + Validate func(args []reflect.Type) (reflect.Type, error) + Predicate bool +} + +func (f *Function) Type() reflect.Type { + if len(f.Types) > 0 { + return f.Types[0] + } + return reflect.TypeOf(f.Func) +} diff --git a/vendor/github.com/expr-lang/expr/builtin/lib.go b/vendor/github.com/expr-lang/expr/builtin/lib.go index 1deca3f9948..228a74893da 100644 --- a/vendor/github.com/expr-lang/expr/builtin/lib.go +++ b/vendor/github.com/expr-lang/expr/builtin/lib.go @@ -62,76 +62,76 @@ func Type(arg any) any { } func Abs(x any) any { - switch x.(type) { + switch x := x.(type) { case float32: - if x.(float32) < 0 { - return -x.(float32) + if x < 0 { + return -x } else { return x } case float64: - if x.(float64) < 0 { - return -x.(float64) + if x < 0 { + return -x } else { return x } case int: - if x.(int) < 0 { - return -x.(int) + if x < 0 { + return -x } else { return x } case int8: - if x.(int8) < 0 { - return -x.(int8) + if x < 0 { + return -x } else { return x } case 
int16: - if x.(int16) < 0 { - return -x.(int16) + if x < 0 { + return -x } else { return x } case int32: - if x.(int32) < 0 { - return -x.(int32) + if x < 0 { + return -x } else { return x } case int64: - if x.(int64) < 0 { - return -x.(int64) + if x < 0 { + return -x } else { return x } case uint: - if x.(uint) < 0 { - return -x.(uint) + if x < 0 { + return -x } else { return x } case uint8: - if x.(uint8) < 0 { - return -x.(uint8) + if x < 0 { + return -x } else { return x } case uint16: - if x.(uint16) < 0 { - return -x.(uint16) + if x < 0 { + return -x } else { return x } case uint32: - if x.(uint32) < 0 { - return -x.(uint32) + if x < 0 { + return -x } else { return x } case uint64: - if x.(uint64) < 0 { - return -x.(uint64) + if x < 0 { + return -x } else { return x } diff --git a/vendor/github.com/expr-lang/expr/checker/checker.go b/vendor/github.com/expr-lang/expr/checker/checker.go index 4dced34eaad..3dc4e95addd 100644 --- a/vendor/github.com/expr-lang/expr/checker/checker.go +++ b/vendor/github.com/expr-lang/expr/checker/checker.go @@ -54,7 +54,6 @@ type checker struct { config *conf.Config predicateScopes []predicateScope varScopes []varScope - parents []ast.Node err *file.Error } @@ -83,7 +82,6 @@ type info struct { func (v *checker) visit(node ast.Node) (reflect.Type, info) { var t reflect.Type var i info - v.parents = append(v.parents, node) switch n := node.(type) { case *ast.NilNode: t, i = v.NilNode(n) @@ -130,7 +128,6 @@ func (v *checker) visit(node ast.Node) (reflect.Type, info) { default: panic(fmt.Sprintf("undefined node type (%T)", node)) } - v.parents = v.parents[:len(v.parents)-1] node.SetType(t) return t, i } @@ -156,24 +153,25 @@ func (v *checker) IdentifierNode(node *ast.IdentifierNode) (reflect.Type, info) if node.Value == "$env" { return mapType, info{} } - if fn, ok := v.config.Builtins[node.Value]; ok { - return functionType, info{fn: fn} - } - if fn, ok := v.config.Functions[node.Value]; ok { - return functionType, info{fn: fn} - } - return v.env(node, node.Value, true) + return v.ident(node, node.Value, true, true) } -// env method returns type of environment variable. env only lookups for -// environment variables, no builtins, no custom functions. -func (v *checker) env(node ast.Node, name string, strict bool) (reflect.Type, info) { +// ident method returns type of environment variable, builtin or function. +func (v *checker) ident(node ast.Node, name string, strict, builtins bool) (reflect.Type, info) { if t, ok := v.config.Types[name]; ok { if t.Ambiguous { return v.error(node, "ambiguous identifier %v", name) } return t.Type, info{method: t.Method} } + if builtins { + if fn, ok := v.config.Functions[name]; ok { + return fn.Type(), info{fn: fn} + } + if fn, ok := v.config.Builtins[name]; ok { + return fn.Type(), info{fn: fn} + } + } if v.config.Strict && strict { return v.error(node, "unknown name %v", name) } @@ -430,9 +428,7 @@ func (v *checker) ChainNode(node *ast.ChainNode) (reflect.Type, info) { } func (v *checker) MemberNode(node *ast.MemberNode) (reflect.Type, info) { - base, _ := v.visit(node.Node) - prop, _ := v.visit(node.Property) - + // $env variable if an, ok := node.Node.(*ast.IdentifierNode); ok && an.Value == "$env" { if name, ok := node.Property.(*ast.StringNode); ok { strict := v.config.Strict @@ -443,11 +439,14 @@ func (v *checker) MemberNode(node *ast.MemberNode) (reflect.Type, info) { // should throw error if field is not found & v.config.Strict. 
strict = false } - return v.env(node, name.Value, strict) + return v.ident(node, name.Value, strict, false /* no builtins and no functions */) } return anyType, info{} } + base, _ := v.visit(node.Node) + prop, _ := v.visit(node.Property) + if name, ok := node.Property.(*ast.StringNode); ok { if base == nil { return v.error(node, "type %v has no field %v", base, name.Value) @@ -496,10 +495,8 @@ func (v *checker) MemberNode(node *ast.MemberNode) (reflect.Type, info) { if field, ok := fetchField(base, propertyName); ok { return field.Type, info{} } - if len(v.parents) > 1 { - if _, ok := v.parents[len(v.parents)-2].(*ast.CallNode); ok { - return v.error(node, "type %v has no method %v", base, propertyName) - } + if node.Method { + return v.error(node, "type %v has no method %v", base, propertyName) } return v.error(node, "type %v has no field %v", base, propertyName) } @@ -537,9 +534,22 @@ func (v *checker) SliceNode(node *ast.SliceNode) (reflect.Type, info) { func (v *checker) CallNode(node *ast.CallNode) (reflect.Type, info) { t, i := v.functionReturnType(node) - if node.Type() != nil { + + // Check if type was set on node (for example, by patcher) + // and use node type instead of function return type. + // + // If node type is anyType, then we should use function + // return type. For example, on error we return anyType + // for a call `errCall().Method()` and method will be + // evaluated on `anyType.Method()`, so return type will + // be anyType `anyType.Method(): anyType`. Patcher can + // fix `errCall()` to return proper type, so on second + // checker pass we should replace anyType on method node + // with new correct function return type. + if node.Type() != nil && node.Type() != anyType { return node.Type(), i } + return t, i } @@ -831,7 +841,7 @@ func (v *checker) checkFunction(f *builtin.Function, node ast.Node, arguments [] } return t, info{} } else if len(f.Types) == 0 { - t, err := v.checkArguments(f.Name, functionType, false, arguments, node) + t, err := v.checkArguments(f.Name, f.Type(), false, arguments, node) if err != nil { if v.err == nil { v.err = err @@ -896,28 +906,38 @@ func (v *checker) checkArguments( fnInOffset = 1 } + var err *file.Error if fn.IsVariadic() { if len(arguments) < fnNumIn-1 { - return anyType, &file.Error{ + err = &file.Error{ Location: node.Location(), Message: fmt.Sprintf("not enough arguments to call %v", name), } } } else { if len(arguments) > fnNumIn { - return anyType, &file.Error{ + err = &file.Error{ Location: node.Location(), Message: fmt.Sprintf("too many arguments to call %v", name), } } if len(arguments) < fnNumIn { - return anyType, &file.Error{ + err = &file.Error{ Location: node.Location(), Message: fmt.Sprintf("not enough arguments to call %v", name), } } } + if err != nil { + // If we have an error, we should still visit all arguments to + // type check them, as a patch can fix the error later. 
+ for _, arg := range arguments { + _, _ = v.visit(arg) + } + return fn.Out(0), err + } + for i, arg := range arguments { t, _ := v.visit(arg) diff --git a/vendor/github.com/expr-lang/expr/checker/types.go b/vendor/github.com/expr-lang/expr/checker/types.go index 662139c3752..8c080504965 100644 --- a/vendor/github.com/expr-lang/expr/checker/types.go +++ b/vendor/github.com/expr-lang/expr/checker/types.go @@ -18,7 +18,6 @@ var ( anyType = reflect.TypeOf(new(any)).Elem() timeType = reflect.TypeOf(time.Time{}) durationType = reflect.TypeOf(time.Duration(0)) - functionType = reflect.TypeOf(new(func(...any) (any, error))).Elem() ) func combined(a, b reflect.Type) reflect.Type { diff --git a/vendor/github.com/expr-lang/expr/compiler/compiler.go b/vendor/github.com/expr-lang/expr/compiler/compiler.go index 1f1c00c9297..ac11805e013 100644 --- a/vendor/github.com/expr-lang/expr/compiler/compiler.go +++ b/vendor/github.com/expr-lang/expr/compiler/compiler.go @@ -49,6 +49,7 @@ func Compile(tree *parser.Tree, config *conf.Config) (program *Program, err erro program = NewProgram( tree.Source, + tree.Node, c.locations, c.variables, c.constants, @@ -64,7 +65,7 @@ type compiler struct { config *conf.Config locations []file.Location bytecode []Opcode - variables []any + variables int scopes []scope constants []any constantsIndex map[any]int @@ -137,10 +138,9 @@ func (c *compiler) addConstant(constant any) int { } func (c *compiler) addVariable(name string) int { - c.variables = append(c.variables, nil) - p := len(c.variables) - 1 - c.debugInfo[fmt.Sprintf("var_%d", p)] = name - return p + c.variables++ + c.debugInfo[fmt.Sprintf("var_%d", c.variables-1)] = name + return c.variables - 1 } // emitFunction adds builtin.Function.Func to the program.functions and emits call opcode. 
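
The expr checker and compiler changes above (identifier resolution through ident(), argument checking that still visits arguments on error, and the switch to a variables counter) all sit behind the library's small public API. A minimal sketch of that API follows; Env and Tweet are hypothetical types used purely for illustration:

package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

// Env and Tweet exist only to give the checker something to resolve
// identifiers such as Tweets against.
type Env struct {
	Tweets []Tweet
}

type Tweet struct {
	Len int
}

func main() {
	code := `all(Tweets, {.Len <= 280})`

	// Compile runs the type checker against Env and then the compiler,
	// i.e. the two components modified in the hunks above.
	program, err := expr.Compile(code, expr.Env(Env{}), expr.AsBool())
	if err != nil {
		panic(err)
	}

	out, err := expr.Run(program, Env{Tweets: []Tweet{{Len: 42}, {Len: 279}}})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}
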
diff --git a/vendor/github.com/expr-lang/expr/conf/config.go b/vendor/github.com/expr-lang/expr/conf/config.go index 48d491a96b5..baf5dee051f 100644 --- a/vendor/github.com/expr-lang/expr/conf/config.go +++ b/vendor/github.com/expr-lang/expr/conf/config.go @@ -98,20 +98,14 @@ func (c *Config) Check() { } } } - for fnName, t := range c.Types { - if kind(t.Type) == reflect.Func { - for _, b := range c.Builtins { - if b.Name == fnName { - panic(fmt.Errorf(`cannot override builtin %s(): use expr.DisableBuiltin("%s") to override`, b.Name, b.Name)) - } - } - } +} + +func (c *Config) IsOverridden(name string) bool { + if _, ok := c.Functions[name]; ok { + return true } - for _, f := range c.Functions { - for _, b := range c.Builtins { - if b.Name == f.Name { - panic(fmt.Errorf(`cannot override builtin %s(); use expr.DisableBuiltin("%s") to override`, f.Name, f.Name)) - } - } + if _, ok := c.Types[name]; ok { + return true } + return false } diff --git a/vendor/github.com/expr-lang/expr/expr.go b/vendor/github.com/expr-lang/expr/expr.go index 8569de8fb2f..f32df1d3c0d 100644 --- a/vendor/github.com/expr-lang/expr/expr.go +++ b/vendor/github.com/expr-lang/expr/expr.go @@ -65,6 +65,7 @@ func AsAny() Option { func AsKind(kind reflect.Kind) Option { return func(c *conf.Config) { c.Expect = kind + c.ExpectAny = true } } @@ -72,6 +73,7 @@ func AsKind(kind reflect.Kind) Option { func AsBool() Option { return func(c *conf.Config) { c.Expect = reflect.Bool + c.ExpectAny = true } } @@ -79,6 +81,7 @@ func AsBool() Option { func AsInt() Option { return func(c *conf.Config) { c.Expect = reflect.Int + c.ExpectAny = true } } @@ -86,6 +89,7 @@ func AsInt() Option { func AsInt64() Option { return func(c *conf.Config) { c.Expect = reflect.Int64 + c.ExpectAny = true } } @@ -93,6 +97,17 @@ func AsInt64() Option { func AsFloat64() Option { return func(c *conf.Config) { c.Expect = reflect.Float64 + c.ExpectAny = true + } +} + +// WarnOnAny tells the compiler to warn if expression return any type. +func WarnOnAny() Option { + return func(c *conf.Config) { + if c.Expect == reflect.Invalid { + panic("WarnOnAny() works only with combination with AsInt(), AsBool(), etc. 
options") + } + c.ExpectAny = false } } diff --git a/vendor/github.com/expr-lang/expr/parser/lexer/state.go b/vendor/github.com/expr-lang/expr/parser/lexer/state.go index 9999fd3c526..72f02bf4ef7 100644 --- a/vendor/github.com/expr-lang/expr/parser/lexer/state.go +++ b/vendor/github.com/expr-lang/expr/parser/lexer/state.go @@ -37,11 +37,14 @@ func root(l *lexer) stateFn { case r == '|': l.accept("|") l.emit(Operator) + case r == ':': + l.accept(":") + l.emit(Operator) case strings.ContainsRune("([{", r): l.emit(Bracket) case strings.ContainsRune(")]}", r): l.emit(Bracket) - case strings.ContainsRune(",:;%+-^", r): // single rune operator + case strings.ContainsRune(",;%+-^", r): // single rune operator l.emit(Operator) case strings.ContainsRune("&!=*<>", r): // possible double rune operator l.accept("&=*") diff --git a/vendor/github.com/expr-lang/expr/parser/operator/operator.go b/vendor/github.com/expr-lang/expr/parser/operator/operator.go index 455c06f17ce..8d804c7b3e5 100644 --- a/vendor/github.com/expr-lang/expr/parser/operator/operator.go +++ b/vendor/github.com/expr-lang/expr/parser/operator/operator.go @@ -16,6 +16,10 @@ func Less(a, b string) bool { return Binary[a].Precedence < Binary[b].Precedence } +func IsBoolean(op string) bool { + return op == "and" || op == "or" || op == "&&" || op == "||" +} + var Unary = map[string]Operator{ "not": {50, Left}, "!": {50, Left}, diff --git a/vendor/github.com/expr-lang/expr/parser/parser.go b/vendor/github.com/expr-lang/expr/parser/parser.go index 5f29279bc70..07c1be54748 100644 --- a/vendor/github.com/expr-lang/expr/parser/parser.go +++ b/vendor/github.com/expr-lang/expr/parser/parser.go @@ -15,22 +15,31 @@ import ( "github.com/expr-lang/expr/parser/utils" ) +type arg byte + +const ( + expr arg = 1 << iota + closure +) + +const optional arg = 1 << 7 + var predicates = map[string]struct { - arity int + args []arg }{ - "all": {2}, - "none": {2}, - "any": {2}, - "one": {2}, - "filter": {2}, - "map": {2}, - "count": {2}, - "find": {2}, - "findIndex": {2}, - "findLast": {2}, - "findLastIndex": {2}, - "groupBy": {2}, - "reduce": {3}, + "all": {[]arg{expr, closure}}, + "none": {[]arg{expr, closure}}, + "any": {[]arg{expr, closure}}, + "one": {[]arg{expr, closure}}, + "filter": {[]arg{expr, closure}}, + "map": {[]arg{expr, closure}}, + "count": {[]arg{expr, closure}}, + "find": {[]arg{expr, closure}}, + "findIndex": {[]arg{expr, closure}}, + "findLast": {[]arg{expr, closure}}, + "findLastIndex": {[]arg{expr, closure}}, + "groupBy": {[]arg{expr, closure}}, + "reduce": {[]arg{expr, closure, expr | optional}}, } type parser struct { @@ -143,7 +152,9 @@ func (p *parser) parseExpression(precedence int) Node { p.next() if opToken.Value == "|" { - nodeLeft = p.parsePipe(nodeLeft) + identToken := p.current + p.expect(Identifier) + nodeLeft = p.parseCall(identToken, []Node{nodeLeft}, true) goto next } @@ -275,6 +286,13 @@ func (p *parser) parsePrimary() Node { } } + if token.Is(Operator, "::") { + p.next() + token = p.current + p.expect(Identifier) + return p.parsePostfixExpression(p.parseCall(token, []Node{}, false)) + } + return p.parseSecondary() } @@ -300,7 +318,12 @@ func (p *parser) parseSecondary() Node { node.SetLocation(token.Location) return node default: - node = p.parseCall(token) + if p.current.Is(Bracket, "(") { + node = p.parseCall(token, []Node{}, true) + } else { + node = &IdentifierNode{Value: token.Value} + node.SetLocation(token.Location) + } } case Number: @@ -379,66 +402,86 @@ func (p *parser) toFloatNode(number float64) Node { return 
&FloatNode{Value: number} } -func (p *parser) parseCall(token Token) Node { +func (p *parser) parseCall(token Token, arguments []Node, checkOverrides bool) Node { var node Node - if p.current.Is(Bracket, "(") { - var arguments []Node - if b, ok := predicates[token.Value]; ok { - p.expect(Bracket, "(") + isOverridden := p.config.IsOverridden(token.Value) + isOverridden = isOverridden && checkOverrides - // TODO: Refactor parser to use builtin.Builtins instead of predicates map. + if b, ok := predicates[token.Value]; ok && !isOverridden { + p.expect(Bracket, "(") - if b.arity == 1 { - arguments = make([]Node, 1) - arguments[0] = p.parseExpression(0) - } else if b.arity == 2 { - arguments = make([]Node, 2) - arguments[0] = p.parseExpression(0) - p.expect(Operator, ",") - arguments[1] = p.parseClosure() - } + // In case of the pipe operator, the first argument is the left-hand side + // of the operator, so we do not parse it as an argument inside brackets. + args := b.args[len(arguments):] - if token.Value == "reduce" { - arguments = make([]Node, 2) - arguments[0] = p.parseExpression(0) - p.expect(Operator, ",") - arguments[1] = p.parseClosure() - if p.current.Is(Operator, ",") { - p.next() - arguments = append(arguments, p.parseExpression(0)) + for i, arg := range args { + if arg&optional == optional { + if p.current.Is(Bracket, ")") { + break + } + } else { + if p.current.Is(Bracket, ")") { + p.error("expected at least %d arguments", len(args)) } } - p.expect(Bracket, ")") - - node = &BuiltinNode{ - Name: token.Value, - Arguments: arguments, - } - node.SetLocation(token.Location) - } else if _, ok := builtin.Index[token.Value]; ok && !p.config.Disabled[token.Value] { - node = &BuiltinNode{ - Name: token.Value, - Arguments: p.parseArguments(), + if i > 0 { + p.expect(Operator, ",") } - node.SetLocation(token.Location) - } else { - callee := &IdentifierNode{Value: token.Value} - callee.SetLocation(token.Location) - node = &CallNode{ - Callee: callee, - Arguments: p.parseArguments(), + var node Node + switch { + case arg&expr == expr: + node = p.parseExpression(0) + case arg&closure == closure: + node = p.parseClosure() } - node.SetLocation(token.Location) + arguments = append(arguments, node) + } + + p.expect(Bracket, ")") + + node = &BuiltinNode{ + Name: token.Value, + Arguments: arguments, + } + node.SetLocation(token.Location) + } else if _, ok := builtin.Index[token.Value]; ok && !p.config.Disabled[token.Value] && !isOverridden { + node = &BuiltinNode{ + Name: token.Value, + Arguments: p.parseArguments(arguments), } + node.SetLocation(token.Location) } else { - node = &IdentifierNode{Value: token.Value} + callee := &IdentifierNode{Value: token.Value} + callee.SetLocation(token.Location) + node = &CallNode{ + Callee: callee, + Arguments: p.parseArguments(arguments), + } node.SetLocation(token.Location) } return node } +func (p *parser) parseArguments(arguments []Node) []Node { + // If pipe operator is used, the first argument is the left-hand side + // of the operator, so we do not parse it as an argument inside brackets. 
+ offset := len(arguments) + + p.expect(Bracket, "(") + for !p.current.Is(Bracket, ")") && p.err == nil { + if len(arguments) > offset { + p.expect(Operator, ",") + } + node := p.parseExpression(0) + arguments = append(arguments, node) + } + p.expect(Bracket, ")") + + return arguments +} + func (p *parser) parseClosure() Node { startToken := p.current expectClosingBracket := false @@ -563,9 +606,10 @@ func (p *parser) parsePostfixExpression(node Node) Node { memberNode.SetLocation(propertyToken.Location) if p.current.Is(Bracket, "(") { + memberNode.Method = true node = &CallNode{ Callee: memberNode, - Arguments: p.parseArguments(), + Arguments: p.parseArguments([]Node{}), } node.SetLocation(propertyToken.Location) } else { @@ -631,72 +675,3 @@ func (p *parser) parsePostfixExpression(node Node) Node { } return node } - -func (p *parser) parsePipe(node Node) Node { - identifier := p.current - p.expect(Identifier) - - arguments := []Node{node} - - if b, ok := predicates[identifier.Value]; ok { - p.expect(Bracket, "(") - - // TODO: Refactor parser to use builtin.Builtins instead of predicates map. - - if b.arity == 2 { - arguments = append(arguments, p.parseClosure()) - } - - if identifier.Value == "reduce" { - arguments = append(arguments, p.parseClosure()) - if p.current.Is(Operator, ",") { - p.next() - arguments = append(arguments, p.parseExpression(0)) - } - } - - p.expect(Bracket, ")") - - node = &BuiltinNode{ - Name: identifier.Value, - Arguments: arguments, - } - node.SetLocation(identifier.Location) - } else if _, ok := builtin.Index[identifier.Value]; ok { - arguments = append(arguments, p.parseArguments()...) - - node = &BuiltinNode{ - Name: identifier.Value, - Arguments: arguments, - } - node.SetLocation(identifier.Location) - } else { - callee := &IdentifierNode{Value: identifier.Value} - callee.SetLocation(identifier.Location) - - arguments = append(arguments, p.parseArguments()...) - - node = &CallNode{ - Callee: callee, - Arguments: arguments, - } - node.SetLocation(identifier.Location) - } - - return node -} - -func (p *parser) parseArguments() []Node { - p.expect(Bracket, "(") - nodes := make([]Node, 0) - for !p.current.Is(Bracket, ")") && p.err == nil { - if len(nodes) > 0 { - p.expect(Operator, ",") - } - node := p.parseExpression(0) - nodes = append(nodes, node) - } - p.expect(Bracket, ")") - - return nodes -} diff --git a/vendor/github.com/expr-lang/expr/patcher/with_context.go b/vendor/github.com/expr-lang/expr/patcher/with_context.go index 85f095d5dc1..55b6042614f 100644 --- a/vendor/github.com/expr-lang/expr/patcher/with_context.go +++ b/vendor/github.com/expr-lang/expr/patcher/with_context.go @@ -13,10 +13,12 @@ type WithContext struct { // Visit adds WithContext.Name argument to all functions calls with a context.Context argument. 
func (w WithContext) Visit(node *ast.Node) { - switch (*node).(type) { + switch call := (*node).(type) { case *ast.CallNode: - call := (*node).(*ast.CallNode) fn := call.Callee.Type() + if fn == nil { + return + } if fn.Kind() != reflect.Func { return } diff --git a/vendor/github.com/expr-lang/expr/vm/debug.go b/vendor/github.com/expr-lang/expr/vm/debug.go new file mode 100644 index 00000000000..ab95bf9a02d --- /dev/null +++ b/vendor/github.com/expr-lang/expr/vm/debug.go @@ -0,0 +1,5 @@ +//go:build expr_debug + +package vm + +const debug = true diff --git a/vendor/github.com/expr-lang/expr/vm/debug_off.go b/vendor/github.com/expr-lang/expr/vm/debug_off.go new file mode 100644 index 00000000000..e0f2955a125 --- /dev/null +++ b/vendor/github.com/expr-lang/expr/vm/debug_off.go @@ -0,0 +1,5 @@ +//go:build !expr_debug + +package vm + +const debug = false diff --git a/vendor/github.com/expr-lang/expr/vm/generated.go b/vendor/github.com/expr-lang/expr/vm/func_types[generated].go similarity index 73% rename from vendor/github.com/expr-lang/expr/vm/generated.go rename to vendor/github.com/expr-lang/expr/vm/func_types[generated].go index 1a0b3657189..1375f22f2ad 100644 --- a/vendor/github.com/expr-lang/expr/vm/generated.go +++ b/vendor/github.com/expr-lang/expr/vm/func_types[generated].go @@ -13,14 +13,14 @@ var FuncTypes = []any{ 3: new(func() time.Time), 4: new(func() time.Weekday), 5: new(func() []uint8), - 6: new(func() []any), + 6: new(func() []interface{}), 7: new(func() bool), 8: new(func() uint8), 9: new(func() float64), 10: new(func() int), 11: new(func() int64), - 12: new(func() any), - 13: new(func() map[string]any), + 12: new(func() interface{}), + 13: new(func() map[string]interface{}), 14: new(func() int32), 15: new(func() string), 16: new(func() uint), @@ -29,7 +29,7 @@ var FuncTypes = []any{ 19: new(func(time.Duration) time.Time), 20: new(func(time.Time) time.Duration), 21: new(func(time.Time) bool), - 22: new(func([]any, string) string), + 22: new(func([]interface{}, string) string), 23: new(func([]string, string) string), 24: new(func(bool) bool), 25: new(func(bool) float64), @@ -56,20 +56,20 @@ var FuncTypes = []any{ 46: new(func(string, int32) int), 47: new(func(string, string) bool), 48: new(func(string, string) string), - 49: new(func(any) bool), - 50: new(func(any) float64), - 51: new(func(any) int), - 52: new(func(any) string), - 53: new(func(any) any), - 54: new(func(any) []any), - 55: new(func(any) map[string]any), - 56: new(func([]any) any), - 57: new(func([]any) []any), - 58: new(func([]any) map[string]any), - 59: new(func(any, any) bool), - 60: new(func(any, any) string), - 61: new(func(any, any) any), - 62: new(func(any, any) []any), + 49: new(func(interface{}) bool), + 50: new(func(interface{}) float64), + 51: new(func(interface{}) int), + 52: new(func(interface{}) string), + 53: new(func(interface{}) interface{}), + 54: new(func(interface{}) []interface{}), + 55: new(func(interface{}) map[string]interface{}), + 56: new(func([]interface{}) interface{}), + 57: new(func([]interface{}) []interface{}), + 58: new(func([]interface{}) map[string]interface{}), + 59: new(func(interface{}, interface{}) bool), + 60: new(func(interface{}, interface{}) string), + 61: new(func(interface{}, interface{}) interface{}), + 62: new(func(interface{}, interface{}) []interface{}), } func (vm *VM) call(fn any, kind int) any { @@ -85,7 +85,7 @@ func (vm *VM) call(fn any, kind int) any { case 5: return fn.(func() []uint8)() case 6: - return fn.(func() []any)() + return fn.(func() 
[]interface{})() case 7: return fn.(func() bool)() case 8: @@ -97,9 +97,9 @@ func (vm *VM) call(fn any, kind int) any { case 11: return fn.(func() int64)() case 12: - return fn.(func() any)() + return fn.(func() interface{})() case 13: - return fn.(func() map[string]any)() + return fn.(func() map[string]interface{})() case 14: return fn.(func() int32)() case 15: @@ -122,8 +122,8 @@ func (vm *VM) call(fn any, kind int) any { return fn.(func(time.Time) bool)(arg1) case 22: arg2 := vm.pop().(string) - arg1 := vm.pop().([]any) - return fn.(func([]any, string) string)(arg1, arg2) + arg1 := vm.pop().([]interface{}) + return fn.(func([]interface{}, string) string)(arg1, arg2) case 23: arg2 := vm.pop().(string) arg1 := vm.pop().([]string) @@ -212,50 +212,50 @@ func (vm *VM) call(fn any, kind int) any { return fn.(func(string, string) string)(arg1, arg2) case 49: arg1 := vm.pop() - return fn.(func(any) bool)(arg1) + return fn.(func(interface{}) bool)(arg1) case 50: arg1 := vm.pop() - return fn.(func(any) float64)(arg1) + return fn.(func(interface{}) float64)(arg1) case 51: arg1 := vm.pop() - return fn.(func(any) int)(arg1) + return fn.(func(interface{}) int)(arg1) case 52: arg1 := vm.pop() - return fn.(func(any) string)(arg1) + return fn.(func(interface{}) string)(arg1) case 53: arg1 := vm.pop() - return fn.(func(any) any)(arg1) + return fn.(func(interface{}) interface{})(arg1) case 54: arg1 := vm.pop() - return fn.(func(any) []any)(arg1) + return fn.(func(interface{}) []interface{})(arg1) case 55: arg1 := vm.pop() - return fn.(func(any) map[string]any)(arg1) + return fn.(func(interface{}) map[string]interface{})(arg1) case 56: - arg1 := vm.pop().([]any) - return fn.(func([]any) any)(arg1) + arg1 := vm.pop().([]interface{}) + return fn.(func([]interface{}) interface{})(arg1) case 57: - arg1 := vm.pop().([]any) - return fn.(func([]any) []any)(arg1) + arg1 := vm.pop().([]interface{}) + return fn.(func([]interface{}) []interface{})(arg1) case 58: - arg1 := vm.pop().([]any) - return fn.(func([]any) map[string]any)(arg1) + arg1 := vm.pop().([]interface{}) + return fn.(func([]interface{}) map[string]interface{})(arg1) case 59: arg2 := vm.pop() arg1 := vm.pop() - return fn.(func(any, any) bool)(arg1, arg2) + return fn.(func(interface{}, interface{}) bool)(arg1, arg2) case 60: arg2 := vm.pop() arg1 := vm.pop() - return fn.(func(any, any) string)(arg1, arg2) + return fn.(func(interface{}, interface{}) string)(arg1, arg2) case 61: arg2 := vm.pop() arg1 := vm.pop() - return fn.(func(any, any) any)(arg1, arg2) + return fn.(func(interface{}, interface{}) interface{})(arg1, arg2) case 62: arg2 := vm.pop() arg1 := vm.pop() - return fn.(func(any, any) []any)(arg1, arg2) + return fn.(func(interface{}, interface{}) []interface{})(arg1, arg2) } panic(fmt.Sprintf("unknown function kind (%v)", kind)) diff --git a/vendor/github.com/expr-lang/expr/vm/program.go b/vendor/github.com/expr-lang/expr/vm/program.go index 085b18a7fd5..041cab51d93 100644 --- a/vendor/github.com/expr-lang/expr/vm/program.go +++ b/vendor/github.com/expr-lang/expr/vm/program.go @@ -9,6 +9,7 @@ import ( "strings" "text/tabwriter" + "github.com/expr-lang/expr/ast" "github.com/expr-lang/expr/builtin" "github.com/expr-lang/expr/file" "github.com/expr-lang/expr/vm/runtime" @@ -21,8 +22,9 @@ type Program struct { Constants []any source *file.Source + node ast.Node locations []file.Location - variables []any + variables int functions []Function debugInfo map[string]string } @@ -30,8 +32,9 @@ type Program struct { // NewProgram returns a new Program. 
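The Program changes above keep the compiled AST and expose it via a new Node() accessor next to the existing Source() and Disassemble(). A minimal sketch of reading them back after expr.Compile, assuming this vendored API:

package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	program, err := expr.Compile(`1 + 2 * 3`)
	if err != nil {
		panic(err)
	}

	fmt.Println(program.Node() != nil)   // origin ast.Node, new accessor in this bump
	fmt.Println(program.Source() != nil) // origin file.Source
	fmt.Print(program.Disassemble())     // human-readable opcode listing
}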
It's used by the compiler. func NewProgram( source *file.Source, + node ast.Node, locations []file.Location, - variables []any, + variables int, constants []any, bytecode []Opcode, arguments []int, @@ -40,6 +43,7 @@ func NewProgram( ) *Program { return &Program{ source: source, + node: node, locations: locations, variables: variables, Constants: constants, @@ -50,6 +54,16 @@ func NewProgram( } } +// Source returns origin file.Source. +func (program *Program) Source() *file.Source { + return program.source +} + +// Node returns origin ast.Node. +func (program *Program) Node() ast.Node { + return program.node +} + // Disassemble returns opcodes as a string. func (program *Program) Disassemble() string { var buf bytes.Buffer @@ -346,8 +360,3 @@ func (program *Program) DisassembleWriter(w io.Writer) { } } } - -// Source returns origin file.Source. -func (program *Program) Source() *file.Source { - return program.source -} diff --git a/vendor/github.com/expr-lang/expr/vm/runtime/generated.go b/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go similarity index 99% rename from vendor/github.com/expr-lang/expr/vm/runtime/generated.go rename to vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go index a5b92f47780..720feb45543 100644 --- a/vendor/github.com/expr-lang/expr/vm/runtime/generated.go +++ b/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go @@ -3372,4 +3372,4 @@ func Modulo(a, b interface{}) int { } } panic(fmt.Sprintf("invalid operation: %T %% %T", a, b)) -} \ No newline at end of file +} diff --git a/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go b/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go index 406f8509631..09c58bdf7aa 100644 --- a/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go +++ b/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go @@ -1,6 +1,6 @@ package runtime -//go:generate sh -c "go run ./helpers > ./generated.go" +//go:generate sh -c "go run ./helpers > ./helpers[generated].go" import ( "fmt" diff --git a/vendor/github.com/expr-lang/expr/vm/utils.go b/vendor/github.com/expr-lang/expr/vm/utils.go new file mode 100644 index 00000000000..f9ec81a8c75 --- /dev/null +++ b/vendor/github.com/expr-lang/expr/vm/utils.go @@ -0,0 +1,11 @@ +package vm + +import ( + "reflect" +) + +type Function = func(params ...any) (any, error) + +var MemoryBudget uint = 1e6 + +var errorType = reflect.TypeOf((*error)(nil)).Elem() diff --git a/vendor/github.com/expr-lang/expr/vm/vm.go b/vendor/github.com/expr-lang/expr/vm/vm.go index fa2c7589983..0bde6663426 100644 --- a/vendor/github.com/expr-lang/expr/vm/vm.go +++ b/vendor/github.com/expr-lang/expr/vm/vm.go @@ -1,6 +1,6 @@ package vm -//go:generate sh -c "go run ./func_types > ./generated.go" +//go:generate sh -c "go run ./func_types > ./func_types[generated].go" import ( "fmt" @@ -13,11 +13,6 @@ import ( "github.com/expr-lang/expr/vm/runtime" ) -var MemoryBudget uint = 1e6 -var errorType = reflect.TypeOf((*error)(nil)).Elem() - -type Function = func(params ...any) (any, error) - func Run(program *Program, env any) (any, error) { if program == nil { return nil, fmt.Errorf("program is nil") @@ -27,15 +22,25 @@ func Run(program *Program, env any) (any, error) { return vm.Run(program, env) } +func Debug() *VM { + vm := &VM{ + debug: true, + step: make(chan struct{}, 0), + curr: make(chan int, 0), + } + return vm +} + type VM struct { - stack []any + Stack []any + Scopes []*Scope + Variables []any ip int - scopes []*Scope + memory uint + memoryBudget uint debug bool step chan 
struct{} curr chan int - memory uint - memoryBudget uint } type Scope struct { @@ -47,15 +52,6 @@ type Scope struct { Acc any } -func Debug() *VM { - vm := &VM{ - debug: true, - step: make(chan struct{}, 0), - curr: make(chan int, 0), - } - return vm -} - func (vm *VM) Run(program *Program, env any) (_ any, err error) { defer func() { if r := recover(); r != nil { @@ -74,14 +70,16 @@ func (vm *VM) Run(program *Program, env any) (_ any, err error) { } }() - if vm.stack == nil { - vm.stack = make([]any, 0, 2) + if vm.Stack == nil { + vm.Stack = make([]any, 0, 2) } else { - vm.stack = vm.stack[0:0] + vm.Stack = vm.Stack[0:0] } - - if vm.scopes != nil { - vm.scopes = vm.scopes[0:0] + if vm.Scopes != nil { + vm.Scopes = vm.Scopes[0:0] + } + if len(vm.Variables) < program.variables { + vm.Variables = make([]any, program.variables) } vm.memoryBudget = MemoryBudget @@ -89,7 +87,7 @@ func (vm *VM) Run(program *Program, env any) (_ any, err error) { vm.ip = 0 for vm.ip < len(program.Bytecode) { - if vm.debug { + if debug && vm.debug { <-vm.step } @@ -112,10 +110,10 @@ func (vm *VM) Run(program *Program, env any) (_ any, err error) { vm.pop() case OpStore: - program.variables[arg] = vm.pop() + vm.Variables[arg] = vm.pop() case OpLoadVar: - vm.push(program.variables[arg]) + vm.push(vm.Variables[arg]) case OpLoadConst: vm.push(runtime.Fetch(env, program.Constants[arg])) @@ -204,7 +202,7 @@ func (vm *VM) Run(program *Program, env any) (_ any, err error) { } case OpJumpIfEnd: - scope := vm.Scope() + scope := vm.scope() if scope.Index >= scope.Len { vm.ip += arg } @@ -399,7 +397,7 @@ func (vm *VM) Run(program *Program, env any) (_ any, err error) { case OpValidateArgs: fn := vm.pop().(Function) - mem, err := fn(vm.stack[len(vm.stack)-arg:]...) + mem, err := fn(vm.Stack[len(vm.Stack)-arg:]...) 
if err != nil { panic(err) } @@ -443,49 +441,49 @@ func (vm *VM) Run(program *Program, env any) (_ any, err error) { vm.push(runtime.Deref(a)) case OpIncrementIndex: - vm.Scope().Index++ + vm.scope().Index++ case OpDecrementIndex: - scope := vm.Scope() + scope := vm.scope() scope.Index-- case OpIncrementCount: - scope := vm.Scope() + scope := vm.scope() scope.Count++ case OpGetIndex: - vm.push(vm.Scope().Index) + vm.push(vm.scope().Index) case OpSetIndex: - scope := vm.Scope() + scope := vm.scope() scope.Index = vm.pop().(int) case OpGetCount: - scope := vm.Scope() + scope := vm.scope() vm.push(scope.Count) case OpGetLen: - scope := vm.Scope() + scope := vm.scope() vm.push(scope.Len) case OpGetGroupBy: - vm.push(vm.Scope().GroupBy) + vm.push(vm.scope().GroupBy) case OpGetAcc: - vm.push(vm.Scope().Acc) + vm.push(vm.scope().Acc) case OpSetAcc: - vm.Scope().Acc = vm.pop() + vm.scope().Acc = vm.pop() case OpPointer: - scope := vm.Scope() + scope := vm.scope() vm.push(scope.Array.Index(scope.Index).Interface()) case OpThrow: panic(vm.pop().(error)) case OpGroupBy: - scope := vm.Scope() + scope := vm.scope() if scope.GroupBy == nil { scope.GroupBy = make(map[any][]any) } @@ -496,29 +494,29 @@ func (vm *VM) Run(program *Program, env any) (_ any, err error) { case OpBegin: a := vm.pop() array := reflect.ValueOf(a) - vm.scopes = append(vm.scopes, &Scope{ + vm.Scopes = append(vm.Scopes, &Scope{ Array: array, Len: array.Len(), }) case OpEnd: - vm.scopes = vm.scopes[:len(vm.scopes)-1] + vm.Scopes = vm.Scopes[:len(vm.Scopes)-1] default: panic(fmt.Sprintf("unknown bytecode %#x", op)) } - if vm.debug { + if debug && vm.debug { vm.curr <- vm.ip } } - if vm.debug { + if debug && vm.debug { close(vm.curr) close(vm.step) } - if len(vm.stack) > 0 { + if len(vm.Stack) > 0 { return vm.pop(), nil } @@ -526,16 +524,16 @@ func (vm *VM) Run(program *Program, env any) (_ any, err error) { } func (vm *VM) push(value any) { - vm.stack = append(vm.stack, value) + vm.Stack = append(vm.Stack, value) } func (vm *VM) current() any { - return vm.stack[len(vm.stack)-1] + return vm.Stack[len(vm.Stack)-1] } func (vm *VM) pop() any { - value := vm.stack[len(vm.stack)-1] - vm.stack = vm.stack[:len(vm.stack)-1] + value := vm.Stack[len(vm.Stack)-1] + vm.Stack = vm.Stack[:len(vm.Stack)-1] return value } @@ -546,15 +544,8 @@ func (vm *VM) memGrow(size uint) { } } -func (vm *VM) Stack() []any { - return vm.stack -} - -func (vm *VM) Scope() *Scope { - if len(vm.scopes) > 0 { - return vm.scopes[len(vm.scopes)-1] - } - return nil +func (vm *VM) scope() *Scope { + return vm.Scopes[len(vm.Scopes)-1] } func (vm *VM) Step() { diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index c9fb829dc64..7ec5ac7ea90 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + ## 
[1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4bec27..dc60082d3b3 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. + Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go index ba9dd5eb689..3167b643d45 100644 --- a/vendor/github.com/google/uuid/version7.go +++ b/vendor/github.com/google/uuid/version7.go @@ -44,7 +44,7 @@ func NewV7FromReader(r io.Reader) (UUID, error) { // makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) // uuid[8] already has the right version number (Variant is 10) -// see function NewV7 and NewV7FromReader +// see function NewV7 and NewV7FromReader func makeV7(uuid []byte) { /* 0 1 2 3 @@ -52,7 +52,7 @@ func makeV7(uuid []byte) { +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | unix_ts_ms | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | unix_ts_ms | ver | rand_a | + | unix_ts_ms | ver | rand_a (12 bit seq) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |var| rand_b | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -61,7 +61,7 @@ func makeV7(uuid []byte) { */ _ = uuid[15] // bounds check - t := timeNow().UnixMilli() + t, s := getV7Time() uuid[0] = byte(t >> 40) uuid[1] = byte(t >> 32) @@ -70,6 +70,35 @@ func makeV7(uuid []byte) { uuid[4] = byte(t >> 8) uuid[5] = byte(t) - uuid[6] = 0x70 | (uuid[6] & 0x0F) - // uuid[8] has already has right version + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guarenteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. 
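For the two google/uuid changes pulled in here, the new Max constant and the monotonic Version 7 generator described in the comment above, a small usage sketch against v1.6.0:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Max is new in v1.6.0: all 128 bits set.
	fmt.Println(uuid.Max) // ffffffff-ffff-ffff-ffff-ffffffffffff

	// With the monotonicity fix, V7 IDs from one process are strictly increasing,
	// even when several fall into the same millisecond.
	a, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}
	b, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}
	// The timestamp and 12-bit sequence sit at the front of the canonical form,
	// so string order should follow generation order.
	fmt.Println(a.String() < b.String()) // true
}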
+func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq } diff --git a/vendor/github.com/hashicorp/vault/api/.copywrite.hcl b/vendor/github.com/hashicorp/vault/api/.copywrite.hcl new file mode 100644 index 00000000000..c4b09f33640 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/.copywrite.hcl @@ -0,0 +1,8 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2024 + + header_ignore = [] +} diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go index 1ba9da48eae..52c991b1e2f 100644 --- a/vendor/github.com/hashicorp/vault/api/client.go +++ b/vendor/github.com/hashicorp/vault/api/client.go @@ -82,6 +82,8 @@ const ( const ( EnvVaultAgentAddress = "VAULT_AGENT_ADDR" EnvVaultInsecure = "VAULT_SKIP_VERIFY" + + DefaultAddress = "https://127.0.0.1:8200" ) // WrappingLookupFunc is a function that, given an HTTP verb and a path, @@ -248,7 +250,7 @@ type TLSConfig struct { // If an error is encountered, the Error field on the returned *Config will be populated with the specific error. func DefaultConfig() *Config { config := &Config{ - Address: "https://127.0.0.1:8200", + Address: DefaultAddress, HttpClient: cleanhttp.DefaultPooledClient(), Timeout: time.Second * 60, MinRetryWait: time.Millisecond * 1000, @@ -528,6 +530,7 @@ func (c *Config) ParseAddress(address string) (*url.URL, error) { return nil, err } + previousAddress := c.Address c.Address = address if strings.HasPrefix(address, "unix://") { @@ -550,7 +553,7 @@ func (c *Config) ParseAddress(address string) (*url.URL, error) { } else { return nil, fmt.Errorf("attempting to specify unix:// address with non-transport transport") } - } else if strings.HasPrefix(c.Address, "unix://") { + } else if strings.HasPrefix(previousAddress, "unix://") { // When the address being set does not begin with unix:// but the previous // address in the Config did, change the transport's DialContext back to // use the default configuration that cleanhttp uses. @@ -589,6 +592,7 @@ type Client struct { requestCallbacks []RequestCallback responseCallbacks []ResponseCallback replicationStateStore *replicationStateStore + hcpCookie *http.Cookie } // NewClient returns a new client for the given configuration. @@ -1025,6 +1029,33 @@ func (c *Client) SetToken(v string) { c.token = v } +// HCPCookie returns the HCP cookie being used by this client. It will +// return an empty cookie when no cookie is set. +func (c *Client) HCPCookie() string { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + if c.hcpCookie == nil { + return "" + } + return c.hcpCookie.String() +} + +// SetHCPCookie sets the hcp cookie directly. This won't perform any auth +// verification, it simply sets the token properly for future requests. +func (c *Client) SetHCPCookie(v *http.Cookie) error { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + if err := v.Valid(); err != nil { + return err + } + + c.hcpCookie = v + + return nil +} + // ClearToken deletes the token if it is set or does nothing otherwise. 
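A sketch of the new client surface added above (DefaultAddress, SetHCPCookie, HCPCookie), assuming the vault/api package at this version; the cookie name and value are placeholders, not an HCP contract:

package main

import (
	"fmt"
	"net/http"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	cfg := vault.DefaultConfig() // Address defaults to vault.DefaultAddress
	client, err := vault.NewClient(cfg)
	if err != nil {
		panic(err)
	}

	// Hypothetical cookie: SetHCPCookie only validates and stores it; every
	// subsequent request then carries it via Request.HCPCookie.
	cookie := &http.Cookie{Name: "hcp_access_token", Value: "placeholder"}
	if err := client.SetHCPCookie(cookie); err != nil {
		panic(err)
	}
	fmt.Println(client.HCPCookie())
}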
func (c *Client) ClearToken() { c.modifyLock.Lock() @@ -1299,6 +1330,8 @@ func (c *Client) NewRequest(method, requestPath string) *Request { Params: make(map[string][]string), } + req.HCPCookie = c.hcpCookie + var lookupPath string switch { case strings.HasPrefix(requestPath, "/v1/"): diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go index 927dd168e44..068e9068f38 100644 --- a/vendor/github.com/hashicorp/vault/api/logical.go +++ b/vendor/github.com/hashicorp/vault/api/logical.go @@ -212,6 +212,17 @@ func (c *Logical) WriteWithContext(ctx context.Context, path string, data map[st return c.write(ctx, path, r) } +func (c *Logical) WriteRaw(path string, data []byte) (*Response, error) { + return c.WriteRawWithContext(context.Background(), path, data) +} + +func (c *Logical) WriteRawWithContext(ctx context.Context, path string, data []byte) (*Response, error) { + r := c.c.NewRequest(http.MethodPut, "/v1/"+path) + r.BodyBytes = data + + return c.writeRaw(ctx, r) +} + func (c *Logical) JSONMergePatch(ctx context.Context, path string, data map[string]interface{}) (*Secret, error) { r := c.c.NewRequest(http.MethodPatch, "/v1/"+path) r.Headers.Set("Content-Type", "application/merge-patch+json") @@ -261,6 +272,14 @@ func (c *Logical) write(ctx context.Context, path string, request *Request) (*Se return ParseSecret(resp.Body) } +func (c *Logical) writeRaw(ctx context.Context, request *Request) (*Response, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + resp, err := c.c.rawRequestWithContext(ctx, request) + return resp, err +} + func (c *Logical) Delete(path string) (*Secret, error) { return c.DeleteWithContext(context.Background(), path) } diff --git a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go index a8d23252977..3705c7310a8 100644 --- a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go +++ b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go @@ -51,6 +51,7 @@ type PluginAPIClientMeta struct { flagCAPath string flagClientCert string flagClientKey string + flagServerName string flagInsecure bool } @@ -62,6 +63,7 @@ func (f *PluginAPIClientMeta) FlagSet() *flag.FlagSet { fs.StringVar(&f.flagCAPath, "ca-path", "", "") fs.StringVar(&f.flagClientCert, "client-cert", "", "") fs.StringVar(&f.flagClientKey, "client-key", "", "") + fs.StringVar(&f.flagServerName, "tls-server-name", "", "") fs.BoolVar(&f.flagInsecure, "tls-skip-verify", false, "") return fs @@ -70,13 +72,13 @@ func (f *PluginAPIClientMeta) FlagSet() *flag.FlagSet { // GetTLSConfig will return a TLSConfig based off the values from the flags func (f *PluginAPIClientMeta) GetTLSConfig() *TLSConfig { // If we need custom TLS configuration, then set it - if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure { + if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure || f.flagServerName != "" { t := &TLSConfig{ CACert: f.flagCACert, CAPath: f.flagCAPath, ClientCert: f.flagClientCert, ClientKey: f.flagClientKey, - TLSServerName: "", + TLSServerName: f.flagServerName, Insecure: f.flagInsecure, } diff --git a/vendor/github.com/hashicorp/vault/api/plugin_types.go b/vendor/github.com/hashicorp/vault/api/plugin_types.go index 4c759a2decc..c8f69ae404f 100644 --- a/vendor/github.com/hashicorp/vault/api/plugin_types.go +++ 
b/vendor/github.com/hashicorp/vault/api/plugin_types.go @@ -7,7 +7,10 @@ package api // https://github.com/hashicorp/vault/blob/main/sdk/helper/consts/plugin_types.go // Any changes made should be made to both files at the same time. -import "fmt" +import ( + "encoding/json" + "fmt" +) var PluginTypes = []PluginType{ PluginTypeUnknown, @@ -64,3 +67,34 @@ func ParsePluginType(pluginType string) (PluginType, error) { return PluginTypeUnknown, fmt.Errorf("%q is not a supported plugin type", pluginType) } } + +// UnmarshalJSON implements json.Unmarshaler. It supports unmarshaling either a +// string or a uint32. All new serialization will be as a string, but we +// previously serialized as a uint32 so we need to support that for backwards +// compatibility. +func (p *PluginType) UnmarshalJSON(data []byte) error { + var asString string + err := json.Unmarshal(data, &asString) + if err == nil { + *p, err = ParsePluginType(asString) + return err + } + + var asUint32 uint32 + err = json.Unmarshal(data, &asUint32) + if err != nil { + return err + } + *p = PluginType(asUint32) + switch *p { + case PluginTypeUnknown, PluginTypeCredential, PluginTypeDatabase, PluginTypeSecrets: + return nil + default: + return fmt.Errorf("%d is not a supported plugin type", asUint32) + } +} + +// MarshalJSON implements json.Marshaler. +func (p PluginType) MarshalJSON() ([]byte, error) { + return json.Marshal(p.String()) +} diff --git a/vendor/github.com/hashicorp/vault/api/replication_status.go b/vendor/github.com/hashicorp/vault/api/replication_status.go index 1668daf19c1..9bc02d53935 100644 --- a/vendor/github.com/hashicorp/vault/api/replication_status.go +++ b/vendor/github.com/hashicorp/vault/api/replication_status.go @@ -19,11 +19,13 @@ const ( ) type ClusterInfo struct { - APIAddr string `json:"api_address,omitempty" mapstructure:"api_address"` - ClusterAddress string `json:"cluster_address,omitempty" mapstructure:"cluster_address"` - ConnectionStatus string `json:"connection_status,omitempty" mapstructure:"connection_status"` - LastHeartBeat string `json:"last_heartbeat,omitempty" mapstructure:"last_heartbeat"` - NodeID string `json:"node_id,omitempty" mapstructure:"node_id"` + APIAddr string `json:"api_address,omitempty" mapstructure:"api_address"` + ClusterAddress string `json:"cluster_address,omitempty" mapstructure:"cluster_address"` + ConnectionStatus string `json:"connection_status,omitempty" mapstructure:"connection_status"` + LastHeartBeat string `json:"last_heartbeat,omitempty" mapstructure:"last_heartbeat"` + LastHeartBeatDurationMillis string `json:"last_heartbeat_duration_ms,omitempty" mapstructure:"last_heartbeat_duration_ms"` + ClockSkewMillis string `json:"clock_skew_ms,omitempty" mapstructure:"clock_skew_ms"` + NodeID string `json:"node_id,omitempty" mapstructure:"node_id"` } type ReplicationStatusGenericResponse struct { diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go index ecf783701ad..a2d912c64dc 100644 --- a/vendor/github.com/hashicorp/vault/api/request.go +++ b/vendor/github.com/hashicorp/vault/api/request.go @@ -39,6 +39,9 @@ type Request struct { // EGPs). If set, the override flag will take effect for all policies // evaluated during the request. PolicyOverride bool + + // HCPCookie is used to set a http cookie when client is connected to HCP + HCPCookie *http.Cookie } // SetJSONBody is used to set a request body that is a JSON-encoded value. 
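The PluginType (un)marshalers above accept both the new string form and the legacy uint32 form. A round-trip sketch, assuming the usual plugin type names ("auth", "secret", "database") and the enum order shown in PluginTypes:

package main

import (
	"encoding/json"
	"fmt"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// New serialization: plugin types are written as strings.
	b, err := json.Marshal(vault.PluginTypeSecrets)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // expected: "secret"

	// Both the string form and the legacy numeric form unmarshal.
	var fromString, fromNumber vault.PluginType
	if err := json.Unmarshal([]byte(`"database"`), &fromString); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(`1`), &fromNumber); err != nil {
		panic(err)
	}
	fmt.Println(fromString, fromNumber) // database and the credential (auth) type
}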
@@ -145,5 +148,9 @@ func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) { req.Header.Set("X-Vault-Policy-Override", "true") } + if r.HCPCookie != nil { + req.AddCookie(r.HCPCookie) + } + return req, nil } diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go index 3d15f7a806a..d37bf3cf06b 100644 --- a/vendor/github.com/hashicorp/vault/api/secret.go +++ b/vendor/github.com/hashicorp/vault/api/secret.go @@ -42,6 +42,10 @@ type Secret struct { // cubbyhole of the given token (which has a TTL of the given number of // seconds) WrapInfo *SecretWrapInfo `json:"wrap_info,omitempty"` + + // MountType, if non-empty, provides some information about what kind + // of mount this secret came from. + MountType string `json:"mount_type,omitempty"` } // TokenID returns the standardized token ID (token) for the given secret. diff --git a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go index 6310d42fcf4..d57b7571175 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go +++ b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go @@ -78,3 +78,56 @@ func (c *Sys) CapabilitiesWithContext(ctx context.Context, token, path string) ( return res, nil } + +func (c *Sys) CapabilitiesAccessor(accessor, path string) ([]string, error) { + return c.CapabilitiesAccessorWithContext(context.Background(), accessor, path) +} + +func (c *Sys) CapabilitiesAccessorWithContext(ctx context.Context, accessor, path string) ([]string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]string{ + "accessor": accessor, + "path": path, + } + + reqPath := "/v1/sys/capabilities-accessor" + + r := c.c.NewRequest(http.MethodPost, reqPath) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var res []string + err = mapstructure.Decode(secret.Data[path], &res) + if err != nil { + return nil, err + } + + if len(res) == 0 { + _, ok := secret.Data["capabilities"] + if ok { + err = mapstructure.Decode(secret.Data["capabilities"], &res) + if err != nil { + return nil, err + } + } + } + + return res, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_hastatus.go b/vendor/github.com/hashicorp/vault/api/sys_hastatus.go index 2b2aa7c3e98..58a73b89cbb 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_hastatus.go +++ b/vendor/github.com/hashicorp/vault/api/sys_hastatus.go @@ -35,12 +35,14 @@ type HAStatusResponse struct { } type HANode struct { - Hostname string `json:"hostname"` - APIAddress string `json:"api_address"` - ClusterAddress string `json:"cluster_address"` - ActiveNode bool `json:"active_node"` - LastEcho *time.Time `json:"last_echo"` - Version string `json:"version"` - UpgradeVersion string `json:"upgrade_version,omitempty"` - RedundancyZone string `json:"redundancy_zone,omitempty"` + Hostname string `json:"hostname"` + APIAddress string `json:"api_address"` + ClusterAddress string `json:"cluster_address"` + ActiveNode bool `json:"active_node"` + LastEcho *time.Time `json:"last_echo"` + EchoDurationMillis int64 `json:"echo_duration_ms"` + ClockSkewMillis int64 `json:"clock_skew_ms"` + 
Version string `json:"version"` + UpgradeVersion string `json:"upgrade_version,omitempty"` + RedundancyZone string `json:"redundancy_zone,omitempty"` } diff --git a/vendor/github.com/hashicorp/vault/api/sys_health.go b/vendor/github.com/hashicorp/vault/api/sys_health.go index 13fd8d4d374..0dc849885ff 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_health.go +++ b/vendor/github.com/hashicorp/vault/api/sys_health.go @@ -49,4 +49,7 @@ type HealthResponse struct { ClusterName string `json:"cluster_name,omitempty"` ClusterID string `json:"cluster_id,omitempty"` LastWAL uint64 `json:"last_wal,omitempty"` + Enterprise bool `json:"enterprise"` + EchoDurationMillis int64 `json:"echo_duration_ms"` + ClockSkewMillis int64 `json:"clock_skew_ms"` } diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go index a6c2a0f5412..b9f4f8f6f83 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_mounts.go +++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go @@ -271,6 +271,9 @@ type MountConfigInput struct { AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` PluginVersion string `json:"plugin_version,omitempty"` UserLockoutConfig *UserLockoutConfigInput `json:"user_lockout_config,omitempty"` + DelegatedAuthAccessors []string `json:"delegated_auth_accessors,omitempty" mapstructure:"delegated_auth_accessors"` + IdentityTokenKey string `json:"identity_token_key,omitempty" mapstructure:"identity_token_key"` + // Deprecated: This field will always be blank for newer server responses. PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` } @@ -303,6 +306,9 @@ type MountConfigOutput struct { TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` UserLockoutConfig *UserLockoutConfigOutput `json:"user_lockout_config,omitempty"` + DelegatedAuthAccessors []string `json:"delegated_auth_accessors,omitempty" mapstructure:"delegated_auth_accessors"` + IdentityTokenKey string `json:"identity_token_key,omitempty" mapstructure:"identity_token_key"` + // Deprecated: This field will always be blank for newer server responses. 
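A hedged sketch of how the two new mount-tuning fields above might be sent; the key name, accessor value, and mount path are illustrative, and their server-side semantics are not described in this diff:

package vaultexamples

import (
	vault "github.com/hashicorp/vault/api"
)

// tuneMount tunes an existing mount with the new fields added in this bump.
func tuneMount(client *vault.Client) error {
	return client.Sys().TuneMount("secret/", vault.MountConfigInput{
		IdentityTokenKey:       "identity-key",                    // placeholder key name
		DelegatedAuthAccessors: []string{"auth_userpass_00000000"}, // placeholder accessor
	})
}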
PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` } diff --git a/vendor/github.com/hashicorp/vault/api/sys_plugins.go b/vendor/github.com/hashicorp/vault/api/sys_plugins.go index 68320d2d8a2..9d424d009ec 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_plugins.go +++ b/vendor/github.com/hashicorp/vault/api/sys_plugins.go @@ -36,6 +36,8 @@ type ListPluginsResponse struct { type PluginDetails struct { Type string `json:"type"` Name string `json:"name"` + OCIImage string `json:"oci_image,omitempty" mapstructure:"oci_image"` + Runtime string `json:"runtime,omitempty"` Version string `json:"version,omitempty"` Builtin bool `json:"builtin"` DeprecationStatus string `json:"deprecation_status,omitempty" mapstructure:"deprecation_status"` @@ -144,9 +146,10 @@ type GetPluginResponse struct { Args []string `json:"args"` Builtin bool `json:"builtin"` Command string `json:"command"` - OCIImage string `json:"oci_image"` Name string `json:"name"` SHA256 string `json:"sha256"` + OCIImage string `json:"oci_image,omitempty"` + Runtime string `json:"runtime,omitempty"` DeprecationStatus string `json:"deprecation_status,omitempty"` Version string `json:"version,omitempty"` } @@ -206,6 +209,9 @@ type RegisterPluginInput struct { // OCIImage specifies the container image to run as a plugin. OCIImage string `json:"oci_image,omitempty"` + // Runtime is the Vault plugin runtime to use when running the plugin. + Runtime string `json:"runtime,omitempty"` + // Env specifies a list of key=value pairs to add to the plugin's environment // variables. Env []string `json:"env,omitempty"` @@ -268,6 +274,22 @@ func (c *Sys) DeregisterPluginWithContext(ctx context.Context, i *DeregisterPlug return err } +// RootReloadPluginInput is used as input to the RootReloadPlugin function. +type RootReloadPluginInput struct { + Plugin string `json:"-"` // Plugin name, as registered in the plugin catalog. + Type PluginType `json:"-"` // Plugin type: auth, secret, or database. + Scope string `json:"scope,omitempty"` // Empty to reload on current node, "global" for all nodes. +} + +// RootReloadPlugin reloads plugins, possibly returning reloadID for a global +// scoped reload. This is only available in the root namespace, and reloads +// plugins across all namespaces, whereas ReloadPlugin is available in all +// namespaces but only reloads plugins in use in the request's namespace. +func (c *Sys) RootReloadPlugin(ctx context.Context, i *RootReloadPluginInput) (string, error) { + path := fmt.Sprintf("/v1/sys/plugins/reload/%s/%s", i.Type.String(), i.Plugin) + return c.reloadPluginInternal(ctx, path, i, i.Scope == "global") +} + // ReloadPluginInput is used as input to the ReloadPlugin function. type ReloadPluginInput struct { // Plugin is the name of the plugin to reload, as registered in the plugin catalog @@ -286,15 +308,20 @@ func (c *Sys) ReloadPlugin(i *ReloadPluginInput) (string, error) { } // ReloadPluginWithContext reloads mounted plugin backends, possibly returning -// reloadId for a cluster scoped reload +// reloadID for a cluster scoped reload. It is limited to reloading plugins that +// are in use in the request's namespace. See RootReloadPlugin for an API that +// can reload plugins across all namespaces. 
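Usage sketch of the new root-namespace reload helper defined above; the plugin name is a placeholder from a hypothetical plugin catalog entry:

package vaultexamples

import (
	"context"

	vault "github.com/hashicorp/vault/api"
)

// rootReload asks every node in every namespace to reload a secrets plugin.
func rootReload(ctx context.Context, client *vault.Client) (string, error) {
	return client.Sys().RootReloadPlugin(ctx, &vault.RootReloadPluginInput{
		Plugin: "my-secrets-plugin", // placeholder name
		Type:   vault.PluginTypeSecrets,
		Scope:  "global", // omit for a current-node-only reload
	})
}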
func (c *Sys) ReloadPluginWithContext(ctx context.Context, i *ReloadPluginInput) (string, error) { + return c.reloadPluginInternal(ctx, "/v1/sys/plugins/reload/backend", i, i.Scope == "global") +} + +func (c *Sys) reloadPluginInternal(ctx context.Context, path string, body any, global bool) (string, error) { ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - path := "/v1/sys/plugins/reload/backend" req := c.c.NewRequest(http.MethodPut, path) - if err := req.SetJSONBody(i); err != nil { + if err := req.SetJSONBody(body); err != nil { return "", err } @@ -304,7 +331,7 @@ func (c *Sys) ReloadPluginWithContext(ctx context.Context, i *ReloadPluginInput) } defer resp.Body.Close() - if i.Scope == "global" { + if global { // Get the reload id secret, parseErr := ParseSecret(resp.Body) if parseErr != nil { diff --git a/vendor/github.com/hashicorp/vault/api/sys_plugins_runtimes.go b/vendor/github.com/hashicorp/vault/api/sys_plugins_runtimes.go index c3380a85d1b..b56a899f650 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_plugins_runtimes.go +++ b/vendor/github.com/hashicorp/vault/api/sys_plugins_runtimes.go @@ -64,8 +64,9 @@ type RegisterPluginRuntimeInput struct { OCIRuntime string `json:"oci_runtime,omitempty"` CgroupParent string `json:"cgroup_parent,omitempty"` - CPU int64 `json:"cpu,omitempty"` - Memory int64 `json:"memory,omitempty"` + CPU int64 `json:"cpu_nanos,omitempty"` + Memory int64 `json:"memory_bytes,omitempty"` + Rootless bool `json:"rootless,omitempty"` } // RegisterPluginRuntime registers the plugin with the given information. diff --git a/vendor/github.com/hashicorp/vault/api/sys_seal.go b/vendor/github.com/hashicorp/vault/api/sys_seal.go index 7a9c5621ed1..62002496c36 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_seal.go +++ b/vendor/github.com/hashicorp/vault/api/sys_seal.go @@ -109,6 +109,7 @@ type SealStatusResponse struct { ClusterName string `json:"cluster_name,omitempty"` ClusterID string `json:"cluster_id,omitempty"` RecoverySeal bool `json:"recovery_seal"` + RecoverySealType string `json:"recovery_seal_type,omitempty"` StorageType string `json:"storage_type,omitempty"` HCPLinkStatus string `json:"hcp_link_status,omitempty"` HCPLinkResourceID string `json:"hcp_link_resource_ID,omitempty"` diff --git a/vendor/github.com/hashicorp/vault/api/sys_ui_custom_message.go b/vendor/github.com/hashicorp/vault/api/sys_ui_custom_message.go new file mode 100644 index 00000000000..a129efea763 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_ui_custom_message.go @@ -0,0 +1,281 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "strconv" +) + +const ( + // baseEndpoint is the common base URL path for all endpoints used in this + // module. + baseEndpoint string = "/v1/sys/config/ui/custom-messages" +) + +// ListUICustomMessages calls ListUICustomMessagesWithContext using a background +// Context. +func (c *Sys) ListUICustomMessages(req UICustomMessageListRequest) (*Secret, error) { + return c.ListUICustomMessagesWithContext(context.Background(), req) +} + +// ListUICustomMessagesWithContext sends a request to the List custom messages +// endpoint using the provided Context and UICustomMessageListRequest value as +// the inputs. It returns a pointer to a Secret if a response was obtained from +// the server, including error responses; or an error if a response could not be +// obtained due to an error. 
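Stepping back to the plugin-runtime change a little earlier in this hunk: the resource limits now serialize as cpu_nanos and memory_bytes, and the input gains a rootless flag. A quick check of the wire shape with made-up limits (the runtime and cgroup values are illustrative, and other fields of the struct are assumed to be excluded from the JSON body):

package main

import (
	"encoding/json"
	"fmt"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	in := vault.RegisterPluginRuntimeInput{
		OCIRuntime:   "runsc",     // illustrative
		CgroupParent: "/vault",    // illustrative
		CPU:          500_000_000, // nanoseconds of CPU
		Memory:       256 << 20,   // bytes
		Rootless:     true,
	}
	b, err := json.Marshal(in)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // expect ...,"cpu_nanos":500000000,"memory_bytes":268435456,"rootless":true
}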
+func (c *Sys) ListUICustomMessagesWithContext(ctx context.Context, req UICustomMessageListRequest) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest("LIST", fmt.Sprintf("%s/", baseEndpoint)) + if req.Active != nil { + r.Params.Add("active", strconv.FormatBool(*req.Active)) + } + if req.Authenticated != nil { + r.Params.Add("authenticated", strconv.FormatBool(*req.Authenticated)) + } + if req.Type != nil { + r.Params.Add("type", *req.Type) + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + return secret, nil +} + +// CreateUICustomMessage calls CreateUICustomMessageWithContext using a +// background Context. +func (c *Sys) CreateUICustomMessage(req UICustomMessageRequest) (*Secret, error) { + return c.CreateUICustomMessageWithContext(context.Background(), req) +} + +// CreateUICustomMessageWithContext sends a request to the Create custom +// messages endpoint using the provided Context and UICustomMessageRequest +// values as the inputs. It returns a pointer to a Secret if a response was +// obtained from the server, including error responses; or an error if a +// response could not be obtained due to an error. +func (c *Sys) CreateUICustomMessageWithContext(ctx context.Context, req UICustomMessageRequest) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, baseEndpoint) + if err := r.SetJSONBody(&req); err != nil { + return nil, fmt.Errorf("error encoding request body to json: %w", err) + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, fmt.Errorf("error sending request to server: %w", err) + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, fmt.Errorf("could not parse secret from server response: %w", err) + } + + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + return secret, nil +} + +// ReadUICustomMessage calls ReadUICustomMessageWithContext using a background +// Context. +func (c *Sys) ReadUICustomMessage(id string) (*Secret, error) { + return c.ReadUICustomMessageWithContext(context.Background(), id) +} + +// ReadUICustomMessageWithContext sends a request to the Read custom message +// endpoint using the provided Context and id values. It returns a pointer to a +// Secret if a response was obtained from the server, including error responses; +// or an error if a response could not be obtained due to an error. 
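A hedged example of creating a custom UI message with the helpers above; the "banner" type string, the timestamps, and the link target are assumptions based only on the request shape, not values taken from this diff:

package vaultexamples

import (
	vault "github.com/hashicorp/vault/api"
)

// createBanner posts an unauthenticated banner-style message.
func createBanner(client *vault.Client) (*vault.Secret, error) {
	req := vault.UICustomMessageRequest{
		Title:         "Scheduled maintenance",
		Message:       "Vault will be briefly unavailable on Saturday.",
		Authenticated: false,
		Type:          "banner", // assumed type value
		StartTime:     "2024-03-01T00:00:00Z",
		EndTime:       "2024-03-02T00:00:00Z",
	}
	req.WithLink("Status page", "https://status.example.com") // placeholder link
	return client.Sys().CreateUICustomMessage(req)
}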
+func (c *Sys) ReadUICustomMessageWithContext(ctx context.Context, id string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", baseEndpoint, id)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, fmt.Errorf("error sending request to server: %w", err) + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, fmt.Errorf("could not parse secret from server response: %w", err) + } + + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + return secret, nil +} + +// UpdateUICustomMessage calls UpdateUICustomMessageWithContext using a +// background Context. +func (c *Sys) UpdateUICustomMessage(id string, req UICustomMessageRequest) error { + return c.UpdateUICustomMessageWithContext(context.Background(), id, req) +} + +// UpdateUICustomMessageWithContext sends a request to the Update custom message +// endpoint using the provided Context, id, and UICustomMessageRequest values. +// It returns a pointer to a Secret if a response was obtained from the server, +// including error responses; or an error if a response could not be obtained +// due to an error. +func (c *Sys) UpdateUICustomMessageWithContext(ctx context.Context, id string, req UICustomMessageRequest) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("%s/%s", baseEndpoint, id)) + if err := r.SetJSONBody(&req); err != nil { + return fmt.Errorf("error encoding request body to json: %w", err) + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return fmt.Errorf("error sending request to server: %w", err) + } + defer resp.Body.Close() + + return nil +} + +// DeleteUICustomMessage calls DeleteUICustomMessageWithContext using a +// background Context. +func (c *Sys) DeleteUICustomMessage(id string) error { + return c.DeletePolicyWithContext(context.Background(), id) +} + +// DeleteUICustomMessageWithContext sends a request to the Delete custom message +// endpoint using the provided Context and id values. It returns a pointer to a +// Secret if a response was obtained from the server, including error responses; +// or an error if a response could not be obtained due to an error. +func (c *Sys) DeleteUICustomMessageWithContext(ctx context.Context, id string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("%s/%s", baseEndpoint, id)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return fmt.Errorf("error sending request to server: %w", err) + } + defer resp.Body.Close() + + return nil +} + +// UICustomMessageListRequest is a struct used to contain inputs for the List +// custom messages request. Each field is optional, so their types are pointers. +// The With... methods can be used to easily set the fields with pointers to +// values. +type UICustomMessageListRequest struct { + Authenticated *bool + Type *string + Active *bool +} + +// WithAuthenticated sets the Authenticated field to a pointer referencing the +// provided bool value. +func (r *UICustomMessageListRequest) WithAuthenticated(value bool) *UICustomMessageListRequest { + r.Authenticated = &value + + return r +} + +// WithType sets the Type field to a pointer referencing the provided string +// value. 
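Listing works through the builder-style filters defined around here; a short sketch follows ("banner" is again an assumed type value). One observation on the hunk above: DeleteUICustomMessage appears to delegate to DeletePolicyWithContext rather than DeleteUICustomMessageWithContext, which looks unintended in the vendored code.

package vaultexamples

import (
	vault "github.com/hashicorp/vault/api"
)

// listActiveBanners lists active, unauthenticated messages of a given type.
func listActiveBanners(client *vault.Client) (*vault.Secret, error) {
	req := new(vault.UICustomMessageListRequest)
	req.WithActive(true).WithAuthenticated(false).WithType("banner")
	return client.Sys().ListUICustomMessages(*req)
}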
+func (r *UICustomMessageListRequest) WithType(value string) *UICustomMessageListRequest { + r.Type = &value + + return r +} + +// WithActive sets the Active field to a pointer referencing the provided bool +// value. +func (r *UICustomMessageListRequest) WithActive(value bool) *UICustomMessageListRequest { + r.Active = &value + + return r +} + +// UICustomMessageRequest is a struct containing the properties of a custom +// message. The Link field can be set using the WithLink method. +type UICustomMessageRequest struct { + Title string `json:"title"` + Message string `json:"message"` + Authenticated bool `json:"authenticated"` + Type string `json:"type"` + StartTime string `json:"start_time"` + EndTime string `json:"end_time,omitempty"` + Link *uiCustomMessageLink `json:"link,omitempty"` + Options map[string]any `json:"options,omitempty"` +} + +// WithLink sets the Link field to the address of a new uiCustomMessageLink +// struct constructed from the provided title and href values. +func (r *UICustomMessageRequest) WithLink(title, href string) *UICustomMessageRequest { + r.Link = &uiCustomMessageLink{ + Title: title, + Href: href, + } + + return r +} + +// uiCustomMessageLink is a utility struct used to represent a link associated +// with a custom message. +type uiCustomMessageLink struct { + Title string + Href string +} + +// MarshalJSON encodes the state of the receiver uiCustomMessageLink as JSON and +// returns those encoded bytes or an error. +func (l uiCustomMessageLink) MarshalJSON() ([]byte, error) { + m := make(map[string]string) + + m[l.Title] = l.Href + + return json.Marshal(m) +} + +// UnmarshalJSON updates the state of the receiver uiCustomMessageLink from the +// provided JSON encoded bytes. It returns an error if there was a failure. +func (l *uiCustomMessageLink) UnmarshalJSON(b []byte) error { + m := make(map[string]string) + + if err := json.Unmarshal(b, &m); err != nil { + return err + } + + for k, v := range m { + l.Title = k + l.Href = v + break + } + + return nil +} diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index fece58b1110..9a14b81517e 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,14 @@ +## 1.31.1 + +### Fixes +- Inverted arguments order of FailureMessage of BeComparableToMatcher [e0dd999] +- Update test in case keeping msg is desired [ad1a367] + +### Maintenance +- Show how to import the format sub package [24e958d] +- tidy up go.sum [26661b8] +- bump dependencies [bde8f7a] + ## 1.31.0 ### Features diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 4f7ab2791b4..5b46a165815 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.31.0" +const GOMEGA_VERSION = "1.31.1" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). 
diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 8ab4bb91949..4e3897858c7 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -41,9 +41,9 @@ func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, m } func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) { - return cmp.Diff(matcher.Expected, actual, matcher.Options) + return fmt.Sprint("Expected object to be comparable, diff: ", cmp.Diff(actual, matcher.Expected, matcher.Options...)) } func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to equal", matcher.Expected) + return format.Message(actual, "not to be comparable to", matcher.Expected) } diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go index 52be9f964f6..4858a08bfe3 100644 --- a/vendor/go.etcd.io/etcd/api/v3/version/version.go +++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. MinClusterVersion = "3.0.0" - Version = "3.5.11" + Version = "3.5.12" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go index f4492009d6c..b314e068fea 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go @@ -25,18 +25,24 @@ import ( ) func PurgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { - return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil) + return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil, true) } func PurgeFileWithDoneNotify(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) { doneC := make(chan struct{}) - errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC) + errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC, true) + return doneC, errC +} + +func PurgeFileWithoutFlock(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) { + doneC := make(chan struct{}) + errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC, false) return doneC, errC } // purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil. // if donec is non-nil, the function closes it to notify its exit. 
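Usage sketch of the new lock-free purge helper introduced above; the directory, suffix, retention count, and interval are illustrative values:

package main

import (
	"time"

	"go.etcd.io/etcd/client/pkg/v3/fileutil"
	"go.uber.org/zap"
)

func main() {
	lg := zap.NewExample()
	stop := make(chan struct{})

	// Keep at most 5 files with the "snap" suffix under /var/lib/etcd,
	// checking every 30s, without flocking files before removal.
	donec, errc := fileutil.PurgeFileWithoutFlock(lg, "/var/lib/etcd", "snap", 5, 30*time.Second, stop)

	// ... later, shut the purger down and wait for it to exit.
	close(stop)
	select {
	case err := <-errc:
		lg.Warn("purge error", zap.Error(err))
	case <-donec:
	}
}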
-func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}) <-chan error { +func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}, flock bool) <-chan error { if lg == nil { lg = zap.NewNop() } @@ -67,20 +73,25 @@ func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval fnames = newfnames for len(newfnames) > int(max) { f := filepath.Join(dirname, newfnames[0]) - l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode) - if err != nil { - lg.Warn("failed to lock file", zap.String("path", f), zap.Error(err)) - break + var l *LockedFile + if flock { + l, err = TryLockFile(f, os.O_WRONLY, PrivateFileMode) + if err != nil { + lg.Warn("failed to lock file", zap.String("path", f), zap.Error(err)) + break + } } if err = os.Remove(f); err != nil { lg.Error("failed to remove file", zap.String("path", f), zap.Error(err)) errC <- err return } - if err = l.Close(); err != nil { - lg.Error("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err)) - errC <- err - return + if flock { + if err = l.Close(); err != nil { + lg.Error("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err)) + errC <- err + return + } } lg.Info("purged", zap.String("path", f)) newfnames = newfnames[1:] diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index e41e6df6186..bbe5d658585 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -43,7 +43,7 @@ type serverHandler struct { *config } -// NewServerHandler creates a stats.Handler for gRPC server. +// NewServerHandler creates a stats.Handler for a gRPC server. func NewServerHandler(opts ...Option) stats.Handler { h := &serverHandler{ config: newConfig(opts, "server"), @@ -54,9 +54,6 @@ func NewServerHandler(opts ...Option) stats.Handler { // TagConn can attach some information to the given context. func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { - span := trace.SpanFromContext(ctx) - attrs := peerAttr(peerFromCtx(ctx)) - span.SetAttributes(attrs...) return ctx } @@ -85,14 +82,15 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont // HandleRPC processes the RPC stats. func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - h.handleRPC(ctx, rs) + isServer := true + h.handleRPC(ctx, rs, isServer) } type clientHandler struct { *config } -// NewClientHandler creates a stats.Handler for gRPC client. +// NewClientHandler creates a stats.Handler for a gRPC client. func NewClientHandler(opts ...Option) stats.Handler { h := &clientHandler{ config: newConfig(opts, "client"), @@ -121,14 +119,12 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont // HandleRPC processes the RPC stats. func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - h.handleRPC(ctx, rs) + isServer := false + h.handleRPC(ctx, rs, isServer) } // TagConn can attach some information to the given context. 
-func (h *clientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { - span := trace.SpanFromContext(ctx) - attrs := peerAttr(cti.RemoteAddr.String()) - span.SetAttributes(attrs...) +func (h *clientHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { return ctx } @@ -137,20 +133,23 @@ func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) { // no-op } -func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats) { +func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool) { // nolint: revive // isServer is not a control flag. span := trace.SpanFromContext(ctx) - gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext) + var metricAttrs []attribute.KeyValue var messageId int64 - metricAttrs := make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) - metricAttrs = append(metricAttrs, gctx.metricAttrs...) - wctx := withoutCancel(ctx) + + gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext) + if gctx != nil { + metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) + metricAttrs = append(metricAttrs, gctx.metricAttrs...) + } switch rs := rs.(type) { case *stats.Begin: case *stats.InPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.messagesReceived, 1) - c.rpcRequestSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) } if c.ReceivedEvent { @@ -166,7 +165,7 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats) { case *stats.OutPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.messagesSent, 1) - c.rpcResponseSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) } if c.SentEvent { @@ -185,7 +184,12 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats) { if rs.Error != nil { s, _ := status.FromError(rs.Error) - span.SetStatus(codes.Error, s.Message()) + if isServer { + statusCode, msg := serverStatus(s) + span.SetStatus(statusCode, msg) + } else { + span.SetStatus(codes.Error, s.Message()) + } rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(s.Code())) } else { rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(grpc_codes.OK)) @@ -198,41 +202,12 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats) { // Use floating point division here for higher precision (instead of Millisecond method). 
elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond) - c.rpcDuration.Record(wctx, elapsedTime, metric.WithAttributes(metricAttrs...)) - c.rpcRequestsPerRPC.Record(wctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...)) - c.rpcResponsesPerRPC.Record(wctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...)) + c.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + if gctx != nil { + c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...)) + c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...)) + } default: return } } - -func withoutCancel(parent context.Context) context.Context { - if parent == nil { - panic("cannot create context from nil parent") - } - return withoutCancelCtx{parent} -} - -type withoutCancelCtx struct { - c context.Context -} - -func (withoutCancelCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (withoutCancelCtx) Done() <-chan struct{} { - return nil -} - -func (withoutCancelCtx) Err() error { - return nil -} - -func (w withoutCancelCtx) Value(key any) any { - return w.c.Value(key) -} - -func (w withoutCancelCtx) String() string { - return "withoutCancel" -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index f47c8a67514..ae1f8ea5e70 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -16,7 +16,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. 
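The reworked handleRPC above now receives an isServer flag, tolerates a missing gRPCContext, records metrics on the live request context instead of the removed withoutCancel wrapper, and maps server-side errors through serverStatus. A minimal wiring sketch that exercises both paths; the package and function names below are illustrative and not part of this diff:

package otelgrpcexample

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
)

// dialAndServe wires the client handler (the isServer=false path) into a
// connection and the server handler (the isServer=true path) into a server.
func dialAndServe(target string) (*grpc.ClientConn, *grpc.Server, error) {
	conn, err := grpc.Dial(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
	)
	if err != nil {
		return nil, nil, err
	}
	srv := grpc.NewServer(grpc.StatsHandler(otelgrpc.NewServerHandler()))
	return conn, srv, nil
}
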
func Version() string { - return "0.46.1" + return "0.47.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 9a8260059d9..af84f0e4bb5 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -26,7 +26,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" "go.opentelemetry.io/otel/trace" ) @@ -43,10 +43,12 @@ type middleware struct { writeEvent bool filters []Filter spanNameFormatter func(string, *http.Request) string - counters map[string]metric.Int64Counter - valueRecorders map[string]metric.Float64Histogram publicEndpoint bool publicEndpointFn func(*http.Request) bool + + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + serverLatencyMeasure metric.Float64Histogram } func defaultHandlerFormatter(operation string, _ *http.Request) string { @@ -104,33 +106,27 @@ func handleErr(err error) { } func (h *middleware) createMeasures() { - h.counters = make(map[string]metric.Int64Counter) - h.valueRecorders = make(map[string]metric.Float64Histogram) - - requestBytesCounter, err := h.meter.Int64Counter( + var err error + h.requestBytesCounter, err = h.meter.Int64Counter( RequestContentLength, metric.WithUnit("By"), metric.WithDescription("Measures the size of HTTP request content length (uncompressed)"), ) handleErr(err) - responseBytesCounter, err := h.meter.Int64Counter( + h.responseBytesCounter, err = h.meter.Int64Counter( ResponseContentLength, metric.WithUnit("By"), metric.WithDescription("Measures the size of HTTP response content length (uncompressed)"), ) handleErr(err) - serverLatencyMeasure, err := h.meter.Float64Histogram( + h.serverLatencyMeasure, err = h.meter.Float64Histogram( ServerLatency, metric.WithUnit("ms"), metric.WithDescription("Measures the duration of HTTP request handling"), ) handleErr(err) - - h.counters[RequestContentLength] = requestBytesCounter - h.counters[ResponseContentLength] = responseBytesCounter - h.valueRecorders[ServerLatency] = serverLatencyMeasure } // serveHTTP sets up tracing and calls the given next http.Handler with the span @@ -236,13 +232,13 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) } o := metric.WithAttributes(attributes...) - h.counters[RequestContentLength].Add(ctx, bw.read, o) - h.counters[ResponseContentLength].Add(ctx, rww.written, o) + h.requestBytesCounter.Add(ctx, bw.read, o) + h.responseBytesCounter.Add(ctx, rww.written, o) // Use floating point division here for higher precision (instead of Millisecond method). 
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, o) + h.serverLatencyMeasure.Record(ctx, elapsedTime, o) } func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go index d3dede9ebbd..794d4c26a45 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) // HTTPClientResponse returns trace attributes for an HTTP response received by a @@ -149,9 +149,7 @@ func HTTPResponseHeader(h http.Header) []attribute.KeyValue { type httpConv struct { NetConv *netConv - EnduserIDKey attribute.Key HTTPClientIPKey attribute.Key - HTTPFlavorKey attribute.Key HTTPMethodKey attribute.Key HTTPRequestContentLengthKey attribute.Key HTTPResponseContentLengthKey attribute.Key @@ -161,15 +159,13 @@ type httpConv struct { HTTPStatusCodeKey attribute.Key HTTPTargetKey attribute.Key HTTPURLKey attribute.Key - HTTPUserAgentKey attribute.Key + UserAgentOriginalKey attribute.Key } var hc = &httpConv{ NetConv: nc, - EnduserIDKey: semconv.EnduserIDKey, HTTPClientIPKey: semconv.HTTPClientIPKey, - HTTPFlavorKey: semconv.HTTPFlavorKey, HTTPMethodKey: semconv.HTTPMethodKey, HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, @@ -179,7 +175,7 @@ var hc = &httpConv{ HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, HTTPTargetKey: semconv.HTTPTargetKey, HTTPURLKey: semconv.HTTPURLKey, - HTTPUserAgentKey: semconv.HTTPUserAgentKey, + UserAgentOriginalKey: semconv.UserAgentOriginalKey, } // ClientResponse returns attributes for an HTTP response received by a client @@ -193,6 +189,10 @@ var hc = &httpConv{ // // append(ClientResponse(resp), ClientRequest(resp.Request)...) func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.status_code int + http.response_content_length int + */ var n int if resp.StatusCode > 0 { n++ @@ -212,11 +212,31 @@ func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { } // ClientRequest returns attributes for an HTTP request made by a client. The -// following attributes are always returned: "http.url", "http.flavor", -// "http.method", "net.peer.name". The following attributes are returned if the -// related values are defined in req: "net.peer.port", "http.user_agent", -// "http.request_content_length", "enduser.id". +// following attributes are always returned: "http.url", "http.method", +// "net.peer.name". The following attributes are returned if the related values +// are defined in req: "net.peer.port", "http.user_agent", +// "http.request_content_length", "user_agent.original". 
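Per the attribute swap above, ClientRequest now reports user_agent.original instead of http.user_agent and no longer emits http.flavor or enduser.id. A minimal sketch of the public otelhttp wiring that reaches this code path; the package and function names below are illustrative and not part of this diff:

package otelhttpexample

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// newTracedClient wraps the default transport so outgoing requests are traced
// with the semconv v1.20.0 client request attributes described above.
func newTracedClient() *http.Client {
	return &http.Client{
		Transport: otelhttp.NewTransport(http.DefaultTransport),
	}
}
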
func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + user_agent.original string + http.url string + net.peer.name string + net.peer.port int + http.request_content_length int + */ + + /* The following semantic conventions are not returned: + http.status_code This requires the response. See ClientResponse. + http.response_content_length This requires the response. See ClientResponse. + net.sock.family This requires the socket used. + net.sock.peer.addr This requires the socket used. + net.sock.peer.name This requires the socket used. + net.sock.peer.port This requires the socket used. + http.resend_count This is something outside of a single request. + net.protocol.name The value is the Request is ignored, and the go client will always use "http". + net.protocol.version The value in the Request is ignored, and the go client will always use 1.1 or 2.0. + */ n := 3 // URL, peer name, proto, and method. var h string if req.URL != nil { @@ -234,14 +254,10 @@ func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { if req.ContentLength > 0 { n++ } - userID, _, hasUserID := req.BasicAuth() - if hasUserID { - n++ - } + attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.method(req.Method)) - attrs = append(attrs, c.flavor(req.Proto)) var u string if req.URL != nil { @@ -260,17 +276,13 @@ func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { } if useragent != "" { - attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) + attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) } if l := req.ContentLength; l > 0 { attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) } - if hasUserID { - attrs = append(attrs, c.EnduserIDKey.String(userID)) - } - return attrs } @@ -291,18 +303,35 @@ func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { // The req Host will be used to determine the server instead. // // The following attributes are always returned: "http.method", "http.scheme", -// "http.flavor", "http.target", "net.host.name". The following attributes are -// returned if they related values are defined in req: "net.host.port", -// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", -// "http.client_ip". +// "http.target", "net.host.name". The following attributes are returned if they +// related values are defined in req: "net.host.port", "net.sock.peer.addr", +// "net.sock.peer.port", "user_agent.original", "http.client_ip", +// "net.protocol.name", "net.protocol.version". func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { - // TODO: This currently does not add the specification required - // `http.target` attribute. It has too high of a cardinality to safely be - // added. An alternate should be added, or this comment removed, when it is - // addressed by the specification. If it is ultimately decided to continue - // not including the attribute, the HTTPTargetKey field of the httpConv - // should be removed as well. - + /* The following semantic conventions are returned if present: + http.method string + http.scheme string + net.host.name string + net.host.port int + net.sock.peer.addr string + net.sock.peer.port int + user_agent.original string + http.client_ip string + net.protocol.name string Note: not set if the value is "http". + net.protocol.version string + http.target string Note: doesn't include the query parameter. 
+ */ + + /* The following semantic conventions are not returned: + http.status_code This requires the response. + http.request_content_length This requires the len() of body, which can mutate it. + http.response_content_length This requires the response. + http.route This is not available. + net.sock.peer.name This would require a DNS lookup. + net.sock.host.addr The request doesn't have access to the underlying socket. + net.sock.host.port The request doesn't have access to the underlying socket. + + */ n := 4 // Method, scheme, proto, and host name. var host string var p int @@ -330,19 +359,31 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K if useragent != "" { n++ } - userID, _, hasUserID := req.BasicAuth() - if hasUserID { - n++ - } + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) if clientIP != "" { n++ } + + var target string + if req.URL != nil { + target = req.URL.Path + if target != "" { + n++ + } + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + n++ + } + if protoVersion != "" { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.method(req.Method)) attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.flavor(req.Proto)) attrs = append(attrs, c.NetConv.HostName(host)) if hostPort > 0 { @@ -359,17 +400,24 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K } if useragent != "" { - attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) - } - - if hasUserID { - attrs = append(attrs, c.EnduserIDKey.String(userID)) + attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) } if clientIP != "" { attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) } + if target != "" { + attrs = append(attrs, c.HTTPTargetKey.String(target)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion)) + } + return attrs } @@ -394,14 +442,18 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K // "http.flavor", "net.host.name". The following attributes are // returned if they related values are defined in req: "net.host.port". func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { - // TODO: This currently does not add the specification required - // `http.target` attribute. It has too high of a cardinality to safely be - // added. An alternate should be added, or this comment removed, when it is - // addressed by the specification. If it is ultimately decided to continue - // not including the attribute, the HTTPTargetKey field of the httpConv - // should be removed as well. - - n := 4 // Method, scheme, proto, and host name. + /* The following semantic conventions are returned if present: + http.scheme string + http.route string + http.method string + http.status_code int + net.host.name string + net.host.port int + net.protocol.name string Note: not set if the value is "http". + net.protocol.version string + */ + + n := 3 // Method, scheme, and host name. 
var host string var p int if server == "" { @@ -417,16 +469,29 @@ func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attr if hostPort > 0 { n++ } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.methodMetric(req.Method)) attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.flavor(req.Proto)) attrs = append(attrs, c.NetConv.HostName(host)) if hostPort > 0 { attrs = append(attrs, c.NetConv.HostPort(hostPort)) } + if protoName != "" { + attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion)) + } return attrs } @@ -455,21 +520,6 @@ func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive return c.HTTPSchemeHTTP } -func (c *httpConv) flavor(proto string) attribute.KeyValue { - switch proto { - case "HTTP/1.0": - return c.HTTPFlavorKey.String("1.0") - case "HTTP/1.1": - return c.HTTPFlavorKey.String("1.1") - case "HTTP/2": - return c.HTTPFlavorKey.String("2.0") - case "HTTP/3": - return c.HTTPFlavorKey.String("3.0") - default: - return c.HTTPFlavorKey.String(proto) - } -} - func serverClientIP(xForwardedFor string) string { if idx := strings.Index(xForwardedFor, ","); idx >= 0 { xForwardedFor = xForwardedFor[:idx] diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index bde8893437d..cb4cb9355dd 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -22,7 +22,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) // NetTransport returns a trace attribute describing the transport protocol of the @@ -57,6 +57,8 @@ type netConv struct { NetHostPortKey attribute.Key NetPeerNameKey attribute.Key NetPeerPortKey attribute.Key + NetProtocolName attribute.Key + NetProtocolVersion attribute.Key NetSockFamilyKey attribute.Key NetSockPeerAddrKey attribute.Key NetSockPeerPortKey attribute.Key @@ -73,6 +75,8 @@ var nc = &netConv{ NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, + NetProtocolName: semconv.NetProtocolNameKey, + NetProtocolVersion: semconv.NetProtocolVersionKey, NetSockFamilyKey: semconv.NetSockFamilyKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, @@ -366,3 +370,9 @@ func splitHostPort(hostport string) (host string, port int) { } return host, int(p) } + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index bd41c180421..9a4a02143d5 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -16,7 +16,7 @@ package 
otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.46.1" + return "0.47.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go new file mode 100644 index 00000000000..67d1d4c44d7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go @@ -0,0 +1,1209 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// Describes HTTP attributes. +const ( + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. It represents the hTTP request method. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. It represents the [HTTP + // response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: ConditionallyRequired (If and only if one was + // received/sent.) + // Stability: stable + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. It represents the hTTP request method. +func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. It represents the [HTTP response +// status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTP Server spans attributes +const ( + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. It represents the URI scheme identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route (path template in + // the format used by the respective server framework). 
See note below + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if it's available) + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it. + // SHOULD include the [application + // root](/specification/trace/semantic_conventions/http.md#http-server-definitions) + // if there is one. + HTTPRouteKey = attribute.Key("http.route") +) + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. It represents the URI scheme identifying the used +// protocol. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route (path template in the +// format used by the respective server framework). See note below +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the name identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'click', 'exception' + EventNameKey = attribute.Key("event.name") + + // EventDomainKey is the attribute Key conforming to the "event.domain" + // semantic conventions. It represents the domain identifies the business + // context for the events. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: Events across different domains may have same `event.name`, yet be + // unrelated events. + EventDomainKey = attribute.Key("event.domain") +) + +var ( + // Events from browser apps + EventDomainBrowser = EventDomainKey.String("browser") + // Events from mobile apps + EventDomainDevice = EventDomainKey.String("device") + // Events from Kubernetes + EventDomainK8S = EventDomainKey.String("k8s") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the name identifies the event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the transport protocol used. See + // note below. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + + // NetProtocolNameKey is the attribute Key conforming to the + // "net.protocol.name" semantic conventions. It represents the application + // layer protocol used. The value SHOULD be normalized to lowercase. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + NetProtocolNameKey = attribute.Key("net.protocol.name") + + // NetProtocolVersionKey is the attribute Key conforming to the + // "net.protocol.version" semantic conventions. It represents the version + // of the application layer protocol used. See note below. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.protocol.version` refers to the version of the protocol used + // and might be different from the protocol client's version. If the HTTP + // client used has a version of `0.27.2`, but sends HTTP version `1.1`, + // this attribute should be set to `1.1`. + NetProtocolVersionKey = attribute.Key("net.protocol.version") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the remote + // socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. It represents the remote + // socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, + // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the remote + // socket peer port. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If defined for the address + // family and if different than `net.host.port` and if `net.sock.host.addr` + // is set. In other cases, it is still recommended to set this.) + // Stability: stable + // Examples: 35555 + NetSockHostPortKey = attribute.Key("net.sock.host.port") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe. See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +// NetProtocolName returns an attribute KeyValue conforming to the +// "net.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetProtocolName(val string) attribute.KeyValue { + return NetProtocolNameKey.String(val) +} + +// NetProtocolVersion returns an attribute KeyValue conforming to the +// "net.protocol.version" semantic conventions. It represents the version of +// the application layer protocol used. See note below. +func NetProtocolVersion(val string) attribute.KeyValue { + return NetProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. 
+func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. It represents the local socket +// port number. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetHostConnectionTypeKey is the attribute Key conforming to the + // "net.host.connection.type" semantic conventions. It represents the + // internet connection type currently being used by the host. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'wifi' + NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") + + // NetHostConnectionSubtypeKey is the attribute Key conforming to the + // "net.host.connection.subtype" semantic conventions. It represents the + // this describes more details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'LTE' + NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") + + // NetHostCarrierNameKey is the attribute Key conforming to the + // "net.host.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'sprint' + NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") + + // NetHostCarrierMccKey is the attribute Key conforming to the + // "net.host.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '310' + NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") + + // NetHostCarrierMncKey is the attribute Key conforming to the + // "net.host.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + + // NetHostCarrierIccKey is the attribute Key conforming to the + // "net.host.carrier.icc" semantic conventions. It represents the ISO + // 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. 
+func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. +func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. It represents the + // size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. It represents the + // size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. 
It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. 
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. 
+func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + + // MessagingSourceTemporaryKey is the attribute Key conforming to the + // "messaging.source.temporary" semantic conventions. It represents a + // boolean that is true if the message source is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + + // MessagingSourceAnonymousKey is the attribute Key conforming to the + // "messaging.source.anonymous" semantic conventions. It represents a + // boolean that is true if the message source is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +// MessagingSourceName returns an attribute KeyValue conforming to the +// "messaging.source.name" semantic conventions. It represents the message +// source name +func MessagingSourceName(val string) attribute.KeyValue { + return MessagingSourceNameKey.String(val) +} + +// MessagingSourceTemplate returns an attribute KeyValue conforming to the +// "messaging.source.template" semantic conventions. It represents the low +// cardinality representation of the messaging source name +func MessagingSourceTemplate(val string) attribute.KeyValue { + return MessagingSourceTemplateKey.String(val) +} + +// MessagingSourceTemporary returns an attribute KeyValue conforming to the +// "messaging.source.temporary" semantic conventions. 
It represents a boolean +// that is true if the message source is temporary and might not exist anymore +// after messages are processed. +func MessagingSourceTemporary(val bool) attribute.KeyValue { + return MessagingSourceTemporaryKey.Bool(val) +} + +// MessagingSourceAnonymous returns an attribute KeyValue conforming to the +// "messaging.source.anonymous" semantic conventions. It represents a boolean +// that is true if the message source is anonymous (could be unnamed or have +// auto-generated name). +func MessagingSourceAnonymous(val bool) attribute.KeyValue { + return MessagingSourceAnonymousKey.Bool(val) +} + +// Attributes for RabbitMQ +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) + // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// Attributes for Apache Kafka +const ( + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaClientIDKey is the attribute Key conforming to the + // "messaging.kafka.client_id" semantic conventions. It represents the + // client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + + // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the + // "messaging.kafka.source.partition" semantic conventions. It represents + // the partition the message is received from. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If value is `true`. When + // missing, the value is assumed to be `false`.) + // Stability: stable + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaClientID returns an attribute KeyValue conforming to the +// "messaging.kafka.client_id" semantic conventions. It represents the client +// ID for the Consumer or Producer that is handling the message. +func MessagingKafkaClientID(val string) attribute.KeyValue { + return MessagingKafkaClientIDKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to +// the "messaging.kafka.source.partition" semantic conventions. It represents +// the partition the message is received from. +func MessagingKafkaSourcePartition(val int) attribute.KeyValue { + return MessagingKafkaSourcePartitionKey.Int(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. 
It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// Attributes for Apache RocketMQ +const ( + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqClientIDKey is the attribute Key conforming to the + // "messaging.rocketmq.client_id" semantic conventions. It represents the + // unique identifier for each client. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delay time level is not specified.) + // Stability: stable + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delivery timestamp is not specified.) + // Stability: stable + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the it is essential for FIFO message. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) 
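// Usage sketch (illustrative editorial aside, not part of the generated
// conventions): the Kafka and messaging helpers above return
// attribute.KeyValue pairs that are typically attached to a span through the
// OpenTelemetry trace API. Assuming the standard "go.opentelemetry.io/otel"
// and "go.opentelemetry.io/otel/trace" imports and an already configured
// tracer provider, a Kafka consumer span could be annotated like this:
//
//	tracer := otel.Tracer("example/consumer")
//	ctx, span := tracer.Start(ctx, "MyTopic receive",
//		trace.WithSpanKind(trace.SpanKindConsumer),
//		trace.WithAttributes(
//			MessagingSourceName("MyTopic"),
//			MessagingKafkaConsumerGroup("my-group"),
//			MessagingKafkaSourcePartition(2),
//			MessagingKafkaMessageOffset(42),
//		),
//	)
//	defer span.End()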
+ // Stability: stable + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqClientID returns an attribute KeyValue conforming to the +// "messaging.rocketmq.client_id" semantic conventions. It represents the +// unique identifier for each client. 
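// Usage sketch (illustrative editorial aside, not part of the generated
// conventions): the RocketMQ helpers and enum values above can also be set on
// an existing span, for example one retrieved from the current context with
// "go.opentelemetry.io/otel/trace":
//
//	span := trace.SpanFromContext(ctx)
//	span.SetAttributes(
//		MessagingRocketmqNamespace("myNamespace"),
//		MessagingRocketmqClientGroup("myConsumerGroup"),
//		MessagingRocketmqMessageTypeNormal,
//		MessagingRocketmqConsumptionModelClustering,
//	)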
+func MessagingRocketmqClientID(val string) attribute.KeyValue { + return MessagingRocketmqClientIDKey.String(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the it is essential for FIFO message. Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + UserAgentOriginalKey = attribute.Key("user_agent.original") +) + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go new file mode 100644 index 00000000000..359c5a69624 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.20.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
new file mode 100644
index 00000000000..8ac9350d2b2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+	// FeatureFlagKeyKey is the attribute Key conforming to the
+	// "feature_flag.key" semantic conventions. It represents the unique
+	// identifier of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It SHOULD be
+	// a semantic identifier for a value. If one is unavailable, a stringified
+	// version of the value can be used.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant, provides
+	// a means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable. String representation of the value
+	// should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+	// MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or
+	// sent message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageTypeKey = attribute.Key("message.type")
+
+	// MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. It MUST be calculated as two
+	// different counters starting from `1`, one for sent messages and one for
+	// received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+
+	// MessageCompressedSizeKey is the attribute Key conforming to the
+	// "message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+	// MessageUncompressedSizeKey is the attribute Key conforming to the
+	// "message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different
+// counters starting from `1`, one for sent messages and one for received
+// messages.
+func MessageID(val int) attribute.KeyValue {
+	return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+	return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+	return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It SHOULD be
+	// set to true if the exception event is recorded at a point where it is
+	// known that the exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span,
+	// if that span is ended while the exception is still logically "in
+	// flight".
+	// This may be actually "in flight" in some languages (e.g. if the
+	// exception
+	// is passed to a Context manager's `__exit__` method in Python) but will
+	// usually be caught at the point of recording the exception in most
+	// languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown
+	// whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception
+	// will escape, if one checks for an active exception just before ending
+	// the span,
+	// as done in the [example above](#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+	return ExceptionEscapedKey.Bool(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
new file mode 100644
index 00000000000..09ff4dfdbf7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+const (
+	// ExceptionEventName is the name of the Span event representing an exception.
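// Usage sketch (illustrative editorial aside, not part of the generated
// conventions): span events with this name are typically produced by the
// trace API's RecordError, and the ExceptionEscaped helper defined above can
// be attached when the error is still unhandled as the span ends (assumes
// the "go.opentelemetry.io/otel/trace" and "go.opentelemetry.io/otel/codes"
// imports):
//
//	if err != nil {
//		span.RecordError(err, trace.WithAttributes(ExceptionEscaped(true)))
//		span.SetStatus(codes.Error, err.Error())
//	}
//	span.End()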
+ ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go new file mode 100644 index 00000000000..342aede95f1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go new file mode 100644 index 00000000000..a2b906742a8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go @@ -0,0 +1,2071 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). 
If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. + CloudResourceIDKey = attribute.Key("cloud.resource_id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. 
It represents the cloud + // regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the + // resource is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google 
Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) +// on AWS, a [fully qualified resource +// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) +// on Azure, a [full resource +// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) +// on GCP) +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. 
It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). 
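// Usage sketch (illustrative editorial aside, not part of the generated
// conventions): resource-level attributes such as the ECS and log group
// helpers above are normally supplied when constructing an SDK resource,
// for example with "go.opentelemetry.io/otel/sdk/resource" and this
// package's SchemaURL constant:
//
//	res := resource.NewWithAttributes(SchemaURL,
//		AWSECSTaskFamily("opentelemetry-family"),
//		AWSLogGroupNames("/aws/lambda/my-function"),
//	)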
+func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// Heroku dyno metadata +const ( + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") +) + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). 
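// Usage sketch (illustrative editorial aside, not part of the generated
// conventions): the container helpers above and the DeploymentEnvironment
// helper that follows compose the same way, here using the option-based
// constructor from "go.opentelemetry.io/otel/sdk/resource":
//
//	res, err := resource.New(ctx,
//		resource.WithAttributes(
//			ContainerName("opentelemetry-autoconf"),
//			ContainerImageTag("0.1"),
//			DeploymentEnvironment("production"),
//		),
//	)
//	// handle err and pass res to the tracer provider as usual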
+func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. 
It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// A serverless instance. +const ( + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a general computing instance. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + HostArchKey = attribute.Key("host.arch") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the vM image ID. For Cloud, this + // value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. 
+func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the vM image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image as defined in [Version +// Attributes](README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// A Kubernetes Cluster. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// A Kubernetes Node object. +const ( + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. 
It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](../../resource/semantic_conventions/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// The single (language) runtime instance which is monitored. 
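A minimal usage sketch for the process.* helpers above: they are plain attribute.KeyValue constructors, so a typical consumer folds them into a resource. The resource.NewWithAttributes call and the os/path/filepath lookups below are assumptions about the consuming code, not something this generated package provides, and the executable name is only an approximation good enough for a sketch.

    package main

    import (
        "fmt"
        "os"
        "path/filepath"

        "go.opentelemetry.io/otel/sdk/resource"
        semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
    )

    func main() {
        // Describe the current process using the generated helpers.
        res := resource.NewWithAttributes(
            semconv.SchemaURL,
            semconv.ProcessPID(os.Getpid()),
            semconv.ProcessExecutableName(filepath.Base(os.Args[0])),
            semconv.ProcessCommandArgs(os.Args...),
        )
        fmt.Println(res.Attributes())
    }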
+const ( + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// A service instance. 
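A sketch of the most common use of ServiceName: service.name is the only attribute marked Required for a generic service, so consumers usually layer it (plus any optional deployment attributes) over the SDK defaults. resource.Default and resource.Merge are assumed to come from go.opentelemetry.io/otel/sdk/resource; Merge can return an error when the two resources carry conflicting schema URLs.

    // serviceResource builds the service identity and merges it over the SDK
    // defaults. Imports as in the process sketch above.
    func serviceResource() (*resource.Resource, error) {
        svc := resource.NewWithAttributes(
            semconv.SchemaURL,
            semconv.ServiceName("shoppingcart"),
            semconv.DeploymentEnvironment("staging"),
        )
        // Values from the second argument take precedence, so the explicit
        // service identity overrides the default unknown_service:* name.
        return resource.Merge(resource.Default(), svc)
    }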
+const ( + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-k8s-pod-deployment-1', + // '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. 
+const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. 
It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. 
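A sketch of how a non-OTLP exporter might use the scope helpers above, assuming the SDK's ReadOnlySpan type and its InstrumentationScope accessor from go.opentelemetry.io/otel/sdk/trace (both assumptions about the consuming exporter, not part of this package):

    // scopeAttrs flattens a span's instrumentation scope into attributes for
    // backends that have no native notion of a scope.
    func scopeAttrs(span sdktrace.ReadOnlySpan) []attribute.KeyValue {
        scope := span.InstrumentationScope()
        return []attribute.KeyValue{
            semconv.OTelScopeName(scope.Name),
            semconv.OTelScopeVersion(scope.Version),
        }
    }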
+const ( + // OTelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. It represents the deprecated, + // use the `otel.scope.name` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelLibraryNameKey = attribute.Key("otel.library.name") + + // OTelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. It represents the + // deprecated, use the `otel.scope.version` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + OTelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OTelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. It represents the deprecated, use +// the `otel.scope.name` attribute. +func OTelLibraryName(val string) attribute.KeyValue { + return OTelLibraryNameKey.String(val) +} + +// OTelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. It represents the deprecated, +// use the `otel.scope.version` attribute. +func OTelLibraryVersion(val string) attribute.KeyValue { + return OTelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go new file mode 100644 index 00000000000..e449e5c3b9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go new file mode 100644 index 00000000000..8517741485c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go @@ -0,0 +1,2610 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. 
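A sketch showing where SchemaURL typically ends up: it is supplied when the resource is built, and that resource is then handed to the tracer provider. sdktrace.NewTracerProvider and sdktrace.WithResource are assumed from go.opentelemetry.io/otel/sdk/trace; the service name is a placeholder.

    // newTracerProvider wires a semconv v1.20.0 resource into the SDK.
    func newTracerProvider() *sdktrace.TracerProvider {
        res := resource.NewWithAttributes(
            semconv.SchemaURL,
            semconv.ServiceName("checkout"),
        )
        return sdktrace.NewTracerProvider(sdktrace.WithResource(res))
    }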
+ +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// The attributes described in this section are rather generic. They may be +// used in any Log Record they apply to. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. 
+ // The id MAY be an [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. 
It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. 
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The attributes used to perform database client calls. +const ( + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + DBSystemKey = attribute.Key("db.system") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the this attribute is used to report the name + // of the database being accessed. For commands that switch the database, + // this should be set to the target database (even if the command fails). + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable.) + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: Recommended (Should be collected by default only if + // there is sanitization that excludes sensitive information.) + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + DBStatementKey = attribute.Key("db.statement") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. 
It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = 
DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBConnectionString returns an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. +func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the this attribute is used to report the name of +// the database being accessed. For commands that switch the database, this +// should be set to the target database (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// Connection-level attributes for Microsoft SQL Server +const ( + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named + // instance. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no + // longer required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// Call-level attributes for Cassandra +const ( + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary table that the operation is acting upon, including the keyspace + // name (if applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary table that the operation is acting upon, including the keyspace name +// (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. 
+func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// Call-level attributes for Redis +const ( + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Call-level attributes for Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. 
It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (when performing one of the + // operations in this list) + // Stability: stable + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as + // default)) + // Stability: stable + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBContainerKey is the attribute Key conforming to the + // "db.cosmosdb.container" semantic conventions. It represents the cosmos + // DB container name. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if available) + // Stability: stable + // Examples: 'anystring' + DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (if response was received) + // Stability: stable + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (when response was received and + // contained sub-code.) + // Stability: stable + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU + // consumed for that operation + // + // Type: double + // RequirementLevel: ConditionallyRequired (when available) + // Stability: stable + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBContainer returns an attribute KeyValue conforming to the +// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB +// container name. +func DBCosmosDBContainer(val string) attribute.KeyValue { + return DBCosmosDBContainerKey.String(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU +// consumed for that operation +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + // + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID +// of the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // describes the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. 
It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). 
+func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). 
+func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the full HTTP request URL in the form + // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is + // not transmitted over HTTP, but if it is known, it should be included + // nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the + // attribute's value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. 
It represents the full HTTP request URL in the form +// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not +// transmitted over HTTP, but if it is known, it should be included +// nevertheless. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). +func HTTPResendCount(val int) attribute.KeyValue { + return HTTPResendCountKey.Int(val) +} + +// Semantic Convention for HTTP Server +const ( + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. It represents the full request target as passed in + // a HTTP request line or equivalent. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '/users/12314/?q=ddds' + HTTPTargetKey = attribute.Key("http.target") + + // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" + // semantic conventions. It represents the IP address of the original + // client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.sock.peer.addr`, which + // would + // identify the network-level peer, which may be a proxy. + // + // This attribute should be set when a source of information different + // from the one used for `net.sock.peer.addr`, is available even if that + // other + // source just confirms the same value as `net.sock.peer.addr`. + // Rationale: For `net.sock.peer.addr`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.sock.peer.addr` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. It represents the full request target as passed in a +// HTTP request line or equivalent. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPClientIP returns an attribute KeyValue conforming to the +// "http.client_ip" semantic conventions. It represents the IP address of the +// original client behind all proxies, if known (e.g. from +// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). +func HTTPClientIP(val string) attribute.KeyValue { + return HTTPClientIPKey.String(val) +} + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
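[Editor's note, not part of the patch] The pattern in this vendored file is uniform: every exported `attribute.Key` constant has a typed helper function that wraps it. A minimal, illustrative sketch of that equivalence is below; the import alias and version (`go.opentelemetry.io/otel/semconv/v1.20.0`) are assumptions and should match whatever this change actually vendors.

package main

import (
	"fmt"

	// Assumed import path/version; use the semconv module vendored by this change.
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

func main() {
	// Both expressions build the same KeyValue for "aws.dynamodb.table_names";
	// the helper simply wraps the exported Key constant.
	a := semconv.AWSDynamoDBTableNames("Users", "Cats")
	b := semconv.AWSDynamoDBTableNamesKey.StringSlice([]string{"Users", "Cats"})
	fmt.Println(a.Key == b.Key, a.Value.Emit() == b.Value.Emit()) // true true
}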
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// DynamoDB.ListTables +const ( + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the the +// number of items in the `TableNames` response parameter. 
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
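[Editor's note, not part of the patch] Taken together, these DynamoDB attributes are intended to be set on a client span surrounding the SDK call. A minimal sketch of that usage follows; the tracer name, span name, attribute values, and semconv import version are assumptions, not something this patch prescribes.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

func queryUsers(ctx context.Context) {
	// Start a CLIENT span for the DynamoDB call and record the request-shaped
	// attributes up front.
	_, span := otel.Tracer("dynamodb-instrumentation").Start(ctx, "DynamoDB.Query",
		trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBScanForward(true),
	)

	// ... perform the actual Query here ...

	// Record the response-shaped attributes once the result is known.
	span.SetAttributes(
		semconv.AWSDynamoDBCount(10),
		semconv.AWSDynamoDBScannedCount(50),
	)
}

func main() { queryUsers(context.Background()) }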
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// DynamoDB.UpdateTable +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // the `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// Attributes that exist for S3 request types. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. 
that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. 
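[Editor's note, not part of the patch] These S3 attributes are usually attached to whatever span is already active around the S3 call rather than to a new one. A small sketch of that pattern follows; the function and parameter names are made up, the sample values are taken from the Examples above, and the semconv import version is an assumption.

package main

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.20.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// annotateS3UploadPart decorates the span already carried in ctx (a no-op
// span if none is present) with the S3 attributes defined in this file.
func annotateS3UploadPart(ctx context.Context, bucket, key, uploadID string, partNumber int) {
	span := trace.SpanFromContext(ctx)
	span.SetAttributes(
		semconv.AWSS3Bucket(bucket),
		semconv.AWSS3Key(key),
		semconv.AWSS3UploadID(uploadID),
		semconv.AWSS3PartNumber(partNumber),
	)
}

func main() {
	annotateS3UploadPart(context.Background(), "some-bucket-name", "someFile.yml",
		"dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ", 3456)
}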
+func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") + + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. 
For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. 
It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// Tech-specific attributes for Connect RPC. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If response is not successful + // and if error code available.) 
+ // Stability: stable + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) diff --git a/vendor/google.golang.org/api/internal/cba.go b/vendor/google.golang.org/api/internal/cba.go index 829383f55b5..fbf4ef1c6e1 100644 --- a/vendor/google.golang.org/api/internal/cba.go +++ b/vendor/google.golang.org/api/internal/cba.go @@ -35,6 +35,7 @@ package internal import ( "context" "crypto/tls" + "errors" "net" "net/url" "os" @@ -53,6 +54,12 @@ const ( // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false. 
googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A" + + universeDomainPlaceholder = "UNIVERSE_DOMAIN" +) + +var ( + errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com") ) // getClientCertificateSourceAndEndpoint is a convenience function that invokes @@ -67,6 +74,14 @@ func getClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, if err != nil { return nil, "", err } + // TODO(chrisdsmith): https://github.com/googleapis/google-api-go-client/issues/2359 + if settings.Endpoint == "" && !settings.IsUniverseDomainGDU() && settings.DefaultEndpointTemplate != "" { + // TODO(chrisdsmith): https://github.com/googleapis/google-api-go-client/issues/2359 + // if settings.DefaultEndpointTemplate == "" { + // return nil, "", errors.New("internaloption.WithDefaultEndpointTemplate is required if option.WithUniverseDomain is not googleapis.com") + // } + endpoint = resolvedDefaultEndpoint(settings) + } return clientCertSource, endpoint, nil } @@ -80,9 +95,7 @@ type transportConfig struct { func getTransportConfig(settings *DialSettings) (*transportConfig, error) { clientCertSource, endpoint, err := getClientCertificateSourceAndEndpoint(settings) if err != nil { - return &transportConfig{ - clientCertSource: nil, endpoint: "", s2aAddress: "", s2aMTLSEndpoint: "", - }, err + return nil, err } defaultTransportConfig := transportConfig{ clientCertSource: clientCertSource, @@ -94,12 +107,10 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) { if !shouldUseS2A(clientCertSource, settings) { return &defaultTransportConfig, nil } - - s2aMTLSEndpoint := settings.DefaultMTLSEndpoint - // If there is endpoint override, honor it. - if settings.Endpoint != "" { - s2aMTLSEndpoint = endpoint + if !settings.IsUniverseDomainGDU() { + return nil, errUniverseNotSupportedMTLS } + s2aAddress := GetS2AAddress() if s2aAddress == "" { return &defaultTransportConfig, nil @@ -108,7 +119,7 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) { clientCertSource: clientCertSource, endpoint: endpoint, s2aAddress: s2aAddress, - s2aMTLSEndpoint: s2aMTLSEndpoint, + s2aMTLSEndpoint: settings.DefaultMTLSEndpoint, }, nil } @@ -153,24 +164,41 @@ func isClientCertificateEnabled() bool { // WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { if settings.Endpoint == "" { - mtlsMode := getMTLSMode() - if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { + if isMTLS(clientCertSource) { + if !settings.IsUniverseDomainGDU() { + return "", errUniverseNotSupportedMTLS + } return settings.DefaultMTLSEndpoint, nil } - return settings.DefaultEndpoint, nil + return resolvedDefaultEndpoint(settings), nil } if strings.Contains(settings.Endpoint, "://") { // User passed in a full URL path, use it verbatim. return settings.Endpoint, nil } - if settings.DefaultEndpoint == "" { + if resolvedDefaultEndpoint(settings) == "" { // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. return settings.Endpoint, nil } // Assume user-provided endpoint is host[:port], merge it with the default endpoint. 
- return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) + return mergeEndpoints(resolvedDefaultEndpoint(settings), settings.Endpoint) +} + +func isMTLS(clientCertSource cert.Source) bool { + mtlsMode := getMTLSMode() + return mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) +} + +// resolvedDefaultEndpoint returns the DefaultEndpointTemplate merged with the +// Universe Domain if the DefaultEndpointTemplate is set, otherwise returns the +// deprecated DefaultEndpoint value. +func resolvedDefaultEndpoint(settings *DialSettings) string { + if settings.DefaultEndpointTemplate == "" { + return settings.DefaultEndpoint + } + return strings.Replace(settings.DefaultEndpointTemplate, universeDomainPlaceholder, settings.GetUniverseDomain(), 1) } func getMTLSMode() string { @@ -274,25 +302,15 @@ func shouldUseS2A(clientCertSource cert.Source, settings *DialSettings) bool { if !isGoogleS2AEnabled() { return false } - // If DefaultMTLSEndpoint is not set and no endpoint override, skip S2A. - if settings.DefaultMTLSEndpoint == "" && settings.Endpoint == "" { - return false - } - // If MTLS is not enabled for this endpoint, skip S2A. - if !mtlsEndpointEnabledForS2A() { + // If DefaultMTLSEndpoint is not set or has endpoint override, skip S2A. + if settings.DefaultMTLSEndpoint == "" || settings.Endpoint != "" { return false } // If custom HTTP client is provided, skip S2A. if settings.HTTPClient != nil { return false } - return true -} - -// mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection. -var mtlsEndpointEnabledForS2A = func() bool { - // TODO(xmenxk): determine this via discovery config. - return true + return !settings.EnableDirectPath && !settings.EnableDirectPathXds } func isGoogleS2AEnabled() bool { diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 05165f333b0..b6489309851 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -16,6 +16,7 @@ import ( "time" "golang.org/x/oauth2" + "google.golang.org/api/internal/cert" "google.golang.org/api/internal/impersonate" "golang.org/x/oauth2/google" @@ -90,11 +91,11 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g // Determine configurations for the OAuth2 transport, which is separate from the API transport. // The OAuth2 transport and endpoint will be configured for mTLS if applicable. - clientCertSource, oauth2Endpoint, err := getClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) + clientCertSource, err := getClientCertificateSource(ds) if err != nil { return nil, err } - params.TokenURL = oauth2Endpoint + params.TokenURL = oAuth2Endpoint(clientCertSource) if clientCertSource != nil { tlsConfig := &tls.Config{ GetClientCertificate: clientCertSource, @@ -124,22 +125,37 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g return cred, err } +func oAuth2Endpoint(clientCertSource cert.Source) string { + if isMTLS(clientCertSource) { + return google.MTLSTokenURL + } + return google.Endpoint.TokenURL +} + func isSelfSignedJWTFlow(data []byte, ds *DialSettings) (bool, error) { - if (ds.EnableJwtWithScope || ds.HasCustomAudience()) && - ds.ImpersonationConfig == nil { - // Check if JSON is a service account and if so create a self-signed JWT. - var f struct { - Type string `json:"type"` - // The rest JSON fields are omitted because they are not used. 
- } - if err := json.Unmarshal(data, &f); err != nil { - return false, err - } - return f.Type == serviceAccountKey, nil + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs with scopes. + if !ds.IsUniverseDomainGDU() { + return typeServiceAccount(data) + } + if (ds.EnableJwtWithScope || ds.HasCustomAudience()) && ds.ImpersonationConfig == nil { + return typeServiceAccount(data) } return false, nil } +// typeServiceAccount checks if JSON data is for a service account. +func typeServiceAccount(data []byte) (bool, error) { + var f struct { + Type string `json:"type"` + // The remaining JSON fields are omitted because they are not used. + } + if err := json.Unmarshal(data, &f); err != nil { + return false, err + } + return f.Type == serviceAccountKey, nil +} + func selfSignedJWTTokenSource(data []byte, ds *DialSettings) (oauth2.TokenSource, error) { if len(ds.GetScopes()) > 0 && !ds.HasCustomAudience() { // Scopes are preferred in self-signed JWT unless the scope is not available @@ -188,15 +204,6 @@ func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds * }, nil } -// oauth2DialSettings returns the settings to be used by the OAuth2 transport, which is separate from the API transport. -func oauth2DialSettings(ds *DialSettings) *DialSettings { - var ods DialSettings - ods.DefaultEndpoint = google.Endpoint.TokenURL - ods.DefaultMTLSEndpoint = google.MTLSTokenURL - ods.ClientCertSource = ds.ClientCertSource - return &ods -} - // customHTTPClient constructs an HTTPClient using the provided tlsConfig, to support mTLS. func customHTTPClient(tlsConfig *tls.Config) *http.Client { trans := baseTransport() @@ -219,3 +226,14 @@ func baseTransport() *http.Transport { ExpectContinueTimeout: 1 * time.Second, } } + +// ErrUniverseNotMatch composes an error string from the provided universe +// domain sources (DialSettings and Credentials, respectively). +func ErrUniverseNotMatch(settingsUD, credsUD string) error { + return fmt.Errorf( + "the configured universe domain (%q) does not match the universe "+ + "domain found in the credentials (%q). If you haven't configured "+ + "WithUniverseDomain explicitly, \"googleapis.com\" is the default", + settingsUD, + credsUD) +} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 285e6e04d39..99210ebe0e5 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -19,7 +19,8 @@ import ( ) const ( - newAuthLibEnVar = "GOOGLE_API_GO_EXPERIMENTAL_USE_NEW_AUTH_LIB" + newAuthLibEnVar = "GOOGLE_API_GO_EXPERIMENTAL_USE_NEW_AUTH_LIB" + universeDomainDefault = "googleapis.com" ) // DialSettings holds information needed to establish a connection with a @@ -161,3 +162,28 @@ func (ds *DialSettings) Validate() error { } return nil } + +// GetDefaultUniverseDomain returns the default service domain for a given Cloud +// universe, as configured with internaloption.WithDefaultUniverseDomain. +// The default value is "googleapis.com". +func (ds *DialSettings) GetDefaultUniverseDomain() string { + if ds.DefaultUniverseDomain == "" { + return universeDomainDefault + } + return ds.DefaultUniverseDomain +} + +// GetUniverseDomain returns the default service domain for a given Cloud +// universe, as configured with option.WithUniverseDomain. +// The default value is the value of GetDefaultUniverseDomain, as configured +// with internaloption.WithDefaultUniverseDomain. 
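[Editor's note, not part of the patch] The universe-domain plumbing added in this file works together with the resolvedDefaultEndpoint change earlier in the patch: default endpoints are built by substituting the configured universe domain into an endpoint template, falling back to "googleapis.com". A standalone sketch of that substitution is below; the function name resolveEndpoint and the standalone program are illustrative, not the library's API.

package main

import (
	"fmt"
	"strings"
)

// resolveEndpoint mimics how the patch builds a default endpoint: the literal
// placeholder "UNIVERSE_DOMAIN" in an endpoint template is replaced with the
// configured universe domain ("googleapis.com" when nothing is configured).
func resolveEndpoint(endpointTemplate, universeDomain string) string {
	if universeDomain == "" {
		universeDomain = "googleapis.com"
	}
	return strings.Replace(endpointTemplate, "UNIVERSE_DOMAIN", universeDomain, 1)
}

func main() {
	fmt.Println(resolveEndpoint("https://logging.UNIVERSE_DOMAIN/", ""))            // https://logging.googleapis.com/
	fmt.Println(resolveEndpoint("https://logging.UNIVERSE_DOMAIN/", "example.com")) // https://logging.example.com/
}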
+func (ds *DialSettings) GetUniverseDomain() string { + if ds.UniverseDomain == "" { + return ds.GetDefaultUniverseDomain() + } + return ds.UniverseDomain +} + +func (ds *DialSettings) IsUniverseDomainGDU() bool { + return ds.GetUniverseDomain() == ds.GetDefaultUniverseDomain() +} diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 8ecad3542b9..f33c762df20 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.156.0" +const Version = "0.161.0" diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index c15be9faa96..e6b5c102555 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -38,7 +38,10 @@ func (o defaultEndpointTemplateOption) Apply(settings *internal.DialSettings) { // WithDefaultEndpointTemplate provides a template for creating the endpoint // using a universe domain. See also WithDefaultUniverseDomain and -// option.WithUniverseDomain. +// option.WithUniverseDomain. The placeholder UNIVERSE_DOMAIN should be used +// instead of a concrete universe domain such as "googleapis.com". +// +// Example: WithDefaultEndpointTemplate("https://logging.UNIVERSE_DOMAIN/") // // It should only be used internally by generated clients. func WithDefaultEndpointTemplate(url string) option.ClientOption { @@ -163,6 +166,11 @@ func (w withDefaultUniverseDomain) Apply(o *internal.DialSettings) { // EnableJwtWithScope returns a ClientOption that specifies if scope can be used // with self-signed JWT. +// +// EnableJwtWithScope is ignored when option.WithUniverseDomain is set +// to a value other than the Google Default Universe (GDU) of "googleapis.com". +// For non-GDU domains, token exchange is impossible and services must +// support self-signed JWTs with scopes. func EnableJwtWithScope() option.ClientOption { return enableJwtWithScope(true) } diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index 10830f01644..9e8b5a12296 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -177,6 +177,17 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if err != nil { return nil, err } + credsUniverseDomain, err := creds.GetUniverseDomain() + if err != nil { + return nil, err + } + if o.TokenSource == nil { + // We only validate non-tokensource creds, as TokenSource-based credentials + // don't propagate universe. 
+ if o.GetUniverseDomain() != credsUniverseDomain { + return nil, internal.ErrUniverseNotMatch(o.GetUniverseDomain(), credsUniverseDomain) + } + } grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(grpcTokenSource{ TokenSource: oauth.TokenSource{TokenSource: creds.TokenSource}, quotaProject: internal.GetQuotaProject(creds, o.QuotaProject), diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index 7e322a17c68..75b266acf74 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -88,6 +88,17 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna if err != nil { return nil, err } + credsUniverseDomain, err := creds.GetUniverseDomain() + if err != nil { + return nil, err + } + if settings.TokenSource == nil { + // We only validate non-tokensource creds, as TokenSource-based credentials + // don't propagate universe. + if settings.GetUniverseDomain() != credsUniverseDomain { + return nil, internal.ErrUniverseNotMatch(settings.GetUniverseDomain(), credsUniverseDomain) + } + } paramTransport.quotaProject = internal.GetQuotaProject(creds, settings.QuotaProject) ts := creds.TokenSource if settings.ImpersonationConfig == nil && settings.TokenSource != nil { diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 83774fbcbe7..d5dccb93377 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.12 +// protoc v4.24.4 // source: google/api/client.proto package annotations @@ -1033,6 +1033,18 @@ type MethodSettings struct { // total_poll_timeout: // seconds: 54000 # 90 minutes LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` + // List of top-level fields of the request message, that should be + // automatically populated by the client libraries based on their + // (google.api.field_info).format. Currently supported format: UUID4. + // + // Example of a YAML configuration: + // + // publishing: + // method_settings: + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id + AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"` } func (x *MethodSettings) Reset() { @@ -1081,6 +1093,13 @@ func (x *MethodSettings) GetLongRunning() *MethodSettings_LongRunning { return nil } +func (x *MethodSettings) GetAutoPopulatedFields() []string { + if x != nil { + return x.AutoPopulatedFields + } + return nil +} + // Describes settings to use when generating API methods that use the // long-running operation pattern. 
// All default values below are from those used in the client library @@ -1452,69 +1471,73 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, - 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a, - 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, - 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, - 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, - 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, - 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, - 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, - 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, - 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, - 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 
0x03, - 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, - 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, - 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, - 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, - 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, - 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, - 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, - 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, - 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, 0x0e, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, - 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, + 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, + 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x1a, 0x94, 
0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, + 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, + 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, + 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, + 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, + 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, + 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, + 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, + 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, + 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, + 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, + 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, + 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, + 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, + 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, + 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, + 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, + 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, + 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, + 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, + 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git a/vendor/modules.txt b/vendor/modules.txt index 84c2153f7a3..fdaa953a12a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -14,17 +14,17 @@ cloud.google.com/go/compute/metadata ## explicit; go 1.19 cloud.google.com/go/iam cloud.google.com/go/iam/apiv1/iampb -# cloud.google.com/go/monitoring v1.17.0 +# cloud.google.com/go/monitoring v1.17.1 ## explicit; go 1.19 cloud.google.com/go/monitoring/apiv3/v2 cloud.google.com/go/monitoring/apiv3/v2/monitoringpb cloud.google.com/go/monitoring/internal -# cloud.google.com/go/secretmanager v1.11.4 +# cloud.google.com/go/secretmanager v1.11.5 ## explicit; go 1.19 cloud.google.com/go/secretmanager/apiv1 cloud.google.com/go/secretmanager/apiv1/secretmanagerpb cloud.google.com/go/secretmanager/internal -# cloud.google.com/go/storage v1.36.0 +# cloud.google.com/go/storage v1.37.0 ## explicit; go 1.19 cloud.google.com/go/storage cloud.google.com/go/storage/internal @@ -302,10 +302,10 @@ github.com/aws/aws-sdk-go-v2/internal/timeconv ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi -# github.com/aws/aws-sdk-go-v2/config v1.26.4 +# github.com/aws/aws-sdk-go-v2/config v1.26.6 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.16.15 +# github.com/aws/aws-sdk-go-v2/credentials v1.16.16 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds @@ -324,7 +324,7 @@ github.com/aws/aws-sdk-go-v2/internal/configsources # github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -# 
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 +# github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/internal/ini # github.com/aws/aws-sdk-go-v2/service/amp v1.22.1 @@ -337,7 +337,7 @@ github.com/aws/aws-sdk-go-v2/service/amp/types github.com/aws/aws-sdk-go-v2/service/cloudwatch github.com/aws/aws-sdk-go-v2/service/cloudwatch/internal/endpoints github.com/aws/aws-sdk-go-v2/service/cloudwatch/types -# github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.9 +# github.com/aws/aws-sdk-go-v2/service/dynamodb v1.27.0 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/dynamodb github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations @@ -373,7 +373,7 @@ github.com/aws/aws-sdk-go-v2/service/secretsmanager/types github.com/aws/aws-sdk-go-v2/service/sqs github.com/aws/aws-sdk-go-v2/service/sqs/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sqs/types -# github.com/aws/aws-sdk-go-v2/service/sso v1.18.6 +# github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints @@ -431,8 +431,8 @@ github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/cloudevents/sdk-go/v2 v2.14.0 -## explicit; go 1.17 +# github.com/cloudevents/sdk-go/v2 v2.15.0 +## explicit; go 1.18 github.com/cloudevents/sdk-go/v2 github.com/cloudevents/sdk-go/v2/binding github.com/cloudevents/sdk-go/v2/binding/format @@ -525,11 +525,11 @@ github.com/emicklei/go-restful/v3/log # github.com/evanphx/json-patch v5.8.1+incompatible ## explicit github.com/evanphx/json-patch -# github.com/evanphx/json-patch/v5 v5.8.1 +# github.com/evanphx/json-patch/v5 v5.9.0 ## explicit; go 1.18 github.com/evanphx/json-patch/v5 github.com/evanphx/json-patch/v5/internal/json -# github.com/expr-lang/expr v1.15.8 +# github.com/expr-lang/expr v1.16.0 ## explicit; go 1.18 github.com/expr-lang/expr github.com/expr-lang/expr/ast @@ -754,7 +754,7 @@ github.com/google/s2a-go/stream # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 ## explicit; go 1.13 github.com/google/shlex -# github.com/google/uuid v1.5.0 +# github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid # github.com/googleapis/enterprise-certificate-proxy v0.3.2 @@ -836,7 +836,7 @@ github.com/hashicorp/hcl/hcl/token github.com/hashicorp/hcl/json/parser github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token -# github.com/hashicorp/vault/api v1.10.0 +# github.com/hashicorp/vault/api v1.11.0 ## explicit; go 1.19 github.com/hashicorp/vault/api # github.com/imdario/mergo v0.3.16 @@ -1104,7 +1104,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.31.0 +# github.com/onsi/gomega v1.31.1 ## explicit; go 1.20 github.com/onsi/gomega github.com/onsi/gomega/format @@ -1332,7 +1332,7 @@ github.com/xlab/treeprint # github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a ## explicit; go 1.12 github.com/youmark/pkcs8 -# go.etcd.io/etcd/api/v3 v3.5.11 +# go.etcd.io/etcd/api/v3 v3.5.12 ## explicit; go 1.20 go.etcd.io/etcd/api/v3/authpb go.etcd.io/etcd/api/v3/etcdserverpb @@ -1340,7 +1340,7 @@ go.etcd.io/etcd/api/v3/membershippb go.etcd.io/etcd/api/v3/mvccpb go.etcd.io/etcd/api/v3/v3rpc/rpctypes go.etcd.io/etcd/api/v3/version -# go.etcd.io/etcd/client/pkg/v3 v3.5.11 +# 
go.etcd.io/etcd/client/pkg/v3 v3.5.12 ## explicit; go 1.20 go.etcd.io/etcd/client/pkg/v3/fileutil go.etcd.io/etcd/client/pkg/v3/logutil @@ -1348,7 +1348,7 @@ go.etcd.io/etcd/client/pkg/v3/systemd go.etcd.io/etcd/client/pkg/v3/tlsutil go.etcd.io/etcd/client/pkg/v3/transport go.etcd.io/etcd/client/pkg/v3/types -# go.etcd.io/etcd/client/v3 v3.5.11 +# go.etcd.io/etcd/client/v3 v3.5.12 ## explicit; go 1.20 go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/credentials @@ -1422,11 +1422,11 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 ## explicit; go 1.20 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 ## explicit; go 1.20 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil @@ -1444,6 +1444,7 @@ go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/internal go.opentelemetry.io/otel/semconv/v1.12.0 go.opentelemetry.io/otel/semconv/v1.17.0 +go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.24.0 # go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.45.0 ## explicit; go 1.20 @@ -1672,7 +1673,7 @@ golang.org/x/xerrors/internal # gomodules.xyz/jsonpatch/v2 v2.4.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 -# google.golang.org/api v0.156.0 +# google.golang.org/api v0.161.0 ## explicit; go 1.19 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -1708,7 +1709,7 @@ google.golang.org/genproto/googleapis/type/calendarperiod google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/internal -# google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac +# google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations @@ -1718,12 +1719,12 @@ google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/api/label google.golang.org/genproto/googleapis/api/metric google.golang.org/genproto/googleapis/api/monitoredres -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.60.1 => google.golang.org/grpc v1.60.1 +# google.golang.org/grpc v1.61.0 => google.golang.org/grpc v1.60.1 ## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes @@ -1856,7 +1857,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 => gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.29.0 => k8s.io/api v0.28.5 +# k8s.io/api v0.29.1 => k8s.io/api v0.28.5 ## explicit; go 1.20 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1923,7 +1924,7 @@ 
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.29.0 => k8s.io/apimachinery v0.28.5 +# k8s.io/apimachinery v0.29.1 => k8s.io/apimachinery v0.28.5 ## explicit; go 1.20 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1985,7 +1986,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.29.0 => k8s.io/apiserver v0.28.5 +# k8s.io/apiserver v0.29.1 => k8s.io/apiserver v0.28.5 ## explicit; go 1.20 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/cel @@ -2130,7 +2131,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.29.0 => k8s.io/client-go v0.28.5 +# k8s.io/client-go v1.5.2 => k8s.io/client-go v0.28.5 ## explicit; go 1.20 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -2401,7 +2402,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/code-generator v0.29.0 => k8s.io/code-generator v0.28.5 +# k8s.io/code-generator v0.29.1 => k8s.io/code-generator v0.28.5 ## explicit; go 1.20 k8s.io/code-generator k8s.io/code-generator/cmd/applyconfiguration-gen @@ -2439,7 +2440,7 @@ k8s.io/code-generator/cmd/set-gen k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util k8s.io/code-generator/third_party/forked/golang/reflect -# k8s.io/component-base v0.29.0 => k8s.io/component-base v0.28.5 +# k8s.io/component-base v0.29.1 => k8s.io/component-base v0.28.5 ## explicit; go 1.20 k8s.io/component-base/cli/flag k8s.io/component-base/config @@ -2472,7 +2473,7 @@ k8s.io/gengo/generator k8s.io/gengo/namer k8s.io/gengo/parser k8s.io/gengo/types -# k8s.io/klog/v2 v2.110.1 => k8s.io/klog/v2 v2.100.1 +# k8s.io/klog/v2 v2.120.1 => k8s.io/klog/v2 v2.100.1 ## explicit; go 1.13 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer @@ -2487,7 +2488,7 @@ k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 k8s.io/kms/pkg/service k8s.io/kms/pkg/util -# k8s.io/kube-openapi v0.0.0-20240103051144-eec4567ac022 => k8s.io/kube-openapi v0.0.0-20230901164831-6c774f458599 +# k8s.io/kube-openapi v0.0.0-20240126223410-2919ad4fcfec => k8s.io/kube-openapi v0.0.0-20230901164831-6c774f458599 ## explicit; go 1.19 k8s.io/kube-openapi/cmd/openapi-gen k8s.io/kube-openapi/cmd/openapi-gen/args @@ -2514,7 +2515,7 @@ k8s.io/kube-openapi/pkg/validation/errors k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson -# k8s.io/metrics v0.29.0 => k8s.io/metrics v0.28.5 +# k8s.io/metrics v0.29.1 => k8s.io/metrics v0.28.5 ## explicit; go 1.20 k8s.io/metrics/pkg/apis/custom_metrics k8s.io/metrics/pkg/apis/custom_metrics/install @@ -2538,7 +2539,7 @@ k8s.io/utils/pointer k8s.io/utils/ptr k8s.io/utils/strings/slices k8s.io/utils/trace -# knative.dev/pkg v0.0.0-20240116073220-b488e7be5902 +# knative.dev/pkg v0.0.0-20240201013110-e85c3cf6d5f1 ## explicit; go 1.18 knative.dev/pkg/apis 
knative.dev/pkg/apis/duck @@ -2554,8 +2555,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/controller-runtime v0.16.3 -## explicit; go 1.20 +# sigs.k8s.io/controller-runtime v0.17.0 +## explicit; go 1.21 sigs.k8s.io/controller-runtime sigs.k8s.io/controller-runtime/pkg/builder sigs.k8s.io/controller-runtime/pkg/cache @@ -2586,6 +2587,7 @@ sigs.k8s.io/controller-runtime/pkg/internal/log sigs.k8s.io/controller-runtime/pkg/internal/objectutil sigs.k8s.io/controller-runtime/pkg/internal/recorder sigs.k8s.io/controller-runtime/pkg/internal/source +sigs.k8s.io/controller-runtime/pkg/internal/syncs sigs.k8s.io/controller-runtime/pkg/internal/testing/addr sigs.k8s.io/controller-runtime/pkg/internal/testing/certs sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane @@ -2607,7 +2609,7 @@ sigs.k8s.io/controller-runtime/pkg/webhook sigs.k8s.io/controller-runtime/pkg/webhook/admission sigs.k8s.io/controller-runtime/pkg/webhook/conversion sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics -# sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240102165319-7f316f1309b1 +# sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240201105228-4000e996a202 ## explicit; go 1.20 sigs.k8s.io/controller-runtime/tools/setup-envtest sigs.k8s.io/controller-runtime/tools/setup-envtest/env @@ -2615,7 +2617,7 @@ sigs.k8s.io/controller-runtime/tools/setup-envtest/remote sigs.k8s.io/controller-runtime/tools/setup-envtest/store sigs.k8s.io/controller-runtime/tools/setup-envtest/versions sigs.k8s.io/controller-runtime/tools/setup-envtest/workflows -# sigs.k8s.io/controller-tools v0.13.0 +# sigs.k8s.io/controller-tools v0.14.0 ## explicit; go 1.20 sigs.k8s.io/controller-tools/cmd/controller-gen sigs.k8s.io/controller-tools/pkg/crd diff --git a/vendor/sigs.k8s.io/controller-runtime/.golangci.yml b/vendor/sigs.k8s.io/controller-runtime/.golangci.yml index deb6a783da8..a95c15b2c88 100644 --- a/vendor/sigs.k8s.io/controller-runtime/.golangci.yml +++ b/vendor/sigs.k8s.io/controller-runtime/.golangci.yml @@ -59,9 +59,9 @@ linters-settings: - pkg: sigs.k8s.io/controller-runtime alias: ctrl staticcheck: - go: "1.20" + go: "1.21" stylecheck: - go: "1.20" + go: "1.21" revive: rules: # The following rules are recommended https://github.com/mgechev/revive#recommended-configuration diff --git a/vendor/sigs.k8s.io/controller-runtime/RELEASE.md b/vendor/sigs.k8s.io/controller-runtime/RELEASE.md index f234494fe1c..2a857b976e3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/RELEASE.md +++ b/vendor/sigs.k8s.io/controller-runtime/RELEASE.md @@ -4,9 +4,9 @@ The Kubernetes controller-runtime Project is released on an as-needed basis. The **Note:** Releases are done from the `release-MAJOR.MINOR` branches. For PATCH releases is not required to create a new branch you will just need to ensure that all big fixes are cherry-picked into the respective -`release-MAJOR.MINOR` branch. To know more about versioning check https://semver.org/. +`release-MAJOR.MINOR` branch. To know more about versioning check https://semver.org/. -## How to do a release +## How to do a release ### Create the new branch and the release tag @@ -15,7 +15,7 @@ to create a new branch you will just need to ensure that all big fixes are cherr ### Now, let's generate the changelog -1. 
Create the changelog from the new branch `release-` (`git checkout release-`). +1. Create the changelog from the new branch `release-` (`git checkout release-`). You will need to use the [kubebuilder-release-tools][kubebuilder-release-tools] to generate the notes. See [here][release-notes-generation] > **Note** @@ -24,12 +24,12 @@ You will need to use the [kubebuilder-release-tools][kubebuilder-release-tools] ### Draft a new release from GitHub -1. Create a new tag with the correct version from the new `release-` branch +1. Create a new tag with the correct version from the new `release-` branch 2. Add the changelog on it and publish. Now, the code source is released ! ### Add a new Prow test the for the new branch release -1. Create a new prow test under [github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-sigs/controller-runtime](https://github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-sigs/controller-runtime) +1. Create a new prow test under [github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-sigs/controller-runtime](https://github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-sigs/controller-runtime) for the new `release-` branch. (i.e. for the `0.11.0` release see the PR: https://github.com/kubernetes/test-infra/pull/25205) 2. Ping the infra PR in the controller-runtime slack channel for reviews. @@ -45,3 +45,7 @@ For more info, see the release page: https://github.com/kubernetes-sigs/controll ```` 2. An announcement email is sent to `kubebuilder@googlegroups.com` with the subject `[ANNOUNCE] Controller-Runtime $VERSION is released` + +[kubebuilder-release-tools]: https://github.com/kubernetes-sigs/kubebuilder-release-tools +[release-notes-generation]: https://github.com/kubernetes-sigs/kubebuilder-release-tools/blob/master/README.md#release-notes-generation +[release-process]: https://github.com/kubernetes-sigs/kubebuilder/blob/master/VERSIONING.md#releasing diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index 5410e1cdd47..1cecf88e5e5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -33,7 +33,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" toolscache "k8s.io/client-go/tools/cache" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/cache/internal" "sigs.k8s.io/controller-runtime/pkg/client" @@ -83,6 +83,9 @@ type Informers interface { // of the underlying object. GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...InformerGetOption) (Informer, error) + // RemoveInformer removes an informer entry and stops it if it was running. + RemoveInformer(ctx context.Context, obj client.Object) error + // Start runs all the informers known to this cache until the context is closed. // It blocks. Start(ctx context.Context) error @@ -121,6 +124,8 @@ type Informer interface { // HasSynced return true if the informers underlying store has synced. HasSynced() bool + // IsStopped returns true if the informer has been stopped. + IsStopped() bool } // AllNamespaces should be used as the map key to deliminate namespace settings @@ -199,6 +204,12 @@ type Options struct { // unless there is already one set in ByObject or DefaultNamespaces. 
DefaultTransform toolscache.TransformFunc + // DefaultWatchErrorHandler will be used to the WatchErrorHandler which is called + // whenever ListAndWatch drops the connection with an error. + // + // After calling this handler, the informer will backoff and retry. + DefaultWatchErrorHandler toolscache.WatchErrorHandler + // DefaultUnsafeDisableDeepCopy is the default for UnsafeDisableDeepCopy // for everything that doesn't specify this. // @@ -369,7 +380,8 @@ func newCache(restConfig *rest.Config, opts Options) newCacheFunc { Field: config.FieldSelector, }, Transform: config.Transform, - UnsafeDisableDeepCopy: pointer.BoolDeref(config.UnsafeDisableDeepCopy, false), + WatchErrorHandler: opts.DefaultWatchErrorHandler, + UnsafeDisableDeepCopy: ptr.Deref(config.UnsafeDisableDeepCopy, false), NewInformer: opts.newInformer, }), readerFailOnMissingInformer: opts.ReaderFailOnMissingInformer, @@ -400,7 +412,7 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { // Construct a new Mapper if unset if opts.Mapper == nil { var err error - opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config, opts.HTTPClient) + opts.Mapper, err = apiutil.NewDynamicRESTMapper(config, opts.HTTPClient) if err != nil { return Options{}, fmt.Errorf("could not create RESTMapper from config: %w", err) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/delegating_by_gvk_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/delegating_by_gvk_cache.go index f3fa4800d26..4db8208a63f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/delegating_by_gvk_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/delegating_by_gvk_cache.go @@ -52,6 +52,14 @@ func (dbt *delegatingByGVKCache) List(ctx context.Context, list client.ObjectLis return cache.List(ctx, list, opts...) } +func (dbt *delegatingByGVKCache) RemoveInformer(ctx context.Context, obj client.Object) error { + cache, err := dbt.cacheForObject(obj) + if err != nil { + return err + } + return cache.RemoveInformer(ctx, obj) +} + func (dbt *delegatingByGVKCache) GetInformer(ctx context.Context, obj client.Object, opts ...InformerGetOption) (Informer, error) { cache, err := dbt.cacheForObject(obj) if err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go index 0f1b4e93d23..091667b7faa 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go @@ -190,6 +190,17 @@ func (ic *informerCache) getInformerForKind(ctx context.Context, gvk schema.Grou return ic.Informers.Get(ctx, gvk, obj, &internal.GetOptions{}) } +// RemoveInformer deactivates and removes the informer from the cache. +func (ic *informerCache) RemoveInformer(_ context.Context, obj client.Object) error { + gvk, err := apiutil.GVKForObject(obj, ic.scheme) + if err != nil { + return err + } + + ic.Informers.Remove(gvk, obj) + return nil +} + // NeedLeaderElection implements the LeaderElectionRunnable interface // to indicate that this can be started without requiring the leader lock. 
func (ic *informerCache) NeedLeaderElection() bool { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go index eb941f034ed..2e4f5ce5272 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go @@ -23,6 +23,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -117,16 +118,14 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli switch { case listOpts.FieldSelector != nil: - // TODO(directxman12): support more complicated field selectors by - // combining multiple indices, GetIndexers, etc - field, val, requiresExact := selector.RequiresExactMatch(listOpts.FieldSelector) + requiresExact := selector.RequiresExactMatch(listOpts.FieldSelector) if !requiresExact { return fmt.Errorf("non-exact field matches are not supported by the cache") } // list all objects by the field selector. If this is namespaced and we have one, ask for the // namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces" // namespace. - objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val)) + objs, err = byIndexes(c.indexer, listOpts.FieldSelector.Requirements(), listOpts.Namespace) case listOpts.Namespace != "": objs, err = c.indexer.ByIndex(cache.NamespaceIndex, listOpts.Namespace) default: @@ -178,6 +177,54 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli return apimeta.SetList(out, runtimeObjs) } +func byIndexes(indexer cache.Indexer, requires fields.Requirements, namespace string) ([]interface{}, error) { + var ( + err error + objs []interface{} + vals []string + ) + indexers := indexer.GetIndexers() + for idx, req := range requires { + indexName := FieldIndexName(req.Field) + indexedValue := KeyToNamespacedKey(namespace, req.Value) + if idx == 0 { + // we use first require to get snapshot data + // TODO(halfcrazy): use complicated index when client-go provides byIndexes + // https://github.com/kubernetes/kubernetes/issues/109329 + objs, err = indexer.ByIndex(indexName, indexedValue) + if err != nil { + return nil, err + } + if len(objs) == 0 { + return nil, nil + } + continue + } + fn, exist := indexers[indexName] + if !exist { + return nil, fmt.Errorf("index with name %s does not exist", indexName) + } + filteredObjects := make([]interface{}, 0, len(objs)) + for _, obj := range objs { + vals, err = fn(obj) + if err != nil { + return nil, err + } + for _, val := range vals { + if val == indexedValue { + filteredObjects = append(filteredObjects, obj) + break + } + } + } + if len(filteredObjects) == 0 { + return nil, nil + } + objs = filteredObjects + } + return objs, nil +} + // objectKeyToStorageKey converts an object key to store key. // It's akin to MetaNamespaceKeyFunc. 
It's separate from // String to allow keeping the key format easily in sync with diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go index 1d2c9ce2b43..c270e809ca3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go @@ -36,6 +36,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/internal/syncs" ) // InformersOpts configures an InformerMap. @@ -49,6 +50,7 @@ type InformersOpts struct { Selector Selector Transform cache.TransformFunc UnsafeDisableDeepCopy bool + WatchErrorHandler cache.WatchErrorHandler } // NewInformers creates a new InformersMap that can create informers under the hood. @@ -76,6 +78,7 @@ func NewInformers(config *rest.Config, options *InformersOpts) *Informers { transform: options.Transform, unsafeDisableDeepCopy: options.UnsafeDisableDeepCopy, newInformer: newInformer, + watchErrorHandler: options.WatchErrorHandler, } } @@ -86,6 +89,20 @@ type Cache struct { // CacheReader wraps Informer and implements the CacheReader interface for a single type Reader CacheReader + + // Stop can be used to stop this individual informer. + stop chan struct{} +} + +// Start starts the informer managed by a MapEntry. +// Blocks until the informer stops. The informer can be stopped +// either individually (via the entry's stop channel) or globally +// via the provided stop argument. +func (c *Cache) Start(stop <-chan struct{}) { + // Stop on either the whole map stopping or just this informer being removed. + internalStop, cancel := syncs.MergeChans(stop, c.stop) + defer cancel() + c.Informer.Run(internalStop) } type tracker struct { @@ -159,6 +176,11 @@ type Informers struct { // NewInformer allows overriding of the shared index informer constructor for testing. newInformer func(cache.ListerWatcher, runtime.Object, time.Duration, cache.Indexers) cache.SharedIndexInformer + + // WatchErrorHandler allows the shared index informer's + // watchErrorHandler to be set by overriding the options + // or to use the default watchErrorHandler + watchErrorHandler cache.WatchErrorHandler } // Start calls Run on each of the informers and sets started to true. Blocks on the context. @@ -173,13 +195,13 @@ func (ip *Informers) Start(ctx context.Context) error { // Start each informer for _, i := range ip.tracker.Structured { - ip.startInformerLocked(i.Informer) + ip.startInformerLocked(i) } for _, i := range ip.tracker.Unstructured { - ip.startInformerLocked(i.Informer) + ip.startInformerLocked(i) } for _, i := range ip.tracker.Metadata { - ip.startInformerLocked(i.Informer) + ip.startInformerLocked(i) } // Set started to true so we immediately start any informers added later. @@ -194,7 +216,7 @@ func (ip *Informers) Start(ctx context.Context) error { return nil } -func (ip *Informers) startInformerLocked(informer cache.SharedIndexInformer) { +func (ip *Informers) startInformerLocked(cacheEntry *Cache) { // Don't start the informer in case we are already waiting for the items in // the waitGroup to finish, since waitGroups don't support waiting and adding // at the same time. 
@@ -205,7 +227,7 @@ func (ip *Informers) startInformerLocked(informer cache.SharedIndexInformer) { ip.waitGroup.Add(1) go func() { defer ip.waitGroup.Done() - informer.Run(ip.ctx.Done()) + cacheEntry.Start(ip.ctx.Done()) }() } @@ -281,6 +303,21 @@ func (ip *Informers) Get(ctx context.Context, gvk schema.GroupVersionKind, obj r return started, i, nil } +// Remove removes an informer entry and stops it if it was running. +func (ip *Informers) Remove(gvk schema.GroupVersionKind, obj runtime.Object) { + ip.mu.Lock() + defer ip.mu.Unlock() + + informerMap := ip.informersByType(obj) + + entry, ok := informerMap[gvk] + if !ok { + return + } + close(entry.stop) + delete(informerMap, gvk) +} + func (ip *Informers) informersByType(obj runtime.Object) map[schema.GroupVersionKind]*Cache { switch obj.(type) { case runtime.Unstructured: @@ -323,6 +360,13 @@ func (ip *Informers) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.O cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, }) + // Set WatchErrorHandler on SharedIndexInformer if set + if ip.watchErrorHandler != nil { + if err := sharedIndexInformer.SetWatchErrorHandler(ip.watchErrorHandler); err != nil { + return nil, false, err + } + } + // Check to see if there is a transformer for this gvk if err := sharedIndexInformer.SetTransform(ip.transform); err != nil { return nil, false, err @@ -342,13 +386,14 @@ func (ip *Informers) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.O scopeName: mapping.Scope.Name(), disableDeepCopy: ip.unsafeDisableDeepCopy, }, + stop: make(chan struct{}), } ip.informersByType(obj)[gvk] = i // Start the informer in case the InformersMap has started, otherwise it will be // started when the InformersMap starts. if ip.started { - ip.startInformerLocked(i.Informer) + ip.startInformerLocked(i) } return i, ip.started, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go index 87c31a7c037..e38da1455c2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go @@ -109,6 +109,27 @@ func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object return &multiNamespaceInformer{namespaceToInformer: namespaceToInformer}, nil } +func (c *multiNamespaceCache) RemoveInformer(ctx context.Context, obj client.Object) error { + // If the object is clusterscoped, get the informer from clusterCache, + // if not use the namespaced caches. + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return err + } + if !isNamespaced { + return c.clusterCache.RemoveInformer(ctx, obj) + } + + for _, cache := range c.namespaceToCache { + err := cache.RemoveInformer(ctx, obj) + if err != nil { + return err + } + } + + return nil +} + func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...InformerGetOption) (Informer, error) { // If the object is cluster scoped, get the informer from clusterCache, // if not use the namespaced caches. @@ -391,3 +412,13 @@ func (i *multiNamespaceInformer) HasSynced() bool { } return true } + +// IsStopped checks if each namespaced informer has stopped, returns false if any are still running. 
+func (i *multiNamespaceInformer) IsStopped() bool { + for _, informer := range i.namespaceToInformer { + if stopped := informer.IsStopped(); !stopped { + return false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index 6a1bfb546ea..3c0206bea53 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -31,11 +31,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" ) var ( @@ -60,25 +58,6 @@ func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error { return addToScheme(protobufScheme) } -// NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery -// information fetched by a new client with the given config. -func NewDiscoveryRESTMapper(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { - if httpClient == nil { - return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") - } - - // Get a mapper - dc, err := discovery.NewDiscoveryClientForConfigAndClient(c, httpClient) - if err != nil { - return nil, err - } - gr, err := restmapper.GetAPIGroupResources(dc) - if err != nil { - return nil, err - } - return restmapper.NewDiscoveryRESTMapper(gr), nil -} - // IsObjectNamespaced returns true if the object is namespace scoped. // For unstructured objects the gvk is found from the object itself. func IsObjectNamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper meta.RESTMapper) (bool, error) { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go index d5e03b2b19e..5af02063b8c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go @@ -21,6 +21,7 @@ import ( "net/http" "sync" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -166,8 +167,10 @@ func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) er if err != nil { return err } - for _, version := range apiGroup.Versions { - versions = append(versions, version.Version) + if apiGroup != nil { + for _, version := range apiGroup.Versions { + versions = append(versions, version.Version) + } } } @@ -254,17 +257,12 @@ func (m *mapper) findAPIGroupByName(groupName string) (*metav1.APIGroup, error) m.mu.Unlock() // Looking in the cache again. - { - m.mu.RLock() - group, ok := m.apiGroups[groupName] - m.mu.RUnlock() - if ok { - return group, nil - } - } + m.mu.RLock() + defer m.mu.RUnlock() - // If there is still nothing, return an error. - return nil, fmt.Errorf("failed to find API group %q", groupName) + // Don't return an error here if the API group is not present. + // The reloaded RESTMapper will take care of returning a NoMatchError. + return m.apiGroups[groupName], nil } // fetchGroupVersionResources fetches the resources for the specified group and its versions. 
@@ -276,7 +274,7 @@ func (m *mapper) fetchGroupVersionResources(groupName string, versions ...string groupVersion := schema.GroupVersion{Group: groupName, Version: version} apiResourceList, err := m.client.ServerResourcesForGroupVersion(groupVersion.String()) - if err != nil { + if err != nil && !apierrors.IsNotFound(err) { failedGroups[groupVersion] = err } if apiResourceList != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index 2fb0acb7b39..c0ebb39e3d5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -90,11 +90,18 @@ type CacheOptions struct { type NewClientFunc func(config *rest.Config, options Options) (Client, error) // New returns a new Client using the provided config and Options. -// The returned client reads *and* writes directly from the server -// (it doesn't use object caches). It understands how to work with -// normal types (both custom resources and aggregated/built-in resources), -// as well as unstructured types. // +// The client's read behavior is determined by Options.Cache. +// If either Options.Cache or Options.Cache.Reader is nil, +// the client reads directly from the API server. +// If both Options.Cache and Options.Cache.Reader are non-nil, +// the client reads from a local cache. However, specific +// resources can still be configured to bypass the cache based +// on Options.Cache.Unstructured and Options.Cache.DisableFor. +// Write operations are always performed directly on the API server. +// +// The client understands how to work with normal types (both custom resources +// and aggregated/built-in resources), as well as unstructured types. // In the case of normal types, the scheme will be used to look up the // corresponding group, version, and kind for the given type. In the // case of unstructured types, the group, version, and kind will be extracted @@ -210,7 +217,8 @@ func newClient(config *rest.Config, options Options) (*client, error) { var _ Client = &client{} -// client is a client.Client that reads and writes directly from/to an API server. +// client is a client.Client configured to either read from a local cache or directly from the API server. +// Write operations are always performed directly on the API server. // It lazily initializes new clients at the time they are used. type client struct { typedClient typedClient diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go index 9deb6756c38..790a1faab19 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go @@ -334,10 +334,12 @@ func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Ob // tries to assign whatever it finds into a ListType it gets from schema.New() - Thus we have to ensure // we save as the very same type, otherwise subsequent List requests will fail. 
func convertFromUnstructuredIfNecessary(s *runtime.Scheme, o runtime.Object) (runtime.Object, error) { - gvk := o.GetObjectKind().GroupVersionKind() - u, isUnstructured := o.(runtime.Unstructured) - if !isUnstructured || !s.Recognizes(gvk) { + if !isUnstructured { + return o, nil + } + gvk := o.GetObjectKind().GroupVersionKind() + if !s.Recognizes(gvk) { return o, nil } @@ -380,12 +382,9 @@ func (t versionedTracker) update(gvr schema.GroupVersionResource, obj runtime.Ob field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")}) } - gvk := obj.GetObjectKind().GroupVersionKind() - if gvk.Empty() { - gvk, err = apiutil.GVKForObject(obj, t.scheme) - if err != nil { - return err - } + gvk, err := apiutil.GVKForObject(obj, t.scheme) + if err != nil { + return err } oldObject, err := t.ObjectTracker.Get(gvr, ns, accessor.GetName()) @@ -464,25 +463,25 @@ func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.O return err } - gvk, err := apiutil.GVKForObject(obj, c.scheme) - if err != nil { - return err - } - ta, err := meta.TypeAccessor(o) - if err != nil { - return err + if _, isUnstructured := obj.(runtime.Unstructured); isUnstructured { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + ta.SetKind(gvk.Kind) + ta.SetAPIVersion(gvk.GroupVersion().String()) } - ta.SetKind(gvk.Kind) - ta.SetAPIVersion(gvk.GroupVersion().String()) j, err := json.Marshal(o) if err != nil { return err } - decoder := scheme.Codecs.UniversalDecoder() zero(obj) - _, _, err = decoder.Decode(j, nil, obj) - return err + return json.Unmarshal(j, obj) } func (c *fakeClient) Watch(ctx context.Context, list client.ObjectList, opts ...client.ListOption) (watch.Interface, error) { @@ -527,21 +526,21 @@ func (c *fakeClient) List(ctx context.Context, obj client.ObjectList, opts ...cl return err } - ta, err := meta.TypeAccessor(o) - if err != nil { - return err + if _, isUnstructured := obj.(runtime.Unstructured); isUnstructured { + ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + ta.SetKind(originalKind) + ta.SetAPIVersion(gvk.GroupVersion().String()) } - ta.SetKind(originalKind) - ta.SetAPIVersion(gvk.GroupVersion().String()) j, err := json.Marshal(o) if err != nil { return err } - decoder := scheme.Codecs.UniversalDecoder() zero(obj) - _, _, err = decoder.Decode(j, nil, obj) - if err != nil { + if err := json.Unmarshal(j, obj); err != nil { return err } @@ -588,9 +587,7 @@ func (c *fakeClient) filterList(list []runtime.Object, gvk schema.GroupVersionKi } func (c *fakeClient) filterWithFields(list []runtime.Object, gvk schema.GroupVersionKind, fs fields.Selector) ([]runtime.Object, error) { - // We only allow filtering on the basis of a single field to ensure consistency with the - // behavior of the cache reader (which we're faking here). - fieldKey, fieldVal, requiresExact := selector.RequiresExactMatch(fs) + requiresExact := selector.RequiresExactMatch(fs) if !requiresExact { return nil, fmt.Errorf("field selector %s is not in one of the two supported forms \"key==val\" or \"key=val\"", fs) @@ -599,15 +596,24 @@ func (c *fakeClient) filterWithFields(list []runtime.Object, gvk schema.GroupVer // Field selection is mimicked via indexes, so there's no sane answer this function can give // if there are no indexes registered for the GroupVersionKind of the objects in the list. 
indexes := c.indexes[gvk] - if len(indexes) == 0 || indexes[fieldKey] == nil { - return nil, fmt.Errorf("List on GroupVersionKind %v specifies selector on field %s, but no "+ - "index with name %s has been registered for GroupVersionKind %v", gvk, fieldKey, fieldKey, gvk) + for _, req := range fs.Requirements() { + if len(indexes) == 0 || indexes[req.Field] == nil { + return nil, fmt.Errorf("List on GroupVersionKind %v specifies selector on field %s, but no "+ + "index with name %s has been registered for GroupVersionKind %v", gvk, req.Field, req.Field, gvk) + } } - indexExtractor := indexes[fieldKey] filteredList := make([]runtime.Object, 0, len(list)) for _, obj := range list { - if c.objMatchesFieldSelector(obj, indexExtractor, fieldVal) { + matches := true + for _, req := range fs.Requirements() { + indexExtractor := indexes[req.Field] + if !c.objMatchesFieldSelector(obj, indexExtractor, req.Value) { + matches = false + break + } + } + if matches { filteredList = append(filteredList, obj) } } @@ -862,21 +868,22 @@ func (c *fakeClient) patch(obj client.Object, patch client.Patch, opts ...client if !handled { panic("tracker could not handle patch method") } - ta, err := meta.TypeAccessor(o) - if err != nil { - return err + + if _, isUnstructured := obj.(runtime.Unstructured); isUnstructured { + ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + ta.SetKind(gvk.Kind) + ta.SetAPIVersion(gvk.GroupVersion().String()) } - ta.SetKind(gvk.Kind) - ta.SetAPIVersion(gvk.GroupVersion().String()) j, err := json.Marshal(o) if err != nil { return err } - decoder := scheme.Codecs.UniversalDecoder() zero(obj) - _, _, err = decoder.Decode(j, nil, obj) - return err + return json.Unmarshal(j, obj) } // Applying a patch results in a deletionTimestamp that is truncated to the nearest second. @@ -1247,6 +1254,8 @@ func inTreeResourcesWithStatus() []schema.GroupVersionKind { {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchema"}, {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfiguration"}, + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "FlowSchema"}, + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "PriorityLevelConfiguration"}, } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index d81bf25de9a..798506f4865 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -419,7 +419,7 @@ type ListOptions struct { LabelSelector labels.Selector // FieldSelector filters results by a particular field. In order // to use this with cache-based implementations, restrict usage to - // a single field-value pair that's been added to the indexers. + // exact match field-value pair that's been added to the indexers. FieldSelector fields.Selector // Namespace represents the namespace to list for, or empty for @@ -514,7 +514,8 @@ type MatchingLabels map[string]string func (m MatchingLabels) ApplyToList(opts *ListOptions) { // TODO(directxman12): can we avoid reserializing this over and over? if opts.LabelSelector == nil { - opts.LabelSelector = labels.NewSelector() + opts.LabelSelector = labels.SelectorFromValidatedSet(map[string]string(m)) + return } // If there's already a selector, we need to AND the two together. 
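A small sketch of the ApplyToList behavior shown above, using only existing client types: the first call seeds the selector from the validated label set and returns, and a later call ANDs its labels onto the existing selector.

package example

import (
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// demoMatchingLabels applies two label sets to the same ListOptions.
func demoMatchingLabels() {
	opts := &client.ListOptions{}
	client.MatchingLabels{"app": "web"}.ApplyToList(opts)
	client.MatchingLabels{"tier": "frontend"}.ApplyToList(opts)
	fmt.Println(opts.LabelSelector) // app=web,tier=frontend
}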
noValidSel := labels.SelectorFromValidatedSet(map[string]string(m)) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go index f76e012ea8a..05153f74ce7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" @@ -77,8 +77,8 @@ func SetControllerReference(owner, controlled metav1.Object, scheme *runtime.Sch Kind: gvk.Kind, Name: owner.GetName(), UID: owner.GetUID(), - BlockOwnerDeletion: pointer.Bool(true), - Controller: pointer.Bool(true), + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), } // Return early with an error if the object is already controlled. @@ -121,6 +121,84 @@ func SetOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme) erro return nil } +// RemoveOwnerReference is a helper method to make sure the given object removes an owner reference to the object provided. +// This allows you to remove the owner to establish a new owner of the object in a subsequent call. +func RemoveOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme) error { + owners := object.GetOwnerReferences() + length := len(owners) + if length < 1 { + return fmt.Errorf("%T does not have any owner references", object) + } + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call RemoveOwnerReference", owner) + } + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + + index := indexOwnerRef(owners, metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Name: owner.GetName(), + Kind: gvk.Kind, + }) + if index == -1 { + return fmt.Errorf("%T does not have an owner reference for %T", object, owner) + } + + owners = append(owners[:index], owners[index+1:]...) 
+ object.SetOwnerReferences(owners) + return nil +} + +// HasControllerReference returns true if the object +// has an owner ref with controller equal to true +func HasControllerReference(object metav1.Object) bool { + owners := object.GetOwnerReferences() + for _, owner := range owners { + isTrue := owner.Controller + if owner.Controller != nil && *isTrue { + return true + } + } + return false +} + +// RemoveControllerReference removes an owner reference where the controller +// equals true +func RemoveControllerReference(owner, object metav1.Object, scheme *runtime.Scheme) error { + if ok := HasControllerReference(object); !ok { + return fmt.Errorf("%T does not have an owner reference with controller equals true", object) + } + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call RemoveControllerReference", owner) + } + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + ownerRefs := object.GetOwnerReferences() + index := indexOwnerRef(ownerRefs, metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Name: owner.GetName(), + Kind: gvk.Kind, + }) + + if index == -1 { + return fmt.Errorf("%T does not have a controller reference for %T", object, owner) + } + + if ownerRefs[index].Controller == nil || !*ownerRefs[index].Controller { + return fmt.Errorf("%T owner is not the controller reference for %T", owner, object) + } + + ownerRefs = append(ownerRefs[:index], ownerRefs[index+1:]...) + object.SetOwnerReferences(ownerRefs) + return nil +} + func upsertOwnerRef(ref metav1.OwnerReference, object metav1.Object) { owners := object.GetOwnerReferences() if idx := indexOwnerRef(owners, ref); idx == -1 { @@ -166,7 +244,6 @@ func referSameObject(a, b metav1.OwnerReference) bool { if err != nil { return false } - return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name } @@ -193,6 +270,9 @@ const ( // They should complete the sentence "Deployment default/foo has been .. // The MutateFn is called regardless of creating or updating an object. // // It returns the executed operation and an error. +// +// Note: changes made by MutateFn to any sub-resource (status...) will be +// discarded. func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) { key := client.ObjectKeyFromObject(obj) if err := c.Get(ctx, key, obj); err != nil { @@ -230,6 +310,12 @@ func CreateOrPatch(ctx context.Context, c client.Client, obj client.Object, f M // The MutateFn is called regardless of creating or updating an object. // // It returns the executed operation and an error. +// +// Note: changes to any sub-resource other than status will be ignored. +// Changes to the status sub-resource will only be applied if the object +// already exists.
To change the status on object creation, the easiest +// way is to requeue the object in the controller if OperationResult is +// OperationResultCreated func CreateOrPatch(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) { key := client.ObjectKeyFromObject(obj) if err := c.Get(ctx, key, obj); err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go index f9c58ea26a3..5fdd657cd7f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go @@ -38,7 +38,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/util/retry" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/yaml" "sigs.k8s.io/controller-runtime/pkg/client" @@ -364,19 +364,26 @@ func modifyConversionWebhooks(crds []*apiextensionsv1.CustomResourceDefinition, if err != nil { return err } - url := pointer.String(fmt.Sprintf("https://%s/convert", hostPort)) + url := ptr.To(fmt.Sprintf("https://%s/convert", hostPort)) for i := range crds { // Continue if we're preserving unknown fields. if crds[i].Spec.PreserveUnknownFields { continue } - // Continue if the GroupKind isn't registered as being convertible. - if _, ok := convertibles[schema.GroupKind{ - Group: crds[i].Spec.Group, - Kind: crds[i].Spec.Names.Kind, - }]; !ok { - continue + if !webhookOptions.IgnoreSchemeConvertible { + // Continue if the GroupKind isn't registered as being convertible, + // and remove any existing conversion webhooks if they exist. + // This is to prevent the CRD from being rejected by the apiserver, usually + // manifests that are generated by controller-gen will have a conversion + // webhook set, but we don't want to enable it if the type isn't registered. + if _, ok := convertibles[schema.GroupKind{ + Group: crds[i].Spec.Group, + Kind: crds[i].Spec.Names.Kind, + }]; !ok { + crds[i].Spec.Conversion = nil + continue + } } if crds[i].Spec.Conversion == nil { crds[i].Spec.Conversion = &apiextensionsv1.CustomResourceConversion{ diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go index f7e43a14802..e4e54e472c7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go @@ -49,6 +49,11 @@ type WebhookInstallOptions struct { // ValidatingWebhooks is a list of ValidatingWebhookConfigurations to install ValidatingWebhooks []*admissionv1.ValidatingWebhookConfiguration + // IgnoreSchemeConvertible, will modify any CRD conversion webhook to use the local serving host and port, + // bypassing the need to have the types registered in the Scheme. This is useful for testing CRD conversion webhooks + // with unregistered or unstructured types. 
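A hedged sketch of how the new IgnoreSchemeConvertible option is meant to be used from a test suite; the directory paths and the wrapper function are placeholders, not part of this change.

package example

import "sigs.k8s.io/controller-runtime/pkg/envtest"

// startTestEnv starts an envtest control plane whose CRD conversion webhooks
// are rewired to the local webhook server even though the CRD Go types are
// not registered in any scheme known to the test binary.
func startTestEnv() (*envtest.Environment, error) {
	env := &envtest.Environment{
		CRDDirectoryPaths:     []string{"config/crd/bases"}, // placeholder path
		ErrorIfCRDPathMissing: true,
		WebhookInstallOptions: envtest.WebhookInstallOptions{
			Paths:                   []string{"config/webhook"}, // placeholder path
			IgnoreSchemeConvertible: true,
		},
	}
	if _, err := env.Start(); err != nil {
		return nil, err
	}
	return env, nil
}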
+ IgnoreSchemeConvertible bool + // IgnoreErrorIfPathMissing will ignore an error if a DirectoryPath does not exist when set to true IgnoreErrorIfPathMissing bool @@ -184,7 +189,8 @@ func defaultWebhookOptions(o *WebhookInstallOptions) { func WaitForWebhooks(config *rest.Config, mutatingWebhooks []*admissionv1.MutatingWebhookConfiguration, validatingWebhooks []*admissionv1.ValidatingWebhookConfiguration, - options WebhookInstallOptions) error { + options WebhookInstallOptions, +) error { waitingFor := map[schema.GroupVersionKind]*sets.Set[string]{} for _, hook := range mutatingWebhooks { @@ -242,7 +248,7 @@ func (p *webhookPoller) poll(ctx context.Context) (done bool, err error) { continue } for _, name := range names.UnsortedList() { - var obj = &unstructured.Unstructured{} + obj := &unstructured.Unstructured{} obj.SetGroupVersionKind(gvk) err := c.Get(context.Background(), client.ObjectKey{ Namespace: "", diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go index 2f380f4fc4d..ff2f3e80b2a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go @@ -42,7 +42,7 @@ import ( // Unless you are implementing your own EventHandler, you can ignore the functions on the EventHandler interface. // Most users shouldn't need to implement their own EventHandler. type EventHandler interface { - // Create is called in response to an create event - e.g. Pod Creation. + // Create is called in response to a create event - e.g. Pod Creation. Create(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) // Update is called in response to an update event - e.g. Pod Updated. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/field/selector/utils.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/field/selector/utils.go index 4f6d0843184..8f6dc71ede5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/field/selector/utils.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/field/selector/utils.go @@ -22,14 +22,16 @@ import ( ) // RequiresExactMatch checks if the given field selector is of the form `k=v` or `k==v`. -func RequiresExactMatch(sel fields.Selector) (field, val string, required bool) { +func RequiresExactMatch(sel fields.Selector) bool { reqs := sel.Requirements() - if len(reqs) != 1 { - return "", "", false + if len(reqs) == 0 { + return false } - req := reqs[0] - if req.Operator != selection.Equals && req.Operator != selection.DoubleEquals { - return "", "", false + + for _, req := range reqs { + if req.Operator != selection.Equals && req.Operator != selection.DoubleEquals { + return false + } } - return req.Field, req.Value, true + return true } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/syncs/syncs.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/syncs/syncs.go new file mode 100644 index 00000000000..c78a30377a3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/syncs/syncs.go @@ -0,0 +1,38 @@ +package syncs + +import ( + "context" + "reflect" + "sync" +) + +// MergeChans returns a channel that is closed when any of the input channels are signaled. +// The caller must call the returned CancelFunc to ensure no resources are leaked. 
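Because the syncs package is internal to controller-runtime it cannot be imported by user code, so the standalone snippet below only illustrates the semantics the helper provides, reduced to the two-channel case; the names are mine.

package main

import (
	"fmt"
	"time"
)

// mergedDone mirrors the two-channel case of the generic helper: the returned
// channel is closed as soon as either input channel is closed or receives.
func mergedDone(a, b <-chan struct{}) <-chan struct{} {
	out := make(chan struct{})
	go func() {
		defer close(out)
		select {
		case <-a:
		case <-b:
		}
	}()
	return out
}

func main() {
	a := make(chan struct{})
	b := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(b) // signal one of the sources
	}()
	<-mergedDone(a, b)
	fmt.Println("one of the inputs was signaled")
}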
+func MergeChans[T any](chans ...<-chan T) (<-chan T, context.CancelFunc) { + var once sync.Once + out := make(chan T) + cancel := make(chan T) + cancelFunc := func() { + once.Do(func() { + close(cancel) + }) + <-out + } + cases := make([]reflect.SelectCase, len(chans)+1) + for i := range chans { + cases[i] = reflect.SelectCase{ + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(chans[i]), + } + } + cases[len(cases)-1] = reflect.SelectCase{ + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(cancel), + } + go func() { + defer close(out) + _, _, _ = reflect.Select(cases) + }() + + return out, cancelFunc +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/kubectl.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/kubectl.go index a27b7a0ff88..a41bb77c4dd 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/kubectl.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/kubectl.go @@ -112,6 +112,7 @@ func (k *KubeCtl) Run(args ...string) (stdout, stderr io.Reader, err error) { cmd := exec.Command(k.Path, allArgs...) cmd.Stdout = stdoutBuffer cmd.Stderr = stderrBuffer + cmd.SysProcAttr = process.GetSysProcAttr() err = cmd.Run() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/procattr_other.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/procattr_other.go new file mode 100644 index 00000000000..df13b341a48 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/procattr_other.go @@ -0,0 +1,28 @@ +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import "syscall" + +// GetSysProcAttr returns the SysProcAttr to use for the process, +// for non-unix systems this returns nil. +func GetSysProcAttr() *syscall.SysProcAttr { + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/procattr_unix.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/procattr_unix.go new file mode 100644 index 00000000000..83ad509af0f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/procattr_unix.go @@ -0,0 +1,33 @@ +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import ( + "golang.org/x/sys/unix" +) + +// GetSysProcAttr returns the SysProcAttr to use for the process, +// for unix systems this returns a SysProcAttr with Setpgid set to true, +// which inherits the parent's process group id. +func GetSysProcAttr() *unix.SysProcAttr { + return &unix.SysProcAttr{ + Setpgid: true, + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go index af83c70a2fa..03f252524a3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go @@ -155,6 +155,7 @@ func (ps *State) Start(stdout, stderr io.Writer) (err error) { ps.Cmd = exec.Command(ps.Path, ps.Args...) ps.Cmd.Stdout = stdout ps.Cmd.Stderr = stderr + ps.Cmd.SysProcAttr = GetSysProcAttr() ready := make(chan bool) timedOut := time.After(ps.StartTimeout) @@ -265,6 +266,9 @@ func (ps *State) Stop() error { case <-ps.waitDone: break case <-timedOut: + if err := ps.Cmd.Process.Signal(syscall.SIGKILL); err != nil { + return fmt.Errorf("unable to kill process %s: %w", ps.Path, err) + } return fmt.Errorf("timeout waiting for process %s to stop", path.Base(ps.Path)) } ps.ready = false diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go index 708a9cc16f7..25c3c7375b2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go @@ -34,7 +34,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/cache" @@ -409,10 +409,10 @@ func New(config *rest.Config, options Options) (Manager, error) { return nil, fmt.Errorf("failed to new pprof listener: %w", err) } - errChan := make(chan error) + errChan := make(chan error, 1) runnables := newRunnables(options.BaseContext, errChan) return &controllerManager{ - stopProcedureEngaged: pointer.Int64(0), + stopProcedureEngaged: ptr.To(int64(0)), cluster: cluster, runnables: runnables, errChan: errChan, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go index 277b8788100..cff1de4c1c8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go @@ -54,14 +54,14 @@ var ( Subsystem: WorkQueueSubsystem, Name: QueueLatencyKey, Help: "How long in seconds an item stays in workqueue before being requested", - Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 12), }, []string{"name"}) workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Subsystem: WorkQueueSubsystem, Name: WorkDurationKey, Help: "How long in seconds processing an 
item from workqueue takes.", - Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 12), }, []string{"name"}) unfinished = prometheus.NewGaugeVec(prometheus.GaugeOpts{ diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go index 0f4e7e16bb0..f1cce87c857 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go @@ -19,9 +19,11 @@ package reconcile import ( "context" "errors" + "reflect" "time" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" ) // Result contains the result of a Reconciler invocation. @@ -97,7 +99,7 @@ type Reconciler interface { // If the error is nil and the returned Result has a non-zero result.RequeueAfter, the request // will be requeued after the specified duration. // - // If the error is nil and result.RequeueAfter is zero and result.Reque is true, the request + // If the error is nil and result.RequeueAfter is zero and result.Requeue is true, the request // will be requeued using exponential backoff. Reconcile(context.Context, Request) (Result, error) } @@ -110,6 +112,36 @@ var _ Reconciler = Func(nil) // Reconcile implements Reconciler. func (r Func) Reconcile(ctx context.Context, o Request) (Result, error) { return r(ctx, o) } +// ObjectReconciler is a specialized version of Reconciler that acts on instances of client.Object. Each reconciliation +// event gets the associated object from Kubernetes before passing it to Reconcile. An ObjectReconciler can be used in +// Builder.Complete by calling AsReconciler. See Reconciler for more details. +type ObjectReconciler[T client.Object] interface { + Reconcile(context.Context, T) (Result, error) +} + +// AsReconciler creates a Reconciler based on the given ObjectReconciler. +func AsReconciler[T client.Object](client client.Client, rec ObjectReconciler[T]) Reconciler { + return &objectReconcilerAdapter[T]{ + objReconciler: rec, + client: client, + } +} + +type objectReconcilerAdapter[T client.Object] struct { + objReconciler ObjectReconciler[T] + client client.Client +} + +// Reconcile implements Reconciler. +func (a *objectReconcilerAdapter[T]) Reconcile(ctx context.Context, req Request) (Result, error) { + o := reflect.New(reflect.TypeOf(*new(T)).Elem()).Interface().(T) + if err := a.client.Get(ctx, req.NamespacedName, o); err != nil { + return Result{}, client.IgnoreNotFound(err) + } + + return a.objReconciler.Reconcile(ctx, o) +} + // TerminalError is an error that will not be retried but still be logged // and recorded in metrics. func TerminalError(wrapped error) error { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go index a3b72071687..c9662ce1c02 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go @@ -27,12 +27,14 @@ import ( ) // Defaulter defines functions for setting defaults on resources. +// Deprecated: Use CustomDefaulter instead. type Defaulter interface { runtime.Object Default() } // DefaultingWebhookFor creates a new Webhook for Defaulting the provided type. +// Deprecated: Use WithCustomDefaulter instead.
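The ObjectReconciler and AsReconciler additions to pkg/reconcile above can be wired into a controller roughly as follows; the Deployment-based reconciler is a hypothetical example, not part of this change.

package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// deploymentReconciler receives an already-fetched Deployment instead of a
// bare Request; the adapter built by AsReconciler performs the Get.
type deploymentReconciler struct{}

func (r *deploymentReconciler) Reconcile(ctx context.Context, deploy *appsv1.Deployment) (reconcile.Result, error) {
	// work directly with the typed object here
	_ = deploy.Spec.Replicas
	return reconcile.Result{}, nil
}

func setupController(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&appsv1.Deployment{}).
		Complete(reconcile.AsReconciler[*appsv1.Deployment](mgr.GetClient(), &deploymentReconciler{}))
}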
func DefaultingWebhookFor(scheme *runtime.Scheme, defaulter Defaulter) *Webhook { return &Webhook{ Handler: &mutatingHandler{defaulter: defaulter, decoder: NewDecoder(scheme)}, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go index 57e465abb3b..f049fb66e6a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go @@ -34,6 +34,26 @@ import ( var admissionScheme = runtime.NewScheme() var admissionCodecs = serializer.NewCodecFactory(admissionScheme) +// adapted from https://github.com/kubernetes/kubernetes/blob/c28c2009181fcc44c5f6b47e10e62dacf53e4da0/staging/src/k8s.io/pod-security-admission/cmd/webhook/server/server.go +// +// From https://github.com/kubernetes/apiserver/blob/d6876a0600de06fef75968c4641c64d7da499f25/pkg/server/config.go#L433-L442C5: +// +// 1.5MB is the recommended client request size in byte +// the etcd server should accept. See +// https://github.com/etcd-io/etcd/blob/release-3.4/embed/config.go#L56. +// A request body might be encoded in json, and is converted to +// proto when persisted in etcd, so we allow 2x as the largest request +// body size to be accepted and decoded in a write request. +// +// For the admission request, we can infer that it contains at most two objects +// (the old and new versions of the object being admitted), each of which can +// be at most 3MB in size. For the rest of the request, we can assume that +// it will be less than 1MB in size. Therefore, we can set the max request +// size to 7MB. +// If your use case requires larger max request sizes, please +// open an issue (https://github.com/kubernetes-sigs/controller-runtime/issues/new). 
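The size guard added to ServeHTTP boils down to an io.LimitedReader whose remaining budget is checked after reading. This small standalone program, with a deliberately tiny limit, mirrors that logic; it is an illustration, not the handler itself.

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// maxBody is deliberately tiny here; the webhook handler above uses 7 MiB.
const maxBody = int64(16)

// readLimited reads at most maxBody bytes and reports an error once the
// budget of the LimitedReader is exhausted.
func readLimited(r io.Reader) ([]byte, error) {
	lr := &io.LimitedReader{R: r, N: maxBody}
	body, err := io.ReadAll(lr)
	if err != nil {
		return nil, err
	}
	if lr.N <= 0 {
		return nil, errors.New("request entity is too large")
	}
	return body, nil
}

func main() {
	if _, err := readLimited(strings.NewReader(strings.Repeat("x", 64))); err != nil {
		fmt.Println(err) // request entity is too large
	}
}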
+const maxRequestSize = int64(7 * 1024 * 1024) + func init() { utilruntime.Must(v1.AddToScheme(admissionScheme)) utilruntime.Must(v1beta1.AddToScheme(admissionScheme)) @@ -42,27 +62,30 @@ func init() { var _ http.Handler = &Webhook{} func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var body []byte - var err error ctx := r.Context() if wh.WithContextFunc != nil { ctx = wh.WithContextFunc(ctx, r) } - var reviewResponse Response - if r.Body == nil { - err = errors.New("request body is empty") + if r.Body == nil || r.Body == http.NoBody { + err := errors.New("request body is empty") wh.getLogger(nil).Error(err, "bad request") - reviewResponse = Errored(http.StatusBadRequest, err) - wh.writeResponse(w, reviewResponse) + wh.writeResponse(w, Errored(http.StatusBadRequest, err)) return } defer r.Body.Close() - if body, err = io.ReadAll(r.Body); err != nil { + limitedReader := &io.LimitedReader{R: r.Body, N: maxRequestSize} + body, err := io.ReadAll(limitedReader) + if err != nil { wh.getLogger(nil).Error(err, "unable to read the body from the incoming request") - reviewResponse = Errored(http.StatusBadRequest, err) - wh.writeResponse(w, reviewResponse) + wh.writeResponse(w, Errored(http.StatusBadRequest, err)) + return + } + if limitedReader.N <= 0 { + err := fmt.Errorf("request entity is too large; limit is %d bytes", maxRequestSize) + wh.getLogger(nil).Error(err, "unable to read the body from the incoming request; limit reached") + wh.writeResponse(w, Errored(http.StatusRequestEntityTooLarge, err)) return } @@ -70,8 +93,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { if contentType := r.Header.Get("Content-Type"); contentType != "application/json" { err = fmt.Errorf("contentType=%s, expected application/json", contentType) wh.getLogger(nil).Error(err, "unable to process a request with unknown content type") - reviewResponse = Errored(http.StatusBadRequest, err) - wh.writeResponse(w, reviewResponse) + wh.writeResponse(w, Errored(http.StatusBadRequest, err)) return } @@ -89,14 +111,12 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { _, actualAdmRevGVK, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar) if err != nil { wh.getLogger(nil).Error(err, "unable to decode the request") - reviewResponse = Errored(http.StatusBadRequest, err) - wh.writeResponse(w, reviewResponse) + wh.writeResponse(w, Errored(http.StatusBadRequest, err)) return } wh.getLogger(&req).V(5).Info("received request") - reviewResponse = wh.Handle(ctx, req) - wh.writeResponseTyped(w, reviewResponse, actualAdmRevGVK) + wh.writeResponseTyped(w, wh.Handle(ctx, req), actualAdmRevGVK) } // writeResponse writes response to w generically, i.e. without encoding GVK information. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go index 00bda8a4ce5..fa42217bd66 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go @@ -33,6 +33,7 @@ type Warnings []string // Validator defines functions for validating an operation. // The custom resource kind which implements this interface can validate itself. // To validate the custom resource with another specific struct, use CustomValidator instead. +// Deprecated: Use CustomValidator instead. 
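For code still implementing the deprecated Validator interface, a migration to CustomValidator looks roughly like the sketch below; deploymentValidator and its rules are illustrative only.

package example

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// deploymentValidator keeps the validation logic outside the API type.
type deploymentValidator struct{}

var _ admission.CustomValidator = &deploymentValidator{}

func (v *deploymentValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
	return v.validate(obj)
}

func (v *deploymentValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
	return v.validate(newObj)
}

func (v *deploymentValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
	return nil, nil
}

func (v *deploymentValidator) validate(obj runtime.Object) (admission.Warnings, error) {
	deploy, ok := obj.(*appsv1.Deployment)
	if !ok {
		return nil, fmt.Errorf("expected a Deployment but got a %T", obj)
	}
	if deploy.Spec.Replicas != nil && *deploy.Spec.Replicas > 10 {
		return admission.Warnings{"more than 10 replicas requested"}, nil
	}
	return nil, nil
}

// Registration goes through the builder instead of ValidatingWebhookFor.
func setupValidatingWebhook(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(&appsv1.Deployment{}).
		WithValidator(&deploymentValidator{}).
		Complete()
}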
type Validator interface { runtime.Object @@ -53,6 +54,7 @@ type Validator interface { } // ValidatingWebhookFor creates a new Webhook for validating the provided type. +// Deprecated: Use WithCustomValidator instead. func ValidatingWebhookFor(scheme *runtime.Scheme, validator Validator) *Webhook { return &Webhook{ Handler: &validatingHandler{validator: validator, decoder: NewDecoder(scheme)}, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go index e99fbd8a857..07650aa60a2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go @@ -30,7 +30,6 @@ import ( // CustomValidator defines functions for validating an operation. // The object to be validated is passed into methods as a parameter. type CustomValidator interface { - // ValidateCreate validates the object on creation. // The optional warnings will be added to the response as warning messages. // Return an error if the object is invalid. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go index 293137db498..e8439e2ea23 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go @@ -24,9 +24,11 @@ import ( // define some aliases for common bits of the webhook functionality // Defaulter defines functions for setting defaults on resources. +// Deprecated: Use CustomDefaulter instead. type Defaulter = admission.Defaulter // Validator defines functions for validating an operation. +// Deprecated: Use CustomValidator instead. type Validator = admission.Validator // CustomDefaulter defines functions for setting defaults on resources. 
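The defaulting side migrates the same way: implement admission.CustomDefaulter on a separate type and register it through the webhook builder instead of relying on the deprecated Defaulter alias. The type below is hypothetical.

package example

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// deploymentDefaulter sets defaults without the API type implementing Default itself.
type deploymentDefaulter struct{}

var _ admission.CustomDefaulter = &deploymentDefaulter{}

func (d *deploymentDefaulter) Default(ctx context.Context, obj runtime.Object) error {
	deploy, ok := obj.(*appsv1.Deployment)
	if !ok {
		return fmt.Errorf("expected a Deployment but got a %T", obj)
	}
	if deploy.Spec.Replicas == nil {
		one := int32(1)
		deploy.Spec.Replicas = &one
	}
	return nil
}

// Registration goes through the builder instead of DefaultingWebhookFor.
func setupDefaultingWebhook(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(&appsv1.Deployment{}).
		WithDefaulter(&deploymentDefaulter{}).
		Complete()
}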
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go index 80344dd3a9d..40aec5f9423 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go @@ -477,6 +477,9 @@ func (m Default) ApplyToSchema(schema *apiext.JSONSchemaProps) error { if err != nil { return err } + if schema.Type == "array" && string(marshalledDefault) == "{}" { + marshalledDefault = []byte("[]") + } schema.Default = &apiext.JSON{Raw: marshalledDefault} return nil } diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/genall.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/genall.go index bed7ec60392..b553db14c75 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/genall/genall.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/genall.go @@ -20,7 +20,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "golang.org/x/tools/go/packages" @@ -214,7 +213,7 @@ func (g GenerationContext) ReadFile(path string) ([]byte, error) { return nil, err } defer file.Close() - return ioutil.ReadAll(file) + return io.ReadAll(file) } // ForRoots produces a Runtime to run the given generators against the diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/output.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/output.go index 4cd29f49605..3eb43b0c206 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/genall/output.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/output.go @@ -19,7 +19,6 @@ package genall import ( "fmt" "io" - "io/ioutil" "os" "path/filepath" @@ -92,7 +91,7 @@ var OutputToNothing = outputToNothing{} type outputToNothing struct{} func (o outputToNothing) Open(_ *loader.Package, _ string) (io.WriteCloser, error) { - return nopCloser{ioutil.Discard}, nil + return nopCloser{io.Discard}, nil } // +controllertools:marker:generateHelp:category="" diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/loader/loader.go b/vendor/sigs.k8s.io/controller-tools/pkg/loader/loader.go index 2efa94c7d94..7762e53e733 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/loader/loader.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/loader/loader.go @@ -23,7 +23,6 @@ import ( "go/scanner" "go/token" "go/types" - "io/ioutil" "os" "path/filepath" "regexp" @@ -111,7 +110,7 @@ func (p *Package) NeedSyntax() { for i, filename := range p.CompiledGoFiles { go func(i int, filename string) { defer wg.Done() - src, err := ioutil.ReadFile(filename) + src, err := os.ReadFile(filename) if err != nil { p.AddError(err) return diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/parse.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/parse.go index d84b70eb746..259bff027cf 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/markers/parse.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/parse.go @@ -310,6 +310,7 @@ func guessType(scanner *sc.Scanner, raw string, allowSlice bool) *Argument { // We'll cross that bridge when we get there. 
// look ahead till we can figure out if this is a map or a slice + hint = peekNoSpace(subScanner) firstElemType := guessType(subScanner, subRaw, false) if firstElemType.Type == StringType { // might be a map or slice, parse the string and check for colon @@ -317,8 +318,9 @@ func guessType(scanner *sc.Scanner, raw string, allowSlice bool) *Argument { var keyVal string // just ignore this (&Argument{Type: StringType}).parseString(subScanner, raw, reflect.Indirect(reflect.ValueOf(&keyVal))) - if subScanner.Scan() == ':' { + if token := subScanner.Scan(); token == ':' || hint == '}' { // it's got a string followed by a colon -- it's a map + // or an empty map in case of {} return &Argument{ Type: MapType, ItemType: &Argument{Type: AnyType}, diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/zip.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/zip.go index b352ededc71..fd5b0035a08 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/markers/zip.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/zip.go @@ -58,6 +58,13 @@ func extractDoc(node ast.Node, decl *ast.GenDecl) string { } outGroup.List = append(outGroup.List, comment) } + isAsteriskComment := false + for _, l := range outGroup.List { + if strings.HasPrefix(l.Text, "/*") { + isAsteriskComment = true + break + } + } // split lines, and re-join together as a single // paragraph, respecting double-newlines as @@ -69,10 +76,12 @@ func extractDoc(node ast.Node, decl *ast.GenDecl) string { } for i, line := range outLines { - // Trim any extranous whitespace, - // for handling /*…*/-style comments, - // which have whitespace preserved in go/ast: - line = strings.TrimSpace(line) + if isAsteriskComment { + // Trim any extranous whitespace, + // for handling /*…*/-style comments, + // which have whitespace preserved in go/ast: + line = strings.TrimSpace(line) + } // Respect that double-newline means // actual newline: @@ -82,8 +91,7 @@ func extractDoc(node ast.Node, decl *ast.GenDecl) string { outLines[i] = line } } - - return strings.Join(outLines, " ") + return strings.Join(outLines, "\n") } // PackageMarkers collects all the package-level marker values for the given package. diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/gen.go b/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/gen.go index e33ec11ff56..8080aeae720 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/gen.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/gen.go @@ -18,7 +18,7 @@ package schemapatcher import ( "fmt" - "io/ioutil" + "os" "path/filepath" "gopkg.in/yaml.v3" @@ -335,7 +335,7 @@ func (e *partialCRD) setVersionedSchemata(newSchemata map[string]apiext.JSONSche // minimally invasive. Returned CRDs are mapped by group-kind. func crdsFromDirectory(ctx *genall.GenerationContext, dir string) (map[schema.GroupKind]*partialCRDSet, error) { res := map[schema.GroupKind]*partialCRDSet{} - dirEntries, err := ioutil.ReadDir(dir) + dirEntries, err := os.ReadDir(dir) if err != nil { return nil, err }
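The controller-tools changes above (empty {} literals parsed as maps, {} defaults on array schemas emitted as [], and doc comments joined with line breaks) can be seen on a hypothetical API type like the following; WidgetSpec and its fields are only an illustration.

package example

import corev1 "k8s.io/api/core/v1"

// WidgetSpec is a hypothetical API type. With the changes above, the "{}"
// default on the array-typed field is emitted as [] in the generated CRD
// schema instead of an invalid empty object, and doc comments keep their
// line breaks (with /* */ comments additionally trimmed of the indentation
// that go/ast preserves).
type WidgetSpec struct {
	// Tolerations defaults to an empty list when omitted.
	// +kubebuilder:default={}
	// +optional
	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`

	/*
		Replicas is documented with a block comment, the style whose
		leading whitespace is now the only thing that gets trimmed.
	*/
	// +kubebuilder:default=3
	// +optional
	Replicas *int32 `json:"replicas,omitempty"`
}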