diff --git a/.github/workflows/pr-precommit.yml b/.github/workflows/pr-precommit.yml
index 5b1b5091cf..272fb524f4 100644
--- a/.github/workflows/pr-precommit.yml
+++ b/.github/workflows/pr-precommit.yml
@@ -41,6 +41,10 @@ jobs:
with:
go-version: '1.22'
check-latest: true
+ - uses: hashicorp/setup-terraform@v3
+ with:
+ terraform_version: "1.5.7"
+ terraform_wrapper: false
- run: make install-dev-deps
- uses: terraform-linters/setup-tflint@v4
with:
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 42b6a6f041..e0419285f5 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -123,8 +123,9 @@ repos:
hooks:
- id: script-must-have-extension
- id: shellcheck
+ exclude: ".*unlinted"
- id: shfmt
- exclude: ".*tpl"
+ exclude: ".*tpl|.*unlinted"
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
diff --git a/cmd/root.go b/cmd/root.go
index 7c713db852..d19219fb97 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -53,7 +53,7 @@ HPC deployments on the Google Cloud Platform.`,
logging.Fatal("cmd.Help function failed: %s", err)
}
},
- Version: "v1.43.0",
+ Version: "v1.44.0",
Annotations: annotation,
}
)
diff --git a/community/examples/xpk-gke-a3-megagpu.yaml b/community/examples/xpk-gke-a3-megagpu.yaml
index 91bc7659d1..b881be1dce 100644
--- a/community/examples/xpk-gke-a3-megagpu.yaml
+++ b/community/examples/xpk-gke-a3-megagpu.yaml
@@ -34,8 +34,9 @@ deployment_groups:
settings:
subnetwork_name: xpk-gke-a3-megagpu-subnet
mtu: 8244
- secondary_ranges:
- xpk-gke-a3-megagpu-subnet:
+ secondary_ranges_list:
+ - subnetwork_name: xpk-gke-a3-megagpu-subnet
+ ranges:
- range_name: pods
ip_cidr_range: 10.4.0.0/14
- range_name: services
@@ -54,6 +55,7 @@ deployment_groups:
source: modules/scheduler/gke-cluster
use: [network1, gpunets]
settings:
+ enable_private_endpoint: false
master_authorized_networks:
- cidr_block: $(vars.authorized_cidr) # Allows your machine run kubectl command. It's required for the multi-network setup.
display_name: "kubectl-access-network"
diff --git a/community/modules/compute/gke-topology-scheduler/manifests/label-nodes-daemon.yaml b/community/modules/compute/gke-topology-scheduler/manifests/label-nodes-daemon.yaml
index fe49c607a6..c3e0176c8e 100644
--- a/community/modules/compute/gke-topology-scheduler/manifests/label-nodes-daemon.yaml
+++ b/community/modules/compute/gke-topology-scheduler/manifests/label-nodes-daemon.yaml
@@ -32,7 +32,7 @@ spec:
hostNetwork: true
containers:
- name: label-nodes-daemon
- image: python:3.9
+ image: python:3.10
command:
- bash
- -c
diff --git a/community/modules/compute/gke-topology-scheduler/manifests/schedule-daemon.yaml b/community/modules/compute/gke-topology-scheduler/manifests/schedule-daemon.yaml
index 9c9a4ab929..d263de18d7 100644
--- a/community/modules/compute/gke-topology-scheduler/manifests/schedule-daemon.yaml
+++ b/community/modules/compute/gke-topology-scheduler/manifests/schedule-daemon.yaml
@@ -39,7 +39,7 @@ spec:
effect: NoSchedule
containers:
- name: topology-scheduler-container
- image: python:3.9
+ image: python:3.10
command: ["/bin/sh", "-c", "pip install google-auth google-api-python-client kubernetes; python /scripts/schedule-daemon.py --ignored-namespace kube-system gmp-public gmp-system"]
volumeMounts:
- name: scripts-volume
diff --git a/community/modules/compute/gke-topology-scheduler/manifests/topology-scheduler-scripts.yaml b/community/modules/compute/gke-topology-scheduler/manifests/topology-scheduler-scripts.yaml
index 441cf18429..cf6cf2fb00 100644
--- a/community/modules/compute/gke-topology-scheduler/manifests/topology-scheduler-scripts.yaml
+++ b/community/modules/compute/gke-topology-scheduler/manifests/topology-scheduler-scripts.yaml
@@ -6,6 +6,7 @@ metadata:
data:
schedule-daemon.py: |
#!/usr/bin/env python
+ """schedule-daemon.py is a Topology-aware Kubernetes pod scheduler."""
# Copyright 2024 Google Inc. All Rights Reserved.
#
@@ -22,157 +23,288 @@ data:
# limitations under the License.
import argparse
- from itertools import groupby
+ import collections
+ import itertools
+ import logging
+ import re
import time
+ from typing import Any
import kubernetes
import kubernetes.client
- from kubernetes.client.rest import ApiException
- from kubernetes.utils.quantity import parse_quantity
+ import kubernetes.client.models
+ import kubernetes.client.rest
+ from kubernetes.utils import quantity
+
+ # Configure logging
+ logging.basicConfig(
+ level=logging.INFO, # Set the root logger level to INFO
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+ handlers=[
+ logging.FileHandler('my_app.log'), # Log to a file
+ logging.StreamHandler(), # Log to the console
+ ],
+ )
+
+ # labels for GKE<1.31; these require the labeler sidecar
+ PRERELEASE_CLUSTER_LABEL = 'topology.gke.io/cluster'
+ PRERELEASE_RACK_LABEL = 'topology.gke.io/rack'
+ PRERELEASE_HOST_LABEL = 'topology.gke.io/host'
+ # labels for GKE>=1.31
+ CLUSTER_LABEL = 'cloud.google.com/gce-topology-block'
+ RACK_LABEL = 'cloud.google.com/gce-topology-subblock'
+ HOST_LABEL = 'cloud.google.com/gce-topology-host'
+
+ # Kubernetes labels used to identify jobs and their completion indices.
+ # These labels are typically set by Kubernetes controllers like JobSet or
+ # CronJob controllers. The job completion index is used to track the progress
+ # of a job and ensure that pods are scheduled in the correct order.
+ JOB_COMPLETION_INDEX_LABEL_KEY = 'batch.kubernetes.io/job-completion-index'
+ JOB_NAME_LABEL_KEY = 'job-name'
+
+ # Most ML workloads are launched via the batch Job or JobSet API. To support
+ # Kubeflow operators (e.g. MPIJobs), we also read these alternative labels.
+ KUBEFLOW_REPLICA_INDEX_LABEL_KEY = 'training.kubeflow.org/replica-index'
+ KUBEFLOW_JOB_NAME_LABEL_KEY = 'training.kubeflow.org/job-name'
+
+ # collection of methods to extract job name from pod metadata
+ UNKNOWN_JOB_NAME = 'jobless'  # used when the job name cannot be determined
+
+
+ def extract_job_name_label(pod: kubernetes.client.models.V1Pod) -> str:
+ """Extracts the job name label from a pod.
+
+ Args:
+ pod: The pod to extract the job name.
+
+ Returns:
+ The job name label, or UNKNOWN_JOB_NAME if the label is not found.
+ """
+ return pod.metadata.labels.get(JOB_NAME_LABEL_KEY, UNKNOWN_JOB_NAME)
+
+
+ def extract_kubeflow_job_name_label(pod: kubernetes.client.models.V1Pod) -> str:
+ """Extracts the kubeflow job name label from a pod.
+
+ Args:
+ pod: The pod to extract the job name.
+
+ Returns:
+ The kubeflow job name label, or UNKNOWN_JOB_NAME if the label is not found.
+ """
+ return pod.metadata.labels.get(KUBEFLOW_JOB_NAME_LABEL_KEY, UNKNOWN_JOB_NAME)
+
+ def extract_owner_reference_uid(pod: kubernetes.client.models.V1Pod) -> str:
+ """Extracts the owner reference UID from a pod.
- def split_pods_based_on_jobs(pods):
- """Splits pending pods into groups based on jobs."""
- return [
- list(job_group)
- for _, job_group in groupby(pods, lambda pod: pod.get('job_name'))
- ]
+ Args:
+ pod: The pod to extract the job name.
+
+ Returns:
+ The owner reference UID, or UNKNOWN_JOB_NAME if the owner reference is not
+ found.
+ """
+ if pod.metadata.owner_references:
+ return pod.metadata.owner_references[0].uid
+ return UNKNOWN_JOB_NAME
- def sort_jobs_by_time(job):
- """Return the key to be used for sorting jobs which is by creation time."""
- # All the pods in the job should have the same creation time.
- return job[0].get('creation_time')
+ def extract_helm_job_name_label(pod: kubernetes.client.models.V1Pod) -> str:
+ """Extracts the helm job name label from a pod.
+ Args:
+ pod: The pod to extract the job name.
+
+ Returns:
+ The helm job name label, or UNKNOWN_JOB_NAME if the label is not found.
+ """
+ if pod.metadata.labels:
+ return pod.metadata.labels.get('name', UNKNOWN_JOB_NAME)
+ return UNKNOWN_JOB_NAME
+
+
+ def pod_sorting_key(pod_info: dict[str, Any]) -> tuple[str, int] | int:
+ """Returns key/rank to be used for sorting pods.
- def pod_sorting_key(pod):
- """Returns key to be used for sorting pods.
Given that numbers is often suffixed for multi-node deployments,
here we use a (prefix, number) tuple for the sorting key.
This means "xxx-pod2" should appear before "xxx-pod10"
- """
- if pod['index'] is not None:
- return int(pod['index'])
+ Args:
+ pod_info: The pod info.
- # if the suffix is a number, extract it
- idx = 0
- suffix = ""
- name = pod['name']
- while name[-1 - len(suffix)].isdigit():
- suffix = name[-1 - len(suffix)] + suffix
+ Returns:
+ The pod's integer index when available, otherwise a tuple of the pod
+ name's prefix and numeric suffix.
+ """
+
+ try:
+ index_value = pod_info.get('index')
+ if index_value is not None:
+ return int(index_value)
+ except (ValueError, TypeError) as e:
+ logging.exception(
+ 'Error converting %s pod index to integer: %s', pod_info['name'], e
+ )
+ # if the suffix is a number, extract it from the name
+ name = pod_info['name']
+ if match := re.fullmatch(r'^(.*?)(\d+)$', name):
+ prefix, suffix = match.groups()
+ return (prefix, int(suffix))
+ else:
+ logging.warning(
+ 'Pod %s does not have a numeric suffix. Using 0 as index.', name
+ )
+ return (name, 0) # No numeric suffix
- if suffix != "":
- idx = int(suffix)
- return (name[:len(name) - len(suffix)], idx)
+ def node_topology_distance(node1: dict[str, Any], node2: dict[str, Any]) -> int:
+ """Calculates the distance between two nodes in the topology.
+ Args:
+ node1: The first node.
+ node2: The second node.
- def node_topology_distance(node1, node2):
- node1_key = node_topology_key(node1)
- node2_key = node_topology_key(node2)
+ Returns:
+ The distance between the two nodes, or 0 if the nodes are in the same
+ topology block. It is also 0 if topology labels are missing.
+ """
+ # distance between cluster/rack/host tuples
+ node1_topology_keys = node_topology_key(node1)
+ node2_topology_keys = node_topology_key(node2)
result = 1000000
- for i in range(len(node1_key)):
- if node1_key[i] != node2_key[i]:
+ for i, node1_topology_key in enumerate(node1_topology_keys):
+ if node1_topology_key != node2_topology_keys[i]:
return result
result /= 100
return 0
- def node_topology_key(node):
- """Builds a key to be used to sort nodes."""
- node_labels = node['node_labels']
+ def node_topology_key(
+ node: dict[str, Any],
+ ) -> tuple[str, str, str] | tuple[()]:
+ """Extract topology labels of a node.
- if (
- 'topology.gke.io/cluster' in node_labels
- and 'topology.gke.io/rack' in node_labels
- and 'topology.gke.io/host' in node_labels
- ):
- return (
- node_labels['topology.gke.io/cluster'],
- node_labels['topology.gke.io/rack'],
- node_labels['topology.gke.io/host'],
- )
+ Args:
+ node: The node.
+ Returns:
+ A tuple of the node's topology labels, or an empty tuple if the node does
+ not have topology labels.
+ """
+ node_labels = node['node_labels']
+ for labels in [
+ (CLUSTER_LABEL, RACK_LABEL, HOST_LABEL),
+ (PRERELEASE_CLUSTER_LABEL, PRERELEASE_RACK_LABEL, PRERELEASE_HOST_LABEL),
+ ]:
+ if all(label in node_labels for label in labels):
+ return tuple(node_labels[label] for label in labels)
+ logging.info('Node %s does not have topology labels', node['name'])
return ()
- def get_pod_used_resources(pod):
- """Get the resources used by this pod"""
- used_cpu = 0
- used_memory = 0
- used_gpu = 0
+ def get_pod_used_resources(
+ pod: kubernetes.client.models.V1Pod,
+ ) -> tuple[int, int, int]:
+ """Get the resources used by this pod.
+
+ Args:
+ pod: The pod.
+
+ Returns:
+ A tuple of the pod's used CPU, memory, and GPU.
+ """
+ used_cpu, used_memory, used_gpu = 0, 0, 0
if pod.status is None or pod.status.container_statuses is None:
return used_cpu, used_memory, used_gpu
- for container, container_status in zip(pod.spec.containers, pod.status.container_statuses):
+ for container, container_status in zip(
+ pod.spec.containers, pod.status.container_statuses
+ ):
if container_status.state.terminated is not None:
# terminated pods don't use resources
continue
requests = container.resources.requests or {}
- used_cpu += parse_quantity(requests.get('cpu', 0))
- used_memory += parse_quantity(requests.get('memory', 0))
+ used_cpu += quantity.parse_quantity(requests.get('cpu', 0))
+ used_memory += quantity.parse_quantity(requests.get('memory', 0))
used_gpu += int(requests.get('nvidia.com/gpu', 0))
return used_cpu, used_memory, used_gpu
- def get_pods_taint_toleration(pods):
- """Get the taint tolerations of the pods.
- For simplicity, we assume that the pods are homogeneous and
- all have the same tolerations.
+ def all_pods_have_same_tolerations(
+ pods: list[kubernetes.client.models.V1Pod],
+ ) -> bool:
+ """Checks if all pods in the list have the same tolerations.
+
+ Edge case: an empty list is considered to have the same tolerations.
+
+ Args:
+ pods: A list of V1Pod objects.
+
+ Returns:
+ True if all pods have the same tolerations, False otherwise.
"""
- ts = None
- for pod in pods:
- tolerations = pod['spec'].tolerations
- if ts is None:
- ts = tolerations
- else:
- assert(ts == tolerations)
- return ts if ts is not None else []
+ if not pods:
+ return True
+ first_pod_tolerations = pods[0].spec.tolerations
+ return all(pod.spec.tolerations == first_pod_tolerations for pod in pods[1:])
- def find_schedulable_nodes(nodes, pods, tolerated_taints):
- """Finds nodes that can be scheduled."""
- nodes_info = {}
- if tolerated_taints is not None:
+ def find_schedulable_nodes(
+ nodes: list[kubernetes.client.models.V1Node],
+ other_running_pods: list[kubernetes.client.models.V1Pod],
+ new_pods_to_schedule: dict[str, dict[str, Any]],
+ ) -> dict[str, Any]:
+ """Finds nodes that can be scheduled.
+
+ Args:
+ nodes: A list of V1Node objects.
+ other_running_pods: A list of running pods. They occupy node resources.
+ new_pods_to_schedule: Info dict of pods to schedule.
+
+ Returns:
+ A dict of node names to node infos for nodes where scheduling is possible.
+ """
+ nodes_info = {}
+ # guaranteed by the caller: all pods in the job have the same tolerations
+ tolerated_taint_dict = {}
+ tolerated_taints = next(iter(new_pods_to_schedule.values()))[
+ 'spec'
+ ].tolerations
+ if tolerated_taints:
tolerated_taint_dict = {t.key: t for t in tolerated_taints}
- else:
- tolerated_taint_dict = {}
for node in nodes:
node_name = node.metadata.name
- node_labels = node.metadata.labels
-
- skip_node = False
- if node.spec.taints is not None:
- for t in node.spec.taints:
- if t.key not in tolerated_taint_dict:
- print(f'Skipping node {node_name} because it is tainted with key {t.key}')
- skip_node = True
- break
- else:
- tol = tolerated_taint_dict[t.key]
- if tol.operator == "Equal" and tol.value != t.value:
- skip_node = True
- break
-
- if skip_node:
+ # skip nodes that have taints that are not covered by pod tolerations
+ if any(
+ t.key not in tolerated_taint_dict
+ or (
+ tolerated_taint_dict[t.key].operator == 'Equal'
+ and tolerated_taint_dict[t.key].value != t.value
+ )
+ for t in node.spec.taints or []
+ ):
+ logging.info(
+ 'Skipping node %s because it has taints (%s) which are not covered'
+ ' by pod tolerations %s',
+ node_name,
+ node.spec.taints,
+ tolerated_taint_dict,
+ )
continue
allocatable = node.status.allocatable
+ used_cpu, used_memory, used_gpu = 0, 0, 0
- used_cpu = 0
- used_memory = 0
- used_gpu = 0
-
- for pod in pods:
- if pod.spec.node_name == node_name:
+ for pod in other_running_pods:
+ if pod.spec.node_name and pod.spec.node_name == node_name:
cpu, mem, gpu = get_pod_used_resources(pod)
used_cpu += cpu
used_memory += mem
used_gpu += gpu
- free_cpu = parse_quantity(allocatable['cpu']) - used_cpu
- free_memory = parse_quantity(allocatable['memory']) - used_memory
+ free_cpu = quantity.parse_quantity(allocatable['cpu']) - used_cpu
+ free_memory = quantity.parse_quantity(allocatable['memory']) - used_memory
free_gpu = int(allocatable.get('nvidia.com/gpu', 0)) - used_gpu
node_info = {
@@ -180,20 +312,35 @@ data:
'cpu': free_cpu,
'memory': free_memory,
'gpu': free_gpu,
- 'node_labels': node_labels,
}
+ if node.metadata.labels:
+ node_info['node_labels'] = node.metadata.labels
nodes_info[node_name] = node_info
- print(
- f'Node: {node_name}, CPU: {free_cpu}, Memory: {free_memory}, GPU:'
- f' {free_gpu}, Topology: {node_topology_key(node_info)}'
+ logging.info(
+ 'Node: %s, CPU: %s, Memory: %s, GPU: %s, Topology: %s',
+ node_name,
+ free_cpu,
+ free_memory,
+ free_gpu,
+ node_topology_key(node_info),
)
return nodes_info
- def find_pod_gates(pods, prefix):
- """Finds pods with scheduling gates that starts with the prefix"""
+ def find_pod_gates(
+ pods: list[kubernetes.client.models.V1Pod], prefix: str
+ ) -> set[str]:
+ """Finds pods with scheduling gates that starts with the prefix.
+
+ Args:
+ pods: A list of V1Pod objects.
+ prefix: The prefix of the scheduling gate.
+
+ Returns:
+ A set of scheduling gate names.
+ """
s = set()
for pod in pods:
if pod.spec.scheduling_gates:
@@ -203,88 +350,89 @@ data:
return s
- def find_schedulable_pods(pods, gate_name):
- """Finds pods that can be scheduled."""
- pods_to_schedule = {}
+ def find_schedulable_pods(
+ job_name: str,
+ pods: list[kubernetes.client.models.V1Pod],
+ ) -> dict[str, dict[str, Any]]:
+ """Finds pods that can be scheduled for a given gate.
+
+ Args:
+ job_name: The name of the job.
+ pods: A list of V1Pod objects.
+ Returns:
+ A dict of pod names to pod infos for schedulable pods.
+ """
+ pods_to_schedule = {}
for pod in pods:
- if pod.spec.scheduling_gates:
- gates = pod.spec.scheduling_gates
- for gate in gates:
- if gate.name == gate_name:
- pod_name = pod.metadata.name
- pod_namespace = pod.metadata.namespace
-
- pod_index = None
- job_name = None
- if pod.metadata.labels is not None:
- if (
- 'batch.kubernetes.io/job-completion-index'
- in pod.metadata.labels
- ):
- pod_index = pod.metadata.labels[
- 'batch.kubernetes.io/job-completion-index'
- ]
- else:
- print('Unable to find index in metadata. Can not queue jobs')
-
- if 'job-name' in pod.metadata.labels:
- job_name = pod.metadata.labels['job-name']
- else:
- print('Unable to find job_name in metadata. Can not queue jobs')
- else:
- print('No labels on pod to extract job metadata from.')
-
- creation_time = None
- if pod.metadata.creation_timestamp is not None:
- creation_time = pod.metadata.creation_timestamp
- else:
- print(
- 'Unable to find creation_time in metadata. Can not queue jobs'
- )
-
- used_cpu = 0
- used_memory = 0
- used_gpu = 0
-
- for container in pod.spec.containers:
- requests = container.resources.requests or {}
- used_cpu += parse_quantity(requests.get('cpu', 0))
- used_memory += parse_quantity(requests.get('memory', 0))
- used_gpu += int(requests.get('nvidia.com/gpu', 0))
-
- pods_to_schedule[pod_name] = {
- 'name': pod_name,
- 'namespace': pod_namespace,
- 'index': pod_index,
- 'cpu': used_cpu,
- 'memory': used_memory,
- 'gpu': used_gpu,
- 'node_selector': pod.spec.node_selector,
- 'spec': pod.spec,
- 'metadata': pod.metadata,
- 'job_name': job_name,
- 'creation_time': creation_time
- }
+ pod_name = pod.metadata.name
+ pod_index = None
+ if JOB_COMPLETION_INDEX_LABEL_KEY in pod.metadata.labels:
+ pod_index = pod.metadata.labels[JOB_COMPLETION_INDEX_LABEL_KEY]
+ elif KUBEFLOW_REPLICA_INDEX_LABEL_KEY in pod.metadata.labels:
+ pod_index = pod.metadata.labels[KUBEFLOW_REPLICA_INDEX_LABEL_KEY]
+ else:
+ # not a hard stop; we can still order pods by name
+ logging.info(
+ 'No index in pod %s metadata. Ordering derived from name.',
+ pod_name,
+ )
- print(
- f'Found schedulable pod: {pod_namespace}/{pod_name}, CPU:'
- f' {used_cpu}, Memory: {used_memory}, GPU: {used_gpu}'
- f' Index: {pod_index}'
- )
+ used_cpu, used_memory, used_gpu = 0, 0, 0
+ for container in pod.spec.containers:
+ requests = container.resources.requests or {}
+ used_cpu += quantity.parse_quantity(requests.get('cpu', 0))
+ used_memory += quantity.parse_quantity(requests.get('memory', 0))
+ used_gpu += int(requests.get('nvidia.com/gpu', 0))
+
+ pod_info = {
+ 'name': pod_name,
+ 'namespace': pod.metadata.namespace,
+ 'index': pod_index,
+ 'cpu': used_cpu,
+ 'memory': used_memory,
+ 'gpu': used_gpu,
+ 'spec': pod.spec,
+ 'metadata': pod.metadata,
+ 'job_name': job_name,
+ 'creation_time': pod.metadata.creation_timestamp,
+ }
+ if pod.spec.node_selector:
+ pod_info['node_selector'] = pod.spec.node_selector
+ pods_to_schedule[pod_name] = pod_info
+
+ logging.info(
+ 'Found schedulable pod: %s/%s, CPU: %s, Memory: %s, GPU: %s Index: %s',
+ pod.metadata.namespace,
+ pod_name,
+ used_cpu,
+ used_memory,
+ used_gpu,
+ pod_index,
+ )
return pods_to_schedule
- def can_schedule(node, pod):
- """Checks if a given pod can be scheduled on a given node."""
- node_selector = pod['node_selector']
- node_labels = node['node_labels']
+ def can_schedule(node: dict[str, Any], pod: dict[str, Any]) -> bool:
+ """Checks if a given pod can be scheduled on a given node.
+
+ The decision is based on resource node availability and pod requirements. The
+ node_selector is also checked, if the pod has it.
- if node_selector:
- for key, value in node_selector.items():
- if key not in node_labels or node_labels[key] != value:
- return False
+ Args:
+ node: The node to check if the pod can be scheduled on.
+ pod: The pod to check if it can be scheduled on the node.
+
+ Returns:
+ True if the pod can be scheduled on the node, False otherwise.
+ """
+ node_selector = pod.get('node_selector', {})
+ node_labels = node.get('node_labels', {})
+
+ for key, value in node_selector.items():
+ if key not in node_labels or node_labels[key] != value:
+ return False
return (
node['cpu'] >= pod['cpu']
@@ -293,11 +441,27 @@ data:
)
- def schedule_pod_on_node(v1, pod_name, pod_namespace, node_name, gate_name):
- """Schedules a pod on a given node."""
+ def schedule_pod_on_node(
+ v1: kubernetes.client.CoreV1Api,
+ pod_name: str,
+ pod_namespace: str,
+ node_name: str,
+ gate_name: str,
+ ) -> bool:
+ """Schedules a pod on a given node using affinity for direct assignment.
+
+ Args:
+ v1: The kubernetes client.
+ pod_name: The name of the pod to schedule.
+ pod_namespace: The namespace of the pod to schedule.
+ node_name: The name of the node to schedule the pod on.
+ gate_name: The name of the gate to remove from the pod.
+
+ Returns:
+ True if the pod was scheduled on the node, False otherwise.
+ """
try:
pod = v1.read_namespaced_pod(pod_name, pod_namespace)
-
if any(gate.name == gate_name for gate in pod.spec.scheduling_gates):
new_gates = [
gate for gate in pod.spec.scheduling_gates if gate.name != gate_name
@@ -319,16 +483,32 @@ data:
v1.replace_namespaced_pod(pod_name, pod_namespace, pod)
- print(f'Pod {pod_namespace}/{pod_name} scheduled on {node_name}')
- except ApiException as e:
- print(f'Exception when removing scheduling gate: {e}')
+ logging.info(
+ 'Pod %s/%s scheduled on %s', pod_namespace, pod_name, node_name
+ )
+ except kubernetes.client.rest.ApiException as e:
+ logging.exception(
+ 'Exception when removing pod %s scheduling gate: %s', pod_name, e
+ )
+ return False
+ return True
- def calculate_pods_assignment(sorted_nodes, sorted_pods):
- """Calculates the best assignment for pods."""
+ def calculate_pods_assignment(
+ sorted_nodes: list[dict[str, Any]], sorted_pods: list[dict[str, Any]]
+ ) -> list[int]:
+ """Gets the best pod assignment by minimizing the topology distance.
+
+ Args:
+ sorted_nodes: A list of sorted node infos.
+ sorted_pods: A list of sorted pod infos.
+
+ Returns:
+ A list of node indices that the pods should be assigned to.
+ """
assignment = [-i for i in reversed(range(1, len(sorted_pods) + 1))]
best_assignment = []
- minimum_distance = 1000000000
+ minimum_distance = float('inf')
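+ # Summary of the search below: assignment[i] is the index into sorted_nodes
+ # chosen for sorted_pods[i]. Candidate assignments are enumerated in order,
+ # only those where every pod fits its node (see can_schedule) are kept, and
+ # the candidate with the smallest total topology distance between the nodes
+ # of consecutive pods is returned as best_assignment.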
while True:
all_ok = True
@@ -336,15 +516,18 @@ data:
while i >= 0 and all_ok:
assignment[i] += 1
if assignment[i] == len(sorted_nodes):
- break
+ break # no more nodes to consider
if assignment[i] >= 0 and can_schedule(
sorted_nodes[assignment[i]], sorted_pods[i]
):
i -= 1
elif i < len(assignment) - 1 and assignment[i] == assignment[i + 1] - 1:
- all_ok = False
+ all_ok = False  # to ignore the duplicate half of the triangle of all combinations
+
+ # if all combinations have been checked, return what was found so far
if assignment[-1] == len(sorted_nodes):
break
+ # calculate total distance of the detected viable assignment
if all_ok:
new_distance = 0
for i in range(1, len(sorted_pods)):
@@ -358,60 +541,227 @@ data:
return best_assignment
- def schedule_pod_with_gate(v1, pods, gate):
- pods_to_schedule = find_schedulable_pods(pods, gate)
+ def list_pods(
+ v1: kubernetes.client.CoreV1Api, state_filter: str | None = None
+ ) -> list[kubernetes.client.models.V1Pod]:
+ """Lists pods in all namespaces.
+
+ Args:
+ v1: The kubernetes client.
+ state_filter: The pod state to filter by.
+
+ Returns:
+ A list of V1Pod objects.
+ """
+ pods = []
+ for n in v1.list_namespace().items:
+ namespace = n.metadata.name
+ for pod in v1.list_namespaced_pod(namespace).items:
+ if not state_filter or pod.status.phase == state_filter:
+ pods.append(pod)
+ return pods
+
+
+ def schedule_pod_with_gate(
+ v1: kubernetes.client.CoreV1Api,
+ gate_name: str,
+ ) -> None:
+ """Find and schedule pods with a given gate.
+
+ Args:
+ v1: k8s client
+ gate_name: name of the gate to schedule pods with (see the k8s YAML, e.g.
+ schedulingGates=gke.io/topology-aware-auto-*)
+ """
+ # query the pods again after the sleep, just in case not all gated pods
+ # are returned from the previous query
+ pending_pods: list[kubernetes.client.models.V1Pod] = list_pods(v1, 'Pending')
+ pods_for_given_gate: list[kubernetes.client.models.V1Pod] = []
+ for p in pending_pods:
+ if p.spec.scheduling_gates:
+ if gate_name in {g.name for g in p.spec.scheduling_gates}:
+ pods_for_given_gate.append(p)
+
+ pods_per_job: dict[str, list[kubernetes.client.models.V1Pod]] = (
+ collections.defaultdict(list)
+ )
+ jobless_pod_names: set[str] = set(
+ [p.metadata.name for p in pods_for_given_gate]
+ )
+ for job_name_extractor in [
+ extract_job_name_label,
+ extract_kubeflow_job_name_label,
+ extract_owner_reference_uid,
+ extract_helm_job_name_label,
+ ]:
+ for job_name, job_pod_group in itertools.groupby(
+ pods_for_given_gate,
+ job_name_extractor,
+ ):
+ # ignore pod groups with not yet recognized job name
+ # (we try several different ways to extract it; see the outer loop)
+ if job_name == UNKNOWN_JOB_NAME:
+ continue
+ pod_group = list(job_pod_group)
+ # sanity check: all pods have creation_timestamp
+ if not all(p.metadata.creation_timestamp for p in pod_group):
+ logging.error(
+ 'No pod creation_timestamp in job %s. Job ignored. Pods: %s',
+ job_name,
+ ', '.join([p.metadata.name for p in pod_group]),
+ )
+ # exclude such bad pods from consideration
+ jobless_pod_names -= {p.metadata.name for p in pod_group}
+ continue
+ # sanity check: all pods have same tolerations
+ if not all_pods_have_same_tolerations(pod_group):
+ logging.error(
+ 'Pods in job %s have different tolerations. Job ignored. Pods: %s',
+ job_name,
+ ', '.join([p.metadata.name for p in pod_group]),
+ )
+ # exclude such bad pods from consideration
+ jobless_pod_names -= {p.metadata.name for p in pod_group}
+ continue
+ pod_names_in_group = set([p.metadata.name for p in pod_group])
+ # sanity check: if all job pods have been scheduled, we can ignore the job
+ if all(p not in jobless_pod_names for p in pod_names_in_group):
+ logging.info(
+ 'All %d pods of job %s are already scheduled. Job ignored.',
+ len(pod_group),
+ job_name,
+ )
+ continue
+
+ # all checks passed
+ jobless_pod_names -= pod_names_in_group
+ pods_per_job[job_name] = list(pod_group)
+ logging.info(
+ 'Found %d pods for job %s, created: %s',
+ len(pods_per_job[job_name]),
+ job_name,
+ pods_per_job[job_name][0].metadata.creation_timestamp,
+ )
- nodes = v1.list_node().items
- print(f'Pods to schedule: {len(pods_to_schedule)}')
- jobs = split_pods_based_on_jobs(pods_to_schedule.values())
- sorted_jobs = sorted(jobs, key=sort_jobs_by_time)
- for job in sorted_jobs:
- job_name = job[0].get('job_name')
- creation_time = job[0].get('creation_time')
- print(f'Attempting to schedule job: {job_name} created: {creation_time}')
+ # sanity check
+ if jobless_pod_names:
+ logging.warning(
+ 'Found %d pods without explicit job name, going to schedule all'
+ ' together. Pods: %s',
+ len(jobless_pod_names),
+ ', '.join(list(jobless_pod_names)),
+ )
+ # for robustness, we still try to schedule them as a separate pseudo-job group
+ pods_per_job['pods-without-explicit-job-name'] = [
+ p for p in pods_for_given_gate if p.metadata.name in jobless_pod_names
+ ]
+
+ logging.info('Start scheduling %d jobs', len(pods_per_job))
+
+ nodes: list[kubernetes.client.models.V1Node] = v1.list_node().items
+ # Sort job names based on the creation time of the first pod in each job group
+ sorted_job_names = sorted(
+ pods_per_job.keys(),
+ key=lambda job_name: pods_per_job[job_name][
+ 0
+ ].metadata.creation_timestamp,
+ )
+ running_pods: list[kubernetes.client.models.V1Pod] = list_pods(v1, 'Running')
+ logging.info('Already running pods number: %d', len(running_pods))
+ recently_scheduled_nodes_names = set()
+ for job_name in sorted_job_names:
+ job_pods: list[kubernetes.client.models.V1Pod] = pods_per_job.get(
+ job_name, []
+ )
+ logging.info(
+ 'Attempting to schedule job: %s with %s pods ', job_name, len(job_pods)
+ )
+ try:
+ schedulable_pods_infos: dict[str, Any] = find_schedulable_pods(
+ job_name, job_pods
+ )
- tolerated_taints = get_pods_taint_toleration(job)
- nodes_to_schedule = find_schedulable_nodes(nodes, pods, tolerated_taints)
+ # filter out nodes already used for pods scheduled earlier in this pass
+ nodes = [
+ node
+ for node in nodes
+ if node.metadata.name not in recently_scheduled_nodes_names
+ ]
+ nodes_infos: dict[str, Any] = find_schedulable_nodes(
+ nodes, running_pods, schedulable_pods_infos
+ )
- sorted_pods = sorted(job, key=pod_sorting_key)
- sorted_nodes = sorted(nodes_to_schedule.values(), key=node_topology_key)
+ if len(schedulable_pods_infos) > len(nodes_infos):
+ logging.error(
+ 'Not enough nodes available for job %s scheduling: %s nodes, %s'
+ ' pods. Skipping job.',
+ job_name,
+ len(nodes_infos),
+ len(schedulable_pods_infos),
+ )
+ continue
- print(f'Nodes to schedule: {len(nodes_to_schedule)}')
+ logging.info(
+ 'Available nodes for job %s scheduling: %s',
+ job_name,
+ len(nodes_infos),
+ )
- best_assignment = calculate_pods_assignment(sorted_nodes, sorted_pods)
+ sorted_pods = sorted(schedulable_pods_infos.values(), key=pod_sorting_key)
+ sorted_nodes = sorted(nodes_infos.values(), key=node_topology_key)
+ best_assignment = calculate_pods_assignment(sorted_nodes, sorted_pods)
- if not best_assignment:
- print(
- f'No scheduling for job: {job_name} with gate {gate} has been found.'
- ' Skipping job.'
+ if not best_assignment:
+ logging.error(
+ 'No scheduling for job %s with gate %s was found. Skipping job.',
+ job_name,
+ gate_name,
+ )
+ continue
+ logging.info(
+ 'Assignment found, scheduling %s with %s pods.',
+ job_name,
+ len(job_pods),
)
- continue
- else:
- print(f'Assignment found, scheduling {job_name} with {len(jobs)} pods.')
- for i in range(0, len(sorted_pods)):
- pod = sorted_pods[i]
- node = sorted_nodes[best_assignment[i]]
- schedule_pod_on_node(
- v1, pod['name'], pod['namespace'], node['name'], gate
+ for i, pod in enumerate(sorted_pods):
+ node = sorted_nodes[best_assignment[i]]
+ if not schedule_pod_on_node(
+ v1, pod['name'], pod['namespace'], node['name'], gate_name
+ ):
+ logging.error(
+ 'Failed to schedule pod %s on node %s. Skipping job %s',
+ pod['name'],
+ node['name'],
+ job_name,
+ )
+ break
+ # revisit: in case of failure, clean up partially scheduled pods
+ recently_scheduled_nodes_names.add(node['name'])
+
+ except Exception as e: # pylint: disable=broad-except
+ logging.exception(
+ 'Exception when scheduling job %s, gate %s: %s', job_name, gate_name, e
)
- def run_scheduling_loop():
- """Runs scheduling."""
- parser = argparse.ArgumentParser(
- prog='schedule-workload.py')
+ def run_scheduling_loop() -> None:
+ """Runs scheduling.
+
+ This function runs an infinite loop that periodically schedules pods with
+ topology-aware scheduling gates.
+ """
+ parser = argparse.ArgumentParser(prog='schedule-workload.py')
parser.add_argument(
- '-g', '--gate',
- default='gke.io/topology-aware-auto-') # prefix of the schedule gate
+ '-g', '--gate', default='gke.io/topology-aware-auto-'
+ ) # prefix of the schedule gate
parser.add_argument(
- '-i', '--interval',
- default=1.0) # intervals (in seconds) between scheduling
+ '-i', '--interval', default=1.0
+ ) # intervals (in seconds) between scheduling
parser.add_argument(
- '--ignored-namespace',
- nargs='*',
- default=[]) # namespace to search for pods
+ '--ignored-namespace', nargs='*', default=[]
+ ) # namespace to search for pods
args = parser.parse_args()
try:
@@ -419,56 +769,49 @@ data:
except kubernetes.config.ConfigException:
kubernetes.config.load_kube_config()
v1 = kubernetes.client.CoreV1Api()
-
- def list_pods():
- # filtering of namespace is not cached as namespaces could be
- # created and deleted
- namespaces = v1.list_namespace().items
- filtered_namespace_names = []
- for n in namespaces:
- if n.metadata.name not in args.ignored_namespace:
- filtered_namespace_names.append(n.metadata.name)
- pods = []
- for n in filtered_namespace_names:
- pods += v1.list_namespaced_pod(n).items
- return pods
-
+ # wait after a container restart so previously scheduled pods become visible
+ # on nodes and their resource usage is counted correctly
+ logging.info('[Cool off] 90sec')
+ time.sleep(90.0)
try:
- t0 = time.time()
+ last_run_ts = time.time()
while True:
- interval = time.time() - t0
- if interval < args.interval:
- time.sleep(args.interval - interval)
- t0 = time.time()
+ time_since_prev_run = time.time() - last_run_ts
+ if time_since_prev_run < args.interval:
+ logging.info('[Cool off] %ssec', args.interval - time_since_prev_run)
+ time.sleep(args.interval - time_since_prev_run)
+ last_run_ts = time.time()
- pods = list_pods()
+ # Get pods to schedule
+ pods: list[kubernetes.client.models.V1Pod] = list_pods(v1, 'Pending')
gates = find_pod_gates(pods, args.gate)
- print(f"Found {len(pods)} pods and {len(gates)} gates")
+ logging.info('Found %s pending pods and %s gates', len(pods), len(gates))
- if len(gates) == 0:
+ if not gates:
# No pods to be scheduled
continue
- # sleep for one seconds, assuming that all pods within one group would be
+ # sleep for 5 seconds, assuming that all pods within one group would be
# all visible by then
+ logging.info('[Cool off] 5sec')
time.sleep(5.0)
for g in gates:
- print(f"scheduling pods with gate {g}")
- # query the pods again after the sleep, just in case not all gated pods
- # are returned from previous query
- pods = list_pods()
- schedule_pod_with_gate(v1, pods, g)
+ logging.info('Scheduling pods with gate %s', g)
+ schedule_pod_with_gate(v1, g)
+ logging.info('[Cool off] 60sec')
+ time.sleep(60.0) # cool off
- except ApiException as e:
- print(f'Exception when listing Kubernetes nodes or pods: {e}')
+ except kubernetes.client.rest.ApiException as e:
+ logging.exception('Exception when listing Kubernetes nodes or pods: %s', e)
if __name__ == '__main__':
run_scheduling_loop()
label-nodes-daemon.py: |
#!/usr/bin/env python
+ """Daemon to update Kubernetes node labels based on GCE VM metadata."""
# Copyright 2024 Google Inc. All Rights Reserved.
#
@@ -485,13 +828,14 @@ data:
# limitations under the License.
import time
+ from typing import Dict
from kubernetes import client
from kubernetes import config
import requests
- def update_node_labels(kube):
+ def update_node_labels(kube: client.CoreV1Api) -> None:
"""Updates Kubernetes node labels based on GCE VM metadata."""
node_name_url = "http://metadata.google.internal/computeMetadata/v1/instance/name"
metadata_url = "http://metadata.google.internal/computeMetadata/v1/instance/attributes/physical_host"
@@ -515,23 +859,23 @@ data:
cluster, rack, host = physical_host.split("/")[1:]
- node_labels = {
+ node_labels: Dict[str, str] = {
"topology.gke.io/cluster": cluster,
"topology.gke.io/rack": rack,
"topology.gke.io/host": host,
}
- kube.patch_node(node_name, {"metadata": {"labels": node_labels}})
+ kube.patch_node(node_name, {"metadata": {"labels": node_labels}}) # type: ignore
print(f"Updated labels on node {node_name}: {node_labels}")
if __name__ == "__main__":
# Kubernetes configuration
config.load_incluster_config()
- kube = client.CoreV1Api()
+ client = client.CoreV1Api()
while True:
print("Starting node update")
# Update node labels
- update_node_labels(kube)
+ update_node_labels(client)
time.sleep(600)
diff --git a/community/modules/compute/htcondor-execute-point/versions.tf b/community/modules/compute/htcondor-execute-point/versions.tf
index 104ca0ed17..fdde0fbb7f 100644
--- a/community/modules/compute/htcondor-execute-point/versions.tf
+++ b/community/modules/compute/htcondor-execute-point/versions.tf
@@ -25,6 +25,6 @@ terraform {
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:htcondor-execute-point/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:htcondor-execute-point/v1.44.0"
}
}
diff --git a/community/modules/compute/mig/versions.tf b/community/modules/compute/mig/versions.tf
index a06cc1f922..c92b1250a9 100644
--- a/community/modules/compute/mig/versions.tf
+++ b/community/modules/compute/mig/versions.tf
@@ -22,6 +22,6 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:mig/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:mig/v1.44.0"
}
}
diff --git a/community/modules/compute/schedmd-slurm-gcp-v5-node-group/versions.tf b/community/modules/compute/schedmd-slurm-gcp-v5-node-group/versions.tf
index 1de7fda7cd..dd24831a3b 100644
--- a/community/modules/compute/schedmd-slurm-gcp-v5-node-group/versions.tf
+++ b/community/modules/compute/schedmd-slurm-gcp-v5-node-group/versions.tf
@@ -22,7 +22,7 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:schedmd-slurm-gcp-v5-node-group/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:schedmd-slurm-gcp-v5-node-group/v1.44.0"
}
required_version = ">= 1.1"
}
diff --git a/community/modules/compute/schedmd-slurm-gcp-v5-partition/versions.tf b/community/modules/compute/schedmd-slurm-gcp-v5-partition/versions.tf
index c9490670fb..1749023916 100644
--- a/community/modules/compute/schedmd-slurm-gcp-v5-partition/versions.tf
+++ b/community/modules/compute/schedmd-slurm-gcp-v5-partition/versions.tf
@@ -22,7 +22,7 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:schedmd-slurm-gcp-v5-partition/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:schedmd-slurm-gcp-v5-partition/v1.44.0"
}
required_version = ">= 0.13.0"
}
diff --git a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset-dynamic/README.md b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset-dynamic/README.md
index f4da0c0d1a..643ef9ad84 100644
--- a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset-dynamic/README.md
+++ b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset-dynamic/README.md
@@ -74,7 +74,7 @@ modules. For support with the underlying modules, see the instructions in the
| Name | Source | Version |
|------|--------|---------|
-| [slurm\_nodeset\_template](#module\_slurm\_nodeset\_template) | github.com/GoogleCloudPlatform/slurm-gcp.git//terraform/slurm_cluster/modules/slurm_instance_template | 6.8.6 |
+| [slurm\_nodeset\_template](#module\_slurm\_nodeset\_template) | ../../internal/slurm-gcp-v6/instance_template | n/a |
## Resources
diff --git a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset-dynamic/main.tf b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset-dynamic/main.tf
index 183a41e7e7..6dcc872cab 100644
--- a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset-dynamic/main.tf
+++ b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset-dynamic/main.tf
@@ -56,7 +56,7 @@ locals {
}
module "slurm_nodeset_template" {
- source = "github.com/GoogleCloudPlatform/slurm-gcp.git//terraform/slurm_cluster/modules/slurm_instance_template?ref=6.8.6"
+ source = "../../internal/slurm-gcp-v6/instance_template"
project_id = var.project_id
region = var.region
diff --git a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset-dynamic/versions.tf b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset-dynamic/versions.tf
index 5c2c4db34e..781ca820ee 100644
--- a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset-dynamic/versions.tf
+++ b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset-dynamic/versions.tf
@@ -24,6 +24,6 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:schedmd-slurm-gcp-v6-nodeset-dynamic/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:schedmd-slurm-gcp-v6-nodeset-dynamic/v1.44.0"
}
}
diff --git a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset-tpu/versions.tf b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset-tpu/versions.tf
index 6ff683fe1f..361294d4d4 100644
--- a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset-tpu/versions.tf
+++ b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset-tpu/versions.tf
@@ -18,6 +18,6 @@ terraform {
required_version = ">= 1.3"
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:schedmd-slurm-gcp-v6-nodeset-tpu/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:schedmd-slurm-gcp-v6-nodeset-tpu/v1.44.0"
}
}
diff --git a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/README.md b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/README.md
index 8ad8c304f0..297c40bb7a 100644
--- a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/README.md
+++ b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/README.md
@@ -160,7 +160,7 @@ No modules.
|------|-------------|------|---------|:--------:|
| [access\_config](#input\_access\_config) | Access configurations, i.e. IPs via which the VM instance can be accessed via the Internet. |
list(object({
nat_ip = string
network_tier = string
}))
| `[]` | no |
| [additional\_disks](#input\_additional\_disks) | Configurations of additional disks to be included on the partition nodes. | list(object({
disk_name = string
device_name = string
disk_size_gb = number
disk_type = string
disk_labels = map(string)
auto_delete = bool
boot = bool
}))
| `[]` | no |
-| [additional\_networks](#input\_additional\_networks) | Additional network interface details for GCE, if any. | list(object({
network = string
subnetwork = string
subnetwork_project = string
network_ip = string
nic_type = string
stack_type = string
queue_count = number
access_config = list(object({
nat_ip = string
network_tier = string
}))
ipv6_access_config = list(object({
network_tier = string
}))
alias_ip_range = list(object({
ip_cidr_range = string
subnetwork_range_name = string
}))
}))
| `[]` | no |
+| [additional\_networks](#input\_additional\_networks) | Additional network interface details for GCE, if any. | list(object({
network = optional(string)
subnetwork = string
subnetwork_project = optional(string)
network_ip = optional(string, "")
nic_type = optional(string)
stack_type = optional(string)
queue_count = optional(number)
access_config = optional(list(object({
nat_ip = string
network_tier = string
})), [])
ipv6_access_config = optional(list(object({
network_tier = string
})), [])
alias_ip_range = optional(list(object({
ip_cidr_range = string
subnetwork_range_name = string
})), [])
}))
| `[]` | no |
| [allow\_automatic\_updates](#input\_allow\_automatic\_updates) | If false, disables automatic system package updates on the created instances. This feature is
only available on supported images (or images derived from them). For more details, see
https://cloud.google.com/compute/docs/instances/create-hpc-vm#disable_automatic_updates | `bool` | `true` | no |
| [bandwidth\_tier](#input\_bandwidth\_tier) | Configures the network interface card and the maximum egress bandwidth for VMs.
- Setting `platform_default` respects the Google Cloud Platform API default values for networking.
- Setting `virtio_enabled` explicitly selects the VirtioNet network adapter.
- Setting `gvnic_enabled` selects the gVNIC network adapter (without Tier 1 high bandwidth).
- Setting `tier_1_enabled` selects both the gVNIC adapter and Tier 1 high bandwidth networking.
- Note: both gVNIC and Tier 1 networking require a VM image with gVNIC support as well as specific VM families and shapes.
- See [official docs](https://cloud.google.com/compute/docs/networking/configure-vm-with-high-bandwidth-configuration) for more details. | `string` | `"platform_default"` | no |
| [can\_ip\_forward](#input\_can\_ip\_forward) | Enable IP forwarding, for NAT instances for example. | `bool` | `false` | no |
@@ -179,6 +179,7 @@ No modules.
| [enable\_shielded\_vm](#input\_enable\_shielded\_vm) | Enable the Shielded VM configuration. Note: the instance image must support option. | `bool` | `false` | no |
| [enable\_smt](#input\_enable\_smt) | Enables Simultaneous Multi-Threading (SMT) on instance. | `bool` | `false` | no |
| [enable\_spot\_vm](#input\_enable\_spot\_vm) | Enable the partition to use spot VMs (https://cloud.google.com/spot-vms). | `bool` | `false` | no |
+| [future\_reservation](#input\_future\_reservation) | If set, will make use of the future reservation for the nodeset. Input can be either the future reservation name or its selfLink in the format 'projects/PROJECT\_ID/zones/ZONE/futureReservations/FUTURE\_RESERVATION\_NAME'.
See https://cloud.google.com/compute/docs/instances/future-reservations-overview | `string` | `""` | no |
| [guest\_accelerator](#input\_guest\_accelerator) | List of the type and count of accelerator cards attached to the instance. | list(object({
type = string,
count = number
}))
| `[]` | no |
| [instance\_image](#input\_instance\_image) | Defines the image that will be used in the Slurm node group VM instances.
Expected Fields:
name: The name of the image. Mutually exclusive with family.
family: The image family to use. Mutually exclusive with name.
project: The project where the image is hosted.
For more information on creating custom images that comply with Slurm on GCP
see the "Slurm on GCP Custom Images" section in docs/vm-images.md. | `map(string)` | {
"family": "slurm-gcp-6-8-hpc-rocky-linux-8",
"project": "schedmd-slurm-public"
}
| no |
| [instance\_image\_custom](#input\_instance\_image\_custom) | A flag that designates that the user is aware that they are requesting
to use a custom and potentially incompatible image for this Slurm on
GCP module.
If the field is set to false, only the compatible families and project
names will be accepted. The deployment will fail with any other image
family or name. If set to true, no checks will be done.
See: https://goo.gle/hpc-slurm-images | `bool` | `false` | no |
diff --git a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/main.tf b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/main.tf
index 3f283ffade..84cb60457a 100644
--- a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/main.tf
+++ b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/main.tf
@@ -95,6 +95,7 @@ locals {
spot = var.enable_spot_vm
termination_action = try(var.spot_instance_config.termination_action, null)
reservation_name = local.reservation_name
+ future_reservation = local.future_reservation
maintenance_interval = var.maintenance_interval
instance_properties_json = jsonencode(var.instance_properties)
@@ -141,6 +142,17 @@ locals {
reservation_name = local.res_match.whole == null ? "" : "${local.res_prefix}${local.res_short_name}${local.res_suffix}"
}
+locals {
+ fr_match = regex("^(?P<prefix>projects/(?P<project>[a-z0-9-]+)/zones/(?P<zone>[a-z0-9-]+)/futureReservations/)?(?P<name>[a-z0-9-]+)?$", var.future_reservation)
+
+ fr_name = local.fr_match.name
+ fr_project = coalesce(local.fr_match.project, var.project_id)
+ fr_zone = coalesce(local.fr_match.zone, var.zone)
+
+ future_reservation = var.future_reservation == "" ? "" : "projects/${local.fr_project}/zones/${local.fr_zone}/futureReservations/${local.fr_name}"
+}
+
+
# tflint-ignore: terraform_unused_declarations
data "google_compute_reservation" "reservation" {
count = length(local.reservation_name) > 0 ? 1 : 0
diff --git a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/outputs.tf b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/outputs.tf
index b957db13c1..ad78840a38 100644
--- a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/outputs.tf
+++ b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/outputs.tf
@@ -54,4 +54,28 @@ output "nodeset" {
condition = !var.enable_placement || !var.dws_flex.enabled
error_message = "Cannot use DWS Flex with `enable_placement`."
}
+
+ precondition {
+ condition = var.reservation_name == "" || var.future_reservation == ""
+ error_message = "Cannot use reservations and future reservations in the same nodeset"
+ }
+
+ precondition {
+ condition = !var.enable_placement || var.future_reservation == ""
+ error_message = "Cannot use `enable_placement` with future reservations."
+ }
+
+ precondition {
+ condition = var.future_reservation == "" || length(var.zones) == 0
+ error_message = <<-EOD
+ If a future reservation is specified, `var.zones` should be empty.
+ EOD
+ }
+
+ precondition {
+ condition = var.future_reservation == "" || local.fr_zone == var.zone
+ error_message = <<-EOD
The zone of the deployment must match that of the future reservation
+ EOD
+ }
}
diff --git a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/variables.tf b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/variables.tf
index c35faad4e9..3b7e342c32 100644
--- a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/variables.tf
+++ b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/variables.tf
@@ -414,24 +414,24 @@ variable "additional_networks" {
description = "Additional network interface details for GCE, if any."
default = []
type = list(object({
- network = string
+ network = optional(string)
subnetwork = string
- subnetwork_project = string
- network_ip = string
- nic_type = string
- stack_type = string
- queue_count = number
- access_config = list(object({
+ subnetwork_project = optional(string)
+ network_ip = optional(string, "")
+ nic_type = optional(string)
+ stack_type = optional(string)
+ queue_count = optional(number)
+ access_config = optional(list(object({
nat_ip = string
network_tier = string
- }))
- ipv6_access_config = list(object({
+ })), [])
+ ipv6_access_config = optional(list(object({
network_tier = string
- }))
- alias_ip_range = list(object({
+ })), [])
+ alias_ip_range = optional(list(object({
ip_cidr_range = string
subnetwork_range_name = string
- }))
+ })), [])
}))
}
@@ -463,6 +463,21 @@ variable "reservation_name" {
}
}
+variable "future_reservation" {
+ description = <<-EOD
+ If set, will make use of the future reservation for the nodeset. Input can be either the future reservation name or its selfLink in the format 'projects/PROJECT_ID/zones/ZONE/futureReservations/FUTURE_RESERVATION_NAME'.
+ See https://cloud.google.com/compute/docs/instances/future-reservations-overview
+ EOD
+ type = string
+ default = ""
+ nullable = false
+
+ validation {
+ condition = length(regexall("^(projects/([a-z0-9-]+)/zones/([a-z0-9-]+)/futureReservations/([a-z0-9-]+))?$", var.future_reservation)) > 0 || length(regexall("^([a-z0-9-]+)$", var.future_reservation)) > 0
+ error_message = "Future reservation must be either the future reservation name or its selfLink in the format 'projects/PROJECT_ID/zones/ZONE/futureReservations/FUTURE_RESERVATION_NAME'."
+ }
+}
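+
+# Illustrative values accepted by var.future_reservation (the project, zone,
+# and reservation names below are hypothetical):
+#   future_reservation = "my-future-reservation"
+#   future_reservation = "projects/my-project/zones/us-central1-a/futureReservations/my-future-reservation"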
+
variable "maintenance_interval" {
description = <<-EOD
Sets the maintenance interval for instances in this nodeset.
diff --git a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/versions.tf b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/versions.tf
index c1a45f1caa..85ef700e96 100644
--- a/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/versions.tf
+++ b/community/modules/compute/schedmd-slurm-gcp-v6-nodeset/versions.tf
@@ -24,6 +24,6 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:schedmd-slurm-gcp-v6-nodeset/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:schedmd-slurm-gcp-v6-nodeset/v1.44.0"
}
}
diff --git a/community/modules/compute/schedmd-slurm-gcp-v6-partition/versions.tf b/community/modules/compute/schedmd-slurm-gcp-v6-partition/versions.tf
index 0346fb9040..7c71c60486 100644
--- a/community/modules/compute/schedmd-slurm-gcp-v6-partition/versions.tf
+++ b/community/modules/compute/schedmd-slurm-gcp-v6-partition/versions.tf
@@ -18,6 +18,6 @@ terraform {
required_version = ">= 1.3"
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:schedmd-slurm-gcp-v6-partition/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:schedmd-slurm-gcp-v6-partition/v1.44.0"
}
}
diff --git a/community/modules/database/slurm-cloudsql-federation/versions.tf b/community/modules/database/slurm-cloudsql-federation/versions.tf
index 890845fb7d..a958ad16a3 100644
--- a/community/modules/database/slurm-cloudsql-federation/versions.tf
+++ b/community/modules/database/slurm-cloudsql-federation/versions.tf
@@ -26,10 +26,10 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:slurm-cloudsql-federation/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:slurm-cloudsql-federation/v1.44.0"
}
provider_meta "google-beta" {
- module_name = "blueprints/terraform/hpc-toolkit:slurm-cloudsql-federation/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:slurm-cloudsql-federation/v1.44.0"
}
required_version = ">= 0.13.0"
diff --git a/community/modules/file-system/cloud-storage-bucket/versions.tf b/community/modules/file-system/cloud-storage-bucket/versions.tf
index 1e280fa3ab..72ccbf44bd 100644
--- a/community/modules/file-system/cloud-storage-bucket/versions.tf
+++ b/community/modules/file-system/cloud-storage-bucket/versions.tf
@@ -26,7 +26,7 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:cloud-storage-bucket/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:cloud-storage-bucket/v1.44.0"
}
required_version = ">= 0.14.0"
}
diff --git a/community/modules/file-system/nfs-server/versions.tf b/community/modules/file-system/nfs-server/versions.tf
index 6743a00352..2b8a1a8c21 100644
--- a/community/modules/file-system/nfs-server/versions.tf
+++ b/community/modules/file-system/nfs-server/versions.tf
@@ -30,7 +30,7 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:nfs-server/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:nfs-server/v1.44.0"
}
required_version = ">= 0.14.0"
diff --git a/community/modules/files/fsi-montecarlo-on-batch/versions.tf b/community/modules/files/fsi-montecarlo-on-batch/versions.tf
index 02028038dc..5ba66bd4c5 100644
--- a/community/modules/files/fsi-montecarlo-on-batch/versions.tf
+++ b/community/modules/files/fsi-montecarlo-on-batch/versions.tf
@@ -35,9 +35,9 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:fsi-montecarlo-on-batch/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:fsi-montecarlo-on-batch/v1.44.0"
}
provider_meta "google-beta" {
- module_name = "blueprints/terraform/hpc-toolkit:fsi-montecarlo-on-batch/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:fsi-montecarlo-on-batch/v1.44.0"
}
}
diff --git a/community/modules/internal/slurm-gcp-v6/instance/README.md b/community/modules/internal/slurm-gcp-v6/instance/README.md
new file mode 100644
index 0000000000..fadb65bac6
--- /dev/null
+++ b/community/modules/internal/slurm-gcp-v6/instance/README.md
@@ -0,0 +1,109 @@
+# Module: Slurm Instance
+
+
+
+- [Module: Slurm Instance](#module-slurm-instance)
+ - [Overview](#overview)
+ - [Module API](#module-api)
+
+
+
+## Overview
+
+This module creates a [compute instance](../../../../docs/glossary.md#vm) from an
+[instance template](../../../../docs/glossary.md#instance-template) for a
+[Slurm cluster](../slurm_cluster/README.md).
+
+> **NOTE:** This module is only intended to be used by Slurm modules. For
+> general usage, please consider using:
+>
+> - [terraform-google-modules/vm/google//modules/compute_instance](https://registry.terraform.io/modules/terraform-google-modules/vm/google/latest/submodules/compute_instance).
+>
+> **WARNING:** The source image is not modified. Make sure to use a compatible
+> source image.
+
+## Module API
+
+For the terraform module API reference, please see
+[README_TF.md](./README_TF.md).
+
+
+Copyright (C) SchedMD LLC.
+Copyright 2018 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | ~> 1.0 |
+| [google](#requirement\_google) | >= 3.43 |
+| [local](#requirement\_local) | ~> 2.0 |
+| [null](#requirement\_null) | ~> 3.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [google](#provider\_google) | >= 3.43 |
+| [local](#provider\_local) | ~> 2.0 |
+| [null](#provider\_null) | ~> 3.0 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [google_compute_instance_from_template.slurm_instance](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_instance_from_template) | resource |
+| [null_resource.replace_trigger](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [google_compute_instance_template.base](https://registry.terraform.io/providers/hashicorp/google/latest/docs/data-sources/compute_instance_template) | data source |
+| [google_compute_zones.available](https://registry.terraform.io/providers/hashicorp/google/latest/docs/data-sources/compute_zones) | data source |
+| [local_file.startup](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [access\_config](#input\_access\_config) | Access configurations, i.e. IPs via which the VM instance can be accessed via the Internet. | <pre>list(object({<br>  nat_ip       = string<br>  network_tier = string<br>}))</pre> | `[]` | no |
+| [add\_hostname\_suffix](#input\_add\_hostname\_suffix) | Adds a suffix to the hostname | `bool` | `true` | no |
+| [additional\_networks](#input\_additional\_networks) | Additional network interface details for GCE, if any. | <pre>list(object({<br>  access_config = optional(list(object({<br>    nat_ip       = string<br>    network_tier = string<br>  })), [])<br>  alias_ip_range = optional(list(object({<br>    ip_cidr_range         = string<br>    subnetwork_range_name = string<br>  })), [])<br>  ipv6_access_config = optional(list(object({<br>    network_tier = string<br>  })), [])<br>  network            = optional(string)<br>  network_ip         = optional(string, "")<br>  nic_type           = optional(string)<br>  queue_count        = optional(number)<br>  stack_type         = optional(string)<br>  subnetwork         = optional(string)<br>  subnetwork_project = optional(string)<br>}))</pre> | `[]` | no |
+| [hostname](#input\_hostname) | Hostname of instances | `string` | `""` | no |
+| [hostname\_suffix\_separator](#input\_hostname\_suffix\_separator) | Separator character to compose hostname when add\_hostname\_suffix is set to true. | `string` | `"-"` | no |
+| [instance\_template](#input\_instance\_template) | Instance template self\_link used to create compute instances | `string` | n/a | yes |
+| [labels](#input\_labels) | Labels, provided as a map. Merged and takes precedence over labels on instance template | `map(string)` | `{}` | no |
+| [metadata](#input\_metadata) | Metadata, provided as a map | `map(string)` | `{}` | no |
+| [network](#input\_network) | Network to deploy to. Only one of network or subnetwork should be specified. | `string` | `""` | no |
+| [num\_instances](#input\_num\_instances) | Number of instances to create. This value is ignored if static\_ips is provided. | `number` | `1` | no |
+| [project\_id](#input\_project\_id) | The GCP project ID | `string` | `null` | no |
+| [region](#input\_region) | Region where the instances should be created. | `string` | `null` | no |
+| [replace\_trigger](#input\_replace\_trigger) | Trigger value to replace the instances. | `string` | `""` | no |
+| [slurm\_cluster\_name](#input\_slurm\_cluster\_name) | Cluster name, used for resource naming. | `string` | n/a | yes |
+| [slurm\_instance\_role](#input\_slurm\_instance\_role) | Slurm instance type. Must be one of: controller; login; compute. | `string` | `null` | no |
+| [static\_ips](#input\_static\_ips) | List of static IPs for VM instances | `list(string)` | `[]` | no |
+| [subnetwork](#input\_subnetwork) | Subnet to deploy to. Only one of network or subnetwork should be specified. | `string` | `""` | no |
+| [subnetwork\_project](#input\_subnetwork\_project) | The project that subnetwork belongs to | `string` | `null` | no |
+| [zone](#input\_zone) | Zone where the instances should be created. If not specified, instances will be spread across available zones in the region. | `string` | `null` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [available\_zones](#output\_available\_zones) | List of available zones in region |
+| [instances\_details](#output\_instances\_details) | List of all details for compute instances |
+| [instances\_self\_links](#output\_instances\_self\_links) | List of self-links for compute instances |
+| [names](#output\_names) | List of names for compute instances |
+| [slurm\_instances](#output\_slurm\_instances) | List of all resource objects for compute instances |
+
diff --git a/community/modules/internal/slurm-gcp-v6/instance/main.tf b/community/modules/internal/slurm-gcp-v6/instance/main.tf
new file mode 100644
index 0000000000..749ca9d3b3
--- /dev/null
+++ b/community/modules/internal/slurm-gcp-v6/instance/main.tf
@@ -0,0 +1,153 @@
+/**
+ * Copyright (C) SchedMD LLC.
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+##########
+# LOCALS #
+##########
+
+locals {
+ hostname = var.hostname == "" ? "default" : var.hostname
+ num_instances = length(var.static_ips) == 0 ? var.num_instances : length(var.static_ips)
+
+ # local.static_ips is the same as var.static_ips with a dummy element appended
+ # at the end of the list to work around "list does not have any elements so cannot
+ # determine type" error when var.static_ips is empty
+ static_ips = concat(var.static_ips, ["NOT_AN_IP"])
+}
+
+#################
+# LOCALS: SLURM #
+#################
+
+locals {
+ network_interfaces = [for index in range(local.num_instances) :
+ concat([
+ {
+ access_config = var.access_config
+ alias_ip_range = []
+ ipv6_access_config = []
+ network = var.network
+ network_ip = length(var.static_ips) == 0 ? "" : element(local.static_ips, index)
+ nic_type = null
+ queue_count = null
+ stack_type = null
+ subnetwork = var.subnetwork
+ subnetwork_project = var.subnetwork_project
+ }
+ ],
+ var.additional_networks
+ )
+ ]
+
+ slurm_instance_role = lower(var.slurm_instance_role)
+
+}
+
+################
+# DATA SOURCES #
+################
+
+data "google_compute_zones" "available" {
+ project = var.project_id
+ region = var.region
+}
+
+data "google_compute_instance_template" "base" {
+ project = var.project_id
+ name = var.instance_template
+}
+
+data "local_file" "startup" {
+ filename = "${path.module}/../instance_template/files/startup_sh_unlinted"
+}
+
+#############
+# INSTANCES #
+#############
+resource "null_resource" "replace_trigger" {
+ triggers = {
+ trigger = var.replace_trigger
+ }
+}
+
+resource "google_compute_instance_from_template" "slurm_instance" {
+ count = local.num_instances
+ name = var.add_hostname_suffix ? format("%s%s%s", local.hostname, var.hostname_suffix_separator, format("%03d", count.index + 1)) : local.hostname
+ project = var.project_id
+ zone = var.zone == null ? data.google_compute_zones.available.names[count.index % length(data.google_compute_zones.available.names)] : var.zone
+
+ allow_stopping_for_update = true
+
+ dynamic "network_interface" {
+ for_each = local.network_interfaces[count.index]
+ iterator = nic
+ content {
+ dynamic "access_config" {
+ for_each = nic.value.access_config
+ content {
+ nat_ip = access_config.value.nat_ip
+ network_tier = access_config.value.network_tier
+ }
+ }
+ dynamic "alias_ip_range" {
+ for_each = nic.value.alias_ip_range
+ content {
+ ip_cidr_range = alias_ip_range.value.ip_cidr_range
+ subnetwork_range_name = alias_ip_range.value.subnetwork_range_name
+ }
+ }
+ dynamic "ipv6_access_config" {
+ for_each = nic.value.ipv6_access_config
+ iterator = access_config
+ content {
+ network_tier = access_config.value.network_tier
+ }
+ }
+ network = nic.value.network
+ network_ip = nic.value.network_ip
+ nic_type = nic.value.nic_type
+ queue_count = nic.value.queue_count
+ subnetwork = nic.value.subnetwork
+ subnetwork_project = nic.value.subnetwork_project
+ }
+ }
+
+ source_instance_template = data.google_compute_instance_template.base.self_link
+
+ # Slurm
+ labels = merge(
+ data.google_compute_instance_template.base.labels,
+ var.labels,
+ {
+ slurm_cluster_name = var.slurm_cluster_name
+ slurm_instance_role = local.slurm_instance_role
+ },
+ )
+ metadata = merge(
+ data.google_compute_instance_template.base.metadata,
+ var.metadata,
+ {
+ slurm_cluster_name = var.slurm_cluster_name
+ slurm_instance_role = local.slurm_instance_role
+ startup-script = data.local_file.startup.content
+ },
+ )
+
+ lifecycle {
+ replace_triggered_by = [null_resource.replace_trigger.id]
+ }
+}
diff --git a/community/modules/internal/slurm-gcp-v6/instance/outputs.tf b/community/modules/internal/slurm-gcp-v6/instance/outputs.tf
new file mode 100644
index 0000000000..4eba78a7e8
--- /dev/null
+++ b/community/modules/internal/slurm-gcp-v6/instance/outputs.tf
@@ -0,0 +1,41 @@
+/**
+ * Copyright (C) SchedMD LLC.
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+output "slurm_instances" {
+ description = "List of all resource objects for compute instances"
+ value = google_compute_instance_from_template.slurm_instance
+}
+
+output "instances_self_links" {
+ description = "List of self-links for compute instances"
+ value = google_compute_instance_from_template.slurm_instance[*].self_link
+}
+
+output "instances_details" {
+ description = "List of all details for compute instances"
+ value = google_compute_instance_from_template.slurm_instance[*]
+}
+
+output "available_zones" {
+ description = "List of available zones in region"
+ value = data.google_compute_zones.available.names
+}
+
+output "names" {
+ description = "List of available zones in region"
+ value = google_compute_instance_from_template.slurm_instance[*].name
+}
diff --git a/community/modules/internal/slurm-gcp-v6/instance/variables.tf b/community/modules/internal/slurm-gcp-v6/instance/variables.tf
new file mode 100644
index 0000000000..697d5c4b98
--- /dev/null
+++ b/community/modules/internal/slurm-gcp-v6/instance/variables.tf
@@ -0,0 +1,161 @@
+/**
+ * Copyright (C) SchedMD LLC.
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+variable "project_id" {
+ type = string
+ description = "The GCP project ID"
+ default = null
+}
+
+variable "network" {
+ description = "Network to deploy to. Only one of network or subnetwork should be specified."
+ type = string
+ default = ""
+}
+
+variable "subnetwork" {
+ description = "Subnet to deploy to. Only one of network or subnetwork should be specified."
+ type = string
+ default = ""
+}
+
+variable "subnetwork_project" {
+ description = "The project that subnetwork belongs to"
+ type = string
+ default = null
+}
+
+variable "hostname" {
+ description = "Hostname of instances"
+ type = string
+ default = ""
+}
+
+variable "add_hostname_suffix" {
+ description = "Adds a suffix to the hostname"
+ type = bool
+ default = true
+}
+
+variable "additional_networks" {
+ description = "Additional network interface details for GCE, if any."
+ default = []
+ type = list(object({
+ access_config = optional(list(object({
+ nat_ip = string
+ network_tier = string
+ })), [])
+ alias_ip_range = optional(list(object({
+ ip_cidr_range = string
+ subnetwork_range_name = string
+ })), [])
+ ipv6_access_config = optional(list(object({
+ network_tier = string
+ })), [])
+ network = optional(string)
+ network_ip = optional(string, "")
+ nic_type = optional(string)
+ queue_count = optional(number)
+ stack_type = optional(string)
+ subnetwork = optional(string)
+ subnetwork_project = optional(string)
+ }))
+ nullable = false
+}
+
+variable "static_ips" {
+ description = "List of static IPs for VM instances"
+ type = list(string)
+ default = []
+}
+
+variable "access_config" {
+ description = "Access configurations, i.e. IPs via which the VM instance can be accessed via the Internet."
+ type = list(object({
+ nat_ip = string
+ network_tier = string
+ }))
+ default = []
+}
+
+variable "num_instances" {
+ description = "Number of instances to create. This value is ignored if static_ips is provided."
+ type = number
+ default = 1
+}
+
+variable "instance_template" {
+ description = "Instance template self_link used to create compute instances"
+ type = string
+}
+
+variable "region" {
+ description = "Region where the instances should be created."
+ type = string
+ default = null
+}
+
+variable "zone" {
+ description = "Zone where the instances should be created. If not specified, instances will be spread across available zones in the region."
+ type = string
+ default = null
+}
+
+variable "hostname_suffix_separator" {
+ description = "Separator character to compose hostname when add_hostname_suffix is set to true."
+ type = string
+ default = "-"
+}
+
+variable "metadata" {
+ type = map(string)
+ description = "Metadata, provided as a map"
+ default = {}
+}
+
+variable "labels" {
+ type = map(string)
+ description = "Labels, provided as a map. Merged and takes precedence over labels on instance template"
+ default = {}
+}
+
+#########
+# SLURM #
+#########
+
+variable "slurm_instance_role" {
+ description = "Slurm instance type. Must be one of: controller; login; compute."
+ type = string
+ default = null
+
+ validation {
+ condition = contains(["controller", "login", "compute"], lower(var.slurm_instance_role))
+ error_message = "Must be one of: controller; login; compute."
+ }
+}
+
+variable "slurm_cluster_name" {
+ description = "Cluster name, used for resource naming."
+ type = string
+}
+
+
+variable "replace_trigger" {
+ description = "Trigger value to replace the instances."
+ type = string
+ default = ""
+}
diff --git a/community/modules/internal/slurm-gcp-v6/instance/versions.tf b/community/modules/internal/slurm-gcp-v6/instance/versions.tf
new file mode 100644
index 0000000000..293a1ef8ca
--- /dev/null
+++ b/community/modules/internal/slurm-gcp-v6/instance/versions.tf
@@ -0,0 +1,35 @@
+/**
+ * Copyright (C) SchedMD LLC.
+ * Copyright 2018 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+terraform {
+ required_version = "~> 1.0"
+
+ required_providers {
+ google = {
+ source = "hashicorp/google"
+ version = ">= 3.43"
+ }
+ local = {
+ source = "hashicorp/local"
+ version = "~> 2.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = "~> 3.0"
+ }
+ }
+}
diff --git a/community/modules/internal/slurm-gcp-v6/instance_template/README.md b/community/modules/internal/slurm-gcp-v6/instance_template/README.md
new file mode 100644
index 0000000000..8cef4311ca
--- /dev/null
+++ b/community/modules/internal/slurm-gcp-v6/instance_template/README.md
@@ -0,0 +1,80 @@
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | ~> 1.0 |
+| [local](#requirement\_local) | ~> 2.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [local](#provider\_local) | ~> 2.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [instance\_template](#module\_instance\_template) | ../internal_instance_template | n/a |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [local_file.startup](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [access\_config](#input\_access\_config) | Access configurations, i.e. IPs via which the VM instance can be accessed via the Internet. | <pre>list(object({<br>  nat_ip       = string<br>  network_tier = string<br>}))</pre> | `[]` | no |
+| [additional\_disks](#input\_additional\_disks) | List of maps of disks. | <pre>list(object({<br>  disk_name    = string<br>  device_name  = string<br>  disk_type    = string<br>  disk_size_gb = number<br>  disk_labels  = map(string)<br>  auto_delete  = bool<br>  boot         = bool<br>}))</pre> | `[]` | no |
+| [additional\_networks](#input\_additional\_networks) | Additional network interface details for GCE, if any. | <pre>list(object({<br>  network            = string<br>  subnetwork         = string<br>  subnetwork_project = string<br>  network_ip         = string<br>  nic_type           = string<br>  access_config = list(object({<br>    nat_ip       = string<br>    network_tier = string<br>  }))<br>  ipv6_access_config = list(object({<br>    network_tier = string<br>  }))<br>}))</pre> | `[]` | no |
+| [bandwidth\_tier](#input\_bandwidth\_tier) | Tier 1 bandwidth increases the maximum egress bandwidth for VMs.<br>Using the `virtio_enabled` setting will only enable VirtioNet and will not enable TIER\_1.<br>Using the `tier_1_enabled` setting will enable both gVNIC and TIER\_1 higher bandwidth networking.<br>Using the `gvnic_enabled` setting will only enable gVNIC and will not enable TIER\_1.<br>Note that TIER\_1 only works with specific machine families & shapes and must be using an image that supports gVNIC. See [official docs](https://cloud.google.com/compute/docs/networking/configure-vm-with-high-bandwidth-configuration) for more details. | `string` | `"platform_default"` | no |
+| [can\_ip\_forward](#input\_can\_ip\_forward) | Enable IP forwarding, for NAT instances for example. | `bool` | `false` | no |
+| [disable\_smt](#input\_disable\_smt) | Disables Simultaneous Multi-Threading (SMT) on instance. | `bool` | `false` | no |
+| [disk\_auto\_delete](#input\_disk\_auto\_delete) | Whether or not the boot disk should be auto-deleted. | `bool` | `true` | no |
+| [disk\_labels](#input\_disk\_labels) | Labels to be assigned to boot disk, provided as a map. | `map(string)` | `{}` | no |
+| [disk\_size\_gb](#input\_disk\_size\_gb) | Boot disk size in GB. | `number` | `100` | no |
+| [disk\_type](#input\_disk\_type) | Boot disk type, can be either pd-ssd, local-ssd, or pd-standard. | `string` | `"pd-standard"` | no |
+| [enable\_confidential\_vm](#input\_enable\_confidential\_vm) | Enable the Confidential VM configuration. Note: the instance image must support option. | `bool` | `false` | no |
+| [enable\_oslogin](#input\_enable\_oslogin) | Enables Google Cloud os-login for user login and authentication for VMs.<br>See https://cloud.google.com/compute/docs/oslogin | `bool` | `true` | no |
+| [enable\_shielded\_vm](#input\_enable\_shielded\_vm) | Enable the Shielded VM configuration. Note: the instance image must support option. | `bool` | `false` | no |
+| [gpu](#input\_gpu) | GPU information. Type and count of GPU to attach to the instance template. See<br>https://cloud.google.com/compute/docs/gpus more details.<br>- type : the GPU type<br>- count : number of GPUs | <pre>object({<br>  type  = string<br>  count = number<br>})</pre> | `null` | no |
+| [labels](#input\_labels) | Labels, provided as a map | `map(string)` | `{}` | no |
+| [machine\_type](#input\_machine\_type) | Machine type to create. | `string` | `"n1-standard-1"` | no |
+| [metadata](#input\_metadata) | Metadata, provided as a map. | `map(string)` | `{}` | no |
+| [min\_cpu\_platform](#input\_min\_cpu\_platform) | Specifies a minimum CPU platform. Applicable values are the friendly names of<br>CPU platforms, such as Intel Haswell or Intel Skylake. See the complete list:<br>https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform | `string` | `null` | no |
+| [name\_prefix](#input\_name\_prefix) | Prefix for template resource. | `string` | `"default"` | no |
+| [network](#input\_network) | The name or self\_link of the network to attach this interface to. Use network<br>attribute for Legacy or Auto subnetted networks and subnetwork for custom<br>subnetted networks. | `string` | `null` | no |
+| [network\_ip](#input\_network\_ip) | Private IP address to assign to the instance if desired. | `string` | `""` | no |
+| [on\_host\_maintenance](#input\_on\_host\_maintenance) | Instance availability Policy | `string` | `"MIGRATE"` | no |
+| [preemptible](#input\_preemptible) | Allow the instance to be preempted. | `bool` | `false` | no |
+| [project\_id](#input\_project\_id) | Project ID to create resources in. | `string` | n/a | yes |
+| [region](#input\_region) | Region where the instance template should be created. | `string` | `null` | no |
+| [resource\_policies](#input\_resource\_policies) | A list of self\_links of resource policies to attach to the instance.<br>Currently a max of 1 resource policy is supported. | `list(string)` | `null` | no |
+| [service\_account](#input\_service\_account) | Service account to attach to the instances. See<br>'main.tf:local.service\_account' for the default. | <pre>object({<br>  email  = string<br>  scopes = set(string)<br>})</pre> | `null` | no |
+| [shielded\_instance\_config](#input\_shielded\_instance\_config) | Shielded VM configuration for the instance. Note: not used unless<br>enable\_shielded\_vm is 'true'.<br>- enable\_integrity\_monitoring : Compare the most recent boot measurements to the<br>integrity policy baseline and return a pair of pass/fail results depending on<br>whether they match or not.<br>- enable\_secure\_boot : Verify the digital signature of all boot components, and<br>halt the boot process if signature verification fails.<br>- enable\_vtpm : Use a virtualized trusted platform module, which is a<br>specialized computer chip you can use to encrypt objects like keys and<br>certificates. | <pre>object({<br>  enable_integrity_monitoring = bool<br>  enable_secure_boot          = bool<br>  enable_vtpm                 = bool<br>})</pre> | <pre>{<br>  "enable_integrity_monitoring": true,<br>  "enable_secure_boot": true,<br>  "enable_vtpm": true<br>}</pre> | no |
+| [slurm\_bucket\_path](#input\_slurm\_bucket\_path) | GCS Bucket URI of Slurm cluster file storage. | `string` | n/a | yes |
+| [slurm\_cluster\_name](#input\_slurm\_cluster\_name) | Cluster name, used for resource naming. | `string` | n/a | yes |
+| [slurm\_instance\_role](#input\_slurm\_instance\_role) | Slurm instance type. Must be one of: controller; login; compute; or null. | `string` | `null` | no |
+| [source\_image](#input\_source\_image) | Source disk image. | `string` | `""` | no |
+| [source\_image\_family](#input\_source\_image\_family) | Source image family. | `string` | `""` | no |
+| [source\_image\_project](#input\_source\_image\_project) | Project where the source image comes from. If it is not provided, the provider project is used. | `string` | `""` | no |
+| [spot](#input\_spot) | Provision as a SPOT preemptible instance.<br>See https://cloud.google.com/compute/docs/instances/spot for more details. | `bool` | `false` | no |
+| [subnetwork](#input\_subnetwork) | The name of the subnetwork to attach this interface to. The subnetwork must<br>exist in the same region this instance will be created in. Either network or<br>subnetwork must be provided. | `string` | `null` | no |
+| [subnetwork\_project](#input\_subnetwork\_project) | The ID of the project in which the subnetwork belongs. If it is not provided, the provider project is used. | `string` | `null` | no |
+| [tags](#input\_tags) | Network tag list. | `list(string)` | `[]` | no |
+| [termination\_action](#input\_termination\_action) | Which action to take when Compute Engine preempts the VM. Value can be: 'STOP', 'DELETE'. The default value is 'STOP'.<br>See https://cloud.google.com/compute/docs/instances/spot for more details. | `string` | `"STOP"` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [instance\_template](#output\_instance\_template) | Instance template details |
+| [name](#output\_name) | Name of instance template |
+| [self\_link](#output\_self\_link) | Self\_link of instance template |
+| [service\_account](#output\_service\_account) | Service account object, includes email and scopes. |
+| [tags](#output\_tags) | Tags that will be associated with instance(s) |
+
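+A minimal usage sketch (hypothetical values; in practice the Slurm cluster
+modules call this template module internally):
+
+```hcl
+module "slurm_login_template" {
+  source = "../instance_template" # illustrative relative path
+
+  project_id          = "my-project"          # hypothetical project
+  slurm_cluster_name  = "demo"
+  slurm_instance_role = "login"
+  slurm_bucket_path   = "gs://my-bucket/demo" # hypothetical bucket used for cluster file storage
+  region              = "us-central1"
+  subnetwork          = "default"
+  machine_type        = "n2-standard-2"
+}
+```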
diff --git a/community/modules/internal/slurm-gcp-v6/instance_template/files/startup_sh_unlinted b/community/modules/internal/slurm-gcp-v6/instance_template/files/startup_sh_unlinted
new file mode 100755
index 0000000000..9918411af5
--- /dev/null
+++ b/community/modules/internal/slurm-gcp-v6/instance_template/files/startup_sh_unlinted
@@ -0,0 +1,147 @@
+#!/bin/bash
+# Copyright (C) SchedMD LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+SLURM_DIR=/slurm
+FLAGFILE=$SLURM_DIR/slurm_configured_do_not_remove
+SCRIPTS_DIR=$SLURM_DIR/scripts
+if [[ -z "$HOME" ]]; then
+ # google-startup-scripts.service lacks environment variables
+ HOME="$(getent passwd "$(whoami)" | cut -d: -f6)"
+fi
+
+METADATA_SERVER="metadata.google.internal"
+URL="http://$METADATA_SERVER/computeMetadata/v1"
+CURL="curl -sS --fail --header Metadata-Flavor:Google"
+
+PING_METADATA="ping -q -w1 -c1 $METADATA_SERVER"
+echo "INFO: $PING_METADATA"
+for i in $(seq 10); do
+ [ $i -gt 1 ] && sleep 5;
+ $PING_METADATA > /dev/null && s=0 && break || s=$?;
+ echo "ERROR: Failed to contact metadata server, will retry"
+done
+if [ $s -ne 0 ]; then
+ echo "ERROR: Unable to contact metadata server, aborting"
+ wall -n '*** Slurm setup failed in the startup script! see `journalctl -u google-startup-scripts` ***'
+ exit 1
+else
+ echo "INFO: Successfully contacted metadata server"
+fi
+
+PING_GOOGLE="ping -q -w1 -c1 8.8.8.8"
+echo "INFO: $PING_GOOGLE"
+for i in $(seq 5); do
+ [ $i -gt 1 ] && sleep 2;
+ $PING_GOOGLE > /dev/null && s=0 && break || s=$?;
+ echo "failed to ping Google DNS, will retry"
+done
+if [ $s -ne 0 ]; then
+ echo "WARNING: No internet access detected"
+else
+ echo "INFO: Internet access detected"
+fi
+
+mkdir -p $SCRIPTS_DIR
+UNIVERSE_DOMAIN="$($CURL $URL/instance/attributes/universe_domain)"
+BUCKET="$($CURL $URL/instance/attributes/slurm_bucket_path)"
+if [[ -z $BUCKET ]]; then
+ echo "ERROR: No bucket path detected."
+ exit 1
+fi
+
+SCRIPTS_ZIP="$HOME/slurm-gcp-scripts.zip"
+export CLOUDSDK_CORE_UNIVERSE_DOMAIN="$UNIVERSE_DOMAIN"
+until gcloud storage cp "$BUCKET/slurm-gcp-devel.zip" "$SCRIPTS_ZIP"; do
+ echo "WARN: Could not download SlurmGCP scripts, retrying in 5 seconds."
+ sleep 5
+done
+unzip -o "$SCRIPTS_ZIP" -d "$SCRIPTS_DIR"
+rm -rf "$SCRIPTS_ZIP"
+
+# Temporary hack so the script does not fail on TPU VMs
+chown slurm:slurm -R "$SCRIPTS_DIR" || true
+chmod 700 -R "$SCRIPTS_DIR"
+
+
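+# Skip reconfiguration on reboot: the flag file marks a node where Slurm setup already completed.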
+if [ -f $FLAGFILE ]; then
+ echo "WARNING: Slurm was previously configured, quitting"
+ exit 0
+fi
+touch $FLAGFILE
+
+function tpu_setup {
+ #allow the following command to fail, as this attribute does not exist for regular nodes
+ docker_image=$($CURL $URL/instance/attributes/slurm_docker_image 2> /dev/null || true)
+ if [ -z $docker_image ]; then #Not a tpu node, do not do anything
+ return
+ fi
+ if [ "$OS_ENV" == "slurm_container" ]; then #Already inside the slurm container, we should continue starting
+ return
+ fi
+
+  #given an input_string like "WORKER_0:Joseph;WORKER_1:richard;WORKER_2:edward;WORKER_3:john" and a number 1, this function will print richard
+ parse_metadata() {
+ local number=$1
+ local input_string=$2
+ local word=$(echo "$input_string" | awk -v n="$number" -F ':|;' '{ for (i = 1; i <= NF; i+=2) if ($(i) == "WORKER_"n) print $(i+1) }')
+ echo "$word"
+ }
+
+ input_string=$($CURL $URL/instance/attributes/slurm_names)
+ worker_id=$($CURL $URL/instance/attributes/tpu-env | awk '/WORKER_ID/ {print $2}' | tr -d \')
+ real_name=$(parse_metadata $worker_id $input_string)
+
+ #Prepare to docker pull with gcloud
+ mkdir -p /root/.docker
+ cat << EOF > /root/.docker/config.json
+{
+ "credHelpers": {
+ "gcr.io": "gcloud",
+ "us-docker.pkg.dev": "gcloud"
+ }
+}
+EOF
+ #cgroup detection
+ CGV=1
+ CGROUP_FLAGS="-v /sys/fs/cgroup:/sys/fs/cgroup:rw"
+ if [ -f /sys/fs/cgroup/cgroup.controllers ]; then #CGV2
+ CGV=2
+ fi
+ if [ $CGV == 2 ]; then
+ CGROUP_FLAGS="--cgroup-parent=docker.slice --cgroupns=private --tmpfs /run --tmpfs /run/lock --tmpfs /tmp"
+ if [ ! -f /etc/systemd/system/docker.slice ]; then #In case that there is no slice prepared for hosting the containers create it
+ printf "[Unit]\nDescription=docker slice\nBefore=slices.target\n[Slice]\nCPUAccounting=true\nMemoryAccounting=true" > /etc/systemd/system/docker.slice
+ systemctl start docker.slice
+ fi
+ fi
+ #for the moment always use --privileged, as systemd might not work properly otherwise
+ TPU_FLAGS="--privileged"
+ # TPU_FLAGS="--cap-add SYS_RESOURCE --device /dev/accel0 --device /dev/accel1 --device /dev/accel2 --device /dev/accel3"
+ # if [ $CGV == 2 ]; then #In case that we are in CGV2 for systemd to work correctly for the moment we go with privileged
+ # TPU_FLAGS="--privileged"
+ # fi
+
+ docker run -d $CGROUP_FLAGS $TPU_FLAGS --net=host --name=slurmd --hostname=$real_name --entrypoint=/usr/bin/systemd --restart unless-stopped $docker_image
+ exit 0
+}
+
+tpu_setup #will do nothing for normal nodes or the container spawned inside TPU
+
+echo "INFO: Running python cluster setup script"
+SETUP_SCRIPT_FILE=$SCRIPTS_DIR/setup.py
+chmod +x $SETUP_SCRIPT_FILE
+exec $SETUP_SCRIPT_FILE
diff --git a/community/modules/internal/slurm-gcp-v6/instance_template/main.tf b/community/modules/internal/slurm-gcp-v6/instance_template/main.tf
new file mode 100644
index 0000000000..64c4caa0a6
--- /dev/null
+++ b/community/modules/internal/slurm-gcp-v6/instance_template/main.tf
@@ -0,0 +1,161 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+##########
+# LOCALS #
+##########
+
+locals {
+ additional_disks = [
+ for disk in var.additional_disks : {
+ disk_name = disk.disk_name
+ device_name = disk.device_name
+ auto_delete = disk.auto_delete
+ boot = disk.boot
+ disk_size_gb = disk.disk_size_gb
+ disk_type = disk.disk_type
+ disk_labels = merge(
+ disk.disk_labels,
+ {
+ slurm_cluster_name = var.slurm_cluster_name
+ slurm_instance_role = local.slurm_instance_role
+ },
+ )
+ }
+ ]
+
+ service_account = {
+ email = try(var.service_account.email, null)
+ scopes = try(var.service_account.scopes, ["https://www.googleapis.com/auth/cloud-platform"])
+ }
+
+ source_image_family = (
+ var.source_image_family != "" && var.source_image_family != null
+ ? var.source_image_family
+ : "slurm-gcp-6-8-hpc-rocky-linux-8"
+ )
+ source_image_project = (
+ var.source_image_project != "" && var.source_image_project != null
+ ? var.source_image_project
+ : "projects/schedmd-slurm-public/global/images/family"
+ )
+
+ source_image = (
+ var.source_image != null
+ ? var.source_image
+ : ""
+ )
+
+ slurm_instance_role = var.slurm_instance_role != null ? lower(var.slurm_instance_role) : null
+
+ name_prefix = (
+ local.slurm_instance_role != null
+ ? "${var.slurm_cluster_name}-${local.slurm_instance_role}-${var.name_prefix}"
+ : "${var.slurm_cluster_name}-${var.name_prefix}"
+ )
+
+ total_egress_bandwidth_tier = var.bandwidth_tier == "tier_1_enabled" ? "TIER_1" : "DEFAULT"
+
+ nic_type_map = {
+ platform_default = null
+ virtio_enabled = "VIRTIO_NET"
+ gvnic_enabled = "GVNIC"
+ tier_1_enabled = "GVNIC"
+ }
+ nic_type = lookup(local.nic_type_map, var.bandwidth_tier, null)
+}
+
+########
+# DATA #
+########
+
+data "local_file" "startup" {
+ filename = "${path.module}/files/startup_sh_unlinted"
+}
+
+############
+# TEMPLATE #
+############
+
+module "instance_template" {
+ source = "../internal_instance_template"
+
+ project_id = var.project_id
+
+ # Network
+ can_ip_forward = var.can_ip_forward
+ network_ip = var.network_ip
+ network = var.network
+ nic_type = local.nic_type
+ region = var.region
+ subnetwork_project = var.subnetwork_project
+ subnetwork = var.subnetwork
+ tags = var.tags
+ total_egress_bandwidth_tier = local.total_egress_bandwidth_tier
+ additional_networks = var.additional_networks
+ access_config = var.access_config
+
+ # Instance
+ machine_type = var.machine_type
+ min_cpu_platform = var.min_cpu_platform
+ name_prefix = local.name_prefix
+ gpu = var.gpu
+ service_account = local.service_account
+ shielded_instance_config = var.shielded_instance_config
+ threads_per_core = var.disable_smt ? 1 : null
+ enable_confidential_vm = var.enable_confidential_vm
+ enable_shielded_vm = var.enable_shielded_vm
+ preemptible = var.preemptible
+ spot = var.spot
+ on_host_maintenance = var.on_host_maintenance
+ labels = merge(
+ var.labels,
+ {
+ slurm_cluster_name = var.slurm_cluster_name
+ slurm_instance_role = local.slurm_instance_role
+ },
+ )
+ instance_termination_action = var.termination_action
+
+ # Metadata
+ startup_script = data.local_file.startup.content
+ metadata = merge(
+ var.metadata,
+ {
+ enable-oslogin = upper(var.enable_oslogin)
+ slurm_bucket_path = var.slurm_bucket_path
+ slurm_cluster_name = var.slurm_cluster_name
+ slurm_instance_role = local.slurm_instance_role
+ },
+ )
+
+ # Image
+ source_image_project = local.source_image_project
+ source_image_family = local.source_image_family
+ source_image = local.source_image
+
+ # Disk
+ disk_type = var.disk_type
+ disk_size_gb = var.disk_size_gb
+ auto_delete = var.disk_auto_delete
+ disk_labels = merge(
+ {
+ slurm_cluster_name = var.slurm_cluster_name
+ slurm_instance_role = local.slurm_instance_role
+ },
+ var.disk_labels,
+ )
+ additional_disks = local.additional_disks
+ resource_policies = var.resource_policies
+}
diff --git a/community/modules/internal/slurm-gcp-v6/instance_template/outputs.tf b/community/modules/internal/slurm-gcp-v6/instance_template/outputs.tf
new file mode 100644
index 0000000000..f9d985065d
--- /dev/null
+++ b/community/modules/internal/slurm-gcp-v6/instance_template/outputs.tf
@@ -0,0 +1,38 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+output "instance_template" {
+ description = "Instance template details"
+ value = module.instance_template
+}
+
+output "self_link" {
+ description = "Self_link of instance template"
+ value = module.instance_template.self_link
+}
+
+output "name" {
+ description = "Name of instance template"
+ value = module.instance_template.name
+}
+
+output "tags" {
+ description = "Tags that will be associated with instance(s)"
+ value = module.instance_template.tags
+}
+
+output "service_account" {
+ description = "Service account object, includes email and scopes."
+ value = module.instance_template.service_account
+}
diff --git a/community/modules/internal/slurm-gcp-v6/instance_template/variables.tf b/community/modules/internal/slurm-gcp-v6/instance_template/variables.tf
new file mode 100644
index 0000000000..e8393e9654
--- /dev/null
+++ b/community/modules/internal/slurm-gcp-v6/instance_template/variables.tf
@@ -0,0 +1,386 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+###########
+# GENERAL #
+###########
+
+variable "project_id" {
+ type = string
+ description = "Project ID to create resources in."
+}
+
+variable "on_host_maintenance" {
+ type = string
+ description = "Instance availability Policy"
+ default = "MIGRATE"
+}
+
+variable "labels" {
+ type = map(string)
+ description = "Labels, provided as a map"
+ default = {}
+}
+
+variable "enable_oslogin" {
+ type = bool
+ description = <
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >=0.13.0 |
+| [google](#requirement\_google) | >= 3.88 |
+| [google-beta](#requirement\_google-beta) | >= 6.13.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [google](#provider\_google) | >= 3.88 |
+| [google-beta](#provider\_google-beta) | >= 6.13.0 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [google-beta_google_compute_instance_template.tpl](https://registry.terraform.io/providers/hashicorp/google-beta/latest/docs/resources/google_compute_instance_template) | resource |
+| [google_project.this](https://registry.terraform.io/providers/hashicorp/google/latest/docs/data-sources/project) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [access\_config](#input\_access\_config) | Access configurations, i.e. IPs via which the VM instance can be accessed via the Internet. | <pre>list(object({<br>  nat_ip       = string<br>  network_tier = string<br>}))</pre> | `[]` | no |
+| [additional\_disks](#input\_additional\_disks) | List of maps of additional disks. See https://www.terraform.io/docs/providers/google/r/compute_instance_template#disk_name | <pre>list(object({<br>  disk_name    = string<br>  device_name  = string<br>  auto_delete  = bool<br>  boot         = bool<br>  disk_size_gb = number<br>  disk_type    = string<br>  disk_labels  = map(string)<br>}))</pre> | `[]` | no |
+| [additional\_networks](#input\_additional\_networks) | Additional network interface details for GCE, if any. | <pre>list(object({<br>  network            = string<br>  subnetwork         = string<br>  subnetwork_project = string<br>  network_ip         = string<br>  nic_type           = string<br>  access_config = list(object({<br>    nat_ip       = string<br>    network_tier = string<br>  }))<br>  ipv6_access_config = list(object({<br>    network_tier = string<br>  }))<br>}))</pre> | `[]` | no |
+| [alias\_ip\_range](#input\_alias\_ip\_range) | An array of alias IP ranges for this network interface. Can only be specified for network interfaces on subnet-mode networks.<br>ip\_cidr\_range: The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. At the time of writing only a netmask (e.g. /24) may be supplied, with a CIDR format resulting in an API error.<br>subnetwork\_range\_name: The subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range. If left unspecified, the primary range of the subnetwork will be used. | <pre>object({<br>  ip_cidr_range         = string<br>  subnetwork_range_name = string<br>})</pre> | `null` | no |
+| [auto\_delete](#input\_auto\_delete) | Whether or not the boot disk should be auto-deleted | `string` | `"true"` | no |
+| [automatic\_restart](#input\_automatic\_restart) | (Optional) Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). | `bool` | `true` | no |
+| [can\_ip\_forward](#input\_can\_ip\_forward) | Enable IP forwarding, for NAT instances for example | `string` | `"false"` | no |
+| [disk\_encryption\_key](#input\_disk\_encryption\_key) | The id of the encryption key that is stored in Google Cloud KMS to use to encrypt all the disks on this instance | `string` | `null` | no |
+| [disk\_labels](#input\_disk\_labels) | Labels to be assigned to boot disk, provided as a map | `map(string)` | `{}` | no |
+| [disk\_size\_gb](#input\_disk\_size\_gb) | Boot disk size in GB | `string` | `"100"` | no |
+| [disk\_type](#input\_disk\_type) | Boot disk type, can be either pd-ssd, local-ssd, or pd-standard | `string` | `"pd-standard"` | no |
+| [enable\_confidential\_vm](#input\_enable\_confidential\_vm) | Whether to enable the Confidential VM configuration on the instance. Note that the instance image must support Confidential VMs. See https://cloud.google.com/compute/docs/images | `bool` | `false` | no |
+| [enable\_nested\_virtualization](#input\_enable\_nested\_virtualization) | Defines whether the instance should have nested virtualization enabled. | `bool` | `false` | no |
+| [enable\_shielded\_vm](#input\_enable\_shielded\_vm) | Whether to enable the Shielded VM configuration on the instance. Note that the instance image must support Shielded VMs. See https://cloud.google.com/compute/docs/images | `bool` | `false` | no |
+| [gpu](#input\_gpu) | GPU information. Type and count of GPU to attach to the instance template. See https://cloud.google.com/compute/docs/gpus more details | <pre>object({<br>  type  = string<br>  count = number<br>})</pre> | `null` | no |
+| [instance\_termination\_action](#input\_instance\_termination\_action) | Which action to take when Compute Engine preempts the VM. Value can be: 'STOP', 'DELETE'. The default value is 'STOP'.<br>See https://cloud.google.com/compute/docs/instances/spot for more details. | `string` | `"STOP"` | no |
+| [ipv6\_access\_config](#input\_ipv6\_access\_config) | IPv6 access configurations. Currently a max of 1 IPv6 access configuration is supported. If not specified, the instance will have no external IPv6 Internet access. | <pre>list(object({<br>  network_tier = string<br>}))</pre> | `[]` | no |
+| [labels](#input\_labels) | Labels, provided as a map | `map(string)` | `{}` | no |
+| [machine\_type](#input\_machine\_type) | Machine type to create, e.g. n1-standard-1 | `string` | `"n1-standard-1"` | no |
+| [metadata](#input\_metadata) | Metadata, provided as a map | `map(string)` | `{}` | no |
+| [min\_cpu\_platform](#input\_min\_cpu\_platform) | Specifies a minimum CPU platform. Applicable values are the friendly names of CPU platforms, such as Intel Haswell or Intel Skylake. See the complete list: https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform | `string` | `null` | no |
+| [name\_prefix](#input\_name\_prefix) | Name prefix for the instance template | `string` | `"default-instance-template"` | no |
+| [network](#input\_network) | The name or self\_link of the network to attach this interface to. Use network attribute for Legacy or Auto subnetted networks and subnetwork for custom subnetted networks. | `string` | `""` | no |
+| [network\_ip](#input\_network\_ip) | Private IP address to assign to the instance if desired. | `string` | `""` | no |
+| [nic\_type](#input\_nic\_type) | The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO\_NET. | `string` | `null` | no |
+| [on\_host\_maintenance](#input\_on\_host\_maintenance) | Instance availability Policy | `string` | `"MIGRATE"` | no |
+| [preemptible](#input\_preemptible) | Allow the instance to be preempted | `bool` | `false` | no |
+| [project\_id](#input\_project\_id) | The GCP project ID | `string` | `null` | no |
+| [region](#input\_region) | Region where the instance template should be created. | `string` | `null` | no |
+| [resource\_policies](#input\_resource\_policies) | A list of self\_links of resource policies to attach to the instance.<br>Currently a max of 1 resource policy is supported. | `list(string)` | `null` | no |
+| [service\_account](#input\_service\_account) | Service account to attach to the instance. See https://www.terraform.io/docs/providers/google/r/compute_instance_template#service_account. | <pre>object({<br>  email  = optional(string)<br>  scopes = set(string)<br>})</pre> | n/a | yes |
+| [shielded\_instance\_config](#input\_shielded\_instance\_config) | Not used unless enable\_shielded\_vm is true. Shielded VM configuration for the instance. | <pre>object({<br>  enable_secure_boot          = bool<br>  enable_vtpm                 = bool<br>  enable_integrity_monitoring = bool<br>})</pre> | <pre>{<br>  "enable_integrity_monitoring": true,<br>  "enable_secure_boot": true,<br>  "enable_vtpm": true<br>}</pre> | no |
+| [source\_image](#input\_source\_image) | Source disk image. If neither source\_image nor source\_image\_family is specified, defaults to the latest public CentOS image. | `string` | `""` | no |
+| [source\_image\_family](#input\_source\_image\_family) | Source image family. If neither source\_image nor source\_image\_family is specified, defaults to the latest public CentOS image. | `string` | `"centos-7"` | no |
+| [source\_image\_project](#input\_source\_image\_project) | Project where the source image comes from. The default project contains CentOS images. | `string` | `"centos-cloud"` | no |
+| [spot](#input\_spot) | Provision as a SPOT preemptible instance.<br>See https://cloud.google.com/compute/docs/instances/spot for more details. | `bool` | `false` | no |
+| [stack\_type](#input\_stack\_type) | The stack type for this network interface to identify whether the IPv6 feature is enabled or not. Values are `IPV4_IPV6` or `IPV4_ONLY`. Default behavior is equivalent to IPV4\_ONLY. | `string` | `null` | no |
+| [startup\_script](#input\_startup\_script) | User startup script to run when instances spin up | `string` | `""` | no |
+| [subnetwork](#input\_subnetwork) | The name of the subnetwork to attach this interface to. The subnetwork must exist in the same region this instance will be created in. Either network or subnetwork must be provided. | `string` | `""` | no |
+| [subnetwork\_project](#input\_subnetwork\_project) | The ID of the project in which the subnetwork belongs. If it is not provided, the provider project is used. | `string` | `null` | no |
+| [tags](#input\_tags) | Network tags, provided as a list | `list(string)` | `[]` | no |
+| [threads\_per\_core](#input\_threads\_per\_core) | The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. | `number` | `null` | no |
+| [total\_egress\_bandwidth\_tier](#input\_total\_egress\_bandwidth\_tier) | Network bandwidth tier. Note: machine\_type must be a supported type. Values are 'TIER\_1' or 'DEFAULT'.<br>See https://cloud.google.com/compute/docs/networking/configure-vm-with-high-bandwidth-configuration for details. | `string` | `"DEFAULT"` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [name](#output\_name) | Name of instance template |
+| [self\_link](#output\_self\_link) | Self-link of instance template |
+| [service\_account](#output\_service\_account) | Service account object, includes email and scopes. |
+| [tags](#output\_tags) | Tags that will be associated with instance(s) |
+
diff --git a/community/modules/internal/slurm-gcp-v6/internal_instance_template/main.tf b/community/modules/internal/slurm-gcp-v6/internal_instance_template/main.tf
new file mode 100644
index 0000000000..be1fdd600e
--- /dev/null
+++ b/community/modules/internal/slurm-gcp-v6/internal_instance_template/main.tf
@@ -0,0 +1,204 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#########
+# Locals
+#########
+
+locals {
+ source_image = var.source_image != "" ? var.source_image : "centos-7-v20201112"
+ source_image_family = var.source_image_family != "" ? var.source_image_family : "centos-7"
+ source_image_project = var.source_image_project != "" ? var.source_image_project : "centos-cloud"
+
+ boot_disk = [
+ {
+ source_image = var.source_image != "" ? format("${local.source_image_project}/${local.source_image}") : format("${local.source_image_project}/${local.source_image_family}")
+ disk_size_gb = var.disk_size_gb
+ disk_type = var.disk_type
+ disk_labels = var.disk_labels
+ auto_delete = var.auto_delete
+ boot = "true"
+ },
+ ]
+
+ all_disks = concat(local.boot_disk, var.additional_disks)
+
+ # NOTE: Even if all the shielded_instance_config or confidential_instance_config
+ # values are false, if the config block exists and an unsupported image is chosen,
+ # the apply will fail so we use a single-value array with the default value to
+ # initialize the block only if it is enabled.
+ shielded_vm_configs = var.enable_shielded_vm ? [true] : []
+
+ gpu_enabled = var.gpu != null
+ alias_ip_range_enabled = var.alias_ip_range != null
+ preemptible = var.preemptible || var.spot
+ on_host_maintenance = (
+ local.preemptible || var.enable_confidential_vm || local.gpu_enabled
+ ? "TERMINATE"
+ : var.on_host_maintenance
+ )
+ automatic_restart = (
+ # must be false when preemptible is true
+ local.preemptible ? false : var.automatic_restart
+ )
+
+ nic_type = var.total_egress_bandwidth_tier == "TIER_1" ? "GVNIC" : var.nic_type
+}
+
+data "google_project" "this" {
+ project_id = var.project_id
+}
+
+####################
+# Instance Template
+####################
+resource "google_compute_instance_template" "tpl" {
+ provider = google-beta
+ name_prefix = "${var.name_prefix}-"
+ project = var.project_id
+ machine_type = var.machine_type
+ labels = var.labels
+ metadata = var.metadata
+ tags = var.tags
+ can_ip_forward = var.can_ip_forward
+ metadata_startup_script = var.startup_script
+ region = var.region
+ min_cpu_platform = var.min_cpu_platform
+ resource_policies = var.resource_policies
+
+ service_account {
+ email = coalesce(var.service_account.email, "${data.google_project.this.number}-compute@developer.gserviceaccount.com")
+ scopes = lookup(var.service_account, "scopes", null)
+ }
+
+ dynamic "disk" {
+ for_each = local.all_disks
+ content {
+ auto_delete = lookup(disk.value, "auto_delete", null)
+ boot = lookup(disk.value, "boot", null)
+ device_name = lookup(disk.value, "device_name", null)
+ disk_name = lookup(disk.value, "disk_name", null)
+ disk_size_gb = lookup(disk.value, "disk_size_gb", lookup(disk.value, "disk_type", null) == "local-ssd" ? "375" : null)
+ disk_type = lookup(disk.value, "disk_type", null)
+ interface = lookup(disk.value, "interface", lookup(disk.value, "disk_type", null) == "local-ssd" ? "NVME" : null)
+ mode = lookup(disk.value, "mode", null)
+ source = lookup(disk.value, "source", null)
+ source_image = lookup(disk.value, "source_image", null)
+ type = lookup(disk.value, "disk_type", null) == "local-ssd" ? "SCRATCH" : "PERSISTENT"
+ labels = lookup(disk.value, "disk_type", null) == "local-ssd" ? null : lookup(disk.value, "disk_labels", null)
+
+ dynamic "disk_encryption_key" {
+ for_each = compact([var.disk_encryption_key == null ? null : 1])
+ content {
+ kms_key_self_link = var.disk_encryption_key
+ }
+ }
+ }
+ }
+
+ network_interface {
+ network = var.network
+ subnetwork = var.subnetwork
+ subnetwork_project = var.subnetwork_project
+ network_ip = try(coalesce(var.network_ip), null)
+ nic_type = local.nic_type
+ stack_type = var.stack_type
+ dynamic "access_config" {
+ for_each = var.access_config
+ content {
+ nat_ip = access_config.value.nat_ip
+ network_tier = access_config.value.network_tier
+ }
+ }
+ dynamic "ipv6_access_config" {
+ for_each = var.ipv6_access_config
+ content {
+ network_tier = ipv6_access_config.value.network_tier
+ }
+ }
+ dynamic "alias_ip_range" {
+ for_each = local.alias_ip_range_enabled ? [var.alias_ip_range] : []
+ content {
+ ip_cidr_range = alias_ip_range.value.ip_cidr_range
+ subnetwork_range_name = alias_ip_range.value.subnetwork_range_name
+ }
+ }
+ }
+
+ dynamic "network_interface" {
+ for_each = var.additional_networks
+ content {
+ network = network_interface.value.network
+ subnetwork = network_interface.value.subnetwork
+ subnetwork_project = network_interface.value.subnetwork_project
+ network_ip = try(coalesce(network_interface.value.network_ip), null)
+ nic_type = try(coalesce(network_interface.value.nic_type), null)
+ dynamic "access_config" {
+ for_each = network_interface.value.access_config
+ content {
+ nat_ip = access_config.value.nat_ip
+ network_tier = access_config.value.network_tier
+ }
+ }
+ dynamic "ipv6_access_config" {
+ for_each = network_interface.value.ipv6_access_config
+ content {
+ network_tier = ipv6_access_config.value.network_tier
+ }
+ }
+ }
+ }
+
+ network_performance_config {
+ total_egress_bandwidth_tier = coalesce(var.total_egress_bandwidth_tier, "DEFAULT")
+ }
+
+ lifecycle {
+ create_before_destroy = "true"
+ }
+
+ scheduling {
+ preemptible = local.preemptible
+ provisioning_model = local.preemptible ? "SPOT" : "STANDARD"
+ automatic_restart = local.automatic_restart
+ on_host_maintenance = local.on_host_maintenance
+ instance_termination_action = local.preemptible ? var.instance_termination_action : null
+ }
+
+ advanced_machine_features {
+ enable_nested_virtualization = var.enable_nested_virtualization
+ threads_per_core = var.threads_per_core
+ }
+
+ dynamic "shielded_instance_config" {
+ for_each = local.shielded_vm_configs
+ content {
+ enable_secure_boot = lookup(var.shielded_instance_config, "enable_secure_boot", shielded_instance_config.value)
+ enable_vtpm = lookup(var.shielded_instance_config, "enable_vtpm", shielded_instance_config.value)
+ enable_integrity_monitoring = lookup(var.shielded_instance_config, "enable_integrity_monitoring", shielded_instance_config.value)
+ }
+ }
+
+ confidential_instance_config {
+ enable_confidential_compute = var.enable_confidential_vm
+ }
+
+ dynamic "guest_accelerator" {
+ for_each = local.gpu_enabled ? [var.gpu] : []
+ content {
+ type = guest_accelerator.value.type
+ count = guest_accelerator.value.count
+ }
+ }
+}
diff --git a/community/modules/internal/slurm-gcp-v6/internal_instance_template/outputs.tf b/community/modules/internal/slurm-gcp-v6/internal_instance_template/outputs.tf
new file mode 100644
index 0000000000..69f8d3b98c
--- /dev/null
+++ b/community/modules/internal/slurm-gcp-v6/internal_instance_template/outputs.tf
@@ -0,0 +1,33 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+output "self_link" {
+ description = "Self-link of instance template"
+ value = google_compute_instance_template.tpl.self_link
+}
+
+output "name" {
+ description = "Name of instance template"
+ value = google_compute_instance_template.tpl.name
+}
+
+output "tags" {
+ description = "Tags that will be associated with instance(s)"
+ value = google_compute_instance_template.tpl.tags
+}
+
+output "service_account" {
+ description = "value"
+ value = google_compute_instance_template.tpl.service_account[0]
+}
diff --git a/community/modules/internal/slurm-gcp-v6/internal_instance_template/variables.tf b/community/modules/internal/slurm-gcp-v6/internal_instance_template/variables.tf
new file mode 100644
index 0000000000..874fcf51bf
--- /dev/null
+++ b/community/modules/internal/slurm-gcp-v6/internal_instance_template/variables.tf
@@ -0,0 +1,364 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+variable "project_id" {
+ type = string
+ description = "The GCP project ID"
+ default = null
+}
+
+variable "name_prefix" {
+ description = "Name prefix for the instance template"
+ type = string
+ default = "default-instance-template"
+}
+
+variable "machine_type" {
+ description = "Machine type to create, e.g. n1-standard-1"
+ type = string
+ default = "n1-standard-1"
+}
+
+variable "min_cpu_platform" {
+ description = "Specifies a minimum CPU platform. Applicable values are the friendly names of CPU platforms, such as Intel Haswell or Intel Skylake. See the complete list: https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform"
+ type = string
+ default = null
+}
+
+variable "can_ip_forward" {
+ description = "Enable IP forwarding, for NAT instances for example"
+ type = string
+ default = "false"
+}
+
+variable "tags" {
+ type = list(string)
+ description = "Network tags, provided as a list"
+ default = []
+}
+
+variable "labels" {
+ type = map(string)
+ description = "Labels, provided as a map"
+ default = {}
+}
+
+variable "preemptible" {
+ type = bool
+ description = "Allow the instance to be preempted"
+ default = false
+}
+
+variable "spot" {
+ description = <<-EOD
+ Provision as a SPOT preemptible instance.
+ See https://cloud.google.com/compute/docs/instances/spot for more details.
+ EOD
+ type = bool
+ default = false
+}
+
+variable "instance_termination_action" {
+ description = <<-EOD
+ Which action to take when Compute Engine preempts the VM. Value can be: 'STOP', 'DELETE'. The default value is 'STOP'.
+ See https://cloud.google.com/compute/docs/instances/spot for more details.
+ EOD
+ type = string
+ default = "STOP"
+}
+
+variable "automatic_restart" {
+ type = bool
+ description = "(Optional) Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user)."
+ default = true
+}
+
+variable "on_host_maintenance" {
+ type = string
+ description = "Instance availability Policy"
+ default = "MIGRATE"
+}
+
+variable "region" {
+ type = string
+ description = "Region where the instance template should be created."
+ default = null
+}
+
+variable "enable_nested_virtualization" {
+ type = bool
+ description = "Defines whether the instance should have nested virtualization enabled."
+ default = false
+}
+
+variable "threads_per_core" {
+ description = "The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1."
+ type = number
+ default = null
+}
+
+#######
+# disk
+#######
+variable "source_image" {
+ description = "Source disk image. If neither source_image nor source_image_family is specified, defaults to the latest public CentOS image."
+ type = string
+ default = ""
+}
+
+variable "source_image_family" {
+ description = "Source image family. If neither source_image nor source_image_family is specified, defaults to the latest public CentOS image."
+ type = string
+ default = "centos-7"
+}
+
+variable "source_image_project" {
+ description = "Project where the source image comes from. The default project contains CentOS images."
+ type = string
+ default = "centos-cloud"
+}
+
+variable "disk_size_gb" {
+ description = "Boot disk size in GB"
+ type = string
+ default = "100"
+}
+
+variable "disk_type" {
+ description = "Boot disk type, can be either pd-ssd, local-ssd, or pd-standard"
+ type = string
+ default = "pd-standard"
+}
+
+variable "disk_labels" {
+ description = "Labels to be assigned to boot disk, provided as a map"
+ type = map(string)
+ default = {}
+}
+
+variable "disk_encryption_key" {
+ description = "The id of the encryption key that is stored in Google Cloud KMS to use to encrypt all the disks on this instance"
+ type = string
+ default = null
+}
+
+variable "auto_delete" {
+ description = "Whether or not the boot disk should be auto-deleted"
+ type = string
+ default = "true"
+}
+
+variable "additional_disks" {
+ description = "List of maps of additional disks. See https://www.terraform.io/docs/providers/google/r/compute_instance_template#disk_name"
+ type = list(object({
+ disk_name = string
+ device_name = string
+ auto_delete = bool
+ boot = bool
+ disk_size_gb = number
+ disk_type = string
+ disk_labels = map(string)
+ }))
+ default = []
+}
+
+####################
+# network_interface
+####################
+variable "network" {
+ description = "The name or self_link of the network to attach this interface to. Use network attribute for Legacy or Auto subnetted networks and subnetwork for custom subnetted networks."
+ type = string
+ default = ""
+}
+
+variable "nic_type" {
+ description = "The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET."
+ type = string
+ default = null
+}
+
+variable "subnetwork" {
+ description = "The name of the subnetwork to attach this interface to. The subnetwork must exist in the same region this instance will be created in. Either network or subnetwork must be provided."
+ type = string
+ default = ""
+}
+
+variable "subnetwork_project" {
+ description = "The ID of the project in which the subnetwork belongs. If it is not provided, the provider project is used."
+ type = string
+ default = null
+}
+
+variable "network_ip" {
+ description = "Private IP address to assign to the instance if desired."
+ type = string
+ default = ""
+}
+
+variable "stack_type" {
+ description = "The stack type for this network interface to identify whether the IPv6 feature is enabled or not. Values are `IPV4_IPV6` or `IPV4_ONLY`. Default behavior is equivalent to IPV4_ONLY."
+ type = string
+ default = null
+}
+
+variable "additional_networks" {
+ description = "Additional network interface details for GCE, if any."
+ default = []
+ type = list(object({
+ network = string
+ subnetwork = string
+ subnetwork_project = string
+ network_ip = string
+ nic_type = string
+ access_config = list(object({
+ nat_ip = string
+ network_tier = string
+ }))
+ ipv6_access_config = list(object({
+ network_tier = string
+ }))
+ }))
+}
+
+variable "total_egress_bandwidth_tier" {
+ description = <
+
+- [Module: Slurm Nodeset (TPU)](#module-slurm-nodeset-tpu)
+ - [Overview](#overview)
+ - [Module API](#module-api)
+
+
+
+## Overview
+
+This is a submodule of [slurm_cluster](../../../slurm_cluster/README.md). It
+creates a Slurm TPU nodeset for [slurm_partition](../slurm_partition/README.md).
+
+## Module API
+
+For the terraform module API reference, please see
+[README_TF.md](./README_TF.md).
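+
+A minimal invocation sketch is shown below (the module source path and values are
+illustrative; see the [Inputs](#inputs) table for the full set of required variables):
+
+```hcl
+module "nodeset_tpu" {
+  source = "./nodeset_tpu" # illustrative path
+
+  nodeset_name = "tpuv4"
+  project_id   = var.project_id
+  subnetwork   = var.subnetwork
+  zone         = "us-central2-b" # pick a zone that offers TPU-vms
+  tf_version   = "2.14.0"        # TPU runtime TensorFlow version
+  node_type    = "v4-8"
+}
+```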
+
+
+Copyright (C) SchedMD LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | ~> 1.2 |
+| [google](#requirement\_google) | >= 3.53 |
+| [null](#requirement\_null) | ~> 3.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [google](#provider\_google) | >= 3.53 |
+| [null](#provider\_null) | ~> 3.0 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [null_resource.nodeset_tpu](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [google_compute_subnetwork.nodeset_subnetwork](https://registry.terraform.io/providers/hashicorp/google/latest/docs/data-sources/compute_subnetwork) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [accelerator\_config](#input\_accelerator\_config) | Nodeset accelerator config, see https://cloud.google.com/tpu/docs/supported-tpu-configurations for details. | object({
topology = string
version = string
})
| {
"topology": "",
"version": ""
}
| no |
+| [data\_disks](#input\_data\_disks) | The data disks to include in the TPU node | `list(string)` | `[]` | no |
+| [docker\_image](#input\_docker\_image) | The Docker image to use in the TPU vms. If unset, it defaults to the public SchedMD slurm-gcp-6-8 TPU image tagged with the configured tf\_version. | `string` | `""` | no |
+| [enable\_public\_ip](#input\_enable\_public\_ip) | Enables a public IP address to access the Internet. | `bool` | `false` | no |
+| [network\_storage](#input\_network\_storage) | An array of network attached storage mounts to be configured on nodes. | list(object({
server_ip = string,
remote_mount = string,
local_mount = string,
fs_type = string,
mount_options = string,
}))
| `[]` | no |
+| [node\_count\_dynamic\_max](#input\_node\_count\_dynamic\_max) | Maximum number of nodes allowed in this partition to be created dynamically. | `number` | `0` | no |
+| [node\_count\_static](#input\_node\_count\_static) | Number of nodes to be statically created. | `number` | `0` | no |
+| [node\_type](#input\_node\_type) | Specify a node type on which to base the vm configuration. Not needed if you use accelerator\_config | `string` | `null` | no |
+| [nodeset\_name](#input\_nodeset\_name) | Name of Slurm nodeset. | `string` | n/a | yes |
+| [preemptible](#input\_preemptible) | Specify whether TPU-vms in this nodeset are preemptible, see https://cloud.google.com/tpu/docs/preemptible for details. | `bool` | `false` | no |
+| [preserve\_tpu](#input\_preserve\_tpu) | Specify whether TPU-vms are preserved on suspend: if set to true, the vm is stopped on suspend; if false, it is deleted. | `bool` | `true` | no |
+| [project\_id](#input\_project\_id) | Project ID to create resources in. | `string` | n/a | yes |
+| [reserved](#input\_reserved) | Specify whether TPU-vms in this nodeset are created under a reservation. | `bool` | `false` | no |
+| [service\_account](#input\_service\_account) | Service account to attach to the TPU-vm.
If none is given, the default service account and scopes will be used. | object({
email = string
scopes = set(string)
})
| `null` | no |
+| [subnetwork](#input\_subnetwork) | The name of the subnetwork to attach the TPU-vm of this nodeset to. | `string` | n/a | yes |
+| [tf\_version](#input\_tf\_version) | Nodeset Tensorflow version, see https://cloud.google.com/tpu/docs/supported-tpu-configurations#tpu_vm for details. | `string` | n/a | yes |
+| [zone](#input\_zone) | Nodes will only be created in this zone. Check https://cloud.google.com/tpu/docs/regions-zones to find zones that offer TPU-vms. | `string` | n/a | yes |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [nodeset](#output\_nodeset) | Nodeset details. |
+| [nodeset\_name](#output\_nodeset\_name) | Nodeset name. |
+| [service\_account](#output\_service\_account) | Service account object, includes email and scopes. |
+
diff --git a/community/modules/internal/slurm-gcp-v6/nodeset_tpu/main.tf b/community/modules/internal/slurm-gcp-v6/nodeset_tpu/main.tf
new file mode 100644
index 0000000000..1bb0add729
--- /dev/null
+++ b/community/modules/internal/slurm-gcp-v6/nodeset_tpu/main.tf
@@ -0,0 +1,121 @@
+/**
+ * Copyright (C) SchedMD LLC.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+###########
+# NODESET #
+###########
+
+locals {
+ node_conf_hw = {
+ Mem334CPU96 = {
+ CPUs = 96
+ Boards = 1
+ Sockets = 2
+ CoresPerSocket = 24
+ ThreadsPerCore = 2
+ RealMemory = 307200
+ }
+ Mem400CPU240 = {
+ CPUs = 240
+ Boards = 1
+ Sockets = 2
+ CoresPerSocket = 60
+ ThreadsPerCore = 2
+ RealMemory = 400000
+ }
+ }
+ node_conf_mappings = {
+ "v2" = local.node_conf_hw.Mem334CPU96
+ "v3" = local.node_conf_hw.Mem334CPU96
+ "v4" = local.node_conf_hw.Mem400CPU240
+ }
+ simple_nodes = ["v2-8", "v3-8", "v4-8"]
+}
+
+locals {
+ snetwork = data.google_compute_subnetwork.nodeset_subnetwork.name
+ region = join("-", slice(split("-", var.zone), 0, 2))
+ tpu_fam = var.accelerator_config.version != "" ? lower(var.accelerator_config.version) : split("-", var.node_type)[0]
+ # If a subnetwork is specified but it does not have private_ip_google_access, the TPU needs public IPs.
+ # If no subnetwork is specified, the default one is used; it also lacks private_ip_google_access, so public IPs are needed there too.
+ pub_need = !data.google_compute_subnetwork.nodeset_subnetwork.private_ip_google_access
+ can_preempt = var.node_type != null ? contains(local.simple_nodes, var.node_type) : false
+ nodeset_tpu = {
+ nodeset_name = var.nodeset_name
+ node_conf = local.node_conf_mappings[local.tpu_fam]
+ node_type = var.node_type
+ accelerator_config = var.accelerator_config
+ tf_version = var.tf_version
+ preemptible = local.can_preempt ? var.preemptible : false
+ reserved = var.reserved
+ node_count_dynamic_max = var.node_count_dynamic_max
+ node_count_static = var.node_count_static
+ enable_public_ip = var.enable_public_ip
+ zone = var.zone
+ service_account = var.service_account != null ? var.service_account : local.service_account
+ preserve_tpu = local.can_preempt ? var.preserve_tpu : false
+ data_disks = var.data_disks
+ docker_image = var.docker_image != "" ? var.docker_image : "us-docker.pkg.dev/schedmd-slurm-public/tpu/slurm-gcp-6-8:tf-${var.tf_version}"
+ subnetwork = local.snetwork
+ network_storage = var.network_storage
+ }
+
+ service_account = {
+ email = try(var.service_account.email, null)
+ scopes = try(var.service_account.scopes, ["https://www.googleapis.com/auth/cloud-platform"])
+ }
+}
+
+data "google_compute_subnetwork" "nodeset_subnetwork" {
+ name = var.subnetwork
+ region = local.region
+ project = var.project_id
+
+ self_link = (
+ length(regexall("/projects/([^/]*)", var.subnetwork)) > 0
+ && length(regexall("/regions/([^/]*)", var.subnetwork)) > 0
+ ? var.subnetwork
+ : null
+ )
+}
+
+resource "null_resource" "nodeset_tpu" {
+ triggers = {
+ nodeset = sha256(jsonencode(local.nodeset_tpu))
+ }
+ lifecycle {
+ precondition {
+ condition = sum([var.node_count_dynamic_max, var.node_count_static]) > 0
+ error_message = "Sum of node_count_dynamic_max and node_count_static must be > 0."
+ }
+ precondition {
+ condition = !(var.preemptible && var.reserved)
+ error_message = "Nodeset cannot be preemptible and reserved at the same time."
+ }
+ precondition {
+ condition = !(var.subnetwork == null && !var.enable_public_ip)
+ error_message = "Using the default subnetwork for the TPU nodeset requires enable_public_ip set to true."
+ }
+ precondition {
+ condition = !(var.subnetwork != null && (local.pub_need && !var.enable_public_ip))
+ error_message = "The subnetwork specified does not have Private Google Access enabled. This is required when enable_public_ip is set to false."
+ }
+ precondition {
+ condition = !(var.node_type == null && (var.accelerator_config.topology == "" && var.accelerator_config.version == ""))
+ error_message = "Either a node type or an accelerator_config must be provided."
+ }
+ }
+}
diff --git a/community/modules/internal/slurm-gcp-v6/nodeset_tpu/outputs.tf b/community/modules/internal/slurm-gcp-v6/nodeset_tpu/outputs.tf
new file mode 100644
index 0000000000..fce700d567
--- /dev/null
+++ b/community/modules/internal/slurm-gcp-v6/nodeset_tpu/outputs.tf
@@ -0,0 +1,30 @@
+/**
+ * Copyright (C) SchedMD LLC.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+output "nodeset_name" {
+ description = "Nodeset name."
+ value = local.nodeset_tpu.nodeset_name
+}
+
+output "nodeset" {
+ description = "Nodeset details."
+ value = local.nodeset_tpu
+}
+
+output "service_account" {
+ description = "Service account object, includes email and scopes."
+ value = local.service_account
+}
diff --git a/community/modules/internal/slurm-gcp-v6/nodeset_tpu/variables.tf b/community/modules/internal/slurm-gcp-v6/nodeset_tpu/variables.tf
new file mode 100644
index 0000000000..a174f4afeb
--- /dev/null
+++ b/community/modules/internal/slurm-gcp-v6/nodeset_tpu/variables.tf
@@ -0,0 +1,158 @@
+/**
+ * Copyright (C) SchedMD LLC.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+variable "nodeset_name" {
+ description = "Name of Slurm nodeset."
+ type = string
+
+ validation {
+ condition = can(regex("^[a-z](?:[a-z0-9]{0,14})$", var.nodeset_name))
+ error_message = "Variable 'nodeset_name' must be a match of regex '^[a-z](?:[a-z0-9]{0,14})$'."
+ }
+}
+
+variable "node_type" {
+ description = "Specify a node type to base the vm configuration upon it. Not needed if you use accelerator_config"
+ type = string
+ default = null
+}
+
+variable "accelerator_config" {
+ description = "Nodeset accelerator config, see https://cloud.google.com/tpu/docs/supported-tpu-configurations for details."
+ type = object({
+ topology = string
+ version = string
+ })
+ default = {
+ topology = ""
+ version = ""
+ }
+ validation {
+ condition = var.accelerator_config.version == "" ? true : contains(["V2", "V3", "V4"], upper(var.accelerator_config.version))
+ error_message = "accelerator_config.version must be one of [\"V2\", \"V3\", \"V4\"]"
+ }
+ validation {
+ condition = var.accelerator_config.topology == "" ? true : can(regex("^[1-9]x[1-9](x[1-9])?$", var.accelerator_config.topology))
+ error_message = "accelerator_config.topology must be a valid topology, like 2x2 4x4x4 4x2x4 etc..."
+ }
+}
+
+variable "docker_image" {
+ description = "The gcp container registry id docker image to use in the TPU vms, it defaults to gcr.io/schedmd-slurm-public/tpu:slurm-gcp-6-8-tf-"
+ type = string
+ default = ""
+}
+
+variable "tf_version" {
+ description = "Nodeset Tensorflow version, see https://cloud.google.com/tpu/docs/supported-tpu-configurations#tpu_vm for details."
+ type = string
+}
+
+variable "zone" {
+ description = "Nodes will only be created in this zone. Check https://cloud.google.com/tpu/docs/regions-zones to get zones with TPU-vm in it."
+ type = string
+
+ validation {
+ condition = can(coalesce(var.zone))
+ error_message = "Zone cannot be null or empty."
+ }
+}
+
+variable "preemptible" {
+ description = "Specify whether TPU-vms in this nodeset are preemtible, see https://cloud.google.com/tpu/docs/preemptible for details."
+ type = bool
+ default = false
+}
+
+variable "reserved" {
+ description = "Specify whether TPU-vms in this nodeset are created under a reservation."
+ type = bool
+ default = false
+}
+
+variable "preserve_tpu" {
+ description = "Specify whether TPU-vms will get preserve on suspend, if set to true, on suspend vm is stopped, on false it gets deleted"
+ type = bool
+ default = true
+}
+
+variable "node_count_static" {
+ description = "Number of nodes to be statically created."
+ type = number
+ default = 0
+
+ validation {
+ condition = var.node_count_static >= 0
+ error_message = "Value must be >= 0."
+ }
+}
+
+variable "node_count_dynamic_max" {
+ description = "Maximum number of nodes allowed in this partition to be created dynamically."
+ type = number
+ default = 0
+
+ validation {
+ condition = var.node_count_dynamic_max >= 0
+ error_message = "Value must be >= 0."
+ }
+}
+
+variable "enable_public_ip" {
+ description = "Enables IP address to access the Internet."
+ type = bool
+ default = false
+}
+
+variable "data_disks" {
+ type = list(string)
+ description = "The data disks to include in the TPU node"
+ default = []
+}
+
+variable "subnetwork" {
+ description = "The name of the subnetwork to attach the TPU-vm of this nodeset to."
+ type = string
+}
+
+variable "service_account" {
+ type = object({
+ email = string
+ scopes = set(string)
+ })
+ description = < [daos\_network\_storage\_scripts](#module\_daos\_network\_storage\_scripts) | ../../../../modules/scripts/startup-script | n/a |
| [nodeset\_cleanup](#module\_nodeset\_cleanup) | ./modules/cleanup_compute | n/a |
| [nodeset\_cleanup\_tpu](#module\_nodeset\_cleanup\_tpu) | ./modules/cleanup_tpu | n/a |
-| [slurm\_controller\_template](#module\_slurm\_controller\_template) | github.com/GoogleCloudPlatform/slurm-gcp.git//terraform/slurm_cluster/modules/slurm_instance_template | 6.8.6 |
+| [slurm\_controller\_template](#module\_slurm\_controller\_template) | ../../internal/slurm-gcp-v6/instance_template | n/a |
| [slurm\_files](#module\_slurm\_files) | ./modules/slurm_files | n/a |
-| [slurm\_login\_instance](#module\_slurm\_login\_instance) | github.com/GoogleCloudPlatform/slurm-gcp.git//terraform/slurm_cluster/modules/_slurm_instance | 6.8.6 |
-| [slurm\_login\_template](#module\_slurm\_login\_template) | github.com/GoogleCloudPlatform/slurm-gcp.git//terraform/slurm_cluster/modules/slurm_instance_template | 6.8.6 |
-| [slurm\_nodeset\_template](#module\_slurm\_nodeset\_template) | github.com/GoogleCloudPlatform/slurm-gcp.git//terraform/slurm_cluster/modules/slurm_instance_template | 6.8.6 |
-| [slurm\_nodeset\_tpu](#module\_slurm\_nodeset\_tpu) | github.com/GoogleCloudPlatform/slurm-gcp.git//terraform/slurm_cluster/modules/slurm_nodeset_tpu | 6.8.6 |
+| [slurm\_login\_instance](#module\_slurm\_login\_instance) | ../../internal/slurm-gcp-v6/instance | n/a |
+| [slurm\_login\_template](#module\_slurm\_login\_template) | ../../internal/slurm-gcp-v6/instance_template | n/a |
+| [slurm\_nodeset\_template](#module\_slurm\_nodeset\_template) | ../../internal/slurm-gcp-v6/instance_template | n/a |
+| [slurm\_nodeset\_tpu](#module\_slurm\_nodeset\_tpu) | ../../internal/slurm-gcp-v6/nodeset_tpu | n/a |
## Resources
@@ -336,7 +336,7 @@ limitations under the License.
| [metadata](#input\_metadata) | Metadata, provided as a map. | `map(string)` | `{}` | no |
| [min\_cpu\_platform](#input\_min\_cpu\_platform) | Specifies a minimum CPU platform. Applicable values are the friendly names of
CPU platforms, such as Intel Haswell or Intel Skylake. See the complete list:
https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform | `string` | `null` | no |
| [network\_storage](#input\_network\_storage) | An array of network attached storage mounts to be configured on all instances. | list(object({
server_ip = string,
remote_mount = string,
local_mount = string,
fs_type = string,
mount_options = string,
client_install_runner = optional(map(string))
mount_runner = optional(map(string))
}))
| `[]` | no |
-| [nodeset](#input\_nodeset) | Define nodesets, as a list. | list(object({
node_count_static = optional(number, 0)
node_count_dynamic_max = optional(number, 1)
node_conf = optional(map(string), {})
nodeset_name = string
additional_disks = optional(list(object({
disk_name = optional(string)
device_name = optional(string)
disk_size_gb = optional(number)
disk_type = optional(string)
disk_labels = optional(map(string), {})
auto_delete = optional(bool, true)
boot = optional(bool, false)
})), [])
bandwidth_tier = optional(string, "platform_default")
can_ip_forward = optional(bool, false)
disable_smt = optional(bool, false)
disk_auto_delete = optional(bool, true)
disk_labels = optional(map(string), {})
disk_size_gb = optional(number)
disk_type = optional(string)
enable_confidential_vm = optional(bool, false)
enable_placement = optional(bool, false)
enable_oslogin = optional(bool, true)
enable_shielded_vm = optional(bool, false)
enable_maintenance_reservation = optional(bool, false)
enable_opportunistic_maintenance = optional(bool, false)
gpu = optional(object({
count = number
type = string
}))
dws_flex = object({
enabled = bool
max_run_duration = number
use_job_duration = bool
})
labels = optional(map(string), {})
machine_type = optional(string)
maintenance_interval = optional(string)
instance_properties_json = string
metadata = optional(map(string), {})
min_cpu_platform = optional(string)
network_tier = optional(string, "STANDARD")
network_storage = optional(list(object({
server_ip = string
remote_mount = string
local_mount = string
fs_type = string
mount_options = string
client_install_runner = optional(map(string))
mount_runner = optional(map(string))
})), [])
on_host_maintenance = optional(string)
preemptible = optional(bool, false)
region = optional(string)
service_account = optional(object({
email = optional(string)
scopes = optional(list(string), ["https://www.googleapis.com/auth/cloud-platform"])
}))
shielded_instance_config = optional(object({
enable_integrity_monitoring = optional(bool, true)
enable_secure_boot = optional(bool, true)
enable_vtpm = optional(bool, true)
}))
source_image_family = optional(string)
source_image_project = optional(string)
source_image = optional(string)
subnetwork_self_link = string
additional_networks = optional(list(object({
network = string
subnetwork = string
subnetwork_project = string
network_ip = string
nic_type = string
stack_type = string
queue_count = number
access_config = list(object({
nat_ip = string
network_tier = string
}))
ipv6_access_config = list(object({
network_tier = string
}))
alias_ip_range = list(object({
ip_cidr_range = string
subnetwork_range_name = string
}))
})))
access_config = optional(list(object({
nat_ip = string
network_tier = string
})))
spot = optional(bool, false)
tags = optional(list(string), [])
termination_action = optional(string)
reservation_name = optional(string)
startup_script = optional(list(object({
filename = string
content = string })), [])
zone_target_shape = string
zone_policy_allow = set(string)
zone_policy_deny = set(string)
}))
| `[]` | no |
+| [nodeset](#input\_nodeset) | Define nodesets, as a list. | list(object({
node_count_static = optional(number, 0)
node_count_dynamic_max = optional(number, 1)
node_conf = optional(map(string), {})
nodeset_name = string
additional_disks = optional(list(object({
disk_name = optional(string)
device_name = optional(string)
disk_size_gb = optional(number)
disk_type = optional(string)
disk_labels = optional(map(string), {})
auto_delete = optional(bool, true)
boot = optional(bool, false)
})), [])
bandwidth_tier = optional(string, "platform_default")
can_ip_forward = optional(bool, false)
disable_smt = optional(bool, false)
disk_auto_delete = optional(bool, true)
disk_labels = optional(map(string), {})
disk_size_gb = optional(number)
disk_type = optional(string)
enable_confidential_vm = optional(bool, false)
enable_placement = optional(bool, false)
enable_oslogin = optional(bool, true)
enable_shielded_vm = optional(bool, false)
enable_maintenance_reservation = optional(bool, false)
enable_opportunistic_maintenance = optional(bool, false)
gpu = optional(object({
count = number
type = string
}))
dws_flex = object({
enabled = bool
max_run_duration = number
use_job_duration = bool
})
labels = optional(map(string), {})
machine_type = optional(string)
maintenance_interval = optional(string)
instance_properties_json = string
metadata = optional(map(string), {})
min_cpu_platform = optional(string)
network_tier = optional(string, "STANDARD")
network_storage = optional(list(object({
server_ip = string
remote_mount = string
local_mount = string
fs_type = string
mount_options = string
client_install_runner = optional(map(string))
mount_runner = optional(map(string))
})), [])
on_host_maintenance = optional(string)
preemptible = optional(bool, false)
region = optional(string)
service_account = optional(object({
email = optional(string)
scopes = optional(list(string), ["https://www.googleapis.com/auth/cloud-platform"])
}))
shielded_instance_config = optional(object({
enable_integrity_monitoring = optional(bool, true)
enable_secure_boot = optional(bool, true)
enable_vtpm = optional(bool, true)
}))
source_image_family = optional(string)
source_image_project = optional(string)
source_image = optional(string)
subnetwork_self_link = string
additional_networks = optional(list(object({
network = string
subnetwork = string
subnetwork_project = string
network_ip = string
nic_type = string
stack_type = string
queue_count = number
access_config = list(object({
nat_ip = string
network_tier = string
}))
ipv6_access_config = list(object({
network_tier = string
}))
alias_ip_range = list(object({
ip_cidr_range = string
subnetwork_range_name = string
}))
})))
access_config = optional(list(object({
nat_ip = string
network_tier = string
})))
spot = optional(bool, false)
tags = optional(list(string), [])
termination_action = optional(string)
reservation_name = optional(string)
future_reservation = string
startup_script = optional(list(object({
filename = string
content = string })), [])
zone_target_shape = string
zone_policy_allow = set(string)
zone_policy_deny = set(string)
}))
| `[]` | no |
| [nodeset\_dyn](#input\_nodeset\_dyn) | Defines dynamic nodesets, as a list. | list(object({
nodeset_name = string
nodeset_feature = string
}))
| `[]` | no |
| [nodeset\_tpu](#input\_nodeset\_tpu) | Define TPU nodesets, as a list. | list(object({
node_count_static = optional(number, 0)
node_count_dynamic_max = optional(number, 5)
nodeset_name = string
enable_public_ip = optional(bool, false)
node_type = string
accelerator_config = optional(object({
topology = string
version = string
}), {
topology = ""
version = ""
})
tf_version = string
preemptible = optional(bool, false)
preserve_tpu = optional(bool, false)
zone = string
data_disks = optional(list(string), [])
docker_image = optional(string, "")
network_storage = optional(list(object({
server_ip = string
remote_mount = string
local_mount = string
fs_type = string
mount_options = string
client_install_runner = optional(map(string))
mount_runner = optional(map(string))
})), [])
subnetwork = string
service_account = optional(object({
email = optional(string)
scopes = optional(list(string), ["https://www.googleapis.com/auth/cloud-platform"])
}))
project_id = string
reserved = optional(string, false)
}))
| `[]` | no |
| [on\_host\_maintenance](#input\_on\_host\_maintenance) | Instance availability Policy. | `string` | `"MIGRATE"` | no |
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/controller.tf b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/controller.tf
index e43b5e5ae2..fa28b8728f 100644
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/controller.tf
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/controller.tf
@@ -43,7 +43,7 @@ locals {
# INSTANCE TEMPLATE
module "slurm_controller_template" {
- source = "github.com/GoogleCloudPlatform/slurm-gcp.git//terraform/slurm_cluster/modules/slurm_instance_template?ref=6.8.6"
+ source = "../../internal/slurm-gcp-v6/instance_template"
project_id = var.project_id
region = var.region
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/login.tf b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/login.tf
index f167ad9947..1f492a1402 100644
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/login.tf
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/login.tf
@@ -14,7 +14,7 @@
# TEMPLATE
module "slurm_login_template" {
- source = "github.com/GoogleCloudPlatform/slurm-gcp.git//terraform/slurm_cluster/modules/slurm_instance_template?ref=6.8.6"
+ source = "../../internal/slurm-gcp-v6/instance_template"
for_each = { for x in var.login_nodes : x.name_prefix => x }
@@ -56,7 +56,7 @@ module "slurm_login_template" {
# INSTANCE
module "slurm_login_instance" {
- source = "github.com/GoogleCloudPlatform/slurm-gcp.git//terraform/slurm_cluster/modules/_slurm_instance?ref=6.8.6"
+ source = "../../internal/slurm-gcp-v6/instance"
for_each = { for x in var.login_nodes : x.name_prefix => x }
access_config = each.value.access_config
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/conf.py b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/conf.py
index 7ee06332f1..c4bb37c579 100755
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/conf.py
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/conf.py
@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import List, Optional, Iterable, Dict, Set
+from typing import List, Optional, Iterable, Dict, Set, Tuple
from itertools import chain
from collections import defaultdict
import json
@@ -436,6 +436,10 @@ def __init__(
self.tpu_nodes = set(tpu_nodes or [])
+ @classmethod
+ def path(cls, lkp: util.Lookup) -> Path:
+ return lkp.etc_dir / "cloud_topology.summary.json"
+
@classmethod
def loads(cls, s: str) -> "TopologySummary":
d = json.loads(s)
@@ -445,6 +449,13 @@ def loads(cls, s: str) -> "TopologySummary":
tpu_nodes=d.get("tpu_nodes"),
)
+ @classmethod
+ def load(cls, lkp: util.Lookup) -> "TopologySummary":
+ p = cls.path(lkp)
+ if not p.exists():
+ return cls() # Return empty instance
+ return cls.loads(p.read_text())
+
def dumps(self) -> str:
return json.dumps(
{
@@ -454,6 +465,9 @@ def dumps(self) -> str:
},
indent=2)
+ def dump(self, lkp: util.Lookup) -> None:
+ TopologySummary.path(lkp).write_text(self.dumps())
+
def _nodenames(self) -> Set[str]:
return set(self.physical_host) | self.down_nodes | self.tpu_nodes
@@ -572,8 +586,7 @@ def gen_topology(lkp: util.Lookup) -> TopologyBuilder:
add_nodeset_topology(ns, bldr, lkp)
return bldr
-
-def gen_topology_conf(lkp: util.Lookup) -> bool:
+def gen_topology_conf(lkp: util.Lookup) -> Tuple[bool, TopologySummary]:
"""
Generates slurm topology.conf.
Returns whether the topology.conf got updated.
@@ -581,7 +594,6 @@ def gen_topology_conf(lkp: util.Lookup) -> bool:
topo = gen_topology(lkp).compress()
conf_file = lkp.etc_dir / "cloud_topology.conf"
-
with open(conf_file, "w") as f:
f.writelines(FILE_PREAMBLE + "\n")
for line in topo.render_conf_lines():
@@ -589,13 +601,8 @@ def gen_topology_conf(lkp: util.Lookup) -> bool:
f.write("\n")
f.write("\n")
- summary_file = lkp.etc_dir / "cloud_topology.summary.json"
- prev_summary = TopologySummary()
- if summary_file.exists():
- prev_summary = TopologySummary.loads(summary_file.read_text())
- summary_file.write_text(topo.summary.dumps())
-
- return topo.summary.requires_reconfigure(prev_summary)
+ prev_summary = TopologySummary.load(lkp)
+ return topo.summary.requires_reconfigure(prev_summary), topo.summary
def install_topology_conf(lkp: util.Lookup) -> None:
conf_file = lkp.etc_dir / "cloud_topology.conf"
@@ -619,5 +626,6 @@ def gen_controller_configs(lkp: util.Lookup) -> None:
install_jobsubmit_lua(lkp)
if topology_plugin(lkp) == TOPOLOGY_PLUGIN_TREE:
- gen_topology_conf(lkp)
+ _, summary = gen_topology_conf(lkp)
+ summary.dump(lkp)
install_topology_conf(lkp)
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/get_tpu_vmcount.py b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/get_tpu_vmcount.py
index 354ec81ad3..1557d6020b 100644
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/get_tpu_vmcount.py
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/get_tpu_vmcount.py
@@ -57,7 +57,7 @@ def get_vmcount_of_tpu_part(part):
valid = PART_INVALID
break
else:
- if util.part_is_tpu(part):
+ if util.lookup().partition_is_tpu(part):
vmcount = get_vmcount_of_tpu_part(part)
if vmcount == -1:
valid = DIFF_VMCOUNTS_SAME_PART
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/resume.py b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/resume.py
index 87ec84bd24..de2b358882 100755
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/resume.py
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/resume.py
@@ -15,22 +15,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import List, Optional, Dict
+from typing import List, Optional
import argparse
-import collections
from datetime import timedelta
import shlex
import json
import logging
import os
import yaml
-from itertools import chain
+import collections
from pathlib import Path
+from dataclasses import dataclass
import util
from util import (
chunked,
- dirs,
ensure_execute,
execute_with_futures,
get_insert_operations,
@@ -38,27 +37,52 @@
map_with_futures,
run,
separate,
- to_hostlist,
to_hostlist_fast,
trim_self_link,
wait_for_operation,
)
-from util import lookup, NSDict, TPU
+from util import lookup, NSDict
import slurm_gcp_plugins
log = logging.getLogger()
-
-global_resume_data = None
-
-PLACEMENT_MAX_CNT = 150
+PLACEMENT_MAX_CNT = 1500
# Placement group needs to be the same for an entire bulk_insert hence
# if placement is used the actual BULK_INSERT_LIMIT will be
# max([1000, PLACEMENT_MAX_CNT])
BULK_INSERT_LIMIT = 5000
+@dataclass(frozen=True)
+class ResumeJobData:
+ job_id: int
+ partition: str
+ nodes_alloc: List[str]
+
+@dataclass(frozen=True)
+class ResumeData:
+ jobs: List[ResumeJobData]
+
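+# The file referenced by SLURM_RESUME_FILE is JSON written by Slurm; only the fields
+# read below are used here. Illustrative shape:
+#   {"jobs": [{"job_id": 123, "partition": "compute", "nodes_alloc": "cluster-compute-[0-3]"}]}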
+def get_resume_file_data() -> Optional[ResumeData]:
+ if not (path := os.getenv("SLURM_RESUME_FILE")):
+ log.error("SLURM_RESUME_FILE was not in environment. Cannot get detailed job, node, partition allocation data.")
+ return None
+ blob = Path(path).read_text()
+ log.debug(f"Resume data: {blob}")
+ data = json.loads(blob)
+
+ jobs = []
+ for jo in data.get("jobs", []):
+
+ job = ResumeJobData(
+ job_id = jo.get("job_id"),
+ partition = jo.get("partition"),
+ nodes_alloc = util.to_hostnames(jo.get("nodes_alloc")),
+ )
+ jobs.append(job)
+ return ResumeData(jobs=jobs)
+
def instance_properties(nodeset:object, model:str, placement_group:Optional[str], labels:Optional[dict], job_id:Optional[int]):
props = NSDict()
@@ -75,31 +99,17 @@ def instance_properties(nodeset:object, model:str, placement_group:Optional[str]
props.disks = template_info.disks
if placement_group:
- props.scheduling.onHostMaintenance = "TERMINATE"
props.resourcePolicies = [placement_group]
if reservation := lookup().nodeset_reservation(nodeset):
- props.reservationAffinity = {
- "consumeReservationType": "SPECIFIC_RESERVATION",
- "key": f"compute.{util.universe_domain()}/reservation-name",
- "values": [reservation.bulk_insert_name],
- }
+ update_reservation_props(reservation, props, placement_group, False)
- if reservation.deployment_type == "DENSE":
- props.scheduling.provisioning_model = "RESERVATION_BOUND"
-
- if reservation.policies:
- props.scheduling.onHostMaintenance = "TERMINATE"
- props.resourcePolicies = reservation.policies
- log.info(
- f"reservation {reservation.bulk_insert_name} is being used with policies {props.resourcePolicies}"
- )
- else:
- props.resourcePolicies = []
- log.info(
- f"reservation {reservation.bulk_insert_name} is being used without any policies"
- )
+ if (fr := lookup().future_reservation(nodeset)) and fr.specific:
+ update_reservation_props(fr.active_reservation, props, placement_group, True)
+ if props.resourcePolicies:
+ props.scheduling.onHostMaintenance = "TERMINATE"
+
if nodeset.maintenance_interval:
props.scheduling.maintenanceInterval = nodeset.maintenance_interval
@@ -108,9 +118,28 @@ def instance_properties(nodeset:object, model:str, placement_group:Optional[str]
# Override with properties explicit specified in the nodeset
props.update(nodeset.get("instance_properties") or {})
-
return props
+def update_reservation_props(reservation:object, props:object, placement_group:Optional[str], reservation_from_fr:bool) -> None:
+ props.reservationAffinity = {
+ "consumeReservationType": "SPECIFIC_RESERVATION",
+ "key": f"compute.{util.universe_domain()}/reservation-name",
+ "values": [reservation.bulk_insert_name],
+ }
+
+ if reservation.dense or reservation_from_fr:
+ props.scheduling.provisioningModel = "RESERVATION_BOUND"
+
+ # Figure out `resourcePolicies`
+ if reservation.policies: # use ones already attached to reservations
+ props.resourcePolicies = reservation.policies
+ elif reservation.dense and placement_group: # use once created by Slurm
+ props.resourcePolicies = [placement_group]
+ else: # vanilla reservations don't support external policies
+ props.resourcePolicies = []
+ log.info(
+ f"reservation {reservation.bulk_insert_name} is being used with resourcePolicies: {props.resourcePolicies}")
+
def update_props_dws(props:object, dws_flex:object, job_id: Optional[int]) -> None:
props.scheduling.onHostMaintenance = "TERMINATE"
props.scheduling.instanceTerminationAction = "DELETE"
@@ -126,14 +155,13 @@ def dws_flex_duration(dws_flex:object, job_id: Optional[int]) -> int:
log.info("Job TimeLimit cannot be less than 30 seconds or exceed 2 weeks")
return max_duration
-
def per_instance_properties(node):
props = NSDict()
# No properties beyond name are supported yet.
return props
-def create_instances_request(nodes, partition_name, placement_group, job_id=None):
+def create_instances_request(nodes: List[str], placement_group: Optional[str], excl_job_id: Optional[int]):
"""Call regionInstances.bulkInsert to create instances"""
assert 0 < len(nodes) <= BULK_INSERT_LIMIT
@@ -141,7 +169,6 @@ def create_instances_request(nodes, partition_name, placement_group, job_id=None
model = next(iter(nodes))
nodeset = lookup().node_nodeset(model)
template = lookup().node_template(model)
- partition = lookup().cfg.partitions[partition_name]
log.debug(f"create_instances_request: {model} placement: {placement_group}")
body = NSDict()
@@ -157,14 +184,10 @@ def create_instances_request(nodes, partition_name, placement_group, job_id=None
# source of instance properties
body.sourceInstanceTemplate = template
- labels = (
- dict(slurm_job_id=job_id)
- if job_id is not None and partition.enable_job_exclusive
- else None
- )
+ labels = {"slurm_job_id": excl_job_id} if excl_job_id else None
# overwrites properties across all instances
body.instanceProperties = instance_properties(
- nodeset, model, placement_group, labels, job_id
+ nodeset, model, placement_group, labels, excl_job_id
)
# key is instance name, value overwrites properties
@@ -201,114 +224,74 @@ def create_instances_request(nodes, partition_name, placement_group, job_id=None
log_api_request(req)
return req
+@dataclass()
+class PlacementAndNodes:
+ placement: Optional[str]
+ nodes: List[str]
+
+@dataclass(frozen=True)
+class BulkChunk:
+ nodes: List[str]
+ prefix: str # node name prefix, i.e. the cluster and nodeset portion of the name
+ chunk_idx: int
+ excl_job_id: Optional[int]
+ placement_group: Optional[str] = None
+
+ @property
+ def name(self):
+ if self.placement_group is not None:
+ return f"{self.prefix}:job{self.excl_job_id}:{self.placement_group}:{self.chunk_idx}"
+ if self.excl_job_id is not None:
+ return f"{self.prefix}:job{self.excl_job_id}:{self.chunk_idx}"
+ return f"{self.prefix}:{self.chunk_idx}"
+
-def group_nodes_bulk(nodes, resume_data=None):
- """group nodes by job_id, placement_group, node_group, and max bulkInsert size"""
- if resume_data is None:
- # all nodes will be considered jobless
- jobs = {}
- else:
- jobs = {job.job_id: job for job in resume_data.jobs}
-
- # expand all job nodelists
- for job in jobs.values():
- job.nodelist_alloc = job.nodes_alloc
- job.nodes_alloc = util.to_hostnames(job.nodelist_alloc)
- job.nodelist_resume = job.nodes_resume
- job.nodes_resume = util.to_hostnames(job.nodelist_resume)
- job.tpu = util.part_is_tpu(job.partition)
- if not job.tpu:
- # create placement groups if nodes for job need it
- job.placement_groups = create_placement_groups(
- node_list=job.nodes_alloc,
- job_id=job.job_id,
- )
- # placement group assignment is based on all allocated nodes, but we only want to
- # handle nodes in nodes_resume in this run.
- for pg, pg_nodes in job.placement_groups.items():
- job.placement_groups[pg] = list(
- set(pg_nodes).intersection(job.nodes_resume)
- )
- # a bit of a hack, but nodes resumed using scontrol instead of through job scheduling do not have a job
- jobless_nodes = list(
- set(nodes).difference(
- chain.from_iterable(job.nodes_resume for job in jobs.values())
- )
- )
- jobless_nodes_tpu = []
- for jobless_node in jobless_nodes[:]:
- if lookup().node_is_tpu(jobless_node):
- jobless_nodes.remove(jobless_node)
- jobless_nodes_tpu.append(jobless_node)
-
- jobs["Normal_None"] = NSDict(
- job_id=None,
- nodes_resume=jobless_nodes,
- nodes_alloc=jobless_nodes,
- placement_groups=create_placement_groups(node_list=jobless_nodes),
- partition=None,
- tpu=False,
- )
- jobs["TPU_None"] = NSDict(
- job_id=None,
- nodes_resume=jobless_nodes_tpu,
- nodes_alloc=jobless_nodes_tpu,
- partition=None,
- tpu=True,
- )
-
- BulkChunk = collections.namedtuple(
- "BulkChunk",
- ["prefix", "job_id", "partition_name", "placement_group", "nodes", "i"],
- )
- BulkChunkTPU = collections.namedtuple(
- "BulkChunkTPU",
- ["prefix", "job_id", "partition_name", "nodes", "i"],
- )
- grouped_nodes = [
+def group_nodes_bulk(nodes: List[str], resume_data: Optional[ResumeData], lkp: util.Lookup):
+ """group nodes by nodeset, placement_group, exclusive_job_id if any"""
+ if resume_data is None: # all nodes will be considered jobless
+ resume_data = ResumeData(jobs=[])
+
+ nodes = set(nodes) # turn into set to simplify intersection
+ non_excl = nodes.copy()
+ groups = {} # excl_job_id|none -> PlacementAndNodes
+
+ # expand all exclusive job nodelists
+ for job in resume_data.jobs:
+ if not lkp.cfg.partitions[job.partition].enable_job_exclusive:
+ continue
+
+ groups[job.job_id] = []
+ # placement group assignment is based on all allocated nodes, ...
+ for pn in create_placements(job.nodes_alloc, job.job_id, lkp):
+ groups[job.job_id].append(
+ PlacementAndNodes(
+ placement=pn.placement,
+ #... but we only want to handle nodes in nodes_resume in this run.
+ nodes = sorted(set(pn.nodes) & nodes)
+ ))
+ non_excl.difference_update(job.nodes_alloc)
+
+ groups[None] = create_placements(sorted(non_excl), excl_job_id=None, lkp=lkp)
+
+ def chunk_nodes(nodes: List[str]):
+ chunk_size = BULK_INSERT_LIMIT
+ if nodes and lkp.node_is_tpu(nodes[0]):
+ chunk_size = util.TPU(lkp.node_nodeset(nodes[0])).vmcount
+ return chunked(nodes, n=chunk_size)
+
+ chunks = [
BulkChunk(
- prefix,
- job_id if job_id != "Normal_None" else None,
- jobs[job_id].partition,
- placement_group,
- chunk_nodes,
- i,
- )
- for job_id, job in jobs.items()
- if not job.tpu
- for placement_group, pg_nodes in job.placement_groups.items()
- for prefix, nodes in util.groupby_unsorted(pg_nodes, lookup().node_prefix)
- for i, chunk_nodes in enumerate(chunked(nodes, n=BULK_INSERT_LIMIT))
- ]
- grouped_nodes_tpu = [
- BulkChunkTPU(
- prefix,
- job_id if job_id != "TPU_None" else None,
- jobs[job_id].partition,
- chunk_nodes,
- i,
- )
- for job_id, job in jobs.items()
- if job.tpu
- for prefix, nodes in util.groupby_unsorted(job.nodes_resume, lookup().node_prefix)
- for i, chunk_nodes in enumerate(lookup().chunk_tpu_nodes(list(nodes)))
+ nodes=nodes_chunk,
+ prefix=lkp.node_prefix(nodes_chunk[0]), # cluster and nodeset portion of the node name
+ excl_job_id = job_id,
+ placement_group=pn.placement,
+ chunk_idx=i)
+
+ for job_id, placements in groups.items()
+ for pn in placements if pn.nodes
+ for i, nodes_chunk in enumerate(chunk_nodes(pn.nodes))
]
-
- def group_name(chunk: BulkChunk):
- if chunk.placement_group is not None:
- return f"{chunk.prefix}:job{chunk.job_id}:{chunk.placement_group}:{chunk.i}"
- if chunk.job_id is not None:
- return f"{chunk.prefix}:job{chunk.job_id}:{chunk.i}"
- return f"{chunk.prefix}:{chunk.i}"
-
- def group_name_tpu(chunk: BulkChunkTPU):
- if chunk.job_id is not None:
- return f"{chunk.prefix}:job{chunk.job_id}:{chunk.i}"
- return f"{chunk.prefix}:{chunk.i}"
-
- grouped_nodes = {group_name(chunk): chunk for chunk in grouped_nodes}
- grouped_nodes_tpu = {group_name_tpu(chunk): chunk for chunk in grouped_nodes_tpu}
- return grouped_nodes, grouped_nodes_tpu
+ return {chunk.name: chunk for chunk in chunks}
def start_tpu(data):
@@ -339,55 +322,49 @@ def start_tpu(data):
log.error("Error creating tpu node {node}")
-def resume_nodes(nodes: List[str], resume_data=None):
+def resume_nodes(nodes: List[str], resume_data: Optional[ResumeData]):
"""resume nodes in nodelist"""
+ # Prevent dormant nodes associated with a future reservation from being resumed
+ nodes, dormant_fr_nodes = util.separate(lookup().is_dormant_fr_node, nodes)
+
+ if dormant_fr_nodes:
+ log.warning(f"Resume was unable to resume future reservation nodes={dormant_fr_nodes}")
+ down_nodes_notify_jobs(dormant_fr_nodes, "Reservation is not active, nodes cannot be resumed", resume_data)
+
if not nodes:
log.info("No nodes to resume")
return
- if resume_data is None and global_resume_data is not None:
- resume_data = global_resume_data.deepcopy()
-
nodes = sorted(nodes, key=lookup().node_prefix)
- grouped_nodes, grouped_tpu_nodes = group_nodes_bulk(nodes, resume_data)
+ grouped_nodes = group_nodes_bulk(nodes, resume_data, lookup())
if log.isEnabledFor(logging.DEBUG):
- # grouped_nodelists is used in later debug logs too
grouped_nodelists = {
- group: to_hostlist(chunk.nodes) for group, chunk in grouped_nodes.items()
- }
- grouped_tpu_nodelists = {
- group: to_hostlist(chunk.nodes)
- for group, chunk in grouped_tpu_nodes.items()
+ group: to_hostlist_fast(chunk.nodes) for group, chunk in grouped_nodes.items()
}
log.debug(
"node bulk groups: \n{}".format(yaml.safe_dump(grouped_nodelists).rstrip())
)
- log.debug(
- "TPU node bulk groups: \n{}".format(
- yaml.safe_dump(grouped_tpu_nodelists).rstrip()
- )
- )
+
tpu_start_data = []
tpu_objs = {}
- for group, chunk in grouped_tpu_nodes.items():
- # do not create multiple tpu_objs if nodes with the same prefix are used
- if chunk.prefix not in tpu_objs.keys():
- model = chunk.nodes[0]
- tpu_objs[chunk.prefix] = TPU(lookup().node_nodeset(model))
-
- tpu_start_data.append({"tpu": tpu_objs[chunk.prefix], "node": chunk.nodes})
-
- # make all bulkInsert requests and execute with batch
- inserts = {
- group: create_instances_request(
- chunk.nodes, chunk.partition_name, chunk.placement_group, chunk.job_id
- )
- for group, chunk in grouped_nodes.items()
- }
+ bi_inserts = {}
+
+ for group, chunk in grouped_nodes.items():
+ model = chunk.nodes[0]
+ if lookup().node_is_tpu(model):
+ # do not create multiple tpu_objs if nodes with the same prefix are used
+ if chunk.prefix not in tpu_objs.keys():
+ tpu_objs[chunk.prefix] = util.TPU(lookup().node_nodeset(model))
+ tpu_start_data.append({"tpu": tpu_objs[chunk.prefix], "node": chunk.nodes})
+ else:
+ bi_inserts[group] = create_instances_request(
+ chunk.nodes, chunk.placement_group, chunk.excl_job_id
+ )
+ # execute all bulkInsert requests with batch
bulk_ops = dict(
- zip(inserts.keys(), map_with_futures(ensure_execute, inserts.values()))
+ zip(bi_inserts.keys(), map_with_futures(ensure_execute, bi_inserts.values()))
)
log.debug(f"bulk_ops={yaml.safe_dump(bulk_ops)}")
started = {
@@ -400,7 +377,7 @@ def resume_nodes(nodes: List[str], resume_data=None):
failed_reqs = [str(e) for e in failed.items()]
log.error("bulkInsert API failures: {}".format("; ".join(failed_reqs)))
for ident, exc in failed.items():
- down_nodes(grouped_nodes[ident].nodes, f"GCP Error: {exc._get_reason()}")
+ down_nodes_notify_jobs(grouped_nodes[ident].nodes, f"GCP Error: {exc._get_reason()}", resume_data)
if log.isEnabledFor(logging.DEBUG):
for group, op in started.items():
@@ -449,7 +426,7 @@ def resume_nodes(nodes: List[str], resume_data=None):
for err in failed_op["error"]["errors"]
)
if code != "RESOURCE_ALREADY_EXISTS":
- down_nodes(hostlist, f"GCP Error: {msg}")
+ down_nodes_notify_jobs(failed_nodes, f"GCP Error: {msg}", resume_data)
log.error(
f"errors from insert for node '{failed_node}' ({failed_op['name']}): {msg}"
)
@@ -461,33 +438,25 @@ def resume_nodes(nodes: List[str], resume_data=None):
all_successful_inserts.extend(successful_inserts)
-def update_job_comment(nodelist: str, comment: str):
- if global_resume_data is None:
- log.warning(
- "Cannot update and notify jobs with API failures as no valid resume file is present."
- )
- return
-
- nodes = util.to_hostnames(nodelist)
- job_list = (
- job
- for job in global_resume_data.jobs
- if any(map(lambda node: node in nodes, util.to_hostnames(job.nodelist_resume)))
- )
- for job in job_list:
- run(f"{lookup().scontrol} update jobid={job.job_id} admincomment='{comment}'")
- run(f"{lookup().scontrol} notify {job.job_id} '{comment}'")
-
-
-def down_nodes(nodelist, reason):
+def down_nodes_notify_jobs(nodes: List[str], reason: str, resume_data: Optional[ResumeData]) -> None:
"""set nodes down with reason"""
- if isinstance(nodelist, list):
- nodelist = util.to_hostlist(nodelist)
- update_job_comment(nodelist, reason)
+ nodelist = util.to_hostlist_fast(nodes)
reason_quoted = shlex.quote(reason)
+
log.error(f"Marking nodes {nodelist} as DOWN, reason: {reason}")
run(f"{lookup().scontrol} update nodename={nodelist} state=down reason={reason_quoted}")
+ if resume_data is None:
+ log.warning("Cannot update and notify jobs with API failures as no valid resume file is present.")
+ return
+
+ nodes = set(nodes) # turn into set to speed up intersection
+ for job in resume_data.jobs:
+ if not (set(job.nodes_alloc) & nodes):
+ continue
+ run(f"{lookup().scontrol} update jobid={job.job_id} admincomment='{reason_quoted}'")
+ run(f"{lookup().scontrol} notify {job.job_id} '{reason_quoted}'")
+
def hold_job(job_id, reason):
"""hold job, set comment to reason"""
@@ -514,42 +483,79 @@ def create_placement_request(pg_name, region):
return request
-def create_placement_groups(node_list: List[str], job_id:int=0) -> Dict[str, List[str]]:
- pgs = {}
- node_map = lookup().nodeset_map(node_list)
- for _, nodes in node_map.items():
- pgs.update(create_nodeset_placement_groups(nodes, job_id))
- return pgs
+def create_placements(nodes: List[str], excl_job_id:Optional[int], lkp: util.Lookup) -> List[PlacementAndNodes]:
+ nodeset_map = collections.defaultdict(list)
+ for node in nodes: # split nodes on nodesets
+ nodeset_map[lkp.node_nodeset_name(node)].append(node)
+ placements = []
+ for _, ns_nodes in nodeset_map.items():
+ placements.extend(create_nodeset_placements(ns_nodes, excl_job_id, lkp))
+ return placements
-def create_nodeset_placement_groups(node_list: List[str], job_id:int) -> Dict[str, List[str]]:
- no_pg = {None: node_list} # canned result for no placement policies created
- if len(node_list) < 2:
- return no_pg # don't create placement_policy for just one node
+def _allocate_nodes_to_placements(nodes: List[str], excl_job_id:Optional[int], lkp: util.Lookup) -> List[PlacementAndNodes]:
+ # canned result for no placement policies created
+ no_pp = [PlacementAndNodes(placement=None, nodes=nodes)]
- model = next(iter(node_list))
- nodeset = lookup().node_nodeset(model)
- if not (nodeset.enable_placement and valid_placement_nodes(node_list)):
- return no_pg
+ if excl_job_id and len(nodes) < 2:
+ return no_pp # don't create placement_policy for just one node
- region = lookup().node_region(model)
+ model = nodes[0]
+ nodeset = lkp.node_nodeset(model)
- groups = {
- f"{lookup().cfg.slurm_cluster_name}-slurmgcp-managed-{nodeset.nodeset_name}-{job_id}-{i}": nodes
- for i, nodes in enumerate(chunked(node_list, n=PLACEMENT_MAX_CNT))
- }
+ if lkp.node_is_tpu(model):
+ return no_pp
+ if not (nodeset.enable_placement and valid_placement_node(model)):
+ return no_pp
+
+ name_prefix = f"{lkp.cfg.slurm_cluster_name}-slurmgcp-managed-{nodeset.nodeset_name}"
+ if excl_job_id: # simply chunk given nodes by max size of placement
+ return [
+ PlacementAndNodes(placement=f"{name_prefix}-{excl_job_id}-{i}", nodes=chunk)
+ for i, chunk in enumerate(chunked(nodes, n=PLACEMENT_MAX_CNT))
+ ]
+
+    # split the whole nodeset (not only the nodes to resume) into chunks of the max placement size;
+    # create placements (they most likely already exist) for the requested nodes
+ chunks = collections.defaultdict(list) # chunk_id -> nodes
+ invalid = []
+
+ for node in nodes:
+ try:
+ chunk = lkp.node_index(node) // PLACEMENT_MAX_CNT
+ chunks[chunk].append(node)
+        except Exception:
+ invalid.append(node)
+
+ placements = [
+ # NOTE: use 0 instead of job_id for consistency with previous SlurmGCP behavior
+ PlacementAndNodes(placement=f"{name_prefix}-0-{c_id}", nodes=c_nodes)
+ for c_id, c_nodes in chunks.items()
+ ]
+
+ if invalid:
+ placements.append(PlacementAndNodes(placement=None, nodes=invalid))
+ log.error(f"Could not find placement for nodes with unexpected names: {to_hostlist_fast(invalid)}")
+
+ return placements
+
+def create_nodeset_placements(nodes: List[str], excl_job_id:Optional[int], lkp: util.Lookup) -> List[PlacementAndNodes]:
+ placements = _allocate_nodes_to_placements(nodes, excl_job_id, lkp)
+ region = lkp.node_region(nodes[0])
if log.isEnabledFor(logging.DEBUG):
- debug_groups = {
- group: to_hostlist_fast(nodes) for group, nodes in groups.items()
- }
+ debug_p = {p.placement: to_hostlist_fast(p.nodes) for p in placements}
log.debug(
- f"creating {len(groups)} placement groups: \n{yaml.safe_dump(debug_groups).rstrip()}"
+ f"creating {len(placements)} placement groups: \n{yaml.safe_dump(debug_p).rstrip()}"
)
+
requests = {
- group: create_placement_request(group, region) for group in groups.keys()
+ p.placement: create_placement_request(p.placement, region) for p in placements if p.placement
}
+ if not requests:
+ return placements
+ # TODO: aggregate all requests for whole resume and execute them at once (don't limit to nodeset/job)
ops = dict(
zip(requests.keys(), map_with_futures(ensure_execute, requests.values()))
)
@@ -587,68 +593,43 @@ def classify_result(item):
log.info(
f"created {len(operations)} placement groups ({to_hostlist_fast(operations.keys())})"
)
- return groups
+ return placements
-def valid_placement_nodes(nodelist):
+def valid_placement_node(node: str) -> bool:
invalid_types = frozenset(["e2", "t2d", "n1", "t2a", "m1", "m2", "m3"])
- for node in nodelist:
- mt = lookup().node_template_info(node).machineType
- if mt.split("-")[0] in invalid_types:
- log.warn(f"Unsupported machine type for placement policy: {mt}.")
- log.warn(
- f"Please do not use any the following machine types with placement policy: ({','.join(invalid_types)})"
- )
- return False
- return True
-
-
-def get_resume_file_data():
- SLURM_RESUME_FILE = os.getenv("SLURM_RESUME_FILE")
- if SLURM_RESUME_FILE is None:
- log.warning(
- "SLURM_RESUME_FILE was not in environment. Cannot get detailed job, node, partition allocation data."
+ mt = lookup().node_template_info(node).machineType
+ if mt.split("-")[0] in invalid_types:
+ log.warn(f"Unsupported machine type for placement policy: {mt}.")
+ log.warn(
+            f"Please do not use any of the following machine types with a placement policy: ({','.join(invalid_types)})"
)
- return None
- resume_file = Path(SLURM_RESUME_FILE)
- resume_json = resume_file.read_text()
- if log.isEnabledFor(logging.DEBUG):
- (dirs.scripts / "resume_data.json").write_text(resume_json)
- return NSDict(json.loads(resume_json))
+ return False
+ return True
-def main(nodelist):
+def main(nodelist: str) -> None:
"""main called when run as script"""
log.debug(f"ResumeProgram {nodelist}")
# Filter out nodes not in config.yaml
- other_nodes, pm_nodes = separate(
+ other_nodes, nodes = separate(
lookup().is_power_managed_node, util.to_hostnames(nodelist)
)
if other_nodes:
- log.debug(
+ log.error(
f"Ignoring non-power-managed nodes '{to_hostlist_fast(other_nodes)}' from '{nodelist}'"
)
- pm_nodelist = util.to_hostlist_fast(pm_nodes)
- if pm_nodes:
- log.debug(f"Resuming nodes '{pm_nodelist}' from '{nodelist}'")
- else:
- log.debug("No nodes to resume")
+ if not nodes:
+ log.info("No nodes to resume")
return
- log.info(f"resume {pm_nodelist}")
- resume_nodes(pm_nodes, global_resume_data)
- # TODO only run below if resume_nodes succeeds but
- # resume_nodes does not currently return any status.
- if lookup().cfg.enable_slurm_gcp_plugins:
- slurm_gcp_plugins.post_main_resume_nodes(
- lkp=lookup(), nodelist=nodelist, global_resume_data=global_resume_data
- )
-
+ resume_data = get_resume_file_data()
+ log.info(f"resume {util.to_hostlist_fast(nodes)}")
+ resume_nodes(nodes, resume_data)
+
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("nodelist", help="list of nodes to resume")
args = util.init_log_and_parse(parser)
-
- global_resume_data = get_resume_file_data()
main(args.nodelist)
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/slurm_gcp_plugins/__init__.py b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/slurm_gcp_plugins/__init__.py
index c56793c4be..dec7085994 100644
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/slurm_gcp_plugins/__init__.py
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/slurm_gcp_plugins/__init__.py
@@ -100,14 +100,6 @@ def register_instance_information_fields(*pos_args, **keyword_args):
)
-# Called just after VM instances have been created and are up
-def post_main_resume_nodes(*pos_args, **keyword_args):
- run_plugins_for_function(
- plugin_function_name="post_main_resume_nodes",
- pos_args=pos_args,
- keyword_args=keyword_args,
- )
-
# Called just before VM instances are deleted should be still up
# (NOTE: if a node has failed it might not be up or unresponsive)
@@ -141,7 +133,6 @@ def pre_placement_group_insert(*pos_args, **keyword_args):
__all__ = [
- "post_main_resume_nodes",
"pre_main_suspend_nodes",
"register_instance_information_fields",
"pre_instance_bulk_insert",
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/slurm_gcp_plugins/test_plugin/__init__.py b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/slurm_gcp_plugins/test_plugin/__init__.py
index 67dbd5d408..b4b3be580d 100644
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/slurm_gcp_plugins/test_plugin/__init__.py
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/slurm_gcp_plugins/test_plugin/__init__.py
@@ -22,20 +22,7 @@ def register_instance_information_fields(*pos_args, **keyword_args):
keyword_args["instance_information_fields"].extend(instance_information_fields)
-def post_main_resume_nodes(*pos_args, **keyword_args):
- logging.debug("post_main_resume_nodes called from test_plugin")
- for node in keyword_args["nodelist"]:
- logging.info(
- (
- "test_plugin:"
- + f"nodename:{node} "
- + f"instance_id:{keyword_args['lkp'].instance(node)['id']} "
- + f"physicalHost:{keyword_args['lkp'].instance(node)['resourceStatus']['physicalHost']}"
- )
- )
-
__all__ = [
"register_instance_information_fields",
- "post_main_resume_nodes",
]
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/slurmsync.py b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/slurmsync.py
index 1bd876a56f..d21211e8e7 100755
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/slurmsync.py
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/slurmsync.py
@@ -15,19 +15,18 @@
# limitations under the License.
import argparse
-import datetime
import fcntl
import json
import logging
import re
import sys
+import shlex
+from datetime import datetime, timedelta
from enum import Enum
from itertools import chain
from pathlib import Path
-import yaml
-import datetime as dt
-from datetime import datetime
-from typing import Dict, Tuple
+from dataclasses import dataclass
+from typing import Dict, Tuple, List, Optional, Protocol
from functools import lru_cache
import util
@@ -35,11 +34,13 @@
batch_execute,
ensure_execute,
execute_with_futures,
+ FutureReservation,
install_custom_scripts,
run,
separate,
to_hostlist_fast,
NSDict,
+ NodeState,
TPU,
chunked,
dirs,
@@ -54,21 +55,71 @@
TOT_REQ_CNT = 1000
_MAINTENANCE_SBATCH_SCRIPT_PATH = dirs.custom_scripts / "perform_maintenance.sh"
-NodeStatus = Enum(
- "NodeStatus",
- (
- "orphan",
- "power_down",
- "preempted",
- "restore",
- "resume",
- "terminated",
- "unbacked",
- "unchanged",
- "unknown",
- ),
-)
+class NodeAction(Protocol):
+ def apply(self, nodes:List[str]) -> None:
+ ...
+
+ def __hash__(self):
+ ...
+
+@dataclass(frozen=True)
+class NodeActionPowerUp():
+ def apply(self, nodes:List[str]) -> None:
+ hostlist = util.to_hostlist_fast(nodes)
+ log.info(f"{len(nodes)} instances to resume ({hostlist})")
+ run(f"{lookup().scontrol} update nodename={hostlist} state=power_up")
+
+@dataclass(frozen=True)
+class NodeActionIdle():
+ def apply(self, nodes:List[str]) -> None:
+ hostlist = util.to_hostlist_fast(nodes)
+ log.info(f"{len(nodes)} nodes to idle ({hostlist})")
+ run(f"{lookup().scontrol} update nodename={hostlist} state=resume")
+@dataclass(frozen=True)
+class NodeActionPowerDown():
+ def apply(self, nodes:List[str]) -> None:
+ hostlist = util.to_hostlist_fast(nodes)
+ log.info(f"{len(nodes)} instances to power down ({hostlist})")
+ run(f"{lookup().scontrol} update nodename={hostlist} state=power_down")
+
+@dataclass(frozen=True)
+class NodeActionDelete():
+ def apply(self, nodes:List[str]) -> None:
+ hostlist = util.to_hostlist_fast(nodes)
+ log.info(f"{len(nodes)} instances to delete ({hostlist})")
+ delete_instances(nodes)
+
+@dataclass(frozen=True)
+class NodeActionPrempt():
+ def apply(self, nodes:List[str]) -> None:
+ NodeActionDown(reason="Preempted instance").apply(nodes)
+ hostlist = util.to_hostlist_fast(nodes)
+ log.info(f"{len(nodes)} instances restarted ({hostlist})")
+ start_instances(nodes)
+
+@dataclass(frozen=True)
+class NodeActionUnchanged():
+ def apply(self, nodes:List[str]) -> None:
+ pass
+
+@dataclass(frozen=True)
+class NodeActionDown():
+ reason: str
+
+ def apply(self, nodes: List[str]) -> None:
+ hostlist = util.to_hostlist_fast(nodes)
+ log.info(f"{len(nodes)} nodes set down ({hostlist}) with reason={self.reason}")
+ run(f"{lookup().scontrol} update nodename={hostlist} state=down reason={shlex.quote(self.reason)}")
+
+@dataclass(frozen=True)
+class NodeActionUnknown():
+ slurm_state: Optional[NodeState]
+ instance_state: Optional[str]
+
+ def apply(self, nodes:List[str]) -> None:
+ hostlist = util.to_hostlist_fast(nodes)
+        log.error(f"{len(nodes)} nodes have unexpected Slurm state {self.slurm_state} and instance state {self.instance_state} ({hostlist})")
def start_instance_op(inst):
return lookup().compute.instances().start(
@@ -94,14 +145,25 @@ def start_instances(node_list):
execute_with_futures(start_tpu, tpu_start_data)
-def _find_dynamic_node_status() -> NodeStatus:
+def _find_dynamic_node_status() -> NodeAction:
# TODO: cover more cases:
# * delete dead dynamic nodes
# * delete orhpaned instances
- return NodeStatus.unchanged # don't touch dynamic nodes
-
-
-def _find_tpu_node_status(nodename, state):
+ return NodeActionUnchanged() # don't touch dynamic nodes
+
+def get_fr_action(fr: FutureReservation, nodename:str, state:NodeState) -> Optional[NodeAction]:
+ now = datetime.utcnow()
+ if fr.start_time < now < fr.end_time:
+ return None # handle like any other node
+ if state.base == "DOWN":
+ return NodeActionUnchanged()
+ if fr.start_time >= now:
+ msg = f"Waiting for reservation:{fr.name} to start at {fr.start_time}"
+ else:
+        msg = f"Reservation:{fr.name} is past its end-time"
+ return NodeActionDown(reason=msg)
+
+def _find_tpu_node_action(nodename, state) -> NodeAction:
ns = lookup().node_nodeset(nodename)
tpuobj = TPU(ns)
inst = tpuobj.get_node(nodename)
@@ -123,24 +185,24 @@ def _find_tpu_node_status(nodename, state):
log.error(
f"More than one cloud tpu node for tpu group {nodelist}, there should be only one that should be {l_nodelist[0]}, but we have found {tpus_int}"
)
- return NodeStatus.unknown
+ return NodeActionUnknown(slurm_state=state, instance_state=None)
if len(tpus_int) == 1:
inst = tpuobj.get_node(tpus_int[0])
# if len(tpus_int ==0) this case is not relevant as this would be the case always that a TPU group is not running
if inst is None:
if state.base == "DOWN" and "POWERED_DOWN" in state.flags:
- return NodeStatus.restore
+ return NodeActionIdle()
if "POWERING_DOWN" in state.flags:
- return NodeStatus.restore
+ return NodeActionIdle()
if "COMPLETING" in state.flags:
- return NodeStatus.unbacked
+ return NodeActionDown(reason="Unbacked instance")
if state.base != "DOWN" and not (
set(("POWER_DOWN", "POWERING_UP", "POWERING_DOWN", "POWERED_DOWN"))
& state.flags
):
- return NodeStatus.unbacked
+ return NodeActionDown(reason="Unbacked instance")
if lookup().is_static_node(nodename):
- return NodeStatus.resume
+ return NodeActionPowerUp()
elif (
state is not None
and "POWERED_DOWN" not in state.flags
@@ -148,28 +210,33 @@ def _find_tpu_node_status(nodename, state):
and inst.state == TPU.State.STOPPED
):
if tpuobj.preemptible:
- return NodeStatus.preempted
+ return NodeActionPrempt()
if state.base != "DOWN":
- return NodeStatus.terminated
+ return NodeActionDown(reason="Instance terminated")
elif (
state is None or "POWERED_DOWN" in state.flags
) and inst.state == TPU.State.READY:
- return NodeStatus.orphan
+ return NodeActionDelete()
elif state is None:
# if state is None here, the instance exists but it's not in Slurm
- return NodeStatus.unknown
+ return NodeActionUnknown(slurm_state=state, instance_state=inst.status)
- return NodeStatus.unchanged
+ return NodeActionUnchanged()
-def find_node_status(nodename):
+def get_node_action(nodename: str) -> NodeAction:
"""Determine node/instance status that requires action"""
state = lookup().slurm_node(nodename)
+ if lookup().node_is_fr(nodename):
+ fr = lookup().future_reservation(lookup().node_nodeset(nodename))
+ if action := get_fr_action(fr, nodename, state):
+ return action
+
if lookup().node_is_dyn(nodename):
return _find_dynamic_node_status()
if lookup().node_is_tpu(nodename):
- return _find_tpu_node_status(nodename, state)
+ return _find_tpu_node_action(nodename, state)
# split below is workaround for VMs whose hostname is FQDN
inst = lookup().instance(nodename.split(".")[0])
@@ -179,19 +246,19 @@ def find_node_status(nodename):
if inst is None:
if "POWERING_UP" in state.flags:
- return NodeStatus.unchanged
+ return NodeActionUnchanged()
if state.base == "DOWN" and "POWERED_DOWN" in state.flags:
- return NodeStatus.restore
+ return NodeActionIdle()
if "POWERING_DOWN" in state.flags:
- return NodeStatus.restore
+ return NodeActionIdle()
if "COMPLETING" in state.flags:
- return NodeStatus.unbacked
+ return NodeActionDown(reason="Unbacked instance")
if state.base != "DOWN" and not power_flags:
- return NodeStatus.unbacked
+ return NodeActionDown(reason="Unbacked instance")
if state.base == "DOWN" and not power_flags:
- return NodeStatus.power_down
+ return NodeActionPowerDown()
if "POWERED_DOWN" in state.flags and lookup().is_static_node(nodename):
- return NodeStatus.resume
+ return NodeActionPowerUp()
elif (
state is not None
and "POWERED_DOWN" not in state.flags
@@ -199,9 +266,9 @@ def find_node_status(nodename):
and inst.status == "TERMINATED"
):
if inst.scheduling.preemptible:
- return NodeStatus.preempted
+ return NodeActionPrempt()
if state.base != "DOWN":
- return NodeStatus.terminated
+ return NodeActionDown(reason="Instance terminated")
elif (state is None or "POWERED_DOWN" in state.flags) and inst.status == "RUNNING":
log.info("%s is potential orphan node", nodename)
age_threshold_seconds = 90
@@ -214,13 +281,13 @@ def find_node_status(nodename):
age_threshold_seconds,
inst_seconds_old,
)
- return NodeStatus.unchanged
- return NodeStatus.orphan
+ return NodeActionUnchanged()
+ return NodeActionDelete()
elif state is None:
# if state is None here, the instance exists but it's not in Slurm
- return NodeStatus.unknown
+ return NodeActionUnknown(slurm_state=state, instance_state=inst.status)
- return NodeStatus.unchanged
+ return NodeActionUnchanged()
def _seconds_since_timestamp(timestamp):
@@ -236,69 +303,6 @@ def _seconds_since_timestamp(timestamp):
return datetime.now().timestamp() - creation_dt.timestamp()
-def do_node_update(status, nodes):
- """update node/instance based on node status"""
- if status == NodeStatus.unchanged:
- return
- count = len(nodes)
- hostlist = util.to_hostlist(nodes)
-
- def nodes_down():
- """down nodes"""
- log.info(
- f"{count} nodes set down due to node status '{status.name}' ({hostlist})"
- )
- run(
- f"{lookup().scontrol} update nodename={hostlist} state=down reason='Instance stopped/deleted'"
- )
-
- def nodes_restart():
- """start instances for nodes"""
- log.info(f"{count} instances restarted ({hostlist})")
- start_instances(nodes)
-
- def nodes_idle():
- """idle nodes"""
- log.info(f"{count} nodes to idle ({hostlist})")
- run(f"{lookup().scontrol} update nodename={hostlist} state=resume")
-
- def nodes_resume():
- """resume nodes via scontrol"""
- log.info(f"{count} instances to resume ({hostlist})")
- run(f"{lookup().scontrol} update nodename={hostlist} state=power_up")
-
- def nodes_delete():
- """delete instances for nodes"""
- log.info(f"{count} instances to delete ({hostlist})")
- delete_instances(nodes)
-
- def nodes_power_down():
- """power_down node in slurm"""
- log.info(f"{count} instances to power down ({hostlist})")
- run(f"{lookup().scontrol} update nodename={hostlist} state=power_down")
-
- def nodes_unknown():
- """Error status, nodes shouldn't get in this status"""
- log.error(f"{count} nodes have unexpected status: ({hostlist})")
- first = next(iter(nodes))
- state = lookup().slurm_node(first)
- state = "{}+{}".format(state.base, "+".join(state.flags)) if state else "None"
- inst = lookup().instance(first)
- log.error(f"{first} state: {state}, instance status:{inst.status}")
-
- {
- NodeStatus.orphan: nodes_delete,
- NodeStatus.power_down: nodes_power_down,
- NodeStatus.preempted: lambda: (nodes_down(), nodes_restart()),
- NodeStatus.restore: nodes_idle,
- NodeStatus.resume: nodes_resume,
- NodeStatus.terminated: nodes_down,
- NodeStatus.unbacked: nodes_down,
- NodeStatus.unchanged: lambda: None,
- NodeStatus.unknown: nodes_unknown,
- }[status]()
-
-
def delete_placement_groups(placement_groups):
def delete_placement_request(pg_name, region):
return lookup().compute.resourcePolicies().delete(
@@ -393,18 +397,9 @@ def sync_slurm():
log.debug(
f"reconciling {len(compute_instances)} ({len(all_nodes)-len(compute_instances)}) GCP instances and {len(slurm_nodes)} Slurm nodes ({len(all_nodes)-len(slurm_nodes)})."
)
- node_statuses = {
- k: list(v) for k, v in util.groupby_unsorted(all_nodes, find_node_status)
- }
- if log.isEnabledFor(logging.DEBUG):
- status_nodelist = {
- status.name: to_hostlist_fast(nodes)
- for status, nodes in node_statuses.items()
- }
- log.debug(f"node statuses: \n{yaml.safe_dump(status_nodelist).rstrip()}")
- for status, nodes in node_statuses.items():
- do_node_update(status, nodes)
+ for action, nodes in util.groupby_unsorted(all_nodes, get_node_action):
+ action.apply(list(nodes))
def reconfigure_slurm():
@@ -446,10 +441,12 @@ def reconfigure_slurm():
def update_topology(lkp: util.Lookup) -> None:
if conf.topology_plugin(lkp) != conf.TOPOLOGY_PLUGIN_TREE:
return
- updated = conf.gen_topology_conf(lkp)
+ updated, summary = conf.gen_topology_conf(lkp)
if updated:
- log.debug("Topology configuration updated. Reconfiguring Slurm.")
+ log.info("Topology configuration updated. Reconfiguring Slurm.")
util.scontrol_reconfigure(lkp)
+        # Save the summary only after Slurm has been reconfigured, so the summary reflects Slurm's point of view
+ summary.dump(lkp)
def delete_reservation(lkp: util.Lookup, reservation_name: str) -> None:
@@ -520,7 +517,7 @@ def sync_maintenance_reservation(lkp: util.Lookup) -> None:
if res_name in curr_reservation_map:
diff = curr_reservation_map[res_name] - start_time
- if abs(diff) <= dt.timedelta(seconds=1):
+ if abs(diff) <= timedelta(seconds=1):
continue
else:
del_reservation.add(res_name)
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tests/common.py b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tests/common.py
index 2272aeef99..a807c00f28 100644
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tests/common.py
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tests/common.py
@@ -36,6 +36,14 @@ class TstNodeset:
instance_template: Optional[str] = None
reservation_name: Optional[str] = ""
zone_policy_allow: Optional[list[str]] = field(default_factory=list)
+ enable_placement: bool = True
+
+@dataclass
+class TstPartition:
+ partition_name: str = "euler"
+ partition_nodeset: list[str] = field(default_factory=list)
+ partition_nodeset_tpu: list[str] = field(default_factory=list)
+ enable_job_exclusive: bool = False
@dataclass
class TstCfg:
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tests/test_resume.py b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tests/test_resume.py
new file mode 100644
index 0000000000..147ba00658
--- /dev/null
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tests/test_resume.py
@@ -0,0 +1,173 @@
+# Copyright 2024 "Google LLC"
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional
+
+import os
+import pytest
+import unittest.mock
+import unittest
+import tempfile
+
+from common import TstCfg, TstNodeset, TstPartition, TstTPU # needed to import util
+import util
+import resume
+from resume import ResumeData, ResumeJobData, BulkChunk, PlacementAndNodes
+
+def test_get_resume_file_data_no_env():
+ with unittest.mock.patch.dict(os.environ, {"SLURM_RESUME_FILE": ""}):
+ assert resume.get_resume_file_data() is None
+
+
+def test_get_resume_file_data():
+ with tempfile.NamedTemporaryFile() as f:
+ f.write(b"""{
+ "jobs": [
+ {
+ "extra": null,
+ "job_id": 1,
+ "features": null,
+ "nodes_alloc": "green-[0-2]",
+ "nodes_resume": "green-[0-1]",
+ "oversubscribe": "OK",
+ "partition": "red",
+ "reservation": null
+ }
+ ],
+ "all_nodes_resume": "green-[0-1]"
+}""")
+ f.flush()
+ with (
+ unittest.mock.patch.dict(os.environ, {"SLURM_RESUME_FILE": f.name}),
+ unittest.mock.patch("util.to_hostnames") as mock_to_hostnames,
+ ):
+ mock_to_hostnames.return_value = ["green-0", "green-1", "green-2"]
+ assert resume.get_resume_file_data() == ResumeData(jobs=[
+ ResumeJobData(
+ job_id = 1,
+ partition="red",
+ nodes_alloc=["green-0", "green-1", "green-2"],
+ )
+ ])
+ mock_to_hostnames.assert_called_once_with("green-[0-2]")
+
+
+@unittest.mock.patch("util.TPU")
+@unittest.mock.patch("resume.create_placements")
+def test_group_nodes_bulk(mock_create_placements, mock_tpu):
+ cfg = TstCfg(
+ nodeset={
+ "n": TstNodeset(nodeset_name="n"),
+ },
+ nodeset_tpu={
+ "t": TstNodeset(nodeset_name="t"),
+ },
+ partitions={
+ "p1": TstPartition(
+ partition_name="p1",
+ enable_job_exclusive=True,
+ ),
+ "p2": TstPartition(
+ partition_name="p2",
+ partition_nodeset_tpu=["t"],
+ enable_job_exclusive=True,
+ )
+ }
+ )
+ lkp = util.Lookup(cfg)
+
+ def mock_create_placements_se(nodes, excl_job_id, lkp):
+ args = (set(nodes), excl_job_id)
+ if ({'c-n-1', 'c-n-2', 'c-t-8', 'c-t-9'}, None) == args:
+ return [
+ PlacementAndNodes("g0", ["c-n-1", "c-n-2"]),
+ PlacementAndNodes(None, ['c-t-8', 'c-t-9']),
+ ]
+ if ({"c-n-0", "c-n-8"}, 1) == args:
+ return [
+ PlacementAndNodes("g10", ["c-n-0"]),
+ PlacementAndNodes("g11", ["c-n-8"]),
+ ]
+ if ({'c-t-0', 'c-t-1', 'c-t-2', 'c-t-3', 'c-t-4', 'c-t-5'}, 2) == args:
+ return [
+ PlacementAndNodes(None, ['c-t-0', 'c-t-1', 'c-t-2', 'c-t-3', 'c-t-4', 'c-t-5'])
+ ]
+ raise AssertionError(f"unexpected invocation: '{args}'")
+ mock_create_placements.side_effect = mock_create_placements_se
+
+ def mock_tpu_se(ns: TstNodeset) -> TstTPU:
+ if ns.nodeset_name == "t":
+ return TstTPU(vmcount=2)
+ raise AssertionError(f"unexpected invocation: '{ns}'")
+ mock_tpu.side_effect = mock_tpu_se
+
+ got = resume.group_nodes_bulk(
+ ["c-n-0", "c-n-1", "c-n-2", "c-t-0", "c-t-1", "c-t-2", "c-t-3", "c-t-8", "c-t-9"],
+ ResumeData(jobs=[
+ ResumeJobData(job_id=1, partition="p1", nodes_alloc=["c-n-0", "c-n-8"]),
+ ResumeJobData(job_id=2, partition="p2", nodes_alloc=["c-t-0", "c-t-1", "c-t-2", "c-t-3", "c-t-4", "c-t-5"]),
+ ]), lkp)
+ mock_create_placements.assert_called()
+ assert got == {
+ "c-n:jobNone:g0:0": BulkChunk(
+ nodes=["c-n-1", "c-n-2"], prefix="c-n", chunk_idx=0, excl_job_id=None, placement_group="g0"),
+ "c-n:job1:g10:0": BulkChunk(
+ nodes=["c-n-0"], prefix="c-n", chunk_idx=0, excl_job_id=1, placement_group="g10"),
+ "c-t:0": BulkChunk(
+ nodes=["c-t-8", "c-t-9"], prefix="c-t", chunk_idx=0, excl_job_id=None, placement_group=None),
+ "c-t:job2:0": BulkChunk(
+ nodes=["c-t-0", "c-t-1"], prefix="c-t", chunk_idx=0, excl_job_id=2, placement_group=None),
+ "c-t:job2:1": BulkChunk(
+ nodes=["c-t-2", "c-t-3"], prefix="c-t", chunk_idx=1, excl_job_id=2, placement_group=None),
+ }
+
+
+@pytest.mark.parametrize(
+ "nodes,excl_job_id,expected",
+ [
+ ( # TPU - no placements
+ ["c-t-0", "c-t-2"], 4, [PlacementAndNodes(None, ["c-t-0", "c-t-2"])]
+ ),
+        ( # disabled placements - no placements
+ ["c-x-0", "c-x-2"], 4, [PlacementAndNodes(None, ["c-x-0", "c-x-2"])]
+ ),
+ ( # excl_job
+ ["c-n-0", "c-n-uno", "c-n-2", "c-n-2011"], 4, [
+ PlacementAndNodes("c-slurmgcp-managed-n-4-0", ["c-n-0", "c-n-uno", "c-n-2", "c-n-2011"])
+ ]
+ ),
+ ( # no excl_job
+ ["c-n-0", "c-n-uno", "c-n-2", "c-n-2011"], None, [
+ PlacementAndNodes("c-slurmgcp-managed-n-0-0", ["c-n-0", "c-n-2"]),
+ PlacementAndNodes('c-slurmgcp-managed-n-0-1', ['c-n-2011']),
+ PlacementAndNodes(None, ["c-n-uno"]),
+ ]
+ ),
+ ],
+)
+def test_allocate_nodes_to_placements(nodes: list[str], excl_job_id: Optional[int], expected: list[PlacementAndNodes]):
+ cfg = TstCfg(
+ slurm_cluster_name="c",
+ nodeset={
+ "n": TstNodeset(nodeset_name="n", enable_placement=True),
+ "x": TstNodeset(nodeset_name="x", enable_placement=False)
+ },
+ nodeset_tpu={
+ "t": TstNodeset(nodeset_name="t")
+ })
+ lkp = util.Lookup(cfg)
+
+ with unittest.mock.patch("resume.valid_placement_node") as mock_valid_placement_node:
+ mock_valid_placement_node.return_value = True
+ assert resume._allocate_nodes_to_placements(nodes, excl_job_id, lkp) == expected
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tests/test_topology.py b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tests/test_topology.py
index 6d44338c81..78715bc5f6 100644
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tests/test_topology.py
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tests/test_topology.py
@@ -119,10 +119,12 @@ def tpu_se(ns: TstNodeset) -> TstTPU:
"SwitchName=s1_1 Nodes=m22-slim-[0-2]"]
assert list(compressed.render_conf_lines()) == want_compressed
- assert conf.gen_topology_conf(lkp) == True
+ upd, summary = conf.gen_topology_conf(lkp)
+ assert upd == True
want_written = PRELUDE + "\n".join(want_compressed) + "\n\n"
assert open(cfg.output_dir + "/cloud_topology.conf").read() == want_written
+ summary.dump(lkp)
summary_got = json.loads(open(cfg.output_dir + "/cloud_topology.summary.json").read())
assert summary_got == {
@@ -154,31 +156,45 @@ def test_gen_topology_conf_update():
lkp.instances = lambda: {} # no instances
# initial generation - reconfigure
- assert conf.gen_topology_conf(lkp) == True
+ upd, sum = conf.gen_topology_conf(lkp)
+ assert upd == True
+ sum.dump(lkp)
# add node: node_count_static 2 -> 3 - reconfigure
lkp.cfg.nodeset["c"].node_count_static = 3
- assert conf.gen_topology_conf(lkp) == True
+ upd, sum = conf.gen_topology_conf(lkp)
+ assert upd == True
+ sum.dump(lkp)
# remove node: node_count_static 3 -> 2 - no reconfigure
lkp.cfg.nodeset["c"].node_count_static = 2
- assert conf.gen_topology_conf(lkp) == False
+ upd, sum = conf.gen_topology_conf(lkp)
+ assert upd == False
+ # don't dump
# set empty physicalHost - no reconfigure
lkp.instances = lambda: { n.name: n for n in [TstInstance("m22-green-0", physicalHost="")]}
- assert conf.gen_topology_conf(lkp) == False
+ upd, sum = conf.gen_topology_conf(lkp)
+ assert upd == False
+ # don't dump
# set physicalHost - reconfigure
lkp.instances = lambda: { n.name: n for n in [TstInstance("m22-green-0", physicalHost="/a/b/c")]}
- assert conf.gen_topology_conf(lkp) == True
+ upd, sum = conf.gen_topology_conf(lkp)
+ assert upd == True
+ sum.dump(lkp)
# change physicalHost - reconfigure
lkp.instances = lambda: { n.name: n for n in [TstInstance("m22-green-0", physicalHost="/a/b/z")]}
- assert conf.gen_topology_conf(lkp) == True
+ upd, sum = conf.gen_topology_conf(lkp)
+ assert upd == True
+ sum.dump(lkp)
# shut down node - no reconfigure
lkp.instances = lambda: {}
- assert conf.gen_topology_conf(lkp) == False
+ upd, sum = conf.gen_topology_conf(lkp)
+ assert upd == False
+ # don't dump
@pytest.mark.parametrize(
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tests/test_util.py b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tests/test_util.py
index 1b75a1fbb2..2807740464 100644
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tests/test_util.py
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tests/test_util.py
@@ -63,6 +63,24 @@ def test_node_desc(name, expected):
assert util.lookup()._node_desc(name) == expected
+@pytest.mark.parametrize(
+ "name,expected",
+ [
+ ("az-buka-23", 23),
+ ("az-buka-0", 0),
+ ("az-buka", Exception),
+ ("az-buka-xyzf", ValueError),
+ ("az-buka-[2-3]", ValueError),
+ ],
+)
+def test_node_index(name, expected):
+ if type(expected) is type and issubclass(expected, Exception):
+ with pytest.raises(expected):
+ util.lookup().node_index(name)
+ else:
+ assert util.lookup().node_index(name) == expected
+
+
@pytest.mark.parametrize(
"name",
[
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tools/gpu-test b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tools/gpu-test
index 0aaaeb2fc0..6be548a7ed 100644
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tools/gpu-test
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/tools/gpu-test
@@ -31,8 +31,8 @@ fi
# Exit if GPU isn't H100
GPU_MODEL=$(nvidia-smi --query-gpu=name --format=csv,noheader)
-if [[ "$GPU_MODEL" != *"H100"* ]]; then
- echo "Non-H100 GPU detected" >&2
+if ! [[ "$GPU_MODEL" =~ H[1-2]00 ]]; then
+ echo "No H100 or H200 GPU detected" >&2
exit 0
fi
@@ -80,7 +80,7 @@ if [ $NUMGPUS -gt 0 ]; then
if [ $DCGM_FAILED -eq 0 ] || \
[ $ECC_ERRORS -gt 0 ] || \
[ $NVLINK_ERRORS -gt 0 ]; then
- REASON="H100 GPU issues detected: "
+ REASON="GPU issues detected: "
[ $DCGM_FAILED -eq 0 ] && REASON+="DCGM test failed, "
[ $ECC_ERRORS -gt 0 ] && REASON+="ECC errors found ($ECC_ERRORS double-bit errors), "
[ $NVLINK_ERRORS -gt 0 ] && REASON+="NVLink errors detected ($NVLINK_ERRORS errors), "
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/util.py b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/util.py
index 1d07678619..605283c5bb 100755
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/util.py
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/modules/slurm_files/scripts/util.py
@@ -19,7 +19,7 @@
import base64
import collections
from dataclasses import dataclass
-from datetime import timedelta
+from datetime import timedelta, datetime
import hashlib
import inspect
import json
@@ -947,11 +947,7 @@ def cur_repr():
res.append(f"{p}[{','.join(cs)}]")
return ",".join(res)
-
-def part_is_tpu(part):
- """check if partition with name part contains a nodeset of type tpu"""
- return len(lookup().cfg.partitions[part].partition_nodeset_tpu) > 0
-
+@lru_cache(maxsize=None)
def to_hostnames(nodelist: str) -> List[str]:
"""make list of hostnames from hostlist expression"""
if not nodelist:
@@ -1462,6 +1458,21 @@ class ReservationDetails:
bulk_insert_name: str # name in format suitable for bulk insert (currently identical to user supplied name in long format)
deployment_type: Optional[str]
+ @property
+ def dense(self) -> bool:
+ return self.deployment_type == "DENSE"
+
+@dataclass(frozen=True)
+class FutureReservation:
+ project: str
+ zone: str
+ name: str
+ specific: bool
+ start_time: datetime
+ end_time: datetime
+ active_reservation: Optional[ReservationDetails]
+
+
@dataclass
class Job:
id: int
@@ -1470,6 +1481,10 @@ class Job:
job_state: Optional[str] = None
duration: Optional[timedelta] = None
+@dataclass(frozen=True)
+class NodeState:
+ base: str
+ flags: frozenset
class Lookup:
"""Wrapper class for cached data access"""
@@ -1564,30 +1579,46 @@ def _node_desc(self, node_name):
def node_prefix(self, node_name=None):
return self._node_desc(node_name)["prefix"]
+
+ def node_index(self, node: str) -> int:
+ """ node_index("cluster-nodeset-45") == 45 """
+ suff = self._node_desc(node)["suffix"]
+
+ if suff is None:
+            raise ValueError(f"Node {node} name does not end with a numeric index")
+ return int(suff)
def node_nodeset_name(self, node_name=None):
return self._node_desc(node_name)["nodeset"]
def node_nodeset(self, node_name=None):
nodeset_name = self.node_nodeset_name(node_name)
- ns = self.cfg.nodeset.get(nodeset_name)
- if ns:
- return ns
- return self.cfg.nodeset_tpu.get(nodeset_name)
+ if nodeset_name in self.cfg.nodeset_tpu:
+ return self.cfg.nodeset_tpu[nodeset_name]
+ return self.cfg.nodeset[nodeset_name]
+
+ def partition_is_tpu(self, part: str) -> bool:
+ """check if partition with name part contains a nodeset of type tpu"""
+ return len(self.cfg.partitions[part].partition_nodeset_tpu) > 0
+
def node_is_tpu(self, node_name=None):
nodeset_name = self.node_nodeset_name(node_name)
return self.cfg.nodeset_tpu.get(nodeset_name) is not None
+
+ def node_is_fr(self, node_name:str) -> bool:
+ return bool(self.node_nodeset(node_name).future_reservation)
+
+ def is_dormant_fr_node(self, node_name:str) -> bool:
+ fr = self.future_reservation(self.node_nodeset(node_name))
+ if not fr:
+ return False
+ return fr.active_reservation is None
def node_is_dyn(self, node_name=None) -> bool:
nodeset = self.node_nodeset_name(node_name)
return self.cfg.nodeset_dyn.get(nodeset) is not None
- def chunk_tpu_nodes(self, tpu_nodes):
- model = tpu_nodes[0]
- tpu = TPU(self.node_nodeset(model))
- return chunked(tpu_nodes, n=tpu.vmcount)
-
def node_template(self, node_name=None):
return self.node_nodeset(node_name).instance_template
@@ -1646,15 +1677,14 @@ def is_static_node(self, node_name: str) -> bool:
@lru_cache(maxsize=None)
def slurm_nodes(self):
- StateTuple = namedtuple("StateTuple", "base,flags")
def make_node_tuple(node_line):
- """turn node,state line to (node, StateTuple(state))"""
+ """turn node,state line to (node, NodeState(state))"""
# state flags include: CLOUD, COMPLETING, DRAIN, FAIL, POWERED_DOWN,
# POWERING_DOWN
node, fullstate = node_line.split(",")
state = fullstate.split("+")
- state_tuple = StateTuple(state[0], set(state[1:]))
+ state_tuple = NodeState(base=state[0], flags=frozenset(state[1:]))
return (node, state_tuple)
cmd = (
@@ -1766,7 +1796,27 @@ def _get_reservation(self, project: str, zone: str, name: str) -> object:
"""See https://cloud.google.com/compute/docs/reference/rest/v1/reservations"""
return self.compute.reservations().get(
project=project, zone=zone, reservation=name).execute()
+
+ @lru_cache()
+ def _get_future_reservation(self, project:str, zone:str, name: str) -> object:
+ """See https://cloud.google.com/compute/docs/reference/rest/v1/futureReservations"""
+ return self.compute.futureReservations().get(project=project, zone=zone, futureReservation=name).execute()
+
+ def get_reservation_details(self, project:str, zone:str, name:str, bulk_insert_name:str) -> ReservationDetails:
+ reservation = self._get_reservation(project, zone, name)
+
+ # Converts policy URLs to names, e.g.:
+ # projects/111111/regions/us-central1/resourcePolicies/zebra -> zebra
+ policies = [u.split("/")[-1] for u in reservation.get("resourcePolicies", {}).values()]
+ return ReservationDetails(
+ project=project,
+ zone=zone,
+ name=name,
+ policies=policies,
+ deployment_type=reservation.get("deploymentType"),
+ bulk_insert_name=bulk_insert_name)
+
def nodeset_reservation(self, nodeset: object) -> Optional[ReservationDetails]:
if not nodeset.reservation_name:
return None
@@ -1782,19 +1832,37 @@ def nodeset_reservation(self, nodeset: object) -> Optional[ReservationDetails]:
)
project, name = match.group("project", "reservation")
- reservation = self._get_reservation(project, zone, name)
+ return self.get_reservation_details(project, zone, name, nodeset.reservation_name)
+
+ def future_reservation(self, nodeset:object) -> Optional[FutureReservation]:
+ if not nodeset.future_reservation:
+ return None
- # Converts policy URLs to names, e.g.:
- # projects/111111/regions/us-central1/resourcePolicies/zebra -> zebra
- policies = [u.split("/")[-1] for u in reservation.get("resourcePolicies", {}).values()]
+ active_reservation = None
+        match = re.search(r'^projects/(?P<project>[^/]+)/zones/(?P<zone>[^/]+)/futureReservations/(?P<name>[^/]+)(/.*)?$', nodeset.future_reservation)
+ project, zone, name = match.group("project","zone","name")
+ fr = self._get_future_reservation(project,zone,name)
- return ReservationDetails(
+ # TODO: Remove this "hack" of trimming the Z from timestamps once we move to Python 3.11 (context: https://discuss.python.org/t/parse-z-timezone-suffix-in-datetime/2220/30)
+ start_time = datetime.fromisoformat(fr["timeWindow"]["startTime"][:-1])
+ end_time = datetime.fromisoformat(fr["timeWindow"]["endTime"][:-1])
+
+ if "autoCreatedReservations" in fr["status"] and (fr_res:=fr["status"]["autoCreatedReservations"][0]):
+ if (start_time<=datetime.utcnow()<=end_time):
+                match = re.search(r'projects/(?P<project>[^/]+)/zones/(?P<zone>[^/]+)/reservations/(?P<name>[^/]+)(/.*)?$', fr_res)
+ res_name = match.group("name")
+ bulk_insert_name = f"projects/{project}/reservations/{res_name}"
+ active_reservation = self.get_reservation_details(project, zone, res_name, bulk_insert_name)
+
+ return FutureReservation(
project=project,
zone=zone,
name=name,
- policies=policies,
- deployment_type=reservation.get("deploymentType"),
- bulk_insert_name=nodeset.reservation_name)
+ specific=fr["specificReservationRequired"],
+ start_time=start_time,
+ end_time=end_time,
+ active_reservation=active_reservation
+ )
@lru_cache(maxsize=1)
def machine_types(self):
@@ -1931,12 +1999,6 @@ def template_info(self, template_link):
return template
- def nodeset_map(self, hostnames: list):
- """Convert a list of nodes into a map of nodeset_name to hostnames"""
- nodeset_map = collections.defaultdict(list)
- for node in hostnames:
- nodeset_map[self.node_nodeset_name(node)].append(node)
- return nodeset_map
def _parse_job_info(self, job_info: str) -> Job:
"""Extract job details"""
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/partition.tf b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/partition.tf
index 2a76ed7dca..e8626bd1bd 100644
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/partition.tf
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/partition.tf
@@ -26,7 +26,7 @@ locals {
# NODESET
# TODO: remove dependency on slurm-gcp repo, move to local template module
module "slurm_nodeset_template" {
- source = "github.com/GoogleCloudPlatform/slurm-gcp.git//terraform/slurm_cluster/modules/slurm_instance_template?ref=6.8.6"
+ source = "../../internal/slurm-gcp-v6/instance_template"
for_each = local.nodeset_map
project_id = var.project_id
@@ -89,6 +89,7 @@ locals {
node_count_static = ns.node_count_static
subnetwork = ns.subnetwork_self_link
reservation_name = ns.reservation_name
+ future_reservation = ns.future_reservation
maintenance_interval = ns.maintenance_interval
instance_properties_json = ns.instance_properties_json
enable_placement = ns.enable_placement
@@ -103,7 +104,7 @@ locals {
# NODESET TPU
module "slurm_nodeset_tpu" {
- source = "github.com/GoogleCloudPlatform/slurm-gcp.git//terraform/slurm_cluster/modules/slurm_nodeset_tpu?ref=6.8.6"
+ source = "../../internal/slurm-gcp-v6/nodeset_tpu"
for_each = local.nodeset_tpu_map
project_id = var.project_id
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/variables.tf b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/variables.tf
index b06d62b39f..6264576b2c 100644
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/variables.tf
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/variables.tf
@@ -278,6 +278,7 @@ variable "nodeset" {
tags = optional(list(string), [])
termination_action = optional(string)
reservation_name = optional(string)
+ future_reservation = string
startup_script = optional(list(object({
filename = string
content = string })), [])
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/versions.tf b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/versions.tf
index 38f97edcff..10ba69beae 100644
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/versions.tf
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-controller/versions.tf
@@ -24,6 +24,6 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:schedmd-slurm-gcp-v6-controller/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:schedmd-slurm-gcp-v6-controller/v1.44.0"
}
}
diff --git a/community/modules/scheduler/schedmd-slurm-gcp-v6-login/versions.tf b/community/modules/scheduler/schedmd-slurm-gcp-v6-login/versions.tf
index cbcb20a4a1..cb3dca1bc2 100644
--- a/community/modules/scheduler/schedmd-slurm-gcp-v6-login/versions.tf
+++ b/community/modules/scheduler/schedmd-slurm-gcp-v6-login/versions.tf
@@ -24,6 +24,6 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:schedmd-slurm-gcp-v6-login/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:schedmd-slurm-gcp-v6-login/v1.44.0"
}
}
diff --git a/community/modules/scripts/wait-for-startup/versions.tf b/community/modules/scripts/wait-for-startup/versions.tf
index 2e55cca8b0..c5c429481f 100644
--- a/community/modules/scripts/wait-for-startup/versions.tf
+++ b/community/modules/scripts/wait-for-startup/versions.tf
@@ -22,7 +22,7 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:wait-for-startup/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:wait-for-startup/v1.44.0"
}
required_version = ">= 0.14.0"
diff --git a/community/modules/scripts/windows-startup-script/versions.tf b/community/modules/scripts/windows-startup-script/versions.tf
index bd33fff2e1..1e592099f1 100644
--- a/community/modules/scripts/windows-startup-script/versions.tf
+++ b/community/modules/scripts/windows-startup-script/versions.tf
@@ -16,7 +16,7 @@
terraform {
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:windows-startup-script/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:windows-startup-script/v1.44.0"
}
required_version = ">= 0.14.0"
diff --git a/docs/slurm-dws-calendar.md b/docs/slurm-dws-calendar.md
new file mode 100644
index 0000000000..6ae223cee6
--- /dev/null
+++ b/docs/slurm-dws-calendar.md
@@ -0,0 +1,24 @@
+# Provisioning SlurmGCP nodes with Future Reservations (DWS Calendar Mode)
+
+Use [Future Reservations](https://cloud.google.com/compute/docs/instances/future-reservations-overview) to request assurance of important or difficult-to-obtain capacity in advance.
+[Dynamic Workload Scheduler](https://cloud.google.com/blog/products/compute/introducing-dynamic-workload-scheduler) Calendar mode extends the future reservation capabilities and caters to training/experimentation workloads that demand precise start times and have a defined duration.
+
+Compared to on-demand reservations, future reservations provide you with an even higher level of assurance in obtaining capacity for Compute Engine zonal resources.
+
+With Calendar mode, you can request GPU capacity in fixed-duration capacity blocks. It initially supports future reservations with durations of 7 or 14 days, which can be purchased up to 8 weeks in advance. Your reservation is confirmed based on availability, and the capacity is delivered to your project on your requested start date. Your VMs can then target this reservation to consume the capacity block. At the end of the defined duration, the VMs are terminated and the reservation is deleted.
+
+> [!IMPORTANT]
+> To use DWS Calendar mode your project needs to be allowlisted for private preview access.
+> Fill out the [form](https://docs.google.com/forms/d/1etaaXMW9jJUTTxfUC7TIIMttLWT5H-3Q8_3-sG6vwKk/edit).
+
+To use Future Reservations/DWS Calendar mode with SlurmGCP, set the `future_reservation` variable in the `schedmd-slurm-gcp-v6-nodeset` module to the name of the future reservation you would like to use. Ensure that fields such as `machine_type` match your reservation. Once deployed, the nodes in your nodeset will appear `DOWN` until your reservation begins, and will appear `DOWN` again once the reservation is complete (no redeployment necessary).
+
+```yaml
+ - id: fr_nodeset
+ source: community/modules/compute/schedmd-slurm-gcp-v6-nodeset
+ use: [network]
+ settings:
+ future_reservation: name OR project/PROJECT/zone/ZONE/futureReservations/name
+ enable_placement: false
+ # the rest of the settings, e.g. node_count_static, machine_type, additional_disks, etc.
+```
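+
+As a quick sanity check (a sketch, not part of the module: node and nodeset names depend on your blueprint), you can watch the nodes leave the `DOWN` state from the controller once the reservation window opens:
+
+```bash
+# Nodes backed by the future reservation should drop out of this list once the
+# reservation becomes active; they reappear here after the reservation ends.
+sinfo --Node --states=DOWN
+```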
diff --git a/docs/tutorials/openfoam/README.md b/docs/tutorials/openfoam/README.md
new file mode 100644
index 0000000000..91482046c9
--- /dev/null
+++ b/docs/tutorials/openfoam/README.md
@@ -0,0 +1,12 @@
+# Cluster Toolkit - OpenFOAM Simulation and Visualization
+
+Cluster Toolkit is open-source software offered by Google Cloud that makes it
+easy for customers to deploy HPC environments on Google Cloud.
+[OpenFOAM](https://www.openfoam.com/) is a popular tool for running
+computational fluid dynamics (CFD) simulations. This
+[demo](https://github.com/GoogleCloudPlatform/scientific-computing-examples/tree/main/apptainer/demos/openfoam)
+in the Google Cloud Platform scientific computing examples
+[repo](https://github.com/GoogleCloudPlatform/scientific-computing-examples/tree/main)
+walks you through the process of running and visualizing an OpenFOAM simulation
+using a set of [apptainer](https://apptainer.org/) containers on a Slurm-based
+HPC system deployed with the Cluster Toolkit.
diff --git a/docs/tutorials/openfoam/spack-openfoam.md b/docs/tutorials/openfoam/spack-openfoam.md
deleted file mode 100644
index d9bf3ea6d5..0000000000
--- a/docs/tutorials/openfoam/spack-openfoam.md
+++ /dev/null
@@ -1,290 +0,0 @@
-# Cluster Toolkit - Install and Run OpenFOAM on a Slurm Cluster
-
-Cluster Toolkit is an open-source software offered by Google Cloud which makes it
-easy for customers to deploy HPC environments on Google Cloud.
-
-In this tutorial you will use the Cluster Toolkit to:
-
-* Deploy a [Slurm](https://github.com/GoogleCloudPlatform/slurm-gcp#readme) HPC cluster on
- Google Cloud
-* Use [Spack](https://spack.io/) to install the OpenFOAM application and all of
- its dependencies
-* Run a [OpenFOAM](https://www.openfoam.com/) job on your newly provisioned
- cluster
-* Tear down the cluster
-
-Estimated time to complete:
-The tutorial takes 3 hr. to complete,
-of which 2.5 hr is for installing software
-(without cache).
-
-> **_NOTE:_** With a complete Spack cache, the tutorial takes 30 min.
-
-## Select a Project
-
-Select a project in which to deploy an HPC cluster on Google.
-
-
-
-Once you have selected a project, click START.
-
-## Enable APIs & Permissions
-
-In a new Google Cloud project there are several apis that must be enabled to
-deploy your HPC cluster. These will be caught when you perform `./gcluster create`
-but you can save time by enabling them now by running:
-
-
-
-We also need to grant the default compute service account project edit access so
-the slurm controller can perform actions such as auto-scaling.
-
-
-
-```bash
-PROJECT_NUMBER=$(gcloud projects list --filter= --format='value(PROJECT_NUMBER)')
-
-echo "granting roles/editor to $PROJECT_NUMBER-compute@developer.gserviceaccount.com"
-
-gcloud iam service-accounts enable --project $PROJECT_NUMBER-compute@developer.gserviceaccount.com
-
-gcloud projects add-iam-policy-binding --member=serviceAccount:$PROJECT_NUMBER-compute@developer.gserviceaccount.com --role=roles/editor
-```
-
-## Build the Toolkit Binary
-
-To build Cluster Toolkit binary from source run:
-
-```bash
-make
-```
-
-You should now have a binary named gcluster in the current directory. To verify the
-build run:
-
-```bash
-./gcluster --version
-```
-
-This should show you the version of the Cluster Toolkit you are using.
-
-## Generate a Deployment
-
-This tutorial will use the blueprint docs/tutorials/openfoam/spack-openfoam.yaml,
-which should be open in the Cloud Shell Editor (on the left).
-
-This file describes the cluster you will deploy. It defines:
-
-* a vpc network
-* a monitoring dashboard with metrics on your cluster
-* a definition of a custom Spack installation
-* a startup script that
- * installs ansible
- * installs Spack & OpenFOAM using the definition above
- * sets up a Spack environment including downloading an example input deck
- * places a submission script on a shared drive
-* a Slurm cluster
- * a Slurm login node
- * a Slurm controller
- * An auto-scaling Slurm partition
-
-After you have inspected the file, use the gcluster binary to create a deployment
-folder by running:
-
-```bash
-./gcluster create docs/tutorials/openfoam/spack-openfoam.yaml --vars project_id=
-```
-
-> **_NOTE:_** The `--vars` argument is used to override `project_id` in the
-> deployment variables.
-
-This will create a deployment directory named `spack-openfoam/`, which
-contains the terraform needed to deploy your cluster.
-
-## Deploy the Cluster
-
-Use below command to deploy your cluster.
-
-```bash
-./gcluster deploy spack-openfoam
-```
-
-You can also use below command to generate a _plan_ that describes the Google
-Cloud resources that will be deployed.
-
-```bash
-terraform -chdir=spack-openfoam/primary init
-terraform -chdir=spack-openfoam/primary apply
-```
-
-
-
-
-```shell
-Apply complete! Resources: xx added, 0 changed, 0 destroyed.
-```
-
-## Waiting for the cluster to be configured
-
-Although the cluster has been successfully deployed, the startup scripts that
-install Spack and OpenFOAM take additional time to complete. When run without a
-Spack cache, this installation takes about 2.5 hrs (or 6 min with complete
-cache).
-
-The following command will print logging from the startup script running on the
-controller. This command can be used to view progress and check for completion
-of the startup script:
-
-```bash
-gcloud compute instances get-serial-port-output --port 1 --zone us-central1-c --project spackopenf-controller | grep google_metadata_script_runner
-```
-
-When the startup script has finished running you will see the following line as
-the final output from the above command:
-> _`spackopenf-controller google_metadata_script_runner: Finished running startup scripts.`_
-
-Optionally while you wait, you can see your deployed VMs on Google Cloud
-Console. Open the link below in a new window. Look for
-`spackopenf-controller` and `spackopenf-login-login-001`. If you don't
-see your VMs make sure you have the correct project selected (top left).
-
-```text
-https://console.cloud.google.com/compute?project=
-```
-
-## Connecting to the login node
-
-Once the startup script has completed, connect to the login node.
-
-Use the following command to ssh into the login node from cloud shell:
-
-```bash
-gcloud compute ssh spackopenf-login-login-001 --zone us-central1-c --project
-```
-
-You may be prompted to set up SSH. If so follow the prompts and if asked for a
-password, just hit `[enter]` leaving the input blank.
-
-If the above command succeeded (and you see a Slurm printout in the console)
-then **continue to the next page.**
-
-
-
-In some organizations you will not be able to SSH from cloud shell. If the above
-command fails you can SSH into the VM through the Cloud Console UI using the
-following instructions:
-
-1. Open the following URL in a new tab. This will take you to `Compute Engine` >
- `VM instances` in the Google Cloud Console:
-
-
-
- ```text
- https://console.cloud.google.com/compute?project=
- ```
-
-1. Click on the `SSH` button associated with the `spackopenf-login-login-001`
- instance.
-
- This will open a separate pop up window with a terminal into our newly
- created Slurm login VM.
-
-## Run a Job on the Cluster
-
- **The commands below should be run on the Slurm login node.**
-
-We will use the submission script (see line 122 of the blueprint) to submit a
-OpenFOAM job.
-
-1. Make a directory in which we will run the job:
-
- ```bash
- mkdir test_run && cd test_run
- ```
-
-2. Submit the job to Slurm to be scheduled:
-
- ```bash
- sbatch /opt/apps/openfoam/submit_openfoam.sh
- ```
-
-3. Once submitted, you can watch the job progress by repeatedly calling the
- following command:
-
- ```bash
- squeue
- ```
-
-The `sbatch` command trigger Slurm to auto-scale up several nodes to run the job.
-
-You can refresh the `Compute Engine` > `VM instances` page and see that
-additional VMs are being/have been created. These will be named something like
-`spackopenf-comput-0`.
-
-When running `squeue`, observe the job status start as `CF` (configuring),
-change to `R` (running) once the compute VMs have been created, and finally `CG`
-(completing) when the job has finished and the nodes are spooling down.
-
-When `squeue` no longer shows any jobs, the job has finished. The whole job
-takes about 5 minutes to run.
-
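-While the job is running, the output of `squeue` will look roughly like the
-following (an illustrative sketch only; the job id, user, and node names will
-differ on your cluster):
-
-```bash
-squeue
-# JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)
-#     1   compute  submit_o    jdoe CF       0:45      2 spackopenf-comput-[0-1]
-```
-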
-> **_NOTE:_** If the allocation fails, the message
-> `salloc: PrologSlurmctld failed, job killed` most likely indicates that your
-> project does not have sufficient quota for C2 instances in your region. \
-> **_NOTE:_** If the Slurm controller is shut down before the auto-scale nodes
-> are destroyed then they will be left running.
-
-## Review the output
-
-Several files will have been generated in the `test_run/` folder you created.
-
-The `slurm-1.out` file has information on the run such as performance. You can
-view this file by running the following command on the login node:
-
-```bash
-cat slurm-*.out
-```
-
-## View the cluster monitoring dashboard
-
-To view the monitoring dashboard containing metrics on your cluster, open the
-following URL in a new tab and click on the dashboard named
-`Cluster Toolkit Dashboard: spack-openfoam`.
-
-```text
-https://console.cloud.google.com/monitoring/dashboards?project=
-```
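-
-The dashboard can also be located from cloud shell (a minimal sketch; the name
-filter assumes the default deployment name used in this tutorial):
-
-```bash
-# Find the dashboard created by this deployment
-gcloud monitoring dashboards list --filter="displayName~spack-openfoam"
-```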
-
-## Destroy the Cluster
-
-To avoid incurring ongoing charges, we will want to destroy our cluster.
-
-For this we need to return to our cloud shell terminal. Run `exit` in the
-terminal to close the SSH connection to the login node:
-
-> **_NOTE:_** If you are accessing the login node terminal via a separate pop-up
-> then make sure to call `exit` in the pop-up window.
-
-```bash
-exit
-```
-
-Run the following command in the cloud shell terminal to destroy the cluster:
-
-```bash
-./gcluster destroy spack-openfoam
-```
-
-When complete you should see something like:
-
-```shell
-Destroy complete! Resources: xx destroyed.
-```
-
-> **_NOTE:_** If destroy is run before Slurm has shut down the auto-scale nodes,
-> they will be left behind and destroy may fail. In this case you can delete the
-> VMs manually (see the sketch below) and rerun the destroy command above.
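-
-If any auto-scaled compute nodes were left behind, they can be cleaned up from
-cloud shell before re-running the destroy command (a minimal sketch; the
-instance names shown assume the default deployment name used in this tutorial):
-
-```bash
-# List any leftover compute nodes
-gcloud compute instances list --filter="name~spackopenf-comput"
-
-# Delete a leftover node by name
-gcloud compute instances delete spackopenf-comput-0 --zone us-central1-c
-```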
-
-## Tutorial Complete
-
-
diff --git a/docs/tutorials/openfoam/spack-openfoam.yaml b/docs/tutorials/openfoam/spack-openfoam.yaml
deleted file mode 100644
index 5725b49905..0000000000
--- a/docs/tutorials/openfoam/spack-openfoam.yaml
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright 2022 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
----
-
-blueprint_name: spack-openfoam
-
-vars:
- project_id: ## Set GCP Project ID Here ##
- deployment_name: spack-openfoam
- region: us-central1
- zone: us-central1-c
-
-deployment_groups:
-- group: primary
- modules:
- - id: network1
- source: modules/network/vpc
-
- - id: hpc_dash
- source: modules/monitoring/dashboard
-
- ## Install Scripts
- - id: spack-setup
- source: community/modules/scripts/spack-setup
- settings:
- install_dir: /opt/apps/spack
- spack_ref: v0.20.0
-
- - id: spack-execute
- source: community/modules/scripts/spack-execute
- use: [spack-setup]
- settings:
- log_file: /var/log/spack.log
- data_files:
- - destination: /tmp/projections-config.yaml
- content: |
- modules:
- default:
- tcl:
- hash_length: 0
- all:
- conflict:
- - '{name}'
- projections:
- all: '{name}/{version}-{compiler.name}-{compiler.version}'
- - destination: /tmp/slurm-external-config.yaml
- content: |
- packages:
- slurm:
- externals:
- - spec: slurm@21-08-8-2
- prefix: /usr/local
- buildable: False
- - destination: /share/spack/openfoam_env.yaml
- content: |
- spack:
- definitions:
- - compilers:
- - gcc@9.3.0
- - mpis:
- - openmpi@4.1.3~atomics~cuda+cxx~cxx_exceptions~gpfs~internal-hwloc~java+legacylaunchers~lustre~memchecker+pmi+romio+rsh~singularity+static+vt+wrapper-rpath fabrics=none schedulers=slurm
- - packages:
- - flex@2.6.4
- - mpi_packages:
- - openfoam-org@7 ^flex@2.6.4
- specs:
- - matrix:
- - - $mpis
- - - $%compilers
- - matrix:
- - - $packages
- - - $%compilers
- - matrix:
- - - $mpi_packages
- - - $%compilers
- - - $^mpis
- concretizer:
- unify: when_possible
- commands: |
- # Un-comment and update mirror_url to install from spack cache
- # if ! spack mirror list | grep -q gcs_cache; then
- # spack mirror add --scope site gcs_cache gs://optionally_set_spack_cache_bucket
- # fi
- # spack buildcache keys --install --trust
-
- spack config --scope defaults add config:build_stage:/opt/apps/spack/spack-stage
- spack config --scope defaults add -f /tmp/projections-config.yaml
- spack config --scope site add -f /tmp/slurm-external-config.yaml
-
- spack install gcc@9.3.0 %gcc@8.5.0 target=x86_64
- spack load gcc@9.3.0 %gcc@8.5.0 target=x86_64
- spack compiler find --scope site
-
- if ! spack env list | grep -q openfoam; then
- spack env create openfoam /share/spack/openfoam_env.yaml
- spack env activate openfoam
- spack concretize
- spack install
- fi
-
- - id: login-setup
- source: modules/scripts/startup-script
- settings:
- runners:
- - $(spack-execute.spack_runner)
- - type: shell
- destination: setup_openfoam.sh
- content: |
- #!/bin/bash
- source /opt/apps/spack/share/spack/setup-env.sh
- spack env activate openfoam
- - type: data
- destination: /opt/apps/openfoam/submit_openfoam.sh
- content: |
- #!/bin/bash
- #SBATCH -N 2
- #SBATCH --ntasks-per-node 30
-
- source /opt/apps/spack/share/spack/setup-env.sh
- spack env activate openfoam
-
- cd $SLURM_SUBMIT_DIR
- cp -R $FOAM_TUTORIALS/incompressible/simpleFoam/motorBike/* .
- mkdir -p constant/triSurface
- mkdir -p constant/geometry
- cp $FOAM_TUTORIALS/resources/geometry/motorBikemotorBike.obj.gz constant/triSurface/.
- cp $FOAM_TUTORIALS/resources/geometry/motorBikemotorBike.obj.gz constant/geometry/.
-
- sed "/^numberOfSubdomains/ c\\numberOfSubdomains 60;" -i system/decomposeParDict*
- sed "/^method/c\\method scotch;" -i system/decomposeParDict*
- ln -s 0 0.orig
-
- surfaceFeatures
- blockMesh
- decomposePar -copyZero
-
- scontrol show hostnames ${SLURM_JOB_NODELIST} > hostfile
- mpirun -n 60 -npernode 30 -hostfile hostfile snappyHexMesh -overwrite -parallel
- mpirun -n 60 -npernode 30 -hostfile hostfile potentialFoam -parallel
- mpirun -n 60 -npernode 30 -hostfile hostfile simpleFoam -parallel
-
- - id: compute_nodeset
- source: community/modules/compute/schedmd-slurm-gcp-v6-nodeset
- use: [network1]
- settings:
- node_count_dynamic_max: 2
- bandwidth_tier: gvnic_enabled
-
- - id: compute_partition
- source: community/modules/compute/schedmd-slurm-gcp-v6-partition
- use: [compute_nodeset]
- settings:
- partition_name: compute
- is_default: true
-
- - id: slurm_login
- source: community/modules/scheduler/schedmd-slurm-gcp-v6-login
- use: [network1]
- settings:
- machine_type: n2-standard-4
- enable_login_public_ips: true
-
- - id: slurm_controller
- source: community/modules/scheduler/schedmd-slurm-gcp-v6-controller
- use:
- - network1
- - compute_partition
- - slurm_login
- settings:
- login_startup_script: $(login-setup.startup_script)
- login_startup_scripts_timeout: 21600
- enable_controller_public_ips: true
diff --git a/examples/README.md b/examples/README.md
index 53a84d1a08..29db27df94 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -28,7 +28,7 @@ md_toc github examples/README.md | sed -e "s/\s-\s/ * /"
* [ml-slurm-v5-legacy.yaml](#ml-slurm-v5-legacyyaml--) ![core-badge] ![deprecated-badge]
* [ml-slurm.yaml](#ml-slurmyaml-) ![core-badge]
* [image-builder-v5-legacy.yaml](#image-builder-v5-legacyyaml--) ![core-badge] ![deprecated-badge]
- * [image-builder.yaml](#image-builderyaml--) ![core-badge]
+ * [image-builder.yaml](#image-builderyaml-) ![core-badge]
* [serverless-batch.yaml](#serverless-batchyaml-) ![core-badge]
* [serverless-batch-mpi.yaml](#serverless-batch-mpiyaml-) ![core-badge]
* [pfs-lustre.yaml](#pfs-lustreyaml-) ![core-badge]
@@ -1518,6 +1518,30 @@ cleaned up when the job is deleted.
[storage-gke.yaml]: ../examples/storage-gke.yaml
+### [gke-managed-parallelstore.yaml] ![core-badge] ![experimental-badge]
+
+This blueprint shows how to use managed Parallelstore storage options with GKE in the toolkit.
+
+The blueprint contains the following:
+
+* A K8s Job that uses a managed Parallelstore storage volume.
+* A K8s Job that demonstrates an ML training workload using managed Parallelstore storage.
+
+> **Warning**: In this example blueprint, when the storage type `Parallelstore` is specified in the `gke-storage` module,
+> the lifecycle of the Parallelstore instance is managed by the blueprint.
+> On a `gcluster destroy` operation, the Parallelstore storage created will also be destroyed.
+>
+> [!Note]
+> The Kubernetes API server will only allow requests from authorized networks.
+> The `gke-cluster` module needs access to the Kubernetes API server
+> to create a Persistent Volume and a Persistent Volume Claim. **You must use
+> the `authorized_cidr` variable to supply an authorized network which contains
+> the IP address of the machine deploying the blueprint, for example
+> `--vars authorized_cidr=/32`.** You can use a service like
+> [whatismyip.com](https://whatismyip.com) to determine your IP address.
+
+[gke-managed-parallelstore.yaml]: ../examples/gke-managed-parallelstore.yaml
+
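+As a sketch of one possible invocation (the CIDR below is a placeholder that you
+must replace with your own public IP address):
+
+```bash
+# Create the deployment folder, supplying the required variables
+./gcluster create examples/gke-managed-parallelstore.yaml \
+    --vars project_id=$(gcloud config get-value project) \
+    --vars authorized_cidr=203.0.113.10/32
+
+# Deploy it (the folder name comes from deployment_name in the blueprint)
+./gcluster deploy gke-storage-managed-ps
+```
+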
### [gke-a3-megagpu.yaml] ![core-badge] ![experimental-badge]
This blueprint shows how to provision a GKE cluster with A3 Mega machines in the toolkit.
diff --git a/examples/gke-a3-highgpu.yaml b/examples/gke-a3-highgpu.yaml
index 25d0d992e2..a5df211900 100644
--- a/examples/gke-a3-highgpu.yaml
+++ b/examples/gke-a3-highgpu.yaml
@@ -21,11 +21,9 @@ vars:
deployment_name: gke-a3-highgpu
region: us-central1
zone: us-central1-c
-
# Cidr block containing the IP of the machine calling terraform.
# The following line must be updated for this example to work.
authorized_cidr: /32
-
gcp_public_cidrs_access_enabled: false
deployment_groups:
@@ -34,10 +32,11 @@ deployment_groups:
- id: network1
source: modules/network/vpc
settings:
- subnetwork_name: gke-subnet-a3-highgpu
+ subnetwork_name: $(vars.deployment_name)-subnet
mtu: 8244
- secondary_ranges:
- gke-subnet-a3-highgpu:
+ secondary_ranges_list:
+ - subnetwork_name: $(vars.deployment_name)-subnet
+ ranges:
- range_name: pods
ip_cidr_range: 10.4.0.0/14
- range_name: services
diff --git a/examples/gke-a3-megagpu.yaml b/examples/gke-a3-megagpu.yaml
index 96c0671293..e4ebbe0de5 100644
--- a/examples/gke-a3-megagpu.yaml
+++ b/examples/gke-a3-megagpu.yaml
@@ -21,11 +21,9 @@ vars:
deployment_name: gke-a3-mega
region: us-central1
zone: us-central1-c
-
# Cidr block containing the IP of the machine calling terraform.
# The following line must be updated for this example to work.
authorized_cidr: /32
-
gcp_public_cidrs_access_enabled: false
deployment_groups:
@@ -34,10 +32,11 @@ deployment_groups:
- id: network1
source: modules/network/vpc
settings:
- subnetwork_name: gke-subnet-a3-mega
+ subnetwork_name: $(vars.deployment_name)-subnet
mtu: 8244
- secondary_ranges:
- gke-subnet-a3-mega:
+ secondary_ranges_list:
+ - subnetwork_name: $(vars.deployment_name)-subnet
+ ranges:
- range_name: pods
ip_cidr_range: 10.4.0.0/14
- range_name: services
diff --git a/examples/gke-storage-parallelstore.yaml b/examples/gke-managed-parallelstore.yaml
similarity index 95%
rename from examples/gke-storage-parallelstore.yaml
rename to examples/gke-managed-parallelstore.yaml
index 05b9766381..4425f13181 100644
--- a/examples/gke-storage-parallelstore.yaml
+++ b/examples/gke-managed-parallelstore.yaml
@@ -12,17 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
-blueprint_name: gke-storage-parallelstore
+blueprint_name: gke-managed-parallelstore
vars:
project_id: ## Set GCP Project ID Here ##
- deployment_name: gke-storage-parallelstore
+ deployment_name: gke-storage-managed-ps
region: us-central1
zone: us-central1-c
-
# Cidr block containing the IP of the machine calling terraform.
# The following line must be updated for this example to work.
authorized_cidr: /32
-
gcp_public_cidrs_access_enabled: false
deployment_groups:
@@ -31,9 +29,10 @@ deployment_groups:
- id: network
source: modules/network/vpc
settings:
- subnetwork_name: gke-subnet-parallelstore
- secondary_ranges:
- gke-subnet-parallelstore:
+ subnetwork_name: $(vars.deployment_name)-subnet
+ secondary_ranges_list:
+ - subnetwork_name: $(vars.deployment_name)-subnet
+ ranges:
- range_name: pods
ip_cidr_range: 10.4.0.0/14
- range_name: services
diff --git a/examples/hpc-gke.yaml b/examples/hpc-gke.yaml
index 60f4f57ce8..160f842bb2 100644
--- a/examples/hpc-gke.yaml
+++ b/examples/hpc-gke.yaml
@@ -28,9 +28,10 @@ deployment_groups:
- id: network1
source: modules/network/vpc
settings:
- subnetwork_name: gke-subnet
- secondary_ranges:
- gke-subnet:
+ subnetwork_name: $(vars.deployment_name)-subnet
+ secondary_ranges_list:
+ - subnetwork_name: $(vars.deployment_name)-subnet
+ ranges:
- range_name: pods
ip_cidr_range: 10.4.0.0/14
- range_name: services
@@ -39,7 +40,7 @@ deployment_groups:
- id: gke_service_account
source: community/modules/project/service-account
settings:
- name: gke-service-account
+ name: gke-sa
project_roles:
- logging.logWriter
- monitoring.metricWriter
diff --git a/examples/ml-gke.yaml b/examples/ml-gke.yaml
index 053a5dcedc..9ae10780bd 100644
--- a/examples/ml-gke.yaml
+++ b/examples/ml-gke.yaml
@@ -21,11 +21,9 @@ vars:
region: asia-southeast1
zones:
- asia-southeast1-b # g2 machine has better availability in this zone
-
# Cidr block containing the IP of the machine calling terraform.
# The following line must be updated for this example to work.
authorized_cidr: /32
-
gcp_public_cidrs_access_enabled: false
deployment_groups:
@@ -34,9 +32,10 @@ deployment_groups:
- id: network1
source: modules/network/vpc
settings:
- subnetwork_name: gke-subnet
- secondary_ranges:
- gke-subnet:
+ subnetwork_name: $(vars.deployment_name)-subnet
+ secondary_ranges_list:
+ - subnetwork_name: $(vars.deployment_name)-subnet
+ ranges:
- range_name: pods
ip_cidr_range: 10.4.0.0/14
- range_name: services
diff --git a/examples/storage-gke.yaml b/examples/storage-gke.yaml
index 108392cd18..faa587b046 100644
--- a/examples/storage-gke.yaml
+++ b/examples/storage-gke.yaml
@@ -19,11 +19,9 @@ vars:
deployment_name: storage-gke-01
region: us-central1
zone: us-central1-c
-
# Cidr block containing the IP of the machine calling terraform.
# The following line must be updated for this example to work.
authorized_cidr: /32
-
gcp_public_cidrs_access_enabled: false
deployment_groups:
@@ -32,9 +30,10 @@ deployment_groups:
- id: network1
source: modules/network/vpc
settings:
- subnetwork_name: gke-subnet
- secondary_ranges:
- gke-subnet:
+ subnetwork_name: $(vars.deployment_name)-subnet
+ secondary_ranges_list:
+ - subnetwork_name: $(vars.deployment_name)-subnet
+ ranges:
- range_name: pods
ip_cidr_range: 10.4.0.0/14
- range_name: services
diff --git a/go.mod b/go.mod
index 012b792751..61a5f523e6 100644
--- a/go.mod
+++ b/go.mod
@@ -7,13 +7,13 @@ require (
github.com/go-git/go-git/v5 v5.12.0
github.com/hashicorp/go-getter v1.7.6
github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/hashicorp/hcl/v2 v2.22.0
+ github.com/hashicorp/hcl/v2 v2.23.0
github.com/hashicorp/terraform-config-inspect v0.0.0-20230925220900-5a6f8d18746d
github.com/otiai10/copy v1.14.0
github.com/pkg/errors v0.9.1
github.com/spf13/afero v1.11.0
github.com/spf13/cobra v1.8.1
- github.com/zclconf/go-cty v1.15.0
+ github.com/zclconf/go-cty v1.15.1
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
@@ -98,7 +98,7 @@ require (
golang.org/x/crypto v0.25.0 // indirect
golang.org/x/net v0.27.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect
- golang.org/x/sys v0.26.0
+ golang.org/x/sys v0.27.0
golang.org/x/text v0.16.0 // indirect
google.golang.org/grpc v1.64.1 // indirect
google.golang.org/protobuf v1.34.2 // indirect
diff --git a/go.sum b/go.sum
index 3d4849db05..1e4a67b6ba 100644
--- a/go.sum
+++ b/go.sum
@@ -391,8 +391,8 @@ github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9
github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/hcl/v2 v2.22.0 h1:hkZ3nCtqeJsDhPRFz5EA9iwcG1hNWGePOTw6oyul12M=
-github.com/hashicorp/hcl/v2 v2.22.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA=
+github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos=
+github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA=
github.com/hashicorp/terraform-config-inspect v0.0.0-20230925220900-5a6f8d18746d h1:g6kHlvZrFPFKeWRj5q/zyJA5gu7rlJGPf17h8hX7LHY=
github.com/hashicorp/terraform-config-inspect v0.0.0-20230925220900-5a6f8d18746d/go.mod h1:l8HcFPm9cQh6Q0KSWoYPiePqMvRFenybP1CH2MjKdlg=
github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ=
@@ -496,8 +496,8 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ=
-github.com/zclconf/go-cty v1.15.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
+github.com/zclconf/go-cty v1.15.1 h1:RgQYm4j2EvoBRXOPxhUvxPzRrGDo1eCOhHXuGfrj5S0=
+github.com/zclconf/go-cty v1.15.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo=
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -732,8 +732,8 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
diff --git a/modules/compute/gke-job-template/README.md b/modules/compute/gke-job-template/README.md
index f2a50de63b..52f78260c8 100644
--- a/modules/compute/gke-job-template/README.md
+++ b/modules/compute/gke-job-template/README.md
@@ -100,6 +100,7 @@ No modules.
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| [allocatable\_cpu\_per\_node](#input\_allocatable\_cpu\_per\_node) | The allocatable cpu per node. Used to claim whole nodes. Generally populated from gke-node-pool via `use` field. | `list(number)` | [
-1
]
| no |
+| [allocatable\_gpu\_per\_node](#input\_allocatable\_gpu\_per\_node) | The allocatable gpu per node. Used to claim whole nodes. Generally populated from gke-node-pool via `use` field. | `list(number)` | [
-1
]
| no |
| [backoff\_limit](#input\_backoff\_limit) | Controls the number of retries before considering a Job as failed. Set to zero for shared fate. | `number` | `0` | no |
| [command](#input\_command) | The command and arguments for the container that run in the Pod. The command field corresponds to entrypoint in some container runtimes. | `list(string)` | [
"hostname"
]
| no |
| [completion\_mode](#input\_completion\_mode) | Sets value of `completionMode` on the job. Default uses indexed jobs. See [documentation](https://kubernetes.io/blog/2021/04/19/introducing-indexed-jobs/) for more information | `string` | `"Indexed"` | no |
@@ -116,6 +117,7 @@ No modules.
| [persistent\_volume\_claims](#input\_persistent\_volume\_claims) | A list of objects that describes a k8s PVC that is to be used and mounted on the job. Generally supplied by the gke-persistent-volume module. | list(object({
name = string
mount_path = string
mount_options = string
is_gcs = bool
}))
| `[]` | no |
| [random\_name\_sufix](#input\_random\_name\_sufix) | Appends a random suffix to the job name to avoid clashes. | `bool` | `true` | no |
| [requested\_cpu\_per\_pod](#input\_requested\_cpu\_per\_pod) | The requested cpu per pod. If null, allocatable\_cpu\_per\_node will be used to claim whole nodes. If provided will override allocatable\_cpu\_per\_node. | `number` | `-1` | no |
+| [requested\_gpu\_per\_pod](#input\_requested\_gpu\_per\_pod) | The requested gpu per pod. If null, allocatable\_gpu\_per\_node will be used to claim whole nodes. If provided will override allocatable\_gpu\_per\_node. | `number` | `-1` | no |
| [restart\_policy](#input\_restart\_policy) | Job restart policy. Only a RestartPolicy equal to `Never` or `OnFailure` is allowed. | `string` | `"Never"` | no |
| [security\_context](#input\_security\_context) | The security options the container should be run with. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | list(object({
key = string
value = string
}))
| `[]` | no |
| [tolerations](#input\_tolerations) | Tolerations allow the scheduler to schedule pods with matching taints. Generally populated from gke-node-pool via `use` field. | list(object({
key = string
operator = string
value = string
effect = string
}))
| [
{
"effect": "NoSchedule",
"key": "user-workload",
"operator": "Equal",
"value": "true"
}
]
| no |
diff --git a/modules/compute/gke-job-template/main.tf b/modules/compute/gke-job-template/main.tf
index 2e21c7c394..31143b5e65 100644
--- a/modules/compute/gke-job-template/main.tf
+++ b/modules/compute/gke-job-template/main.tf
@@ -61,9 +61,11 @@ locals {
value = "true"
}] : []
- # arbitrarily, user can edit in template.
- # May come from node pool in future.
- gpu_limit_string = alltrue(var.has_gpu) ? "1" : null
+ # Setup limit for GPUs per pod
+ min_allocatable_gpu = min(var.allocatable_gpu_per_node...)
+ min_allocatable_gpu_per_pod = local.min_allocatable_gpu > 0 ? local.min_allocatable_gpu : null
+ gpu_limit_per_pod = var.requested_gpu_per_pod > 0 ? var.requested_gpu_per_pod : local.min_allocatable_gpu_per_pod
+ gpu_limit_string = alltrue(var.has_gpu) ? tostring(local.gpu_limit_per_pod) : null
empty_dir_volumes = [for ed in var.ephemeral_volumes :
{
diff --git a/modules/compute/gke-job-template/variables.tf b/modules/compute/gke-job-template/variables.tf
index 6a37c344c1..2b403fac3c 100644
--- a/modules/compute/gke-job-template/variables.tf
+++ b/modules/compute/gke-job-template/variables.tf
@@ -74,6 +74,18 @@ variable "requested_cpu_per_pod" {
default = -1
}
+variable "allocatable_gpu_per_node" {
+ description = "The allocatable gpu per node. Used to claim whole nodes. Generally populated from gke-node-pool via `use` field."
+ type = list(number)
+ default = [-1]
+}
+
+variable "requested_gpu_per_pod" {
+ description = "The requested gpu per pod. If null, allocatable_gpu_per_node will be used to claim whole nodes. If provided will override allocatable_gpu_per_node."
+ type = number
+ default = -1
+}
+
variable "tolerations" {
description = "Tolerations allow the scheduler to schedule pods with matching taints. Generally populated from gke-node-pool via `use` field."
type = list(object({
diff --git a/modules/compute/gke-node-pool/README.md b/modules/compute/gke-node-pool/README.md
index 72c9beb527..d2715ff652 100644
--- a/modules/compute/gke-node-pool/README.md
+++ b/modules/compute/gke-node-pool/README.md
@@ -338,6 +338,7 @@ limitations under the License.
| [placement\_policy](#input\_placement\_policy) | Group placement policy to use for the node pool's nodes. `COMPACT` is the only supported value for `type` currently. `name` is the name of the placement policy.
It is assumed that the specified policy exists. To create a placement policy refer to https://cloud.google.com/sdk/gcloud/reference/compute/resource-policies/create/group-placement.
Note: Placement policies have the [following](https://cloud.google.com/compute/docs/instances/placement-policies-overview#restrictions-compact-policies) restrictions. | object({
type = string
name = optional(string)
})
| {
"name": null,
"type": null
}
| no |
| [project\_id](#input\_project\_id) | The project ID to host the cluster in. | `string` | n/a | yes |
| [reservation\_affinity](#input\_reservation\_affinity) | Reservation resource to consume. When targeting SPECIFIC\_RESERVATION, specific\_reservations needs be specified.
Even though specific\_reservations is a list, only one reservation is allowed by the NodePool API.
It is assumed that the specified reservation exists and has available capacity.
For a shared reservation, specify the project\_id as well in which it was created.
To create a reservation refer to https://cloud.google.com/compute/docs/instances/reservations-single-project and https://cloud.google.com/compute/docs/instances/reservations-shared | object({
consume_reservation_type = string
specific_reservations = optional(list(object({
name = string
project = optional(string)
})))
})
| {
"consume_reservation_type": "NO_RESERVATION",
"specific_reservations": []
}
| no |
+| [run\_workload\_script](#input\_run\_workload\_script) | Whether to execute the script that creates a sample workload and injects the rxdm sidecar into the workload. Currently implemented for A3-Highgpu and A3-Megagpu only. | `bool` | `true` | no |
| [service\_account](#input\_service\_account) | DEPRECATED: use service\_account\_email and scopes. | object({
email = string,
scopes = set(string)
})
| `null` | no |
| [service\_account\_email](#input\_service\_account\_email) | Service account e-mail address to use with the node pool | `string` | `null` | no |
| [service\_account\_scopes](#input\_service\_account\_scopes) | Scopes to to use with the node pool. | `set(string)` | [
"https://www.googleapis.com/auth/cloud-platform"
]
| no |
@@ -349,6 +350,7 @@ limitations under the License.
| [timeout\_update](#input\_timeout\_update) | Timeout for updating a node pool | `string` | `null` | no |
| [total\_max\_nodes](#input\_total\_max\_nodes) | DEPRECATED: Use autoscaling\_total\_max\_nodes. | `number` | `null` | no |
| [total\_min\_nodes](#input\_total\_min\_nodes) | DEPRECATED: Use autoscaling\_total\_min\_nodes. | `number` | `null` | no |
+| [upgrade\_settings](#input\_upgrade\_settings) | Defines node pool upgrade settings. It is highly recommended that you define both max\_surge and max\_unavailable.
If max\_surge is not specified, it defaults to 0.
If max\_unavailable is not specified, it defaults to 1. | object({
strategy = string
max_surge = optional(number)
max_unavailable = optional(number)
})
| {
"max_surge": 0,
"max_unavailable": 1,
"strategy": "SURGE"
}
| no |
| [zones](#input\_zones) | A list of zones to be used. Zones must be in region of cluster. If null, cluster zones will be inherited. Note `zones` not `zone`; does not work with `zone` deployment variable. | `list(string)` | `null` | no |
## Outputs
@@ -356,6 +358,7 @@ limitations under the License.
| Name | Description |
|------|-------------|
| [allocatable\_cpu\_per\_node](#output\_allocatable\_cpu\_per\_node) | Number of CPUs available for scheduling pods on each node. |
+| [allocatable\_gpu\_per\_node](#output\_allocatable\_gpu\_per\_node) | Number of GPUs available for scheduling pods on each node. |
| [has\_gpu](#output\_has\_gpu) | Boolean value indicating whether nodes in the pool are configured with GPUs. |
| [instructions](#output\_instructions) | Instructions for submitting the sample GPUDirect enabled job. |
| [node\_pool\_name](#output\_node\_pool\_name) | Name of the node pool. |
diff --git a/modules/compute/gke-node-pool/disk_definitions.tf b/modules/compute/gke-node-pool/disk_definitions.tf
index 3afefa9354..3d250ef768 100644
--- a/modules/compute/gke-node-pool/disk_definitions.tf
+++ b/modules/compute/gke-node-pool/disk_definitions.tf
@@ -22,9 +22,9 @@
locals {
local_ssd_machines = {
- "a3-highgpu-8g" = { local_ssd_count_ephemeral_storage = null, local_ssd_count_nvme_block = 16 },
- "a3-megagpu-8g" = { local_ssd_count_ephemeral_storage = null, local_ssd_count_nvme_block = 16 },
- "a3-ultragpu-8g" = { local_ssd_count_ephemeral_storage = null, local_ssd_count_nvme_block = 32 },
+ "a3-highgpu-8g" = { local_ssd_count_ephemeral_storage = 16, local_ssd_count_nvme_block = null },
+ "a3-megagpu-8g" = { local_ssd_count_ephemeral_storage = 16, local_ssd_count_nvme_block = null },
+ "a3-ultragpu-8g" = { local_ssd_count_ephemeral_storage = 32, local_ssd_count_nvme_block = null },
}
generated_local_ssd_config = lookup(local.local_ssd_machines, var.machine_type, { local_ssd_count_ephemeral_storage = null, local_ssd_count_nvme_block = null })
diff --git a/modules/compute/gke-node-pool/gpu_direct.tf b/modules/compute/gke-node-pool/gpu_direct.tf
index 00dd298971..9403ea34fc 100644
--- a/modules/compute/gke-node-pool/gpu_direct.tf
+++ b/modules/compute/gke-node-pool/gpu_direct.tf
@@ -53,6 +53,7 @@ locals {
"1.28" = "1.28.9-gke.1250000"
"1.29" = "1.29.4-gke.1542000"
"1.30" = "1.30.4-gke.1129000"
+ "1.31" = "1.31.1-gke.2008000"
}
}
}
diff --git a/modules/compute/gke-node-pool/main.tf b/modules/compute/gke-node-pool/main.tf
index e971af24dc..f1999cbd0b 100644
--- a/modules/compute/gke-node-pool/main.tf
+++ b/modules/compute/gke-node-pool/main.tf
@@ -20,7 +20,16 @@ locals {
}
locals {
- has_gpu = length(local.guest_accelerator) > 0
+ upgrade_settings = {
+ strategy = var.upgrade_settings.strategy
+ max_surge = coalesce(var.upgrade_settings.max_surge, 0)
+ max_unavailable = coalesce(var.upgrade_settings.max_unavailable, 1)
+ }
+}
+
+locals {
+ has_gpu = length(local.guest_accelerator) > 0
+ allocatable_gpu_per_node = local.has_gpu ? max(local.guest_accelerator[*].count...) : -1
gpu_taint = local.has_gpu ? [{
key = "nvidia.com/gpu"
value = "present"
@@ -74,9 +83,9 @@ resource "google_container_node_pool" "node_pool" {
}
upgrade_settings {
- strategy = "SURGE"
- max_surge = 0
- max_unavailable = 1
+ strategy = local.upgrade_settings.strategy
+ max_surge = local.upgrade_settings.max_surge
+ max_unavailable = local.upgrade_settings.max_unavailable
}
dynamic "placement_policy" {
@@ -282,12 +291,33 @@ resource "google_container_node_pool" "node_pool" {
)
error_message = "Shared extended reservations are not supported by GKE."
}
+ precondition {
+ condition = contains(["SURGE"], local.upgrade_settings.strategy)
+ error_message = "Only SURGE strategy is supported"
+ }
+ precondition {
+ condition = local.upgrade_settings.max_unavailable >= 0
+ error_message = "max_unavailable should be set to 0 or greater"
+ }
+ precondition {
+ condition = local.upgrade_settings.max_surge >= 0
+ error_message = "max_surge should be set to 0 or greater"
+ }
+ precondition {
+ condition = local.upgrade_settings.max_unavailable > 0 || local.upgrade_settings.max_surge > 0
+ error_message = "At least one of max_unavailable or max_surge must greater than 0"
+ }
}
}
+locals {
+ supported_machine_types_for_install_dependencies = ["a3-highgpu-8g", "a3-megagpu-8g"]
+}
+
resource "null_resource" "install_dependencies" {
+ count = var.run_workload_script && contains(local.supported_machine_types_for_install_dependencies, var.machine_type) ? 1 : 0
provisioner "local-exec" {
- command = "pip3 install pyyaml argparse"
+ command = "pip3 install pyyaml"
}
}
@@ -297,7 +327,7 @@ locals {
# execute script to inject rxdm sidecar into workload to enable tcpx for a3-highgpu-8g VM workload
resource "null_resource" "enable_tcpx_in_workload" {
- count = var.machine_type == "a3-highgpu-8g" ? 1 : 0
+ count = var.run_workload_script && var.machine_type == "a3-highgpu-8g" ? 1 : 0
triggers = {
always_run = timestamp()
}
@@ -310,7 +340,7 @@ resource "null_resource" "enable_tcpx_in_workload" {
# execute script to inject rxdm sidecar into workload to enable tcpxo for a3-megagpu-8g VM workload
resource "null_resource" "enable_tcpxo_in_workload" {
- count = var.machine_type == "a3-megagpu-8g" ? 1 : 0
+ count = var.run_workload_script && var.machine_type == "a3-megagpu-8g" ? 1 : 0
triggers = {
always_run = timestamp()
}
diff --git a/modules/compute/gke-node-pool/outputs.tf b/modules/compute/gke-node-pool/outputs.tf
index 6d309502b1..2bcf947983 100644
--- a/modules/compute/gke-node-pool/outputs.tf
+++ b/modules/compute/gke-node-pool/outputs.tf
@@ -45,6 +45,11 @@ output "has_gpu" {
value = local.has_gpu
}
+output "allocatable_gpu_per_node" {
+ description = "Number of GPUs available for scheduling pods on each node."
+ value = local.allocatable_gpu_per_node
+}
+
locals {
translate_toleration = {
PREFER_NO_SCHEDULE = "PreferNoSchedule"
diff --git a/modules/compute/gke-node-pool/variables.tf b/modules/compute/gke-node-pool/variables.tf
index 5cd9624da8..d3b403b564 100644
--- a/modules/compute/gke-node-pool/variables.tf
+++ b/modules/compute/gke-node-pool/variables.tf
@@ -391,3 +391,27 @@ variable "max_pods_per_node" {
type = number
default = null
}
+
+variable "upgrade_settings" {
+ description = <<-EOT
+ Defines node pool upgrade settings. It is highly recommended that you define both max_surge and max_unavailable.
+ If max_surge is not specified, it defaults to 0.
+ If max_unavailable is not specified, it defaults to 1.
+ EOT
+ type = object({
+ strategy = string
+ max_surge = optional(number)
+ max_unavailable = optional(number)
+ })
+ default = {
+ strategy = "SURGE"
+ max_surge = 0
+ max_unavailable = 1
+ }
+}
+
+variable "run_workload_script" {
+ description = "Whether execute the script to create a sample workload and inject rxdm sidecar into workload. Currently, implemented for A3-Highgpu and A3-Megagpu only."
+ type = bool
+ default = true
+}
diff --git a/modules/compute/gke-node-pool/versions.tf b/modules/compute/gke-node-pool/versions.tf
index 7b49320ede..fcbc8e2f94 100644
--- a/modules/compute/gke-node-pool/versions.tf
+++ b/modules/compute/gke-node-pool/versions.tf
@@ -30,6 +30,6 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:gke-node-pool/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:gke-node-pool/v1.44.0"
}
}
diff --git a/modules/compute/vm-instance/README.md b/modules/compute/vm-instance/README.md
index 149f472d68..8fe80e1cdc 100644
--- a/modules/compute/vm-instance/README.md
+++ b/modules/compute/vm-instance/README.md
@@ -170,7 +170,7 @@ limitations under the License.
|------|---------|
| [terraform](#requirement\_terraform) | >= 1.3.0 |
| [google](#requirement\_google) | >= 4.73.0 |
-| [google-beta](#requirement\_google-beta) | >= 4.73.0 |
+| [google-beta](#requirement\_google-beta) | >= 6.13.0 |
| [null](#requirement\_null) | >= 3.0 |
## Providers
@@ -178,14 +178,14 @@ limitations under the License.
| Name | Version |
|------|---------|
| [google](#provider\_google) | >= 4.73.0 |
-| [google-beta](#provider\_google-beta) | >= 4.73.0 |
+| [google-beta](#provider\_google-beta) | >= 6.13.0 |
| [null](#provider\_null) | >= 3.0 |
## Modules
| Name | Source | Version |
|------|--------|---------|
-| [netstorage\_startup\_script](#module\_netstorage\_startup\_script) | github.com/GoogleCloudPlatform/hpc-toolkit//modules/scripts/startup-script | v1.39.0 |
+| [netstorage\_startup\_script](#module\_netstorage\_startup\_script) | ../../scripts/startup-script | n/a |
## Resources
@@ -231,6 +231,7 @@ limitations under the License.
| [placement\_policy](#input\_placement\_policy) | Control where your VM instances are physically located relative to each other within a zone.
See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_resource_policy#nested_group_placement_policy | `any` | `null` | no |
| [project\_id](#input\_project\_id) | Project in which the HPC deployment will be created | `string` | n/a | yes |
| [region](#input\_region) | The region to deploy to | `string` | n/a | yes |
+| [reservation\_name](#input\_reservation\_name) | Name of the reservation to use for VM resources; it should be in one of the following formats:
- projects/PROJECT\_ID/reservations/RESERVATION\_NAME
- RESERVATION\_NAME
Must be a "SPECIFIC\_RESERVATION"
Set to empty string if using no reservation or automatically-consumed reservations | `string` | `""` | no |
| [service\_account](#input\_service\_account) | DEPRECATED - Use `service_account_email` and `service_account_scopes` instead. | object({
email = string,
scopes = set(string)
})
| `null` | no |
| [service\_account\_email](#input\_service\_account\_email) | Service account e-mail address to use with the node pool | `string` | `null` | no |
| [service\_account\_scopes](#input\_service\_account\_scopes) | Scopes to to use with the node pool. | `set(string)` | [
"https://www.googleapis.com/auth/cloud-platform"
]
| no |
diff --git a/modules/compute/vm-instance/main.tf b/modules/compute/vm-instance/main.tf
index c639f075d6..dcb43fe91a 100644
--- a/modules/compute/vm-instance/main.tf
+++ b/modules/compute/vm-instance/main.tf
@@ -261,6 +261,17 @@ resource "google_compute_instance" "compute_vm" {
}
}
+ dynamic "reservation_affinity" {
+ for_each = var.reservation_name == "" ? [] : [1]
+ content {
+ type = "SPECIFIC_RESERVATION"
+ specific_reservation {
+ key = "compute.googleapis.com/reservation-name"
+ values = [var.reservation_name]
+ }
+ }
+ }
+
metadata = merge(
local.network_storage,
local.startup_script,
diff --git a/modules/compute/vm-instance/startup_from_network_storage.tf b/modules/compute/vm-instance/startup_from_network_storage.tf
index 070b0b8c33..02bc58e4f7 100644
--- a/modules/compute/vm-instance/startup_from_network_storage.tf
+++ b/modules/compute/vm-instance/startup_from_network_storage.tf
@@ -55,7 +55,7 @@ locals {
}
module "netstorage_startup_script" {
- source = "github.com/GoogleCloudPlatform/hpc-toolkit//modules/scripts/startup-script?ref=v1.39.0"
+ source = "../../scripts/startup-script"
labels = local.labels
project_id = var.project_id
diff --git a/modules/compute/vm-instance/variables.tf b/modules/compute/vm-instance/variables.tf
index a874ddf825..1c32348587 100644
--- a/modules/compute/vm-instance/variables.tf
+++ b/modules/compute/vm-instance/variables.tf
@@ -223,9 +223,9 @@ variable "network_interfaces" {
}
validation {
condition = alltrue([
- for ni in var.network_interfaces : ni.nic_type == "GVNIC" || ni.nic_type == "VIRTIO_NET" || ni.nic_type == null
+ for ni in var.network_interfaces : ni.nic_type == "GVNIC" || ni.nic_type == "VIRTIO_NET" || ni.nic_type == "MRDMA" || ni.nic_type == "IRDMA" || ni.nic_type == null
])
- error_message = "In the variable network_interfaces, field \"nic_type\" must be either \"GVNIC\", \"VIRTIO_NET\" or null."
+ error_message = "In the variable network_interfaces, field \"nic_type\" must be \"GVNIC\", \"VIRTIO_NET\", \"MRDMA\", \"IRDMA\", or null."
}
validation {
condition = alltrue([
@@ -409,3 +409,22 @@ variable "allow_automatic_updates" {
default = true
nullable = false
}
+
+variable "reservation_name" {
+ description = <<-EOD
+ Name of the reservation to use for VM resources; it should be in one of the following formats:
+ - projects/PROJECT_ID/reservations/RESERVATION_NAME
+ - RESERVATION_NAME
+
+ Must be a "SPECIFIC_RESERVATION"
+ Set to empty string if using no reservation or automatically-consumed reservations
+ EOD
+ type = string
+ default = ""
+ nullable = false
+
+ validation {
+ condition = length(regexall("^((projects/([a-z0-9-]+)/reservations/)?([a-z0-9-]+))?$", var.reservation_name)) > 0
+ error_message = "Reservation name must be either empty or in the format '[projects/PROJECT_ID/reservations/]RESERVATION_NAME', [...] is an optional part."
+ }
+}
diff --git a/modules/compute/vm-instance/versions.tf b/modules/compute/vm-instance/versions.tf
index 74d863de06..e2d709d999 100644
--- a/modules/compute/vm-instance/versions.tf
+++ b/modules/compute/vm-instance/versions.tf
@@ -23,7 +23,7 @@ terraform {
google-beta = {
source = "hashicorp/google-beta"
- version = ">= 4.73.0"
+ version = ">= 6.13.0"
}
null = {
source = "hashicorp/null"
@@ -31,10 +31,10 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:vm-instance/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:vm-instance/v1.44.0"
}
provider_meta "google-beta" {
- module_name = "blueprints/terraform/hpc-toolkit:vm-instance/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:vm-instance/v1.44.0"
}
required_version = ">= 1.3.0"
diff --git a/modules/file-system/filestore/versions.tf b/modules/file-system/filestore/versions.tf
index b747c21184..6a422772a3 100644
--- a/modules/file-system/filestore/versions.tf
+++ b/modules/file-system/filestore/versions.tf
@@ -26,10 +26,10 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:filestore/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:filestore/v1.44.0"
}
provider_meta "google-beta" {
- module_name = "blueprints/terraform/hpc-toolkit:filestore/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:filestore/v1.44.0"
}
required_version = ">= 0.14.0"
diff --git a/modules/file-system/gke-persistent-volume/versions.tf b/modules/file-system/gke-persistent-volume/versions.tf
index 9aa0deab4c..b9e2c1f067 100644
--- a/modules/file-system/gke-persistent-volume/versions.tf
+++ b/modules/file-system/gke-persistent-volume/versions.tf
@@ -29,6 +29,6 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:gke-persistent-volume/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:gke-persistent-volume/v1.44.0"
}
}
diff --git a/modules/file-system/gke-storage/README.md b/modules/file-system/gke-storage/README.md
index 17c718aa37..fc65e76d4d 100644
--- a/modules/file-system/gke-storage/README.md
+++ b/modules/file-system/gke-storage/README.md
@@ -39,7 +39,7 @@ then use them in a `gke-job-template` to dynamically provision the resource.
```
See example
-[gke-storage-parallelstore.yaml](../../../examples/README.md#gke-storage-parallelstoreyaml--) blueprint
+[gke-managed-parallelstore.yaml](../../../examples/README.md#gke-managed-parallelstoreyaml--) blueprint
for a complete example.
### Authorized Network
diff --git a/modules/file-system/gke-storage/versions.tf b/modules/file-system/gke-storage/versions.tf
index b712251894..bfb6a565c3 100644
--- a/modules/file-system/gke-storage/versions.tf
+++ b/modules/file-system/gke-storage/versions.tf
@@ -16,6 +16,6 @@ terraform {
required_version = ">= 1.0"
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:gke-storage/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:gke-storage/v1.44.0"
}
}
diff --git a/modules/file-system/parallelstore/scripts/install-daos-client.sh b/modules/file-system/parallelstore/scripts/install-daos-client.sh
index 5596ce8a2f..22ec324af7 100644
--- a/modules/file-system/parallelstore/scripts/install-daos-client.sh
+++ b/modules/file-system/parallelstore/scripts/install-daos-client.sh
@@ -19,6 +19,15 @@ OS_ID=$(awk -F '=' '/^ID=/ {print $2}' /etc/os-release | sed -e 's/"//g')
OS_VERSION=$(awk -F '=' '/VERSION_ID/ {print $2}' /etc/os-release | sed -e 's/"//g')
OS_VERSION_MAJOR=$(awk -F '=' '/VERSION_ID/ {print $2}' /etc/os-release | sed -e 's/"//g' -e 's/\..*$//')
+if ! {
+ { [[ "${OS_ID}" = "rocky" ]] || [[ "${OS_ID}" = "rhel" ]]; } && { [[ "${OS_VERSION_MAJOR}" = "8" ]] || [[ "${OS_VERSION_MAJOR}" = "9" ]]; } ||
+ { [[ "${OS_ID}" = "ubuntu" ]] && [[ "${OS_VERSION}" = "22.04" ]]; } ||
+ { [[ "${OS_ID}" = "debian" ]] && [[ "${OS_VERSION_MAJOR}" = "12" ]]; }
+}; then
+ echo "Unsupported operating system ${OS_ID} ${OS_VERSION}. This script only supports Rocky Linux 8, Redhat 8, Redhat 9, Ubuntu 22.04, and Debian 12."
+ exit 1
+fi
+
if [ -x /bin/daos ]; then
echo "DAOS already installed"
daos version
@@ -27,29 +36,15 @@ else
# The following commands should be executed on each client vm.
## For Rocky linux 8 / RedHat 8.
if [ "${OS_ID}" = "rocky" ] || [ "${OS_ID}" = "rhel" ]; then
- if [ "${OS_VERSION_MAJOR}" = "8" ]; then
- # 1) Add the Parallelstore package repository
- cat >/etc/yum.repos.d/parallelstore-v2-6-el8.repo <<EOF
- cat >/etc/yum.repos.d/parallelstore-v2-6-el9.repo <<EOF
+ cat >/etc/yum.repos.d/parallelstore-v2-6-el"${OS_VERSION_MAJOR}".repo <<-EOF
+ [parallelstore-v2-6-el${OS_VERSION_MAJOR}]
+ name=Parallelstore EL${OS_VERSION_MAJOR} v2.6
+ baseurl=https://us-central1-yum.pkg.dev/projects/parallelstore-packages/v2-6-el${OS_VERSION_MAJOR}
+ enabled=1
+ repo_gpgcheck=0
+ gpgcheck=0
+ EOF
## TODO: Remove disable automatic update script after issue is fixed.
if [ -x /usr/bin/google_disable_automatic_updates ]; then
@@ -65,19 +60,48 @@ EOF
dnf upgrade -y libfabric
# For Ubuntu 22.04 and debian 12,
- elif { [ "${OS_ID}" = "ubuntu" ] && [ "${OS_VERSION}" = "22.04" ]; } || { [ "${OS_ID}" = "debian" ] && [ "${OS_VERSION_MAJOR}" = "12" ]; }; then
+ elif [[ "${OS_ID}" = "ubuntu" ]] || [[ "${OS_ID}" = "debian" ]]; then
# shellcheck disable=SC2034
DEBIAN_FRONTEND=noninteractive
# 1) Add the Parallelstore package repository
curl -o /etc/apt/trusted.gpg.d/us-central1-apt.pkg.dev.asc https://us-central1-apt.pkg.dev/doc/repo-signing-key.gpg
- echo "deb https://us-central1-apt.pkg.dev/projects/parallelstore-packages v2-6-deb main" >>/etc/apt/sources.list.d/artifact-registry.list
+ echo "deb https://us-central1-apt.pkg.dev/projects/parallelstore-packages v2-6-deb main" >/etc/apt/sources.list.d/artifact-registry.list
apt-get update
# 2) Install daos-client
apt-get install -y daos-client
+ # 3) Create daos_agent.service (comes pre-installed with RedHat)
+ if ! getent passwd daos_agent >/dev/null 2>&1; then
+ useradd daos_agent
+ fi
+ cat >/etc/systemd/system/daos_agent.service <<-EOF
+ [Unit]
+ Description=DAOS Agent
+ StartLimitIntervalSec=60
+ Wants=network-online.target
+ After=network-online.target
+
+ [Service]
+ Type=notify
+ User=daos_agent
+ Group=daos_agent
+ RuntimeDirectory=daos_agent
+ RuntimeDirectoryMode=0755
+ ExecStart=/usr/bin/daos_agent -o /etc/daos/daos_agent.yml
+ StandardOutput=journal
+ StandardError=journal
+ Restart=always
+ RestartSec=10
+ LimitMEMLOCK=infinity
+ LimitCORE=infinity
+ StartLimitBurst=5
+
+ [Install]
+ WantedBy=multi-user.target
+ EOF
else
echo "Unsupported operating system ${OS_ID} ${OS_VERSION}. This script only supports Rocky Linux 8, Redhat 8, Redhat 9, Ubuntu 22.04, and Debian 12."
exit 1
diff --git a/modules/file-system/parallelstore/scripts/mount-daos.sh b/modules/file-system/parallelstore/scripts/mount-daos.sh
index bb64c9a4d3..50ac2b273c 100644
--- a/modules/file-system/parallelstore/scripts/mount-daos.sh
+++ b/modules/file-system/parallelstore/scripts/mount-daos.sh
@@ -19,6 +19,16 @@ OS_ID=$(awk -F '=' '/^ID=/ {print $2}' /etc/os-release | sed -e 's/"//g')
OS_VERSION=$(awk -F '=' '/VERSION_ID/ {print $2}' /etc/os-release | sed -e 's/"//g')
OS_VERSION_MAJOR=$(awk -F '=' '/VERSION_ID/ {print $2}' /etc/os-release | sed -e 's/"//g' -e 's/\..*$//')
+if ! {
+ { [[ "${OS_ID}" = "rocky" ]] || [[ "${OS_ID}" = "rhel" ]]; } && { [[ "${OS_VERSION_MAJOR}" = "8" ]] || [[ "${OS_VERSION_MAJOR}" = "9" ]]; } ||
+ { [[ "${OS_ID}" = "ubuntu" ]] && [[ "${OS_VERSION}" = "22.04" ]]; } ||
+ { [[ "${OS_ID}" = "debian" ]] && [[ "${OS_VERSION_MAJOR}" = "12" ]]; }
+}; then
+ echo "Unsupported operating system ${OS_ID} ${OS_VERSION}. This script only supports Rocky Linux 8, Redhat 8, Redhat 9, Ubuntu 22.04, and Debian 12."
+ exit 1
+
+fi
+
# Parse local_mount, mount_options from argument.
# Format mount-options string to be compatible to dfuse mount command.
# e.g. "disable-wb-cache,eq-count=8" --> --disable-wb-cache --eq-count=8.
@@ -41,23 +51,25 @@ sed -i "s/#.*transport_config/transport_config/g" $daos_config
sed -i "s/#.*allow_insecure:.*false/ allow_insecure: true/g" $daos_config
sed -i "s/.*access_points.*/access_points: $access_points/g" $daos_config
-# Start service
-if { [ "${OS_ID}" = "rocky" ] || [ "${OS_ID}" = "rhel" ]; } && { [ "${OS_VERSION_MAJOR}" = "8" ] || [ "${OS_VERSION_MAJOR}" = "9" ]; }; then
- # TODO: Update script to change default log destination folder, after daos_agent user is supported in debian and ubuntu.
- # Move agent log destination from /tmp/ (default) to /var/log/daos_agent/
- mkdir -p /var/log/daos_agent
- chown daos_agent:daos_agent /var/log/daos_agent
- sed -i "s/#.*log_file:.*/log_file: \/var\/log\/daos_agent\/daos_agent.log/g" $daos_config
- systemctl enable daos_agent.service
- systemctl start daos_agent.service
-elif { [ "${OS_ID}" = "ubuntu" ] && [ "${OS_VERSION}" = "22.04" ]; } || { [ "${OS_ID}" = "debian" ] && [ "${OS_VERSION_MAJOR}" = "12" ]; }; then
- mkdir -p /var/run/daos_agent
- daos_agent -o /etc/daos/daos_agent.yml >/dev/null 2>&1 &
-else
- echo "Unsupported operating system ${OS_ID} ${OS_VERSION}. This script only supports Rocky Linux 8, Redhat 8, Redhat 9, Ubuntu 22.04, and Debian 12."
- exit 1
+# Get names of network interfaces not in first PCI slot
+# The first PCI slot is a standard network adapter while remaining interfaces
+# are typically network cards dedicated to GPU or workload communication
+if [[ "$OS_ID" == "debian" ]]; then
+ extra_interfaces=$(find /sys/class/net/ -not -name 'enp0s*' -regextype posix-extended -regex '.*/enp[0-9]+s.*' -printf '"%f"\n' | paste -s -d ',')
+elif [[ "${OS_ID}" = "rocky" ]] || [[ "${OS_ID}" = "rhel" ]]; then
+ extra_interfaces=$(find /sys/class/net/ -not -name eth0 -regextype posix-extended -regex '.*/eth[0-9]+' -printf '"%f"\n' | paste -s -d ',')
+fi
+
+if [[ -n "$extra_interfaces" ]]; then
+ exclude_fabric_ifaces="\"lo\",$extra_interfaces"
+ sed -i "s/#.*exclude_fabric_ifaces: \[.*/exclude_fabric_ifaces: [$exclude_fabric_ifaces]/" $daos_config
fi
+# reroute logs from /tmp (default) to daos_agent dedicated directory
+mkdir -p /var/log/daos_agent
+chown daos_agent:daos_agent /var/log/daos_agent
+sed -i "s/#.*log_file:.*/log_file: \/var\/log\/daos_agent\/daos_agent.log/g" $daos_config
+
# Mount parallelstore instance to client vm.
mkdir -p "$local_mount"
chmod 777 "$local_mount"
@@ -69,39 +81,38 @@ sed -i "s/#.*user_allow_other/user_allow_other/g" $fuse_config
# make sure limit of open files is high enough for dfuse (1M of open files)
ulimit -n 1048576
-for i in {1..10}; do
- # To parse mount_options as --disable-wb-cache --eq-count=8.
- # shellcheck disable=SC2086
- dfuse -m "$local_mount" --pool default-pool --container default-container --multi-user $mount_options && break
-
- echo "dfuse failed, retrying in 1 seconds (attempt $i/10)..."
- sleep 1
-done
-
-if ! mountpoint -q "$local_mount"; then
- exit 1
-fi
-
-# Store the mounting logic in a variable
-mount_command='for i in {1..10}; do /bin/dfuse -m '$local_mount' --pool default-pool --container default-container --multi-user '$mount_options' --foreground && break; echo \"dfuse, failed, retrying in 1 second (attempt '$i'/10)\"; sleep 1; done'
+# Construct the service name with the local_mount suffix
+safe_mount_name=$(systemd-escape -p "${local_mount}")
+service_name="mount_parallelstore_${safe_mount_name}.service"
# --- Begin: Add systemd service creation ---
-cat >/usr/lib/systemd/system/mount_parallelstore.service </etc/systemd/system/"${service_name}" </etc/yum.repos.d/parallelstore-v2-6-el8.repo </etc/yum.repos.d/parallelstore-v2-6-el9.repo </etc/yum.repos.d/parallelstore-v2-6-el"${OS_VERSION_MAJOR}".repo <<-EOF
+ [parallelstore-v2-6-el${OS_VERSION_MAJOR}]
+ name=Parallelstore EL${OS_VERSION_MAJOR} v2.6
+ baseurl=https://us-central1-yum.pkg.dev/projects/parallelstore-packages/v2-6-el${OS_VERSION_MAJOR}
+ enabled=1
+ repo_gpgcheck=0
+ gpgcheck=0
+ EOF
## TODO: Remove disable automatic update script after issue is fixed.
if [ -x /usr/bin/google_disable_automatic_updates ]; then
@@ -65,19 +60,48 @@ EOF
dnf upgrade -y libfabric
# For Ubuntu 22.04 and debian 12,
- elif { [ "${OS_ID}" = "ubuntu" ] && [ "${OS_VERSION}" = "22.04" ]; } || { [ "${OS_ID}" = "debian" ] && [ "${OS_VERSION_MAJOR}" = "12" ]; }; then
+ elif [[ "${OS_ID}" = "ubuntu" ]] || [[ "${OS_ID}" = "debian" ]]; then
# shellcheck disable=SC2034
DEBIAN_FRONTEND=noninteractive
# 1) Add the Parallelstore package repository
curl -o /etc/apt/trusted.gpg.d/us-central1-apt.pkg.dev.asc https://us-central1-apt.pkg.dev/doc/repo-signing-key.gpg
- echo "deb https://us-central1-apt.pkg.dev/projects/parallelstore-packages v2-6-deb main" >>/etc/apt/sources.list.d/artifact-registry.list
+ echo "deb https://us-central1-apt.pkg.dev/projects/parallelstore-packages v2-6-deb main" >/etc/apt/sources.list.d/artifact-registry.list
apt-get update
# 2) Install daos-client
apt-get install -y daos-client
+ # 3) Create daos_agent.service (comes pre-installed with RedHat)
+ if ! getent passwd daos_agent >/dev/null 2>&1; then
+ useradd daos_agent
+ fi
+ cat >/etc/systemd/system/daos_agent.service <<-EOF
+ [Unit]
+ Description=DAOS Agent
+ StartLimitIntervalSec=60
+ Wants=network-online.target
+ After=network-online.target
+
+ [Service]
+ Type=notify
+ User=daos_agent
+ Group=daos_agent
+ RuntimeDirectory=daos_agent
+ RuntimeDirectoryMode=0755
+ ExecStart=/usr/bin/daos_agent -o /etc/daos/daos_agent.yml
+ StandardOutput=journal
+ StandardError=journal
+ Restart=always
+ RestartSec=10
+ LimitMEMLOCK=infinity
+ LimitCORE=infinity
+ StartLimitBurst=5
+
+ [Install]
+ WantedBy=multi-user.target
+ EOF
else
echo "Unsupported operating system ${OS_ID} ${OS_VERSION}. This script only supports Rocky Linux 8, Redhat 8, Redhat 9, Ubuntu 22.04, and Debian 12."
exit 1
diff --git a/modules/file-system/pre-existing-network-storage/scripts/mount-daos.sh b/modules/file-system/pre-existing-network-storage/scripts/mount-daos.sh
index bb64c9a4d3..50ac2b273c 100644
--- a/modules/file-system/pre-existing-network-storage/scripts/mount-daos.sh
+++ b/modules/file-system/pre-existing-network-storage/scripts/mount-daos.sh
@@ -19,6 +19,16 @@ OS_ID=$(awk -F '=' '/^ID=/ {print $2}' /etc/os-release | sed -e 's/"//g')
OS_VERSION=$(awk -F '=' '/VERSION_ID/ {print $2}' /etc/os-release | sed -e 's/"//g')
OS_VERSION_MAJOR=$(awk -F '=' '/VERSION_ID/ {print $2}' /etc/os-release | sed -e 's/"//g' -e 's/\..*$//')
+if ! {
+ { [[ "${OS_ID}" = "rocky" ]] || [[ "${OS_ID}" = "rhel" ]]; } && { [[ "${OS_VERSION_MAJOR}" = "8" ]] || [[ "${OS_VERSION_MAJOR}" = "9" ]]; } ||
+ { [[ "${OS_ID}" = "ubuntu" ]] && [[ "${OS_VERSION}" = "22.04" ]]; } ||
+ { [[ "${OS_ID}" = "debian" ]] && [[ "${OS_VERSION_MAJOR}" = "12" ]]; }
+}; then
+ echo "Unsupported operating system ${OS_ID} ${OS_VERSION}. This script only supports Rocky Linux 8, Redhat 8, Redhat 9, Ubuntu 22.04, and Debian 12."
+ exit 1
+
+fi
+
# Parse local_mount, mount_options from argument.
# Format mount-options string to be compatible to dfuse mount command.
# e.g. "disable-wb-cache,eq-count=8" --> --disable-wb-cache --eq-count=8.
@@ -41,23 +51,25 @@ sed -i "s/#.*transport_config/transport_config/g" $daos_config
sed -i "s/#.*allow_insecure:.*false/ allow_insecure: true/g" $daos_config
sed -i "s/.*access_points.*/access_points: $access_points/g" $daos_config
-# Start service
-if { [ "${OS_ID}" = "rocky" ] || [ "${OS_ID}" = "rhel" ]; } && { [ "${OS_VERSION_MAJOR}" = "8" ] || [ "${OS_VERSION_MAJOR}" = "9" ]; }; then
- # TODO: Update script to change default log destination folder, after daos_agent user is supported in debian and ubuntu.
- # Move agent log destination from /tmp/ (default) to /var/log/daos_agent/
- mkdir -p /var/log/daos_agent
- chown daos_agent:daos_agent /var/log/daos_agent
- sed -i "s/#.*log_file:.*/log_file: \/var\/log\/daos_agent\/daos_agent.log/g" $daos_config
- systemctl enable daos_agent.service
- systemctl start daos_agent.service
-elif { [ "${OS_ID}" = "ubuntu" ] && [ "${OS_VERSION}" = "22.04" ]; } || { [ "${OS_ID}" = "debian" ] && [ "${OS_VERSION_MAJOR}" = "12" ]; }; then
- mkdir -p /var/run/daos_agent
- daos_agent -o /etc/daos/daos_agent.yml >/dev/null 2>&1 &
-else
- echo "Unsupported operating system ${OS_ID} ${OS_VERSION}. This script only supports Rocky Linux 8, Redhat 8, Redhat 9, Ubuntu 22.04, and Debian 12."
- exit 1
+# Get names of network interfaces not in first PCI slot
+# The first PCI slot is a standard network adapter while remaining interfaces
+# are typically network cards dedicated to GPU or workload communication
+if [[ "$OS_ID" == "debian" ]]; then
+ extra_interfaces=$(find /sys/class/net/ -not -name 'enp0s*' -regextype posix-extended -regex '.*/enp[0-9]+s.*' -printf '"%f"\n' | paste -s -d ',')
+elif [[ "${OS_ID}" = "rocky" ]] || [[ "${OS_ID}" = "rhel" ]]; then
+ extra_interfaces=$(find /sys/class/net/ -not -name eth0 -regextype posix-extended -regex '.*/eth[0-9]+' -printf '"%f"\n' | paste -s -d ',')
+fi
+
+if [[ -n "$extra_interfaces" ]]; then
+ exclude_fabric_ifaces="\"lo\",$extra_interfaces"
+ sed -i "s/#.*exclude_fabric_ifaces: \[.*/exclude_fabric_ifaces: [$exclude_fabric_ifaces]/" $daos_config
fi
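
For orientation, on a hypothetical Rocky/RHEL client exposing eth0 (the standard NIC) plus workload NICs eth1-eth3, the logic above would resolve to the following agent setting (illustrative values only):

# Illustrative values; the real list comes from the find | paste pipeline above
extra_interfaces='"eth1","eth2","eth3"'
exclude_fabric_ifaces="\"lo\",$extra_interfaces"
echo "exclude_fabric_ifaces: [$exclude_fabric_ifaces]"
# prints: exclude_fabric_ifaces: ["lo","eth1","eth2","eth3"]
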
+# reroute logs from /tmp (default) to daos_agent dedicated directory
+mkdir -p /var/log/daos_agent
+chown daos_agent:daos_agent /var/log/daos_agent
+sed -i "s/#.*log_file:.*/log_file: \/var\/log\/daos_agent\/daos_agent.log/g" $daos_config
+
# Mount parallelstore instance to client vm.
mkdir -p "$local_mount"
chmod 777 "$local_mount"
@@ -69,39 +81,38 @@ sed -i "s/#.*user_allow_other/user_allow_other/g" $fuse_config
# make sure limit of open files is high enough for dfuse (1M of open files)
ulimit -n 1048576
-for i in {1..10}; do
- # To parse mount_options as --disable-wb-cache --eq-count=8.
- # shellcheck disable=SC2086
- dfuse -m "$local_mount" --pool default-pool --container default-container --multi-user $mount_options && break
-
- echo "dfuse failed, retrying in 1 seconds (attempt $i/10)..."
- sleep 1
-done
-
-if ! mountpoint -q "$local_mount"; then
- exit 1
-fi
-
-# Store the mounting logic in a variable
-mount_command='for i in {1..10}; do /bin/dfuse -m '$local_mount' --pool default-pool --container default-container --multi-user '$mount_options' --foreground && break; echo \"dfuse, failed, retrying in 1 second (attempt '$i'/10)\"; sleep 1; done'
+# Construct the service name with the local_mount suffix
+safe_mount_name=$(systemd-escape -p "${local_mount}")
+service_name="mount_parallelstore_${safe_mount_name}.service"
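
As a hypothetical example of the naming scheme above (the mount point is made up):

# Illustrative only
local_mount="/mnt/parallelstore"
systemd-escape -p "${local_mount}"    # prints: mnt-parallelstore
# so the unit would be named mount_parallelstore_mnt-parallelstore.service
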
# --- Begin: Add systemd service creation ---
-cat >/usr/lib/systemd/system/mount_parallelstore.service <<EOF
+cat >/etc/systemd/system/"${service_name}" <<EOF
+ properties:
+ jobIndex:
+ description: |-
+ JobIndex is the index of Job which contains the coordinator pod
+ (i.e., for a ReplicatedJob with N replicas, there are Job indexes 0 to N-1).
+ type: integer
+ podIndex:
+ description: PodIndex is the Job completion index of the coordinator pod.
+ type: integer
+ replicatedJob:
+ description: |-
+ ReplicatedJob is the name of the ReplicatedJob which contains
+ the coordinator pod.
+ type: string
+ required:
+ - replicatedJob
+ type: object
+ failurePolicy:
+ description: |-
+ FailurePolicy, if set, configures when to declare the JobSet as
+ failed.
+ The JobSet is always declared failed if any job in the set
+ finished with status failed.
+ properties:
+ maxRestarts:
+ description: |-
+ MaxRestarts defines the limit on the number of JobSet restarts.
+ A restart is achieved by recreating all active child jobs.
+ format: int32
+ type: integer
+ restartStrategy:
+ default: Recreate
+ description: |-
+ RestartStrategy defines the strategy to use when restarting the JobSet.
+ Defaults to Recreate.
+ enum:
+ - Recreate
+ - BlockingRecreate
+ type: string
+ rules:
+ description: |-
+ List of failure policy rules for this JobSet.
+ For a given Job failure, the rules will be evaluated in order,
+ and only the first matching rule will be executed.
+ If no matching rule is found, the RestartJobSet action is applied.
+ items:
+ description: |-
+ FailurePolicyRule defines a FailurePolicyAction to be executed if a child job
+ fails due to a reason listed in OnJobFailureReasons.
+ properties:
+ action:
+ description: The action to take if the rule is matched.
+ enum:
+ - FailJobSet
+ - RestartJobSet
+ - RestartJobSetAndIgnoreMaxRestarts
+ type: string
+ name:
+ description: |-
+ The name of the failure policy rule.
+ The name is defaulted to 'failurePolicyRuleN' where N is the index of the failure policy rule.
+ The name must match the regular expression "^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$".
+ type: string
+ onJobFailureReasons:
+ description: |-
+ The requirement on the job failure reasons. The requirement
+ is satisfied if at least one reason matches the list.
+ The rules are evaluated in order, and the first matching
+ rule is executed.
+ An empty list applies the rule to any job failure reason.
+ items:
+ type: string
+ type: array
+ targetReplicatedJobs:
+ description: |-
+ TargetReplicatedJobs are the names of the replicated jobs the operator applies to.
+ An empty list will apply to all replicatedJobs.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - action
+ - name
+ type: object
+ type: array
+ type: object
+ x-kubernetes-validations:
+ - message: Value is immutable
+ rule: self == oldSelf
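
For orientation, a minimal JobSet manifest exercising the failurePolicy fields defined above might look like the following. All names and values are made up for illustration, and it assumes the jobset.x-k8s.io/v1alpha2 API served by this CRD and a running JobSet controller:

# Illustrative only - not part of this PR
kubectl apply -f - <<'MANIFEST'
apiVersion: jobset.x-k8s.io/v1alpha2
kind: JobSet
metadata:
  name: failure-policy-demo
spec:
  failurePolicy:
    maxRestarts: 3
    rules:
    - name: failFast
      action: FailJobSet
      onJobFailureReasons:
      - PodFailurePolicy
  replicatedJobs:
  - name: workers
    replicas: 2
    template:
      spec:
        backoffLimit: 0
        template:
          spec:
            restartPolicy: Never
            containers:
            - name: main
              image: busybox
              command: ["sh", "-c", "echo hello"]
MANIFEST
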
+ managedBy:
+ description: |-
+ ManagedBy is used to indicate the controller or entity that manages a JobSet.
+ The built-in JobSet controller reconciles JobSets which don't have this
+ field at all or the field value is the reserved string
+ `jobset.sigs.k8s.io/jobset-controller`, but skips reconciling JobSets
+ with a custom value for this field.
+
+ The value must be a valid domain-prefixed path (e.g. acme.io/foo) -
+ all characters before the first "/" must be a valid subdomain as defined
+ by RFC 1123. All characters trailing the first "/" must be valid HTTP Path
+ characters as defined by RFC 3986. The value cannot exceed 63 characters.
+ The field is immutable.
+ type: string
+ network:
+ description: Network defines the networking options for the jobset.
+ properties:
+ enableDNSHostnames:
+ description: |-
+ EnableDNSHostnames allows pods to be reached via their hostnames.
+ Pods will be reachable using the fully qualified pod hostname:
+ <jobSet.name>-<spec.replicatedJob.name>-<job-index>-<pod-index>.<subdomain>
+ type: boolean
+ publishNotReadyAddresses:
+ description: |-
+ Indicates if DNS records of pods should be published before the pods are ready.
+ Defaults to True.
+ type: boolean
+ subdomain:
+ description: |-
+ Subdomain is an explicit choice for a network subdomain name
+ When set, any replicated job in the set is added to this network.
+ Defaults to <jobSet.name> if not set.
+ type: string
+ type: object
+ x-kubernetes-validations:
+ - message: Value is immutable
+ rule: self == oldSelf
+ replicatedJobs:
+ description: ReplicatedJobs is the group of jobs that will form the set.
+ items:
+ properties:
+ name:
+ description: |-
+ Name is the name of the entry and will be used as a suffix
+ for the Job name.
+ type: string
+ replicas:
+ default: 1
+ description: |-
+ Replicas is the number of jobs that will be created from this ReplicatedJob's template.
+ Jobs names will be in the format: <jobSet.name>-<spec.replicatedJob.name>-<job-index>
+ format: int32
+ type: integer
+ template:
+ description: Template defines the template of the Job that will be created.
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata of the jobs created from this template.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ finalizers:
+ items:
+ type: string
+ type: array
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ name:
+ type: string
+ namespace:
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the job.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ activeDeadlineSeconds:
+ description: |-
+ Specifies the duration in seconds relative to the startTime that the job
+ may be continuously active before the system tries to terminate it; value
+ must be positive integer. If a Job is suspended (at creation or through an
+ update), this timer will effectively be stopped and reset when the Job is
+ resumed again.
+ format: int64
+ type: integer
+ backoffLimit:
+ description: |-
+ Specifies the number of retries before marking this job failed.
+ Defaults to 6
+ format: int32
+ type: integer
+ backoffLimitPerIndex:
+ description: |-
+ Specifies the limit for the number of retries within an
+ index before marking this index as failed. When enabled the number of
+ failures per index is kept in the pod's
+ batch.kubernetes.io/job-index-failure-count annotation. It can only
+ be set when Job's completionMode=Indexed, and the Pod's restart
+ policy is Never. The field is immutable.
+ This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
+ feature gate is enabled (enabled by default).
+ format: int32
+ type: integer
+ completionMode:
+ description: |-
+ completionMode specifies how Pod completions are tracked. It can be
+ `NonIndexed` (default) or `Indexed`.
+
+ `NonIndexed` means that the Job is considered complete when there have
+ been .spec.completions successfully completed Pods. Each Pod completion is
+ homologous to each other.
+
+ `Indexed` means that the Pods of a
+ Job get an associated completion index from 0 to (.spec.completions - 1),
+ available in the annotation batch.kubernetes.io/job-completion-index.
+ The Job is considered complete when there is one successfully completed Pod
+ for each index.
+ When value is `Indexed`, .spec.completions must be specified and
+ `.spec.parallelism` must be less than or equal to 10^5.
+ In addition, The Pod name takes the form
+ `$(job-name)-$(index)-$(random-string)`,
+ the Pod hostname takes the form `$(job-name)-$(index)`.
+
+ More completion modes can be added in the future.
+ If the Job controller observes a mode that it doesn't recognize, which
+ is possible during upgrades due to version skew, the controller
+ skips updates for the Job.
+ type: string
+ completions:
+ description: |-
+ Specifies the desired number of successfully finished pods the
+ job should be run with. Setting to null means that the success of any
+ pod signals the success of all pods, and allows parallelism to have any positive
+ value. Setting to 1 means that parallelism is limited to 1 and the success of that
+ pod signals the success of the job.
+ More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
+ format: int32
+ type: integer
+ managedBy:
+ description: |-
+ ManagedBy field indicates the controller that manages a Job. The k8s Job
+ controller reconciles jobs which don't have this field at all or the field
+ value is the reserved string `kubernetes.io/job-controller`, but skips
+ reconciling Jobs with a custom value for this field.
+ The value must be a valid domain-prefixed path (e.g. acme.io/foo) -
+ all characters before the first "/" must be a valid subdomain as defined
+ by RFC 1123. All characters trailing the first "/" must be valid HTTP Path
+ characters as defined by RFC 3986. The value cannot exceed 63 characters.
+ This field is immutable.
+
+ This field is alpha-level. The job controller accepts setting the field
+ when the feature gate JobManagedBy is enabled (disabled by default).
+ type: string
+ manualSelector:
+ description: |-
+ manualSelector controls generation of pod labels and pod selectors.
+ Leave `manualSelector` unset unless you are certain what you are doing.
+ When false or unset, the system picks labels unique to this job
+ and appends those labels to the pod template. When true,
+ the user is responsible for picking unique labels and specifying
+ the selector. Failure to pick a unique label may cause this
+ and other jobs to not function correctly. However, you may see
+ `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
+ API.
+ More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector
+ type: boolean
+ maxFailedIndexes:
+ description: |-
+ Specifies the maximal number of failed indexes before marking the Job as
+ failed, when backoffLimitPerIndex is set. Once the number of failed
+ indexes exceeds this number the entire Job is marked as Failed and its
+ execution is terminated. When left as null the job continues execution of
+ all of its indexes and is marked with the `Complete` Job condition.
+ It can only be specified when backoffLimitPerIndex is set.
+ It can be null or up to completions. It is required and must be
+ less than or equal to 10^4 when completions is greater than 10^5.
+ This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
+ feature gate is enabled (enabled by default).
+ format: int32
+ type: integer
+ parallelism:
+ description: |-
+ Specifies the maximum desired number of pods the job should
+ run at any given time. The actual number of pods running in steady state will
+ be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
+ i.e. when the work left to do is less than max parallelism.
+ More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
+ format: int32
+ type: integer
+ podFailurePolicy:
+ description: |-
+ Specifies the policy of handling failed pods. In particular, it allows to
+ specify the set of actions and conditions which need to be
+ satisfied to take the associated action.
+ If empty, the default behaviour applies - the counter of failed pods,
+ represented by the job's .status.failed field, is incremented and it is
+ checked against the backoffLimit. This field cannot be used in combination
+ with restartPolicy=OnFailure.
+ properties:
+ rules:
+ description: |-
+ A list of pod failure policy rules. The rules are evaluated in order.
+ Once a rule matches a Pod failure, the remaining of the rules are ignored.
+ When no rule matches the Pod failure, the default handling applies - the
+ counter of pod failures is incremented and it is checked against
+ the backoffLimit. At most 20 elements are allowed.
+ items:
+ description: |-
+ PodFailurePolicyRule describes how a pod failure is handled when the requirements are met.
+ One of onExitCodes and onPodConditions, but not both, can be used in each rule.
+ properties:
+ action:
+ description: |-
+ Specifies the action taken on a pod failure when the requirements are satisfied.
+ Possible values are:
+
+ - FailJob: indicates that the pod's job is marked as Failed and all
+ running pods are terminated.
+ - FailIndex: indicates that the pod's index is marked as Failed and will
+ not be restarted.
+ This value is beta-level. It can be used when the
+ `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
+ - Ignore: indicates that the counter towards the .backoffLimit is not
+ incremented and a replacement pod is created.
+ - Count: indicates that the pod is handled in the default way - the
+ counter towards the .backoffLimit is incremented.
+ Additional values are considered to be added in the future. Clients should
+ react to an unknown action by skipping the rule.
+ type: string
+ onExitCodes:
+ description: Represents the requirement on the container exit codes.
+ properties:
+ containerName:
+ description: |-
+ Restricts the check for exit codes to the container with the
+ specified name. When null, the rule applies to all containers.
+ When specified, it should match one the container or initContainer
+ names in the pod template.
+ type: string
+ operator:
+ description: |-
+ Represents the relationship between the container exit code(s) and the
+ specified values. Containers completed with success (exit code 0) are
+ excluded from the requirement check. Possible values are:
+
+ - In: the requirement is satisfied if at least one container exit code
+ (might be multiple if there are multiple containers not restricted
+ by the 'containerName' field) is in the set of specified values.
+ - NotIn: the requirement is satisfied if at least one container exit code
+ (might be multiple if there are multiple containers not restricted
+ by the 'containerName' field) is not in the set of specified values.
+ Additional values are considered to be added in the future. Clients should
+ react to an unknown operator by assuming the requirement is not satisfied.
+ type: string
+ values:
+ description: |-
+ Specifies the set of values. Each returned container exit code (might be
+ multiple in case of multiple containers) is checked against this set of
+ values with respect to the operator. The list of values must be ordered
+ and must not contain duplicates. Value '0' cannot be used for the In operator.
+ At least one element is required. At most 255 elements are allowed.
+ items:
+ format: int32
+ type: integer
+ type: array
+ x-kubernetes-list-type: set
+ required:
+ - operator
+ - values
+ type: object
+ onPodConditions:
+ description: |-
+ Represents the requirement on the pod conditions. The requirement is represented
+ as a list of pod condition patterns. The requirement is satisfied if at
+ least one pattern matches an actual pod condition. At most 20 elements are allowed.
+ items:
+ description: |-
+ PodFailurePolicyOnPodConditionsPattern describes a pattern for matching
+ an actual pod condition type.
+ properties:
+ status:
+ description: |-
+ Specifies the required Pod condition status. To match a pod condition
+ it is required that the specified status equals the pod condition status.
+ Defaults to True.
+ type: string
+ type:
+ description: |-
+ Specifies the required Pod condition type. To match a pod condition
+ it is required that specified type equals the pod condition type.
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - action
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - rules
+ type: object
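
For orientation, a hypothetical plain batch/v1 Job using the podFailurePolicy schema described above might look like this (values are illustrative only):

# Illustrative only - not part of this PR
kubectl apply -f - <<'MANIFEST'
apiVersion: batch/v1
kind: Job
metadata:
  name: pod-failure-policy-demo
spec:
  backoffLimit: 6
  podFailurePolicy:
    rules:
    - action: FailJob
      onExitCodes:
        containerName: main
        operator: In
        values: [42]
    - action: Ignore
      onPodConditions:
      - type: DisruptionTarget
        status: "True"
  template:
    spec:
      restartPolicy: Never      # podFailurePolicy requires restartPolicy: Never
      containers:
      - name: main
        image: busybox
        command: ["sh", "-c", "exit 0"]
MANIFEST
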
+ podReplacementPolicy:
+ description: |-
+ podReplacementPolicy specifies when to create replacement Pods.
+ Possible values are:
+ - TerminatingOrFailed means that we recreate pods
+ when they are terminating (has a metadata.deletionTimestamp) or failed.
+ - Failed means to wait until a previously created Pod is fully terminated (has phase
+ Failed or Succeeded) before creating a replacement Pod.
+
+ When using podFailurePolicy, Failed is the only allowed value.
+ TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
+ This is a beta field. To use this, enable the JobPodReplacementPolicy feature toggle.
+ This is on by default.
+ type: string
+ selector:
+ description: |-
+ A label query over pods that should match the pod count.
+ Normally, the system sets this field for you.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ successPolicy:
+ description: |-
+ successPolicy specifies the policy when the Job can be declared as succeeded.
+ If empty, the default behavior applies - the Job is declared as succeeded
+ only when the number of succeeded pods equals to the completions.
+ When the field is specified, it must be immutable and works only for the Indexed Jobs.
+ Once the Job meets the SuccessPolicy, the lingering pods are terminated.
+
+ This field is beta-level. To use this field, you must enable the
+ `JobSuccessPolicy` feature gate (enabled by default).
+ properties:
+ rules:
+ description: |-
+ rules represents the list of alternative rules for declaring the Jobs
+ as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met,
+ the "SucceededCriteriaMet" condition is added, and the lingering pods are removed.
+ The terminal state for such a Job has the "Complete" condition.
+ Additionally, these rules are evaluated in order; Once the Job meets one of the rules,
+ other rules are ignored. At most 20 elements are allowed.
+ items:
+ description: |-
+ SuccessPolicyRule describes rule for declaring a Job as succeeded.
+ Each rule must have at least one of the "succeededIndexes" or "succeededCount" specified.
+ properties:
+ succeededCount:
+ description: |-
+ succeededCount specifies the minimal required size of the actual set of the succeeded indexes
+ for the Job. When succeededCount is used along with succeededIndexes, the check is
+ constrained only to the set of indexes specified by succeededIndexes.
+ For example, given that succeededIndexes is "1-4", succeededCount is "3",
+ and completed indexes are "1", "3", and "5", the Job isn't declared as succeeded
+ because only "1" and "3" indexes are considered in that rule.
+ When this field is null, this doesn't default to any value and
+ is never evaluated at any time.
+ When specified it needs to be a positive integer.
+ format: int32
+ type: integer
+ succeededIndexes:
+ description: |-
+ succeededIndexes specifies the set of indexes
+ which need to be contained in the actual set of the succeeded indexes for the Job.
+ The list of indexes must be within 0 to ".spec.completions-1" and
+ must not contain duplicates. At least one element is required.
+ The indexes are represented as intervals separated by commas.
+ The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen.
+ The numbers in a range are represented by its first and last element,
+ separated by a hyphen.
+ For example, if the completed indexes are 1, 3, 4, 5 and 7, they are
+ represented as "1,3-5,7".
+ When this field is null, this field doesn't default to any value
+ and is never evaluated at any time.
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - rules
+ type: object
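
As a hedged sketch of the successPolicy fields above, a hypothetical Indexed Job that completes once any two of indexes 0-2 succeed might look like this (assumes the JobSuccessPolicy feature gate, beta and on by default per the description above):

# Illustrative only - not part of this PR
kubectl apply -f - <<'MANIFEST'
apiVersion: batch/v1
kind: Job
metadata:
  name: success-policy-demo
spec:
  completionMode: Indexed    # successPolicy works only for Indexed Jobs
  completions: 5
  parallelism: 5
  successPolicy:
    rules:
    - succeededIndexes: "0-2"
      succeededCount: 2
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: main
        image: busybox
        command: ["sh", "-c", "exit 0"]
MANIFEST
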
+ suspend:
+ description: |-
+ suspend specifies whether the Job controller should create Pods or not. If
+ a Job is created with suspend set to true, no Pods are created by the Job
+ controller. If a Job is suspended after creation (i.e. the flag goes from
+ false to true), the Job controller will delete all active Pods associated
+ with this Job. Users must design their workload to gracefully handle this.
+ Suspending a Job will reset the StartTime field of the Job, effectively
+ resetting the ActiveDeadlineSeconds timer too. Defaults to false.
+ type: boolean
+ template:
+ description: |-
+ Describes the pod that will be created when executing a job.
+ The only allowed template.spec.restartPolicy values are "Never" or "OnFailure".
+ More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ finalizers:
+ items:
+ type: string
+ type: array
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ name:
+ type: string
+ namespace:
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the pod.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ activeDeadlineSeconds:
+ description: |-
+ Optional duration in seconds the pod may be active on the node relative to
+ StartTime before the system will actively try to mark it failed and kill associated containers.
+ Value must be a positive integer.
+ format: int64
+ type: integer
+ affinity:
+ description: If specified, the pod's scheduling constraints
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ type: object
+ automountServiceAccountToken:
+ description: AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
+ type: boolean
+ containers:
+ description: |-
+ List of containers belonging to the pod.
+ Containers cannot currently be added or removed.
+ There must be at least one container in a Pod.
+ Cannot be updated.
+ items:
+ description: A single application container that you want to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+ description: EnvFromSource represents the source of a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: |-
+ Actions that the management system should take in response to container lifecycle events.
+ Cannot be updated.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
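+ # The lifecycle block above accepts postStart and preStop handlers. A sketch of
+ # a preStop exec hook (illustrative command and timing only, not part of the schema):
+ #   lifecycle:
+ #     preStop:
+ #       exec:
+ #         # runs before termination; the grace-period countdown begins before this hook
+ #         command: ["/bin/sh", "-c", "sleep 5"]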
+ livenessProbe:
+ description: |-
+ Periodic probe of container liveness.
+ Container will be restarted if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service. Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
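+ # The livenessProbe schema above matches the standard Kubernetes Probe type. An
+ # illustrative httpGet probe (endpoint and thresholds are placeholder values):
+ #   livenessProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: 8080            # integer or IANA_SVC_NAME string
+ #     initialDelaySeconds: 10
+ #     periodSeconds: 10       # defaults to 10 if omitted
+ #     failureThreshold: 3     # defaults to 3 if omitted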
+ name:
+ description: |-
+ Name of the container specified as a DNS_LABEL.
+ Each container in a pod must have a unique name (DNS_LABEL).
+ Cannot be updated.
+ type: string
+ ports:
+ description: |-
+ List of ports to expose from the container. Not specifying a port here
+ DOES NOT prevent that port from being exposed. Any port which is
+ listening on the default "0.0.0.0" address inside a container will be
+ accessible from the network.
+ Modifying this array with strategic merge patch may corrupt the data.
+ For more information See https://github.com/kubernetes/kubernetes/issues/108255.
+ Cannot be updated.
+ items:
+ description: ContainerPort represents a network port in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ description: |-
+ Periodic probe of container service readiness.
+ Container will be removed from service endpoints if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service. Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents resource resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Compute Resources required by this container.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
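+ # The resources schema above accepts Kubernetes quantity strings (see the pattern
+ # on limits/requests). A hedged example with placeholder amounts:
+ #   resources:
+ #     requests:
+ #       cpu: "500m"           # half a CPU
+ #       memory: "1Gi"
+ #     limits:
+ #       cpu: "1"
+ #       memory: "2Gi"         # requests cannot exceed limits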
+ restartPolicy:
+ description: |-
+ RestartPolicy defines the restart behavior of individual containers in a pod.
+ This field may only be set for init containers, and the only allowed value is "Always".
+ For non-init containers or when this field is not specified,
+ the restart behavior is defined by the Pod's restart policy and the container type.
+ Setting the RestartPolicy as "Always" for the init container will have the following effect:
+ this init container will be continually restarted on
+ exit until all regular containers have terminated. Once all regular
+ containers have completed, all init containers with restartPolicy "Always"
+ will be shut down. This lifecycle differs from normal init containers and
+ is often referred to as a "sidecar" container. Although this init
+ container still starts in the init container sequence, it does not wait
+ for the container to complete before proceeding to the next init
+ container. Instead, the next init container starts immediately after this
+ init container is started, or after any startupProbe has successfully
+ completed.
+ type: string
+ securityContext:
+ description: |-
+ SecurityContext defines the security options the container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is true always when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by this container. If set, this profile
+ overrides the pod's appArmorProfile.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represents a POSIX capabilities type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represents a POSIX capabilities type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default value is Default which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ description: |-
+ StartupProbe indicates that the Pod has successfully initialized.
+ If specified, no other probes are executed until this completes successfully.
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+ when it might take a long time to load data or warm a cache, than during steady-state operation.
+ This cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service. Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of the container that the device will be mapped to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - mountPath
+ x-kubernetes-list-type: map
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ dnsConfig:
+ description: |-
+ Specifies the DNS parameters of a pod.
+ Parameters specified here will be merged to the generated DNS
+ configuration based on DNSPolicy.
+ properties:
+ nameservers:
+ description: |-
+ A list of DNS name server IP addresses.
+ This will be appended to the base nameservers generated from DNSPolicy.
+ Duplicated nameservers will be removed.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ options:
+ description: |-
+ A list of DNS resolver options.
+ This will be merged with the base options generated from DNSPolicy.
+ Duplicated entries will be removed. Resolution options given in Options
+ will override those that appear in the base DNSPolicy.
+ items:
+ description: PodDNSConfigOption defines DNS resolver options of a pod.
+ properties:
+ name:
+ description: Required.
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ searches:
+ description: |-
+ A list of DNS search domains for host-name lookup.
+ This will be appended to the base search paths generated from DNSPolicy.
+ Duplicated search paths will be removed.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
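+ # The dnsConfig block above is merged with the policy chosen in dnsPolicy. An
+ # illustrative snippet (addresses and search domains are placeholders):
+ #   dnsConfig:
+ #     nameservers: ["10.0.0.10"]
+ #     searches: ["example.svc.cluster.local"]
+ #     options:
+ #       - name: ndots
+ #         value: "2"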
+ dnsPolicy:
+ description: |-
+ Set DNS policy for the pod.
+ Defaults to "ClusterFirst".
+ Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+ DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+ To have DNS options set along with hostNetwork, you have to specify DNS policy
+ explicitly to 'ClusterFirstWithHostNet'.
+ type: string
+ enableServiceLinks:
+ description: |-
+ EnableServiceLinks indicates whether information about services should be injected into pod's
+ environment variables, matching the syntax of Docker links.
+ Optional: Defaults to true.
+ type: boolean
+ ephemeralContainers:
+ description: |-
+ List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing
+ pod to perform user-initiated actions such as debugging. This list cannot be specified when
+ creating a pod, and it cannot be modified by updating the pod spec. In order to add an
+ ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
+ items:
+ description: |-
+ An EphemeralContainer is a temporary container that you may add to an existing Pod for
+ user-initiated activities such as debugging. Ephemeral containers have no resource or
+ scheduling guarantees, and they will not be restarted when they exit or when a Pod is
+ removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the
+ Pod to exceed its resource allocation.
+
+ To add an ephemeral container, use the ephemeralcontainers subresource of an existing
+ Pod. Ephemeral containers may not be removed or restarted.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
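+ # Illustrative sketch only (not emitted by code generation): one env entry
+ # resolving its value from a ConfigMap key via valueFrom.configMapKeyRef,
+ # per the EnvVar fields above. The ConfigMap name and key are hypothetical.
+ #   env:
+ #     - name: LOG_LEVEL
+ #       valueFrom:
+ #         configMapKeyRef:
+ #           name: app-config
+ #           key: log-level
+ #           optional: true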
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+ description: EnvFromSource represents the source of a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: Lifecycle is not allowed for ephemeral containers.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field, and
+ lifecycle hooks will fail at runtime when a TCP handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field, and
+ lifecycle hooks will fail at runtime when a TCP handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ description: Probes are not allowed for ephemeral containers.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service. Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds between when the processes running in the pod are sent
+ a termination signal and when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling the ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ name:
+ description: |-
+ Name of the ephemeral container specified as a DNS_LABEL.
+ This name must be unique among all containers, init containers and ephemeral containers.
+ type: string
+ ports:
+ description: Ports are not allowed for ephemeral containers.
+ items:
+ description: ContainerPort represents a network port in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ description: Probes are not allowed for ephemeral containers.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service. Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds between when the processes running in the pod are sent
+ a termination signal and when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling the ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents resource resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources
+ already allocated to the pod.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ restartPolicy:
+ description: |-
+ Restart policy for the container to manage the restart behavior of each
+ container within a pod.
+ This may only be set for init containers. You cannot set this field on
+ ephemeral containers.
+ type: string
+ securityContext:
+ description: |-
+ Optional: SecurityContext defines the security options the ephemeral container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is always true when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by this container. If set, this profile
+ overrides the pod's appArmorProfile.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represent POSIX capabilities type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represent POSIX capabilities type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default value is Default which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ description: Probes are not allowed for ephemeral containers.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service. Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds between when the processes running in the pod are sent
+ a termination signal and when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling the ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ targetContainerName:
+ description: |-
+ If set, the name of the container from PodSpec that this ephemeral container targets.
+ The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+ If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+ The container runtime must implement support for this feature. If the runtime does not
+ support namespace targeting then the result of setting this field is undefined.
+ type: string
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of the container that the device will be mapped to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - mountPath
+ x-kubernetes-list-type: map
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ hostAliases:
+ description: |-
+ HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+ file if specified.
+ items:
+ description: |-
+ HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
+ pod's hosts file.
+ properties:
+ hostnames:
+ description: Hostnames for the above IP address.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ip:
+ description: IP address of the host file entry.
+ type: string
+ required:
+ - ip
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - ip
+ x-kubernetes-list-type: map
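+ # Illustrative sketch only (not emitted by code generation): a hostAliases
+ # entry mapping one IP to additional hostnames, per the HostAlias fields
+ # above. The IP and hostnames are hypothetical placeholders.
+ #   hostAliases:
+ #     - ip: "10.0.0.10"
+ #       hostnames:
+ #         - foo.example.local
+ #         - bar.example.local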
+ hostIPC:
+ description: |-
+ Use the host's ipc namespace.
+ Optional: Defaults to false.
+ type: boolean
+ hostNetwork:
+ description: |-
+ Host networking requested for this pod. Use the host's network namespace.
+ If this option is set, the ports that will be used must be specified.
+ Defaults to false.
+ type: boolean
+ hostPID:
+ description: |-
+ Use the host's pid namespace.
+ Optional: Defaults to false.
+ type: boolean
+ hostUsers:
+ description: |-
+ Use the host's user namespace.
+ Optional: Defaults to true.
+ If set to true or not present, the pod will be run in the host user namespace, which is useful
+ when the pod needs a feature only available to the host user namespace, such as
+ loading a kernel module with CAP_SYS_MODULE.
+ When set to false, a new userns is created for the pod. Setting false is useful for
+ mitigating container breakout vulnerabilities while still allowing users to run their
+ containers as root without actually having root privileges on the host.
+ This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+ type: boolean
+ hostname:
+ description: |-
+ Specifies the hostname of the Pod.
+ If not specified, the pod's hostname will be set to a system-defined value.
+ type: string
+ imagePullSecrets:
+ description: |-
+ ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ If specified, these secrets will be passed to individual puller implementations for them to use.
+ More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ initContainers:
+ description: |-
+ List of initialization containers belonging to the pod.
+ Init containers are executed in order prior to containers being started. If any
+ init container fails, the pod is considered to have failed and is handled according
+ to its restartPolicy. The name for an init container or normal container must be
+ unique among all containers.
+ Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+ The resourceRequirements of an init container are taken into account during scheduling
+ by finding the highest request/limit for each resource type, and then using the max
+ of that value or the sum of the normal containers. Limits are applied to init containers
+ in a similar fashion.
+ Init containers cannot currently be added or removed.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ items:
+ description: A single application container that you want to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+ description: EnvFromSource represents the source of a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: |-
+ Actions that the management system should take in response to container lifecycle events.
+ Cannot be updated.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ description: |-
+ Periodic probe of container liveness.
+ Container will be restarted if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service. Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
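+ # Illustrative sketch: a typical container-level liveness probe using the fields
+ # described above; the path and port are hypothetical.
+ #   livenessProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: 8080
+ #     initialDelaySeconds: 10
+ #     periodSeconds: 10
+ #     failureThreshold: 3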
+ name:
+ description: |-
+ Name of the container specified as a DNS_LABEL.
+ Each container in a pod must have a unique name (DNS_LABEL).
+ Cannot be updated.
+ type: string
+ ports:
+ description: |-
+ List of ports to expose from the container. Not specifying a port here
+ DOES NOT prevent that port from being exposed. Any port which is
+ listening on the default "0.0.0.0" address inside a container will be
+ accessible from the network.
+ Modifying this array with strategic merge patch may corrupt the data.
+ For more information See https://github.com/kubernetes/kubernetes/issues/108255.
+ Cannot be updated.
+ items:
+ description: ContainerPort represents a network port in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
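+ # Illustrative sketch: declaring a named container port as described above (the name
+ # must be an IANA_SVC_NAME; the values shown are hypothetical).
+ #   ports:
+ #     - name: http
+ #       containerPort: 8080
+ #       protocol: TCP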
+ readinessProbe:
+ description: |-
+ Periodic probe of container service readiness.
+ Container will be removed from service endpoints if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service. Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents resource resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Compute Resources required by this container.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
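+ # Illustrative sketch: resource quantities accepted by the pattern above, e.g. plain
+ # integers, milli-CPU ("500m"), and binary-suffixed memory ("1Gi"). Values are examples.
+ #   resources:
+ #     requests:
+ #       cpu: 500m
+ #       memory: 1Gi
+ #     limits:
+ #       cpu: "1"
+ #       memory: 2Gi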
+ restartPolicy:
+ description: |-
+ RestartPolicy defines the restart behavior of individual containers in a pod.
+ This field may only be set for init containers, and the only allowed value is "Always".
+ For non-init containers or when this field is not specified,
+ the restart behavior is defined by the Pod's restart policy and the container type.
+ Setting the RestartPolicy as "Always" for the init container will have the following effect:
+ this init container will be continually restarted on
+ exit until all regular containers have terminated. Once all regular
+ containers have completed, all init containers with restartPolicy "Always"
+ will be shut down. This lifecycle differs from normal init containers and
+ is often referred to as a "sidecar" container. Although this init
+ container still starts in the init container sequence, it does not wait
+ for the container to complete before proceeding to the next init
+ container. Instead, the next init container starts immediately after this
+ init container is started, or after any startupProbe has successfully
+ completed.
+ type: string
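+ # Illustrative sketch: a "sidecar"-style init container enabled by restartPolicy: Always,
+ # as described above. The container name and image are hypothetical.
+ #   initContainers:
+ #     - name: log-forwarder
+ #       image: example.com/log-forwarder:latest
+ #       restartPolicy: Always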
+ securityContext:
+ description: |-
+ SecurityContext defines the security options the container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is true always when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by this container. If set, this profile
+ overrides the pod's appArmorProfile.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represent POSIX capabilities type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represent POSIX capabilities type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default value is Default which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ description: |-
+ StartupProbe indicates that the Pod has successfully initialized.
+ If specified, no other probes are executed until this completes successfully.
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+ when it might take a long time to load data or warm a cache, than during steady-state operation.
+ This cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service. Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of the container that the device will be mapped to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - mountPath
+ x-kubernetes-list-type: map
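+ # Illustrative sketch: mounting a named volume into the container per the volumeMounts
+ # schema above; the volume name and mount path are hypothetical.
+ #   volumeMounts:
+ #     - name: config
+ #       mountPath: /etc/app
+ #       readOnly: true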
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ nodeName:
+ description: |-
+ NodeName indicates in which node this pod is scheduled.
+ If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName.
+ Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod.
+ This field should not be used to express a desire for the pod to be scheduled on a specific node.
+ https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+ NodeSelector is a selector which must be true for the pod to fit on a node.
+ Selector which must match a node's labels for the pod to be scheduled on that node.
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ x-kubernetes-map-type: atomic
+ os:
+ description: |-
+ Specifies the OS of the containers in the pod.
+ Some pod and container fields are restricted if this is set.
+
+ If the OS field is set to linux, the following fields must be unset:
+ - securityContext.windowsOptions
+
+ If the OS field is set to windows, following fields must be unset:
+ - spec.hostPID
+ - spec.hostIPC
+ - spec.hostUsers
+ - spec.securityContext.appArmorProfile
+ - spec.securityContext.seLinuxOptions
+ - spec.securityContext.seccompProfile
+ - spec.securityContext.fsGroup
+ - spec.securityContext.fsGroupChangePolicy
+ - spec.securityContext.sysctls
+ - spec.shareProcessNamespace
+ - spec.securityContext.runAsUser
+ - spec.securityContext.runAsGroup
+ - spec.securityContext.supplementalGroups
+ - spec.securityContext.supplementalGroupsPolicy
+ - spec.containers[*].securityContext.appArmorProfile
+ - spec.containers[*].securityContext.seLinuxOptions
+ - spec.containers[*].securityContext.seccompProfile
+ - spec.containers[*].securityContext.capabilities
+ - spec.containers[*].securityContext.readOnlyRootFilesystem
+ - spec.containers[*].securityContext.privileged
+ - spec.containers[*].securityContext.allowPrivilegeEscalation
+ - spec.containers[*].securityContext.procMount
+ - spec.containers[*].securityContext.runAsUser
+ - spec.containers[*].securityContext.runAsGroup
+ properties:
+ name:
+ description: |-
+ Name is the name of the operating system. The currently supported values are linux and windows.
+ Additional values may be defined in the future and can be one of:
+ https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+ Clients should expect to handle additional values and treat unrecognized values in this field as os: null
+ type: string
+ required:
+ - name
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+ This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+ the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+ The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+ set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+ defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+ More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ type: object
+ preemptionPolicy:
+ description: |-
+ PreemptionPolicy is the Policy for preempting pods with lower priority.
+ One of Never, PreemptLowerPriority.
+ Defaults to PreemptLowerPriority if unset.
+ type: string
+ priority:
+ description: |-
+ The priority value. Various system components use this field to find the
+ priority of the pod. When Priority Admission Controller is enabled, it
+ prevents users from setting this field. The admission controller populates
+ this field from PriorityClassName.
+ The higher the value, the higher the priority.
+ format: int32
+ type: integer
+ priorityClassName:
+ description: |-
+ If specified, indicates the pod's priority. "system-node-critical" and
+ "system-cluster-critical" are two special keywords which indicate the
+ highest priorities with the former being the highest priority. Any other
+ name must be defined by creating a PriorityClass object with that name.
+ If not specified, the pod priority will be default or zero if there is no
+ default.
+ type: string
+ readinessGates:
+ description: |-
+ If specified, all readiness gates will be evaluated for pod readiness.
+ A pod is ready when all its containers are ready AND
+ all conditions specified in the readiness gates have status equal to "True"
+ More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates
+ items:
+ description: PodReadinessGate contains the reference to a pod condition
+ properties:
+ conditionType:
+ description: ConditionType refers to a condition in the pod's condition list with matching type.
+ type: string
+ required:
+ - conditionType
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resourceClaims:
+ description: |-
+ ResourceClaims defines which ResourceClaims must be allocated
+ and reserved before the Pod is allowed to start. The resources
+ will be made available to those containers which consume them
+ by name.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable.
+ items:
+ description: |-
+ PodResourceClaim references exactly one ResourceClaim, either directly
+ or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim
+ for the pod.
+
+ It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
+ Containers that need access to the ResourceClaim reference it with this name.
+ properties:
+ name:
+ description: |-
+ Name uniquely identifies this resource claim inside the pod.
+ This must be a DNS_LABEL.
+ type: string
+ resourceClaimName:
+ description: |-
+ ResourceClaimName is the name of a ResourceClaim object in the same
+ namespace as this pod.
+
+ Exactly one of ResourceClaimName and ResourceClaimTemplateName must
+ be set.
+ type: string
+ resourceClaimTemplateName:
+ description: |-
+ ResourceClaimTemplateName is the name of a ResourceClaimTemplate
+ object in the same namespace as this pod.
+
+ The template will be used to create a new ResourceClaim, which will
+ be bound to this pod. When this pod is deleted, the ResourceClaim
+ will also be deleted. The pod name and resource name, along with a
+ generated component, will be used to form a unique name for the
+ ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.
+
+ This field is immutable and no changes will be made to the
+ corresponding ResourceClaim by the control plane after creating the
+ ResourceClaim.
+
+ Exactly one of ResourceClaimName and ResourceClaimTemplateName must
+ be set.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ restartPolicy:
+ description: |-
+ Restart policy for all containers within the pod.
+ One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted.
+ Defaults to Always.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
+ type: string
+ runtimeClassName:
+ description: |-
+ RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+ to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
+ If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+ empty definition that uses the default runtime handler.
+ More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
+ type: string
+ schedulerName:
+ description: |-
+ If specified, the pod will be dispatched by specified scheduler.
+ If not specified, the pod will be dispatched by default scheduler.
+ type: string
+ schedulingGates:
+ description: |-
+ SchedulingGates is an opaque list of values that if specified will block scheduling the pod.
+ If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the
+ scheduler will not attempt to schedule the pod.
+
+ SchedulingGates can only be set at pod creation time, and be removed only afterwards.
+ items:
+ description: PodSchedulingGate is associated to a Pod to guard its scheduling.
+ properties:
+ name:
+ description: |-
+ Name of the scheduling gate.
+ Each scheduling gate must have a unique name field.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ securityContext:
+ description: |-
+ SecurityContext holds pod-level security attributes and common container settings.
+ Optional: Defaults to empty. See type description for default values of each field.
+ properties:
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by the containers in this pod.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ fsGroup:
+ description: |-
+ A special supplemental group that applies to all containers in a pod.
+ Some volume types allow the Kubelet to change the ownership of that volume
+ to be owned by the pod:
+
+ 1. The owning GID will be the FSGroup
+ 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+ 3. The permission bits are OR'd with rw-rw----
+
+ If unset, the Kubelet will not modify the ownership and permissions of any volume.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ fsGroupChangePolicy:
+ description: |-
+ fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
+ before being exposed inside Pod. This field will only apply to
+ volume types which support fsGroup based ownership(and permissions).
+ It will have no effect on ephemeral volume types such as: secret, configmaps
+ and emptydir.
+ Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence
+ for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence
+ for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to all containers.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in SecurityContext. If set in
+ both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ takes precedence for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by the containers in this pod.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ supplementalGroups:
+ description: |-
+ A list of groups applied to the first process run in each container, in
+ addition to the container's primary GID and fsGroup (if specified). If
+ the SupplementalGroupsPolicy feature is enabled, the
+ supplementalGroupsPolicy field determines whether these are in addition
+ to or instead of any group memberships defined in the container image.
+ If unspecified, no additional groups are added, though group memberships
+ defined in the container image may still be used, depending on the
+ supplementalGroupsPolicy field.
+ Note that this field cannot be set when spec.os.name is windows.
+ items:
+ format: int64
+ type: integer
+ type: array
+ x-kubernetes-list-type: atomic
+ supplementalGroupsPolicy:
+ description: |-
+ Defines how supplemental groups of the first container processes are calculated.
+ Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
+ (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled
+ and the container runtime must implement support for this feature.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ sysctls:
+ description: |-
+ Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+ sysctls (by the container runtime) might fail to launch.
+ Note that this field cannot be set when spec.os.name is windows.
+ items:
+ description: Sysctl defines a kernel parameter to be set
+ properties:
+ name:
+ description: Name of a property to set
+ type: string
+ value:
+ description: Value of a property to set
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options within a container's SecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ serviceAccount:
+ description: |-
+ DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+ Deprecated: Use serviceAccountName instead.
+ type: string
+ serviceAccountName:
+ description: |-
+ ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ type: string
+ setHostnameAsFQDN:
+ description: |-
+ If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+ In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+ In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+ If a pod does not have FQDN, this has no effect.
+ Default to false.
+ type: boolean
+ shareProcessNamespace:
+ description: |-
+ Share a single process namespace between all of the containers in a pod.
+ When this is set containers will be able to view and signal processes from other containers
+ in the same pod, and the first process in each container will not be assigned PID 1.
+ HostPID and ShareProcessNamespace cannot both be set.
+ Optional: Default to false.
+ type: boolean
+ subdomain:
+ description: |-
+                    If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+                    If not specified, the pod will not have a domainname at all.
+ type: string
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ If this value is nil, the default grace period will be used instead.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ Defaults to 30 seconds.
+ format: int64
+ type: integer
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: |-
+                      The pod this Toleration is attached to tolerates any taint that matches
+                      the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
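+                # Illustrative only (hand-written note, not generator output): a tolerations
+                # entry matching this schema; the taint key below is a hypothetical example.
+                #
+                #   tolerations:
+                #   - key: "nvidia.com/gpu"
+                #     operator: "Exists"
+                #     effect: "NoSchedule"
+                #
+                # With operator "Exists" the value is left empty, so the pod tolerates any value
+                # carried by that taint key.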
+ topologySpreadConstraints:
+ description: |-
+ TopologySpreadConstraints describes how a group of pods ought to spread across topology
+ domains. Scheduler will schedule pods in a way which abides by the constraints.
+ All topologySpreadConstraints are ANDed.
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select the pods over which
+ spreading will be calculated. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are ANDed with labelSelector
+ to select the group of existing pods over which spreading will be calculated
+ for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ MatchLabelKeys cannot be set when LabelSelector isn't set.
+ Keys that don't exist in the incoming pod labels will
+ be ignored. A null or empty list means only match against labelSelector.
+
+ This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ maxSkew:
+ description: |-
+ MaxSkew describes the degree to which pods may be unevenly distributed.
+ When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+ between the number of matching pods in the target topology and the global minimum.
+ The global minimum is the minimum number of matching pods in an eligible domain
+ or zero if the number of eligible domains is less than MinDomains.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 2/2/1:
+ In this case, the global minimum is 1.
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, new pod with the same labelSelector cannot be scheduled,
+ because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ it will violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each as a "bucket", and try to put balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: |-
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ the spread constraint.
+ - DoNotSchedule (default) tells the scheduler not to schedule it.
+ - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+ but giving higher precedence to topologies that would help reduce the
+ skew.
+ A constraint is considered "Unsatisfiable" for an incoming pod
+ if and only if every possible node assignment for that pod would violate
+ "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 3/1/1:
+ | zone1 | zone2 | zone3 |
+ | P P P | P | P |
+ If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+ to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+ MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+ won't make it *more* imbalanced.
+ It's a required field.
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - topologyKey
+ - whenUnsatisfiable
+ x-kubernetes-list-type: map
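+                # Illustrative only (hand-written note, not generator output): a single
+                # constraint spreading matching pods evenly across zones; the app label is a
+                # hypothetical selector.
+                #
+                #   topologySpreadConstraints:
+                #   - maxSkew: 1
+                #     topologyKey: topology.kubernetes.io/zone
+                #     whenUnsatisfiable: DoNotSchedule
+                #     labelSelector:
+                #       matchLabels:
+                #         app: my-workload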
+ volumes:
+ description: |-
+ List of volumes that can be mounted by containers belonging to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes
+ items:
+ description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
+ properties:
+ awsElasticBlockStore:
+ description: |-
+ awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly value true will force the readOnly setting in VolumeMounts.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: boolean
+ volumeID:
+ description: |-
+ volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ description: azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ properties:
+ cachingMode:
+ description: 'cachingMode is the Host Caching mode: None, Read Only, Read Write.'
+ type: string
+ diskName:
+ description: diskName is the Name of the data disk in the blob storage
+ type: string
+ diskURI:
+ description: diskURI is the URI of data disk in the blob storage
+ type: string
+ fsType:
+ default: ext4
+ description: |-
+ fsType is Filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ kind:
+ description: 'kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared'
+ type: string
+ readOnly:
+ default: false
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ description: azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ properties:
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretName:
+ description: secretName is the name of secret that contains Azure Storage Account Name and Key
+ type: string
+ shareName:
+ description: shareName is the azure share Name
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ description: cephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+ properties:
+ monitors:
+ description: |-
+ monitors is Required: Monitors is a collection of Ceph monitors
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: 'path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /'
+ type: string
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: boolean
+ secretFile:
+ description: |-
+ secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ secretRef:
+ description: |-
+ secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is optional: User is the rados user name, default is admin
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ description: |-
+ cinder represents a cinder volume attached and mounted on kubelets host machine.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is optional: points to a secret object containing parameters used to connect
+ to OpenStack.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeID:
+ description: |-
+ volumeID used to identify the volume in cinder.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ description: configMap represents a configMap that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+                              Optional: mode bits used to set permissions on created files by default.
+                              Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+                              YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+                              Defaults to 0644.
+                              Directories within the path are not affected by this setting.
+                              This might be in conflict with other options that affect the file
+                              mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: Items is a list of downward API volume file
+ items:
+ description: DownwardAPIVolumeFile represents information to create the file containing the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
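+                      # Illustrative only (hand-written note, not generator output): a
+                      # downwardAPI volume exposing the pod's labels as a file; the volume name
+                      # and path are hypothetical.
+                      #
+                      #   volumes:
+                      #   - name: podinfo
+                      #     downwardAPI:
+                      #       items:
+                      #       - path: "labels"
+                      #         fieldRef:
+                      #           fieldPath: metadata.labels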
+ emptyDir:
+ description: |-
+ emptyDir represents a temporary directory that shares a pod's lifetime.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ properties:
+ medium:
+ description: |-
+ medium represents what type of storage medium should back this directory.
+ The default is "" which means to use the node's default medium.
+ Must be an empty string (default) or Memory.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ sizeLimit is the total amount of local storage required for this EmptyDir volume.
+ The size limit is also applicable for memory medium.
+ The maximum usage on memory medium EmptyDir would be the minimum value between
+ the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ The default is nil which means that the limit is undefined.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
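+                      # Illustrative only (hand-written note, not generator output): a
+                      # memory-backed emptyDir scratch volume capped at 1Gi; the volume name is
+                      # hypothetical.
+                      #
+                      #   volumes:
+                      #   - name: scratch
+                      #     emptyDir:
+                      #       medium: Memory
+                      #       sizeLimit: 1Gi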
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+                              Will be used to create a stand-alone PVC to provision the volume.
+                              The pod in which this EphemeralVolumeSource is embedded will be the
+                              owner of the PVC, i.e. the PVC will be deleted together with the
+                              pod. The name of the PVC will be `<pod name>-<volume name>` where
+                              `<volume name>` is the name from the `PodSpec.Volumes` array
+                              entry. Pod validation will reject the pod if the concatenated name
+                              is not valid for a PVC (for example, too long).
+
+                              An existing PVC with that name that is not owned by the pod
+                              will *not* be used for the pod to avoid using an unrelated
+                              volume by mistake. Starting the pod is then blocked until
+                              the unrelated PVC is removed. If such a pre-created PVC is
+                              meant to be used by the pod, the PVC has to be updated with an
+                              owner reference to the pod once the pod exists. Normally
+                              this should not be necessary, but it may be useful when
+                              manually reconstructing a broken cluster.
+
+                              This field is read-only and no changes will be made by Kubernetes
+                              to the PVC after it has been created.
+
+                              Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ finalizers:
+ items:
+ type: string
+ type: array
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ name:
+ type: string
+ namespace:
+ type: string
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+                                      volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+                                      If specified, the CSI driver will create or update the volume with the attributes defined
+                                      in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName;
+                                      it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+                                      will be applied to the claim, but it's not allowed to reset this field to empty string once it is set.
+                                      If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+                                      will be set by the persistentvolume controller if it exists.
+                                      If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                                      set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+                                      exists.
+                                      More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+                                      (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
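+                      # Illustrative only (hand-written note, not generator output): a generic
+                      # ephemeral volume whose PVC is created with the pod and deleted with it;
+                      # the volume name and storage class below are hypothetical.
+                      #
+                      #   volumes:
+                      #   - name: scratch-vol
+                      #     ephemeral:
+                      #       volumeClaimTemplate:
+                      #         spec:
+                      #           accessModes: ["ReadWriteOnce"]
+                      #           storageClassName: "standard-rwo"
+                      #           resources:
+                      #             requests:
+                      #               storage: 10Gi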
+ fc:
+ description: fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target worldwide names (WWNs)'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ properties:
+ driver:
+ description: driver is the name of the driver to use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - driver
+ type: object
+ flocker:
+ description: flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
+ properties:
+ datasetName:
+ description: |-
+ datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker
+ should be considered as deprecated
+ type: string
+ datasetUUID:
+ description: datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset
+ type: string
+ type: object
+ gcePersistentDisk:
+ description: |-
+ gcePersistentDisk represents a GCE Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ format: int32
+ type: integer
+ pdName:
+ description: |-
+ pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ description: |-
+ gitRepo represents a git repository at a particular revision.
+ DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ into the Pod's container.
+ properties:
+ directory:
+ description: |-
+ directory is the target directory name.
+ Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ git repository. Otherwise, if specified, the volume will contain the git repository in
+ the subdirectory with the given name.
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description: revision is the commit hash for the specified revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ image:
+ description: |-
+                          image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+                          The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+                          - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+                          - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+                          - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+                          The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+                          A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+                          The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+                          The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+                          The volume will be mounted read-only (ro) and non-executable files (noexec).
+                          Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+                          The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+                              Policy for pulling OCI objects. Possible values are:
+                              Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+                              Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+                              IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+                              Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
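+                      # Illustrative only (hand-written note, not generator output): an OCI
+                      # image volume mounted read-only into the pod; the reference below is a
+                      # hypothetical registry path.
+                      #
+                      #   volumes:
+                      #   - name: model-artifacts
+                      #     image:
+                      #       reference: registry.example.com/models/artifact:v1
+                      #       pullPolicy: IfNotPresent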
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether support iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ type: string
+ initiatorName:
+ description: |-
+                              initiatorName is the custom iSCSI Initiator Name.
+                              If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+                              <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ default: default
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ description: |-
+ name of the volume.
+ Must be a DNS_LABEL and unique within the pod.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ nfs:
+ description: |-
+ nfs represents an NFS mount on the host that shares a pod's lifetime
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ properties:
+ path:
+ description: |-
+ path that is exported by the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the NFS export to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: boolean
+ server:
+ description: |-
+ server is the hostname or IP address of the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ description: |-
+ persistentVolumeClaimVolumeSource represents a reference to a
+ PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ properties:
+ claimName:
+ description: |-
+ claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ type: string
+ readOnly:
+ description: |-
+ readOnly Will force the ReadOnly setting in VolumeMounts.
+ Default false.
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ description: photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ pdID:
+ description: pdID is the ID that identifies Photon Controller persistent disk
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ description: portworxVolume represents a portworx volume attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fSType represents the filesystem type to mount
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ volumeID:
+ description: volumeID uniquely identifies a Portworx volume
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ description: projected items for all in one resources secrets, configmaps, and downward API
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: |-
+ sources is the list of volume projections. Each entry in this list
+ handles one source.
+ items:
+ description: |-
+ Projection that may be projected along with other supported volume types.
+ Exactly one of these fields must be set.
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+
+ Kubelet performs aggressive normalization of the PEM contents written
+ into the pod filesystem. Esoteric PEM features such as inter-block
+ comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet
+ may change the order over time.
+ properties:
+ labelSelector:
+ description: |-
+ Select all ClusterTrustBundles that match this label selector. Only has
+ effect if signerName is set. Mutually-exclusive with name. If unset,
+ interpreted as "match nothing". If set but empty, interpreted as "match
+ everything".
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ name:
+ description: |-
+ Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ with signerName and labelSelector.
+ type: string
+ optional:
+ description: |-
+ If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ aren't available. If using name, then the named ClusterTrustBundle is
+ allowed not to exist. If using signerName, then the combination of
+ signerName and labelSelector is allowed to match zero
+ ClusterTrustBundles.
+ type: boolean
+ path:
+ description: Relative path from the volume root to write the bundle.
+ type: string
+ signerName:
+ description: |-
+ Select all ClusterTrustBundles that match this signer name.
+ Mutually-exclusive with name. The contents of all selected
+ ClusterTrustBundles will be unified and deduplicated.
+ type: string
+ required:
+ - path
+ type: object
+ configMap:
+ description: configMap information about the configMap data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specifies whether the ConfigMap or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ downwardAPI:
+ description: downwardAPI information about the downwardAPI data to project
+ properties:
+ items:
+ description: Items is a list of DownwardAPIVolume file
+ items:
+ description: DownwardAPIVolumeFile represents information to create the file containing the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ secret:
+ description: secret information about the secret data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional field specifies whether the Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information about the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description: quobyte represents a Quobyte mount on the host that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ default: /etc/ceph/keyring
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ pool:
+ default: rbd
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is name of the authentication secret for RBDUser. If provided
+ overrides keyring.
+ Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ default: admin
+ description: |-
+ user is the rados user name.
+ Default is admin.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ description: scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ default: xfs
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs".
+ Default is "xfs".
+ type: string
+ gateway:
+ description: gateway is the host address of the ScaleIO API Gateway.
+ type: string
+ protectionDomain:
+ description: protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef references to the secret for ScaleIO user and other
+ sensitive information. If this is not provided, Login operation will fail.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ sslEnabled:
+ description: sslEnabled flag enables/disables SSL communication with the Gateway, default false
+ type: boolean
+ storageMode:
+ default: ThinProvisioned
+ description: |-
+ storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ Default is ThinProvisioned.
+ type: string
+ storagePool:
+ description: storagePool is the ScaleIO Storage Pool associated with the protection domain.
+ type: string
+ system:
+ description: system is the name of the storage system as configured in ScaleIO.
+ type: string
+ volumeName:
+ description: |-
+ volumeName is the name of a volume already created in the ScaleIO system
+ that is associated with this volume source.
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ description: |-
+ secret represents a secret that should populate this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values
+ for mode bits. Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items If unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ optional:
+ description: optional field specifies whether the Secret or its keys must be defined
+ type: boolean
+ secretName:
+ description: |-
+ secretName is the name of the secret in the pod's namespace to use.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ type: string
+ type: object
+ storageos:
+ description: storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef specifies the secret to use for obtaining the StorageOS API
+ credentials. If not specified, default values will be attempted.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeName:
+ description: |-
+ volumeName is the human-readable name of the StorageOS volume. Volume
+ names are only unique within a namespace.
+ type: string
+ volumeNamespace:
+ description: |-
+ volumeNamespace specifies the scope of the volume within StorageOS. If no
+ namespace is specified then the Pod's namespace will be used. This allows the
+ Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ Set VolumeName to any name to override the default behaviour.
+ Set to "default" if you are not using namespaces within StorageOS.
+ Namespaces that do not pre-exist within StorageOS will be created.
+ type: string
+ type: object
+ vsphereVolume:
+ description: vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ storagePolicyID:
+ description: storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
+ type: string
+ storagePolicyName:
+ description: storagePolicyName is the storage Policy Based Management (SPBM) profile name.
+ type: string
+ volumePath:
+ description: volumePath is the path that identifies vSphere volume vmdk
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - containers
+ type: object
+ type: object
+ ttlSecondsAfterFinished:
+ description: |-
+ ttlSecondsAfterFinished limits the lifetime of a Job that has finished
+ execution (either Complete or Failed). If this field is set,
+ ttlSecondsAfterFinished after the Job finishes, it is eligible to be
+ automatically deleted. When the Job is being deleted, its lifecycle
+ guarantees (e.g. finalizers) will be honored. If this field is unset,
+ the Job won't be automatically deleted. If this field is set to zero,
+ the Job becomes eligible to be deleted immediately after it finishes.
+ format: int32
+ type: integer
+ required:
+ - template
+ type: object
+ type: object
+ required:
+ - name
+ - template
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ startupPolicy:
+ description: StartupPolicy, if set, configures in what order jobs must be started
+ properties:
+ startupPolicyOrder:
+ description: |-
+ StartupPolicyOrder determines the startup order of the ReplicatedJobs.
+ AnyOrder means to start replicated jobs in any order.
+ InOrder means to start them as they are listed in the JobSet. A ReplicatedJob is started only
+ when all the jobs of the previous one are ready.
+ enum:
+ - AnyOrder
+ - InOrder
+ type: string
+ required:
+ - startupPolicyOrder
+ type: object
+ x-kubernetes-validations:
+ - message: Value is immutable
+ rule: self == oldSelf
+ successPolicy:
+ description: |-
+ SuccessPolicy configures when to declare the JobSet as
+ succeeded.
+ The JobSet is always declared succeeded if all jobs in the set
+ finished with status complete.
+ properties:
+ operator:
+ description: Operator determines either All or Any of the selected jobs should succeed to consider the JobSet successful
+ enum:
+ - All
+ - Any
+ type: string
+ targetReplicatedJobs:
+ description: |-
+ TargetReplicatedJobs are the names of the replicated jobs the operator will apply to.
+ A null or empty list will apply to all replicatedJobs.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - operator
+ type: object
+ x-kubernetes-validations:
+ - message: Value is immutable
+ rule: self == oldSelf
+ suspend:
+ description: Suspend suspends all running child Jobs when set to true.
+ type: boolean
+ ttlSecondsAfterFinished:
+ description: |-
+ TTLSecondsAfterFinished limits the lifetime of a JobSet that has finished
+ execution (either Complete or Failed). If this field is set,
+ TTLSecondsAfterFinished after the JobSet finishes, it is eligible to be
+ automatically deleted. When the JobSet is being deleted, its lifecycle
+ guarantees (e.g. finalizers) will be honored. If this field is unset,
+ the JobSet won't be automatically deleted. If this field is set to zero,
+ the JobSet becomes eligible to be deleted immediately after it finishes.
+ format: int32
+ minimum: 0
+ type: integer
+ type: object
+ status:
+ description: JobSetStatus defines the observed state of JobSet
+ properties:
+ conditions:
+ items:
+ description: Condition contains details for one aspect of the current state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ replicatedJobsStatus:
+ description: ReplicatedJobsStatus tracks the number of JobsReady for each replicatedJob.
+ items:
+ description: ReplicatedJobStatus defines the observed ReplicatedJobs Readiness.
+ properties:
+ active:
+ description: |-
+ Active is the number of child Jobs with at least 1 pod in a running or pending state
+ which are not marked for deletion.
+ format: int32
+ type: integer
+ failed:
+ description: Failed is the number of failed child Jobs.
+ format: int32
+ type: integer
+ name:
+ description: Name of the ReplicatedJob.
+ type: string
+ ready:
+ description: |-
+ Ready is the number of child Jobs where the number of ready pods and completed pods
+ is greater than or equal to the total expected pod count for the Job (i.e., the minimum
+ of job.spec.parallelism and job.spec.completions).
+ format: int32
+ type: integer
+ succeeded:
+ description: Succeeded is the number of successfully completed child Jobs.
+ format: int32
+ type: integer
+ suspended:
+ description: Suspended is the number of child Jobs which are in a suspended state.
+ format: int32
+ type: integer
+ required:
+ - active
+ - failed
+ - name
+ - ready
+ - succeeded
+ - suspended
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ restarts:
+ description: Restarts tracks the number of times the JobSet has restarted (i.e. recreated in case of RecreateAll policy).
+ format: int32
+ type: integer
+ restartsCountTowardsMax:
+ description: RestartsCountTowardsMax tracks the number of times the JobSet has restarted that counts towards the maximum allowed number of restarts.
+ format: int32
+ type: integer
+ terminalState:
+ description: |-
+ TerminalState is the state of the JobSet when it finishes execution.
+ It can be either Complete or Failed. Otherwise, it is empty by default.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ app.kubernetes.io/component: rbac
+ app.kubernetes.io/created-by: jobset
+ app.kubernetes.io/instance: controller-manager
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: serviceaccount
+ app.kubernetes.io/part-of: jobset
+ name: jobset-controller-manager
+ namespace: jobset-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ app.kubernetes.io/component: rbac
+ app.kubernetes.io/created-by: jobset
+ app.kubernetes.io/instance: leader-election-role
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: role
+ app.kubernetes.io/part-of: jobset
+ name: jobset-leader-election-role
+ namespace: jobset-system
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: jobset-manager-role
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - services
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - mutatingwebhookconfigurations
+ - validatingwebhookconfigurations
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - jobset.x-k8s.io
+ resources:
+ - jobsets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - jobset.x-k8s.io
+ resources:
+ - jobsets/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - jobset.x-k8s.io
+ resources:
+ - jobsets/status
+ verbs:
+ - get
+ - patch
+ - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: kube-rbac-proxy
+ app.kubernetes.io/created-by: jobset
+ app.kubernetes.io/instance: metrics-reader
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: clusterrole
+ app.kubernetes.io/part-of: jobset
+ name: jobset-metrics-reader
+rules:
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: kube-rbac-proxy
+ app.kubernetes.io/created-by: jobset
+ app.kubernetes.io/instance: proxy-role
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: clusterrole
+ app.kubernetes.io/part-of: jobset
+ name: jobset-proxy-role
+rules:
+- apiGroups:
+ - authentication.k8s.io
+ resources:
+ - tokenreviews
+ verbs:
+ - create
+- apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/component: rbac
+ app.kubernetes.io/created-by: jobset
+ app.kubernetes.io/instance: leader-election-rolebinding
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: rolebinding
+ app.kubernetes.io/part-of: jobset
+ name: jobset-leader-election-rolebinding
+ namespace: jobset-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: jobset-leader-election-role
+subjects:
+- kind: ServiceAccount
+ name: jobset-controller-manager
+ namespace: jobset-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/component: rbac
+ app.kubernetes.io/created-by: jobset
+ app.kubernetes.io/instance: manager-rolebinding
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: clusterrolebinding
+ app.kubernetes.io/part-of: jobset
+ name: jobset-manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: jobset-manager-role
+subjects:
+- kind: ServiceAccount
+ name: jobset-controller-manager
+ namespace: jobset-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/component: kube-rbac-proxy
+ app.kubernetes.io/created-by: jobset
+ app.kubernetes.io/instance: proxy-rolebinding
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: clusterrolebinding
+ app.kubernetes.io/part-of: jobset
+ name: jobset-proxy-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: jobset-proxy-role
+subjects:
+- kind: ServiceAccount
+ name: jobset-controller-manager
+ namespace: jobset-system
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: jobset-webhook-server-cert
+ namespace: jobset-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/component: kube-rbac-proxy
+ app.kubernetes.io/created-by: jobset
+ app.kubernetes.io/instance: controller-manager-metrics-service
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: service
+ app.kubernetes.io/part-of: jobset
+ control-plane: controller-manager
+ name: jobset-controller-manager-metrics-service
+ namespace: jobset-system
+spec:
+ ports:
+ - name: https
+ port: 8443
+ protocol: TCP
+ targetPort: https
+ selector:
+ control-plane: controller-manager
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/component: webhook
+ app.kubernetes.io/created-by: jobset
+ app.kubernetes.io/instance: webhook-service
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: service
+ app.kubernetes.io/part-of: jobset
+ name: jobset-webhook-service
+ namespace: jobset-system
+spec:
+ ports:
+ - port: 443
+ protocol: TCP
+ targetPort: 9443
+ selector:
+ control-plane: controller-manager
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/component: manager
+ app.kubernetes.io/created-by: jobset
+ app.kubernetes.io/instance: controller-manager
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: deployment
+ app.kubernetes.io/part-of: jobset
+ control-plane: controller-manager
+ name: jobset-controller-manager
+ namespace: jobset-system
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ control-plane: controller-manager
+ template:
+ metadata:
+ annotations:
+ kubectl.kubernetes.io/default-container: manager
+ labels:
+ control-plane: controller-manager
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/arch
+ operator: In
+ values:
+ - amd64
+ - arm64
+ - ppc64le
+ - s390x
+ - key: kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ containers:
+ - args:
+ - --zap-log-level=2
+ - --health-probe-bind-address=:8081
+ - --metrics-bind-address=127.0.0.1:8080
+ - --leader-elect
+ command:
+ - /manager
+ image: registry.k8s.io/jobset/jobset:v0.7.1
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 8081
+ initialDelaySeconds: 15
+ periodSeconds: 20
+ name: manager
+ ports:
+ - containerPort: 9443
+ name: webhook-server
+ protocol: TCP
+ readinessProbe:
+ httpGet:
+ path: /readyz
+ port: 8081
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ resources:
+ limits:
+ cpu: 2
+ memory: 512Mi
+ requests:
+ cpu: 500m
+ memory: 128Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ volumeMounts:
+ - mountPath: /tmp/k8s-webhook-server/serving-certs
+ name: cert
+ readOnly: true
+ - args:
+ - --secure-listen-address=0.0.0.0:8443
+ - --upstream=http://127.0.0.1:8080/
+ - --logtostderr=true
+ - --v=0
+ image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
+ name: kube-rbac-proxy
+ ports:
+ - containerPort: 8443
+ name: https
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 500m
+ memory: 128Mi
+ requests:
+ cpu: 5m
+ memory: 64Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ securityContext:
+ runAsNonRoot: true
+ serviceAccountName: jobset-controller-manager
+ terminationGracePeriodSeconds: 10
+ volumes:
+ - name: cert
+ secret:
+ defaultMode: 420
+ secretName: jobset-webhook-server-cert
+ tolerations:
+ - effect: NoSchedule
+ key: components.gke.io/gke-managed-components
+ operator: Equal
+ value: "true"
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+ name: jobset-mutating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: jobset-webhook-service
+ namespace: jobset-system
+ path: /mutate-jobset-x-k8s-io-v1alpha2-jobset
+ failurePolicy: Fail
+ name: mjobset.kb.io
+ rules:
+ - apiGroups:
+ - jobset.x-k8s.io
+ apiVersions:
+ - v1alpha2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - jobsets
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: jobset-webhook-service
+ namespace: jobset-system
+ path: /mutate--v1-pod
+ failurePolicy: Fail
+ name: mpod.kb.io
+ objectSelector:
+ matchExpressions:
+ - key: jobset.sigs.k8s.io/jobset-name
+ operator: Exists
+ rules:
+ - apiGroups:
+ - ""
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ resources:
+ - pods
+ sideEffects: None
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ name: jobset-validating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: jobset-webhook-service
+ namespace: jobset-system
+ path: /validate-jobset-x-k8s-io-v1alpha2-jobset
+ failurePolicy: Fail
+ name: vjobset.kb.io
+ rules:
+ - apiGroups:
+ - jobset.x-k8s.io
+ apiVersions:
+ - v1alpha2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - jobsets
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: jobset-webhook-service
+ namespace: jobset-system
+ path: /validate--v1-pod
+ failurePolicy: Fail
+ name: vpod.kb.io
+ objectSelector:
+ matchExpressions:
+ - key: jobset.sigs.k8s.io/jobset-name
+ operator: Exists
+ rules:
+ - apiGroups:
+ - ""
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ resources:
+ - pods
+ sideEffects: None
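+# The commented-out manifest below is an illustrative sketch (it is not applied
+# by this file) of a minimal JobSet using the fields documented in the CRD
+# above: replicatedJobs, startupPolicy, successPolicy and ttlSecondsAfterFinished.
+# All names, images, and replica counts are assumptions chosen for the example.
+#
+# apiVersion: jobset.x-k8s.io/v1alpha2
+# kind: JobSet
+# metadata:
+#   name: example-jobset
+# spec:
+#   startupPolicy:
+#     startupPolicyOrder: InOrder     # start replicated jobs in the listed order
+#   successPolicy:
+#     operator: All                   # all target replicated jobs must succeed
+#   ttlSecondsAfterFinished: 300      # eligible for deletion 5 minutes after finishing
+#   replicatedJobs:
+#   - name: workers                   # name and template are required per replicated job
+#     replicas: 2
+#     template:
+#       spec:
+#         parallelism: 2
+#         completions: 2
+#         template:
+#           spec:
+#             restartPolicy: Never
+#             containers:
+#             - name: main
+#               image: busybox
+#               command: ["sleep", "10"]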
diff --git a/modules/management/kubectl-apply/manifests/kueue-v0.9.1.yaml b/modules/management/kubectl-apply/manifests/kueue-v0.9.1.yaml
new file mode 100644
index 0000000000..c4573abc34
--- /dev/null
+++ b/modules/management/kubectl-apply/manifests/kueue-v0.9.1.yaml
@@ -0,0 +1,13131 @@
+# Copyright 2024 "Google LLC"
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-system
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: admissionchecks.kueue.x-k8s.io
+spec:
+ group: kueue.x-k8s.io
+ names:
+ kind: AdmissionCheck
+ listKind: AdmissionCheckList
+ plural: admissionchecks
+ singular: admissioncheck
+ scope: Cluster
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: AdmissionCheck is the Schema for the admissionchecks API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: AdmissionCheckSpec defines the desired state of AdmissionCheck
+ properties:
+ controllerName:
+ description: |-
+ controllerName identifies the controller that processes the AdmissionCheck,
+ not necessarily a Kubernetes Pod or Deployment name. Cannot be empty.
+ type: string
+ x-kubernetes-validations:
+ - message: field is immutable
+ rule: self == oldSelf
+ parameters:
+ description: |-
+ Parameters identifies a configuration with additional parameters for the
+ check.
+ properties:
+ apiGroup:
+ description: ApiGroup is the group for the resource being referenced.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ kind:
+ description: Kind is the type of the resource being referenced.
+ maxLength: 63
+ pattern: ^(?i)[a-z]([-a-z0-9]*[a-z0-9])?$
+ type: string
+ name:
+ description: Name is the name of the resource being referenced.
+ maxLength: 63
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+ type: string
+ required:
+ - apiGroup
+ - kind
+ - name
+ type: object
+ retryDelayMinutes:
+ default: 15
+ description: |-
+ RetryDelayMinutes specifies how long to keep the workload suspended after
+ a failed check (after it transitioned to False). When the delay period has passed, the check
+ state goes to "Unknown". The default is 15 min.
+ Deprecated: retryDelayMinutes has been deprecated since v0.8 and will be removed in v1beta2.
+ format: int64
+ type: integer
+ required:
+ - controllerName
+ type: object
+ status:
+ description: AdmissionCheckStatus defines the observed state of AdmissionCheck
+ properties:
+ conditions:
+ description: |-
+ conditions hold the latest available observations of the AdmissionCheck
+ current state.
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
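+# The commented-out manifest below is an illustrative sketch (it is not applied
+# by this file) of an AdmissionCheck that delegates to an external controller,
+# using the spec fields documented above. The controller and parameter names are
+# hypothetical placeholders, not real components.
+#
+# apiVersion: kueue.x-k8s.io/v1beta1
+# kind: AdmissionCheck
+# metadata:
+#   name: example-check
+# spec:
+#   controllerName: example.com/sample-controller   # required and immutable
+#   parameters:                                     # optional extra configuration
+#     apiGroup: example.com
+#     kind: SampleCheckConfig
+#     name: sample-config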
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: clusterqueues.kueue.x-k8s.io
+spec:
+ group: kueue.x-k8s.io
+ names:
+ kind: ClusterQueue
+ listKind: ClusterQueueList
+ plural: clusterqueues
+ shortNames:
+ - cq
+ singular: clusterqueue
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - description: Cohort that this ClusterQueue belongs to
+ jsonPath: .spec.cohort
+ name: Cohort
+ type: string
+ - description: The queueing strategy used to prioritize workloads
+ jsonPath: .spec.queueingStrategy
+ name: Strategy
+ priority: 1
+ type: string
+ - description: Number of pending workloads
+ jsonPath: .status.pendingWorkloads
+ name: Pending Workloads
+ type: integer
+ - description: Number of admitted workloads that haven't finished yet
+ jsonPath: .status.admittedWorkloads
+ name: Admitted Workloads
+ priority: 1
+ type: integer
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: ClusterQueue is the Schema for the clusterQueue API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ClusterQueueSpec defines the desired state of ClusterQueue
+ properties:
+ admissionChecks:
+ description: |-
+ admissionChecks lists the AdmissionChecks required by this ClusterQueue.
+ Cannot be used along with AdmissionCheckStrategy.
+ items:
+ type: string
+ type: array
+ admissionChecksStrategy:
+ description: |-
+ admissionChecksStrategy defines a list of strategies to determine which ResourceFlavors require AdmissionChecks.
+ This property cannot be used in conjunction with the 'admissionChecks' property.
+ properties:
+ admissionChecks:
+ description: admissionChecks is a list of strategies for AdmissionChecks
+ items:
+ description: AdmissionCheckStrategyRule defines rules for a
+ single AdmissionCheck
+ properties:
+ name:
+ description: name is an AdmissionCheck's name.
+ type: string
+ onFlavors:
+ description: |-
+ onFlavors is a list of ResourceFlavors' names that this AdmissionCheck should run for.
+ If empty, the AdmissionCheck will run for all workloads submitted to the ClusterQueue.
+ items:
+ description: ResourceFlavorReference is the name of the
+ ResourceFlavor.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ type: array
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ cohort:
+ description: |-
+ cohort that this ClusterQueue belongs to. CQs that belong to the
+ same cohort can borrow unused resources from each other.
+
+ A CQ can be a member of a single borrowing cohort. A workload submitted
+ to a queue referencing this CQ can borrow quota from any CQ in the cohort.
+ Only quota for the [resource, flavor] pairs listed in the CQ can be
+ borrowed.
+ If empty, this ClusterQueue cannot borrow from any other ClusterQueue and
+ vice versa.
+
+ A cohort is a name that links CQs together, but it doesn't reference any
+ object.
+
+ Validation of a cohort name is equivalent to that of object names:
+ subdomain in DNS (RFC 1123).
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ fairSharing:
+ description: |-
+ fairSharing defines the properties of the ClusterQueue when participating in fair sharing.
+ The values are only relevant if fair sharing is enabled in the Kueue configuration.
+ properties:
+ weight:
+ anyOf:
+ - type: integer
+ - type: string
+ default: 1
+ description: |-
+ weight gives a comparative advantage to this ClusterQueue when competing for unused
+ resources in the cohort against other ClusterQueues.
+ The share of a ClusterQueue is based on the dominant resource usage above nominal
+ quotas for each resource, divided by the weight.
+ Admission prioritizes scheduling workloads from ClusterQueues with the lowest share
+ and preempting workloads from the ClusterQueues with the highest share.
+ A zero weight implies infinite share value, meaning that this ClusterQueue will always
+ be at a disadvantage against other ClusterQueues.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ flavorFungibility:
+ default: {}
+ description: |-
+ flavorFungibility defines whether a workload should try the next flavor
+ before borrowing or preempting in the flavor being evaluated.
+ properties:
+ whenCanBorrow:
+ default: Borrow
+ description: |-
+ whenCanBorrow determines whether a workload should try the next flavor
+ before borrowing in the current flavor. The possible values are:
+
+ - `Borrow` (default): allocate in current flavor if borrowing
+ is possible.
+ - `TryNextFlavor`: try next flavor even if the current
+ flavor has enough resources to borrow.
+ enum:
+ - Borrow
+ - TryNextFlavor
+ type: string
+ whenCanPreempt:
+ default: TryNextFlavor
+ description: |-
+ whenCanPreempt determines whether a workload should try the next flavor
+ before preempting in the current flavor. The possible values are:
+
+ - `Preempt`: allocate in current flavor if it's possible to preempt some workloads.
+ - `TryNextFlavor` (default): try next flavor even if there are enough
+ candidates for preemption in the current flavor.
+ enum:
+ - Preempt
+ - TryNextFlavor
+ type: string
+ type: object
+ namespaceSelector:
+ description: |-
+ namespaceSelector defines which namespaces are allowed to submit workloads to
+ this clusterQueue. Beyond this basic support for policy, a policy agent like
+ Gatekeeper should be used to enforce more advanced policies.
+ Defaults to null which is a nothing selector (no namespaces eligible).
+ If set to an empty selector `{}`, then all namespaces are eligible.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ preemption:
+ default: {}
+ description: |-
+ preemption describes policies to preempt Workloads from this ClusterQueue
+ or the ClusterQueue's cohort.
+
+ Preemption can happen in two scenarios:
+
+ - When a Workload fits within the nominal quota of the ClusterQueue, but
+ the quota is currently borrowed by other ClusterQueues in the cohort.
+ Preempting Workloads in other ClusterQueues allows this ClusterQueue to
+ reclaim its nominal quota.
+ - When a Workload doesn't fit within the nominal quota of the ClusterQueue
+ and there are admitted Workloads in the ClusterQueue with lower priority.
+
+ The preemption algorithm tries to find a minimal set of Workloads to
+ preempt to accommodate the pending Workload, preempting Workloads with
+ lower priority first.
+ properties:
+ borrowWithinCohort:
+ default: {}
+ description: |-
+ borrowWithinCohort provides configuration to allow preemption within
+ cohort while borrowing.
+ properties:
+ maxPriorityThreshold:
+ description: |-
+ maxPriorityThreshold allows restricting the set of workloads which
+ might be preempted by a borrowing workload, to only workloads with
+ priority less than or equal to the specified threshold priority.
+ When the threshold is not specified, then any workload satisfying the
+ policy can be preempted by the borrowing workload.
+ format: int32
+ type: integer
+ policy:
+ default: Never
+ description: |-
+ policy determines the policy for preemption to reclaim quota within cohort while borrowing.
+ Possible values are:
+ - `Never` (default): do not allow for preemption, in other
+ ClusterQueues within the cohort, for a borrowing workload.
+ - `LowerPriority`: allow preemption, in other ClusterQueues
+ within the cohort, for a borrowing workload, but only if
+ the preempted workloads are of lower priority.
+ enum:
+ - Never
+ - LowerPriority
+ type: string
+ type: object
+ reclaimWithinCohort:
+ default: Never
+ description: |-
+ reclaimWithinCohort determines whether a pending Workload can preempt
+ Workloads from other ClusterQueues in the cohort that are using more than
+ their nominal quota. The possible values are:
+
+ - `Never` (default): do not preempt Workloads in the cohort.
+ - `LowerPriority`: **Classic Preemption** if the pending Workload
+ fits within the nominal quota of its ClusterQueue, only preempt
+ Workloads in the cohort that have lower priority than the pending
+ Workload. **Fair Sharing** only preempt Workloads in the cohort that
+ have lower priority than the pending Workload and that satisfy the
+ fair sharing preemptionStrategies.
+ - `Any`: **Classic Preemption** if the pending Workload fits within
+ the nominal quota of its ClusterQueue, preempt any Workload in the
+ cohort, irrespective of priority. **Fair Sharing** preempt Workloads
+ in the cohort that satisfy the fair sharing preemptionStrategies.
+ enum:
+ - Never
+ - LowerPriority
+ - Any
+ type: string
+ withinClusterQueue:
+ default: Never
+ description: |-
+ withinClusterQueue determines whether a pending Workload that doesn't fit
+ within the nominal quota for its ClusterQueue, can preempt active Workloads in
+ the ClusterQueue. The possible values are:
+
+ - `Never` (default): do not preempt Workloads in the ClusterQueue.
+ - `LowerPriority`: only preempt Workloads in the ClusterQueue that have
+ lower priority than the pending Workload.
+ - `LowerOrNewerEqualPriority`: only preempt Workloads in the ClusterQueue that
+ either have a lower priority than the pending workload or equal priority
+ and are newer than the pending workload.
+ enum:
+ - Never
+ - LowerPriority
+ - LowerOrNewerEqualPriority
+ type: string
+ type: object
+ x-kubernetes-validations:
+ - message: reclaimWithinCohort=Never and borrowWithinCohort.Policy!=Never
+ rule: '!(self.reclaimWithinCohort == ''Never'' && has(self.borrowWithinCohort)
+ && self.borrowWithinCohort.policy != ''Never'')'
+ queueingStrategy:
+ default: BestEffortFIFO
+ description: |-
+ QueueingStrategy indicates the queueing strategy of the workloads
+ across the queues in this ClusterQueue.
+ Current Supported Strategies:
+
+ - StrictFIFO: workloads are ordered strictly by creation time.
+ Older workloads that can't be admitted will block admitting newer
+ workloads even if they fit available quota.
+ - BestEffortFIFO: workloads are ordered by creation time,
+ however older workloads that can't be admitted will not block
+ admitting newer workloads that fit existing quota.
+ enum:
+ - StrictFIFO
+ - BestEffortFIFO
+ type: string
+ resourceGroups:
+ description: |-
+ resourceGroups describes groups of resources.
+ Each resource group defines the list of resources and a list of flavors
+ that provide quotas for these resources.
+ Each resource and each flavor can only form part of one resource group.
+                  There can be up to 16 resourceGroups.
+ items:
+ properties:
+ coveredResources:
+ description: |-
+ coveredResources is the list of resources covered by the flavors in this
+ group.
+ Examples: cpu, memory, vendor.com/gpu.
+ The list cannot be empty and it can contain up to 16 resources.
+ items:
+ description: ResourceName is the name identifying various
+ resources in a ResourceList.
+ type: string
+ maxItems: 16
+ minItems: 1
+ type: array
+ flavors:
+ description: |-
+ flavors is the list of flavors that provide the resources of this group.
+ Typically, different flavors represent different hardware models
+ (e.g., gpu models, cpu architectures) or pricing models (on-demand vs spot
+ cpus).
+ Each flavor MUST list all the resources listed for this group in the same
+ order as the .resources field.
+ The list cannot be empty and it can contain up to 16 flavors.
+ items:
+ properties:
+ name:
+ description: |-
+ name of this flavor. The name should match the .metadata.name of a
+ ResourceFlavor. If a matching ResourceFlavor does not exist, the
+ ClusterQueue will have an Active condition set to False.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ resources:
+ description: |-
+ resources is the list of quotas for this flavor per resource.
+ There could be up to 16 resources.
+ items:
+ properties:
+ borrowingLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ borrowingLimit is the maximum amount of quota for the [flavor, resource]
+ combination that this ClusterQueue is allowed to borrow from the unused
+ quota of other ClusterQueues in the same cohort.
+ In total, at a given time, Workloads in a ClusterQueue can consume a
+ quantity of quota equal to nominalQuota+borrowingLimit, assuming the other
+ ClusterQueues in the cohort have enough unused quota.
+ If null, it means that there is no borrowing limit.
+ If not null, it must be non-negative.
+ borrowingLimit must be null if spec.cohort is empty.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ lendingLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ lendingLimit is the maximum amount of unused quota for the [flavor, resource]
+ combination that this ClusterQueue can lend to other ClusterQueues in the same cohort.
+ In total, at a given time, ClusterQueue reserves for its exclusive use
+                                    a quantity of quota equal to nominalQuota - lendingLimit.
+ If null, it means that there is no lending limit, meaning that
+ all the nominalQuota can be borrowed by other clusterQueues in the cohort.
+ If not null, it must be non-negative.
+ lendingLimit must be null if spec.cohort is empty.
+ This field is in beta stage and is enabled by default.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ name:
+ description: name of this resource.
+ type: string
+ nominalQuota:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ nominalQuota is the quantity of this resource that is available for
+ Workloads admitted by this ClusterQueue at a point in time.
+ The nominalQuota must be non-negative.
+ nominalQuota should represent the resources in the cluster available for
+ running jobs (after discounting resources consumed by system components
+ and pods not managed by kueue). In an autoscaled cluster, nominalQuota
+ should account for resources that can be provided by a component such as
+ Kubernetes cluster-autoscaler.
+
+ If the ClusterQueue belongs to a cohort, the sum of the quotas for each
+ (flavor, resource) combination defines the maximum quantity that can be
+ allocated by a ClusterQueue in the cohort.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - name
+ - nominalQuota
+ type: object
+ maxItems: 16
+ minItems: 1
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - name
+ - resources
+ type: object
+ maxItems: 16
+ minItems: 1
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - coveredResources
+ - flavors
+ type: object
+ x-kubernetes-validations:
+ - message: flavors must have the same number of resources as the
+ coveredResources
+ rule: self.flavors.all(x, size(x.resources) == size(self.coveredResources))
+ maxItems: 16
+ type: array
+ x-kubernetes-list-type: atomic
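+              # Editorial example (not part of the generated schema; flavor names and
+              # quota values are hypothetical, and borrowingLimit/lendingLimit assume
+              # spec.cohort is set): one resource group covering cpu and memory, with
+              # each flavor listing the resources in the same order as coveredResources:
+              #   resourceGroups:
+              #   - coveredResources: ["cpu", "memory"]
+              #     flavors:
+              #     - name: on-demand
+              #       resources:
+              #       - name: cpu
+              #         nominalQuota: 100
+              #         borrowingLimit: 50
+              #       - name: memory
+              #         nominalQuota: 200Gi
+              #     - name: spot
+              #       resources:
+              #       - name: cpu
+              #         nominalQuota: 400
+              #         lendingLimit: 100
+              #       - name: memory
+              #         nominalQuota: 800Gi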
+ stopPolicy:
+ default: None
+ description: |-
+                  stopPolicy - if set to a value different from None, the ClusterQueue is considered Inactive, and no new
+                  reservations are made.
+
+ Depending on its value, its associated workloads will:
+
+ - None - Workloads are admitted
+ - HoldAndDrain - Admitted workloads are evicted and Reserving workloads will cancel the reservation.
+ - Hold - Admitted workloads will run to completion and Reserving workloads will cancel the reservation.
+ enum:
+ - None
+ - Hold
+ - HoldAndDrain
+ type: string
+ type: object
+ x-kubernetes-validations:
+ - message: borrowingLimit must be nil when cohort is empty
+ rule: '!has(self.cohort) && has(self.resourceGroups) ? self.resourceGroups.all(rg,
+ rg.flavors.all(f, f.resources.all(r, !has(r.borrowingLimit)))) : true'
+ status:
+ description: ClusterQueueStatus defines the observed state of ClusterQueue
+ properties:
+ admittedWorkloads:
+ description: |-
+                  admittedWorkloads is the number of workloads currently admitted to this
+                  clusterQueue that haven't finished yet.
+ format: int32
+ type: integer
+ conditions:
+ description: |-
+ conditions hold the latest available observations of the ClusterQueue
+ current state.
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ fairSharing:
+ description: FairSharing contains the information about the current
+ status of fair sharing.
+ properties:
+ weightedShare:
+ description: |-
+                      WeightedShare represents the maximum of the ratios of usage above nominal
+ quota to the lendable resources in the cohort, among all the resources
+ provided by the ClusterQueue, and divided by the weight.
+ If zero, it means that the usage of the ClusterQueue is below the nominal quota.
+ If the ClusterQueue has a weight of zero, this will return 9223372036854775807,
+ the maximum possible share value.
+ format: int64
+ type: integer
+ required:
+ - weightedShare
+ type: object
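+              # Editorial note (illustrative arithmetic, values hypothetical): with a
+              # fair-sharing weight of 1, a ClusterQueue using 10 CPUs above its
+              # nominal quota in a cohort with 100 lendable CPUs has a ratio of
+              # 10/100 = 0.1; weightedShare reports that maximum ratio divided by the
+              # weight, encoded as an integer by the controller.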
+ flavorsReservation:
+ description: |-
+ flavorsReservation are the reserved quotas, by flavor, currently in use by the
+ workloads assigned to this ClusterQueue.
+ items:
+ properties:
+ name:
+ description: name of the flavor.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ resources:
+ description: resources lists the quota usage for the resources
+ in this flavor.
+ items:
+ properties:
+ borrowed:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+                              Borrowed is the quantity of quota that is borrowed from the cohort. In other
+ words, it's the used quota that is over the nominalQuota.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ name:
+ description: name of the resource
+ type: string
+ total:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ total is the total quantity of used quota, including the amount borrowed
+ from the cohort.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - name
+ type: object
+ maxItems: 16
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - name
+ - resources
+ type: object
+ maxItems: 16
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ flavorsUsage:
+ description: |-
+ flavorsUsage are the used quotas, by flavor, currently in use by the
+ workloads admitted in this ClusterQueue.
+ items:
+ properties:
+ name:
+ description: name of the flavor.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ resources:
+ description: resources lists the quota usage for the resources
+ in this flavor.
+ items:
+ properties:
+ borrowed:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+                              Borrowed is the quantity of quota that is borrowed from the cohort. In other
+ words, it's the used quota that is over the nominalQuota.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ name:
+ description: name of the resource
+ type: string
+ total:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ total is the total quantity of used quota, including the amount borrowed
+ from the cohort.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - name
+ type: object
+ maxItems: 16
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - name
+ - resources
+ type: object
+ maxItems: 16
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ pendingWorkloads:
+ description: |-
+ pendingWorkloads is the number of workloads currently waiting to be
+ admitted to this clusterQueue.
+ format: int32
+ type: integer
+ pendingWorkloadsStatus:
+ description: |-
+ PendingWorkloadsStatus contains the information exposed about the current
+ status of the pending workloads in the cluster queue.
+ Deprecated: This field will be removed on v1beta2, use VisibilityOnDemand
+ (https://kueue.sigs.k8s.io/docs/tasks/manage/monitor_pending_workloads/pending_workloads_on_demand/)
+ instead.
+ properties:
+ clusterQueuePendingWorkload:
+ description: Head contains the list of top pending workloads.
+ items:
+ description: |-
+ ClusterQueuePendingWorkload contains the information identifying a pending workload
+ in the cluster queue.
+ properties:
+ name:
+ description: Name indicates the name of the pending workload.
+ type: string
+ namespace:
+                          description: Namespace indicates the namespace of the
+                            pending workload.
+ type: string
+ required:
+ - name
+ - namespace
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ lastChangeTime:
+ description: LastChangeTime indicates the time of the last change
+ of the structure.
+ format: date-time
+ type: string
+ required:
+ - lastChangeTime
+ type: object
+ reservingWorkloads:
+ description: |-
+ reservingWorkloads is the number of workloads currently reserving quota in this
+ clusterQueue.
+ format: int32
+ type: integer
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: cohorts.kueue.x-k8s.io
+spec:
+ group: kueue.x-k8s.io
+ names:
+ kind: Cohort
+ listKind: CohortList
+ plural: cohorts
+ singular: cohort
+ scope: Cluster
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: |-
+ Cohort is the Schema for the cohorts API. Using Hierarchical
+ Cohorts (any Cohort which has a parent) with Fair Sharing
+ results in undefined behavior in 0.9
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: CohortSpec defines the desired state of Cohort
+ properties:
+ parent:
+ description: |-
+ Parent references the name of the Cohort's parent, if
+ any. It satisfies one of three cases:
+ 1) Unset. This Cohort is the root of its Cohort tree.
+ 2) References a non-existent Cohort. We use default Cohort (no borrowing/lending limits).
+ 3) References an existent Cohort.
+
+ If a cycle is created, we disable all members of the
+ Cohort, including ClusterQueues, until the cycle is
+ removed. We prevent further admission while the cycle
+ exists.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
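+              # Editorial example (not part of the generated schema; names are
+              # hypothetical): a child Cohort joins a hierarchy by referencing its
+              # parent by name:
+              #   apiVersion: kueue.x-k8s.io/v1alpha1
+              #   kind: Cohort
+              #   metadata:
+              #     name: team-a
+              #   spec:
+              #     parent: org-root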
+ resourceGroups:
+ description: |-
+ ResourceGroups describes groupings of Resources and
+ Flavors. Each ResourceGroup defines a list of Resources
+ and a list of Flavors which provide quotas for these
+ Resources. Each Resource and each Flavor may only form part
+ of one ResourceGroup. There may be up to 16 ResourceGroups
+ within a Cohort.
+
+ BorrowingLimit limits how much members of this Cohort
+ subtree can borrow from the parent subtree.
+
+ LendingLimit limits how much members of this Cohort subtree
+ can lend to the parent subtree.
+
+ Borrowing and Lending limits must only be set when the
+ Cohort has a parent. Otherwise, the Cohort create/update
+ will be rejected by the webhook.
+ items:
+ properties:
+ coveredResources:
+ description: |-
+ coveredResources is the list of resources covered by the flavors in this
+ group.
+ Examples: cpu, memory, vendor.com/gpu.
+ The list cannot be empty and it can contain up to 16 resources.
+ items:
+ description: ResourceName is the name identifying various
+ resources in a ResourceList.
+ type: string
+ maxItems: 16
+ minItems: 1
+ type: array
+ flavors:
+ description: |-
+ flavors is the list of flavors that provide the resources of this group.
+ Typically, different flavors represent different hardware models
+ (e.g., gpu models, cpu architectures) or pricing models (on-demand vs spot
+ cpus).
+ Each flavor MUST list all the resources listed for this group in the same
+ order as the .resources field.
+ The list cannot be empty and it can contain up to 16 flavors.
+ items:
+ properties:
+ name:
+ description: |-
+ name of this flavor. The name should match the .metadata.name of a
+ ResourceFlavor. If a matching ResourceFlavor does not exist, the
+ ClusterQueue will have an Active condition set to False.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ resources:
+ description: |-
+ resources is the list of quotas for this flavor per resource.
+ There could be up to 16 resources.
+ items:
+ properties:
+ borrowingLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ borrowingLimit is the maximum amount of quota for the [flavor, resource]
+ combination that this ClusterQueue is allowed to borrow from the unused
+ quota of other ClusterQueues in the same cohort.
+ In total, at a given time, Workloads in a ClusterQueue can consume a
+ quantity of quota equal to nominalQuota+borrowingLimit, assuming the other
+ ClusterQueues in the cohort have enough unused quota.
+ If null, it means that there is no borrowing limit.
+ If not null, it must be non-negative.
+ borrowingLimit must be null if spec.cohort is empty.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ lendingLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ lendingLimit is the maximum amount of unused quota for the [flavor, resource]
+ combination that this ClusterQueue can lend to other ClusterQueues in the same cohort.
+ In total, at a given time, ClusterQueue reserves for its exclusive use
+                                    a quantity of quota equal to nominalQuota - lendingLimit.
+ If null, it means that there is no lending limit, meaning that
+ all the nominalQuota can be borrowed by other clusterQueues in the cohort.
+ If not null, it must be non-negative.
+ lendingLimit must be null if spec.cohort is empty.
+ This field is in beta stage and is enabled by default.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ name:
+ description: name of this resource.
+ type: string
+ nominalQuota:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ nominalQuota is the quantity of this resource that is available for
+ Workloads admitted by this ClusterQueue at a point in time.
+ The nominalQuota must be non-negative.
+ nominalQuota should represent the resources in the cluster available for
+ running jobs (after discounting resources consumed by system components
+ and pods not managed by kueue). In an autoscaled cluster, nominalQuota
+ should account for resources that can be provided by a component such as
+ Kubernetes cluster-autoscaler.
+
+ If the ClusterQueue belongs to a cohort, the sum of the quotas for each
+ (flavor, resource) combination defines the maximum quantity that can be
+ allocated by a ClusterQueue in the cohort.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - name
+ - nominalQuota
+ type: object
+ maxItems: 16
+ minItems: 1
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - name
+ - resources
+ type: object
+ maxItems: 16
+ minItems: 1
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - coveredResources
+ - flavors
+ type: object
+ x-kubernetes-validations:
+ - message: flavors must have the same number of resources as the
+ coveredResources
+ rule: self.flavors.all(x, size(x.resources) == size(self.coveredResources))
+ maxItems: 16
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ type: object
+ served: true
+ storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+ controller-gen.kubebuilder.io/version: v0.16.5
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: localqueues.kueue.x-k8s.io
+spec:
+ group: kueue.x-k8s.io
+ names:
+ kind: LocalQueue
+ listKind: LocalQueueList
+ plural: localqueues
+ shortNames:
+ - queue
+ - queues
+ - lq
+ singular: localqueue
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Backing ClusterQueue
+ jsonPath: .spec.clusterQueue
+ name: ClusterQueue
+ type: string
+ - description: Number of pending workloads
+ jsonPath: .status.pendingWorkloads
+ name: Pending Workloads
+ type: integer
+ - description: Number of admitted workloads that haven't finished yet.
+ jsonPath: .status.admittedWorkloads
+ name: Admitted Workloads
+ type: integer
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: LocalQueue is the Schema for the localQueues API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: LocalQueueSpec defines the desired state of LocalQueue
+ properties:
+ clusterQueue:
+ description: clusterQueue is a reference to a clusterQueue that backs
+ this localQueue.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ x-kubernetes-validations:
+ - message: field is immutable
+ rule: self == oldSelf
+ stopPolicy:
+ default: None
+ description: |-
+                  stopPolicy - if set to a value different from None, the LocalQueue is considered Inactive,
+                  and no new reservations are made.
+
+ Depending on its value, its associated workloads will:
+
+ - None - Workloads are admitted
+ - HoldAndDrain - Admitted workloads are evicted and Reserving workloads will cancel the reservation.
+ - Hold - Admitted workloads will run to completion and Reserving workloads will cancel the reservation.
+ enum:
+ - None
+ - Hold
+ - HoldAndDrain
+ type: string
+ type: object
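+          # Editorial example (not part of the generated schema; names are
+          # hypothetical): a namespaced LocalQueue that feeds workloads into a
+          # ClusterQueue:
+          #   apiVersion: kueue.x-k8s.io/v1beta1
+          #   kind: LocalQueue
+          #   metadata:
+          #     name: user-queue
+          #     namespace: team-a
+          #   spec:
+          #     clusterQueue: cluster-queue
+          #     stopPolicy: None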
+ status:
+ description: LocalQueueStatus defines the observed state of LocalQueue
+ properties:
+ admittedWorkloads:
+ description: |-
+ admittedWorkloads is the number of workloads in this LocalQueue
+ admitted to a ClusterQueue and that haven't finished yet.
+ format: int32
+ type: integer
+ conditions:
+ description: |-
+ Conditions hold the latest available observations of the LocalQueue
+ current state.
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ flavorUsage:
+ description: |-
+                  flavorsUsage are the used quotas, by flavor, currently in use by the
+                  workloads assigned to this LocalQueue.
+ items:
+ properties:
+ name:
+ description: name of the flavor.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ resources:
+ description: resources lists the quota usage for the resources
+ in this flavor.
+ items:
+ properties:
+ name:
+ description: name of the resource.
+ type: string
+ total:
+ anyOf:
+ - type: integer
+ - type: string
+ description: total is the total quantity of used quota.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - name
+ type: object
+ maxItems: 16
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - name
+ - resources
+ type: object
+ maxItems: 16
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ flavors:
+ description: flavors lists all currently available ResourceFlavors
+                  in the specified ClusterQueue.
+ items:
+ properties:
+ name:
+ description: name of the flavor.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ nodeLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ nodeLabels are labels that associate the ResourceFlavor with Nodes that
+ have the same labels.
+ maxProperties: 8
+ type: object
+ x-kubernetes-map-type: atomic
+ nodeTaints:
+ description: |-
+ nodeTaints are taints that the nodes associated with this ResourceFlavor
+ have.
+ items:
+ description: |-
+ The node this Taint is attached to has the "effect" on
+ any pod that does not tolerate the Taint.
+ properties:
+ effect:
+ description: |-
+ Required. The effect of the taint on pods
+ that do not tolerate the taint.
+ Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Required. The taint key to be applied to
+ a node.
+ type: string
+ timeAdded:
+ description: |-
+ TimeAdded represents the time at which the taint was added.
+ It is only written for NoExecute taints.
+ format: date-time
+ type: string
+ value:
+ description: The taint value corresponding to the taint
+ key.
+ type: string
+ required:
+ - effect
+ - key
+ type: object
+ maxItems: 8
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: resources used in the flavor.
+ items:
+ description: ResourceName is the name identifying various
+ resources in a ResourceList.
+ type: string
+ maxItems: 16
+ type: array
+ x-kubernetes-list-type: set
+ required:
+ - name
+ type: object
+ maxItems: 16
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ flavorsReservation:
+ description: |-
+                  flavorsReservation are the reserved quotas, by flavor, currently in use by the
+                  workloads assigned to this LocalQueue.
+ items:
+ properties:
+ name:
+ description: name of the flavor.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ resources:
+ description: resources lists the quota usage for the resources
+ in this flavor.
+ items:
+ properties:
+ name:
+ description: name of the resource.
+ type: string
+ total:
+ anyOf:
+ - type: integer
+ - type: string
+ description: total is the total quantity of used quota.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - name
+ type: object
+ maxItems: 16
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - name
+ - resources
+ type: object
+ maxItems: 16
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ pendingWorkloads:
+ description: PendingWorkloads is the number of Workloads in the LocalQueue
+ not yet admitted to a ClusterQueue
+ format: int32
+ type: integer
+ reservingWorkloads:
+ description: |-
+ reservingWorkloads is the number of workloads in this LocalQueue
+ reserving quota in a ClusterQueue and that haven't finished yet.
+ format: int32
+ type: integer
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: multikueueclusters.kueue.x-k8s.io
+spec:
+ group: kueue.x-k8s.io
+ names:
+ kind: MultiKueueCluster
+ listKind: MultiKueueClusterList
+ plural: multikueueclusters
+ singular: multikueuecluster
+ scope: Cluster
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: MultiKueueCluster is the Schema for the multikueue API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ kubeConfig:
+                description: Information on how to connect to the cluster.
+ properties:
+ location:
+ description: |-
+ Location of the KubeConfig.
+
+ If LocationType is Secret then Location is the name of the secret inside the namespace in
+ which the kueue controller manager is running. The config should be stored in the "kubeconfig" key.
+ type: string
+ locationType:
+ default: Secret
+ description: Type of the KubeConfig location.
+ enum:
+ - Secret
+ - Path
+ type: string
+ required:
+ - location
+ - locationType
+ type: object
+ required:
+ - kubeConfig
+ type: object
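+          # Editorial example (not part of the generated schema; names are
+          # hypothetical): a MultiKueueCluster whose kubeconfig is read from the
+          # "kubeconfig" key of a Secret in the kueue controller namespace:
+          #   apiVersion: kueue.x-k8s.io/v1beta1
+          #   kind: MultiKueueCluster
+          #   metadata:
+          #     name: worker-cluster-1
+          #   spec:
+          #     kubeConfig:
+          #       locationType: Secret
+          #       location: worker-cluster-1-kubeconfig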
+ status:
+ properties:
+ conditions:
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: multikueueconfigs.kueue.x-k8s.io
+spec:
+ group: kueue.x-k8s.io
+ names:
+ kind: MultiKueueConfig
+ listKind: MultiKueueConfigList
+ plural: multikueueconfigs
+ singular: multikueueconfig
+ scope: Cluster
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: MultiKueueConfig is the Schema for the multikueue API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: MultiKueueConfigSpec defines the desired state of MultiKueueConfig
+ properties:
+ clusters:
+                description: List of MultiKueueCluster names where the workloads
+ from the ClusterQueue should be distributed.
+ items:
+ type: string
+ maxItems: 10
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: set
+ required:
+ - clusters
+ type: object
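+        # Editorial example (not part of the generated schema; names are
+        # hypothetical): a MultiKueueConfig that dispatches workloads to two worker
+        # clusters:
+        #   apiVersion: kueue.x-k8s.io/v1beta1
+        #   kind: MultiKueueConfig
+        #   metadata:
+        #     name: multikueue-config
+        #   spec:
+        #     clusters:
+        #     - worker-cluster-1
+        #     - worker-cluster-2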
+ type: object
+ served: true
+ storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: provisioningrequestconfigs.kueue.x-k8s.io
+spec:
+ group: kueue.x-k8s.io
+ names:
+ kind: ProvisioningRequestConfig
+ listKind: ProvisioningRequestConfigList
+ plural: provisioningrequestconfigs
+ singular: provisioningrequestconfig
+ scope: Cluster
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: ProvisioningRequestConfig is the Schema for the provisioningrequestconfig
+ API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ProvisioningRequestConfigSpec defines the desired state of
+ ProvisioningRequestConfig
+ properties:
+ managedResources:
+ description: |-
+ managedResources contains the list of resources managed by the autoscaling.
+
+ If empty, all resources are considered managed.
+
+ If not empty, the ProvisioningRequest will contain only the podsets that are
+ requesting at least one of them.
+
+                  If none of the workload's podsets requests at least one managed resource,
+                  the workload is considered ready.
+ items:
+ description: ResourceName is the name identifying various resources
+ in a ResourceList.
+ type: string
+ maxItems: 100
+ type: array
+ x-kubernetes-list-type: set
+ parameters:
+ additionalProperties:
+ description: Parameter is limited to 255 characters.
+ maxLength: 255
+ type: string
+ description: Parameters contains all other parameters classes may
+ require.
+ maxProperties: 100
+ type: object
+ provisioningClassName:
+ description: |-
+ ProvisioningClassName describes the different modes of provisioning the resources.
+ Check autoscaling.x-k8s.io ProvisioningRequestSpec.ProvisioningClassName for details.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ retryStrategy:
+ default:
+ backoffBaseSeconds: 60
+ backoffLimitCount: 3
+ backoffMaxSeconds: 1800
+ description: |-
+ retryStrategy defines strategy for retrying ProvisioningRequest.
+ If null, then the default configuration is applied with the following parameter values:
+ backoffLimitCount: 3
+ backoffBaseSeconds: 60 - 1 min
+ backoffMaxSeconds: 1800 - 30 mins
+
+ To switch off retry mechanism
+ set retryStrategy.backoffLimitCount to 0.
+ properties:
+ backoffBaseSeconds:
+ default: 60
+ description: |-
+ BackoffBaseSeconds defines the base for the exponential backoff for
+ re-queuing an evicted workload.
+
+ Defaults to 60.
+ format: int32
+ type: integer
+ backoffLimitCount:
+ default: 3
+ description: |-
+ BackoffLimitCount defines the maximum number of re-queuing retries.
+ Once the number is reached, the workload is deactivated (`.spec.activate`=`false`).
+
+ Every backoff duration is about "b*2^(n-1)+Rand" where:
+ - "b" represents the base set by "BackoffBaseSeconds" parameter,
+ - "n" represents the "workloadStatus.requeueState.count",
+ - "Rand" represents the random jitter.
+                      During this time, the workload is treated as inadmissible and
+ other workloads will have a chance to be admitted.
+ By default, the consecutive requeue delays are around: (60s, 120s, 240s, ...).
+
+ Defaults to 3.
+ format: int32
+ type: integer
+ backoffMaxSeconds:
+ default: 1800
+ description: |-
+ BackoffMaxSeconds defines the maximum backoff time to re-queue an evicted workload.
+
+ Defaults to 1800.
+ format: int32
+ type: integer
+ type: object
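+            # Editorial note (illustrative): with the default retryStrategy
+            # (backoffBaseSeconds: 60, backoffLimitCount: 3, backoffMaxSeconds: 1800),
+            # the formula b*2^(n-1) gives retry delays of roughly 60s, 120s and 240s
+            # (plus jitter) before the workload is deactivated. To disable retries,
+            # a configuration such as the following could be used:
+            #   retryStrategy:
+            #     backoffLimitCount: 0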
+ required:
+ - provisioningClassName
+ type: object
+ type: object
+ served: true
+ storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: resourceflavors.kueue.x-k8s.io
+spec:
+ group: kueue.x-k8s.io
+ names:
+ kind: ResourceFlavor
+ listKind: ResourceFlavorList
+ plural: resourceflavors
+ shortNames:
+ - flavor
+ - flavors
+ - rf
+ singular: resourceflavor
+ scope: Cluster
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: ResourceFlavor is the Schema for the resourceflavors API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ResourceFlavorSpec defines the desired state of the ResourceFlavor
+ properties:
+ nodeLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ nodeLabels are labels that associate the ResourceFlavor with Nodes that
+ have the same labels.
+ When a Workload is admitted, its podsets can only get assigned
+ ResourceFlavors whose nodeLabels match the nodeSelector and nodeAffinity
+ fields.
+ Once a ResourceFlavor is assigned to a podSet, the ResourceFlavor's
+ nodeLabels should be injected into the pods of the Workload by the
+ controller that integrates with the Workload object.
+
+ nodeLabels can be up to 8 elements.
+ maxProperties: 8
+ type: object
+ x-kubernetes-map-type: atomic
+ nodeTaints:
+ description: |-
+ nodeTaints are taints that the nodes associated with this ResourceFlavor
+ have.
+ Workloads' podsets must have tolerations for these nodeTaints in order to
+ get assigned this ResourceFlavor during admission.
+
+ An example of a nodeTaint is
+ cloud.provider.com/preemptible="true":NoSchedule
+
+ nodeTaints can be up to 8 elements.
+ items:
+ description: |-
+ The node this Taint is attached to has the "effect" on
+ any pod that does not tolerate the Taint.
+ properties:
+ effect:
+ description: |-
+ Required. The effect of the taint on pods
+ that do not tolerate the taint.
+ Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Required. The taint key to be applied to a node.
+ type: string
+ timeAdded:
+ description: |-
+ TimeAdded represents the time at which the taint was added.
+ It is only written for NoExecute taints.
+ format: date-time
+ type: string
+ value:
+ description: The taint value corresponding to the taint key.
+ type: string
+ required:
+ - effect
+ - key
+ type: object
+ maxItems: 8
+ type: array
+ x-kubernetes-list-type: atomic
+ x-kubernetes-validations:
+ - message: 'supported taint effect values: ''NoSchedule'', ''PreferNoSchedule'',
+ ''NoExecute'''
+ rule: self.all(x, x.effect in ['NoSchedule', 'PreferNoSchedule',
+ 'NoExecute'])
+ tolerations:
+ description: |-
+ tolerations are extra tolerations that will be added to the pods admitted in
+ the quota associated with this resource flavor.
+
+ An example of a toleration is
+ cloud.provider.com/preemptible="true":NoSchedule
+
+ tolerations can be up to 8 elements.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+                    the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ maxItems: 8
+ type: array
+ x-kubernetes-list-type: atomic
+ x-kubernetes-validations:
+ - message: operator must be Exists when 'key' is empty, which means
+ 'match all values and all keys'
+ rule: 'self.all(x, !has(x.key) ? x.operator == ''Exists'' : true)'
+ - message: effect must be 'NoExecute' when 'tolerationSeconds' is
+ set
+ rule: 'self.all(x, has(x.tolerationSeconds) ? x.effect == ''NoExecute''
+ : true)'
+ - message: 'supported toleration values: ''Equal''(default), ''Exists'''
+ rule: self.all(x, !has(x.operator) || x.operator in ['Equal', 'Exists'])
+ - message: a value must be empty when 'operator' is 'Exists'
+ rule: 'self.all(x, has(x.operator) && x.operator == ''Exists'' ?
+ !has(x.value) : true)'
+ - message: 'supported taint effect values: ''NoSchedule'', ''PreferNoSchedule'',
+ ''NoExecute'''
+ rule: self.all(x, !has(x.effect) || x.effect in ['NoSchedule', 'PreferNoSchedule',
+ 'NoExecute'])
+ topologyName:
+ description: |-
+ topologyName indicates topology for the TAS ResourceFlavor.
+ When specified, it enables scraping of the topology information from the
+                  nodes matching the ResourceFlavor node labels.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ type: object
+ x-kubernetes-validations:
+ - message: at least one nodeLabel is required when topology is set
+ rule: '!has(self.topologyName) || self.nodeLabels.size() >= 1'
+ type: object
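+    # Editorial example (not part of the generated schema; label, taint and
+    # toleration values are hypothetical, following the preemptible example in the
+    # field descriptions above):
+    #   apiVersion: kueue.x-k8s.io/v1beta1
+    #   kind: ResourceFlavor
+    #   metadata:
+    #     name: spot
+    #   spec:
+    #     nodeLabels:
+    #       cloud.provider.com/vm-family: spot
+    #     nodeTaints:
+    #     - key: cloud.provider.com/preemptible
+    #       value: "true"
+    #       effect: NoSchedule
+    #     tolerations:
+    #     - key: cloud.provider.com/preemptible
+    #       operator: Equal
+    #       value: "true"
+    #       effect: NoSchedule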
+ served: true
+ storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: topologies.kueue.x-k8s.io
+spec:
+ group: kueue.x-k8s.io
+ names:
+ kind: Topology
+ listKind: TopologyList
+ plural: topologies
+ singular: topology
+ scope: Cluster
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: Topology is the Schema for the topology API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: TopologySpec defines the desired state of Topology
+ properties:
+ levels:
+ description: levels define the levels of topology.
+ items:
+ description: TopologyLevel defines the desired state of TopologyLevel
+ properties:
+ nodeLabel:
+ description: |-
+ nodeLabel indicates the name of the node label for a specific topology
+ level.
+
+ Examples:
+ - cloud.provider.com/topology-block
+ - cloud.provider.com/topology-rack
+ maxLength: 316
+ minLength: 1
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - nodeLabel
+ type: object
+ maxItems: 8
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - levels
+ type: object
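+        # Editorial example (not part of the generated schema; the label names follow
+        # the examples given in the levels description above):
+        #   apiVersion: kueue.x-k8s.io/v1alpha1
+        #   kind: Topology
+        #   metadata:
+        #     name: default
+        #   spec:
+        #     levels:
+        #     - nodeLabel: cloud.provider.com/topology-block
+        #     - nodeLabel: cloud.provider.com/topology-rack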
+ type: object
+ served: true
+ storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: workloadpriorityclasses.kueue.x-k8s.io
+spec:
+ group: kueue.x-k8s.io
+ names:
+ kind: WorkloadPriorityClass
+ listKind: WorkloadPriorityClassList
+ plural: workloadpriorityclasses
+ singular: workloadpriorityclass
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - description: Value of workloadPriorityClass's Priority
+ jsonPath: .value
+ name: Value
+ type: integer
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: WorkloadPriorityClass is the Schema for the workloadPriorityClass
+ API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ description:
+ description: |-
+ description is an arbitrary string that usually provides guidelines on
+ when this workloadPriorityClass should be used.
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ value:
+ description: |-
+ value represents the integer value of this workloadPriorityClass. This is the actual priority that workloads
+ receive when jobs have the name of this class in their workloadPriorityClass label.
+ Changing the value of workloadPriorityClass doesn't affect the priority of workloads that were already created.
+ format: int32
+ type: integer
+ required:
+ - value
+ type: object
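+    # Editorial example (not part of the generated schema; name and value are
+    # hypothetical): jobs reference this class via their workloadPriorityClass label:
+    #   apiVersion: kueue.x-k8s.io/v1beta1
+    #   kind: WorkloadPriorityClass
+    #   metadata:
+    #     name: high-priority
+    #   value: 1000
+    #   description: "For urgent batch jobs"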
+ served: true
+ storage: true
+ subresources: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+ controller-gen.kubebuilder.io/version: v0.16.5
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: workloads.kueue.x-k8s.io
+spec:
+ group: kueue.x-k8s.io
+ names:
+ kind: Workload
+ listKind: WorkloadList
+ plural: workloads
+ shortNames:
+ - wl
+ singular: workload
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Name of the queue this workload was submitted to
+ jsonPath: .spec.queueName
+ name: Queue
+ type: string
+ - description: Name of the ClusterQueue where the workload is reserving quota
+ jsonPath: .status.admission.clusterQueue
+ name: Reserved in
+ type: string
+ - description: Admission status
+ jsonPath: .status.conditions[?(@.type=='Admitted')].status
+ name: Admitted
+ type: string
+ - description: Workload finished
+ jsonPath: .status.conditions[?(@.type=='Finished')].status
+ name: Finished
+ type: string
+ - description: Time this workload was created
+ jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: Workload is the Schema for the workloads API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: WorkloadSpec defines the desired state of Workload
+ properties:
+ active:
+ default: true
+ description: |-
+ Active determines if a workload can be admitted into a queue.
+ Changing active from true to false will evict any running workloads.
+ Possible values are:
+
+ - false: indicates that a workload should never be admitted and evicts running workloads
+                  - true: indicates that a workload can be evaluated for admission into its respective queue.
+
+ Defaults to true
+ type: boolean
+ maximumExecutionTimeSeconds:
+ description: |-
+                  maximumExecutionTimeSeconds, if provided, determines the maximum time, in seconds,
+ the workload can be admitted before it's automatically deactivated.
+
+ If unspecified, no execution time limit is enforced on the Workload.
+ format: int32
+ minimum: 1
+ type: integer
+ podSets:
+ description: |-
+ podSets is a list of sets of homogeneous pods, each described by a Pod spec
+ and a count.
+ There must be at least one element and at most 8.
+ podSets cannot be changed.
+ items:
+ properties:
+ count:
+ default: 1
+ description: count is the number of pods for the spec.
+ format: int32
+ minimum: 0
+ type: integer
+ minCount:
+ description: |-
+ minCount is the minimum number of pods for the spec acceptable
+ if the workload supports partial admission.
+
+ If not provided, partial admission for the current PodSet is not
+ enabled.
+
+ Only one podSet within the workload can use this.
+
+ This is an alpha field and requires enabling PartialAdmission feature gate.
+ format: int32
+ minimum: 1
+ type: integer
+ name:
+ default: main
+ description: name is the PodSet name.
+ maxLength: 63
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+ type: string
+ template:
+ description: |-
+ template is the Pod template.
+
+ The only allowed fields in template.metadata are labels and annotations.
+
+ If requests are omitted for a container or initContainer,
+ they default to the limits if they are explicitly specified for the
+ container or initContainer.
+
+ During admission, the rules in nodeSelector and
+ nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution that match
+ the keys in the nodeLabels from the ResourceFlavors considered for this
+ Workload are used to filter the ResourceFlavors that can be assigned to
+ this podSet.
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ finalizers:
+ items:
+ type: string
+ type: array
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ name:
+ type: string
+ namespace:
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the pod.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ activeDeadlineSeconds:
+ description: |-
+ Optional duration in seconds the pod may be active on the node relative to
+ StartTime before the system will actively try to mark it failed and kill associated containers.
+ Value must be a positive integer.
+ format: int64
+ type: integer
+ affinity:
+ description: If specified, the pod's scheduling constraints
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling
+ rules for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated
+ with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector
+ requirements by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector
+ requirements by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching
+ the corresponding nodeSelectorTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector
+ terms. The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector
+ requirements by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector
+ requirements by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules
+ (e.g. co-locate this pod in the same node, zone,
+ etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched
+ WeightedPodAffinityTerm fields are added
+ per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity
+ term, associated with the corresponding
+ weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions
+ is a list of label selector
+ requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the
+ label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions
+ is a list of label selector
+ requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the
+ label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key topologyKey matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a
+ list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a
+ list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling
+ rules (e.g. avoid putting this pod in the same
+ node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched
+ WeightedPodAffinityTerm fields are added
+ per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity
+ term, associated with the corresponding
+ weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions
+ is a list of label selector
+ requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the
+ label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions
+ is a list of label selector
+ requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the
+ label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key topologyKey matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a
+ list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a
+ list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ type: object
+ automountServiceAccountToken:
+ description: AutomountServiceAccountToken indicates
+ whether a service account token should be automatically
+ mounted.
+ type: boolean
+ containers:
+ description: |-
+ List of containers belonging to the pod.
+ Containers cannot currently be added or removed.
+ There must be at least one container in a Pod.
+ Cannot be updated.
+ items:
+ description: A single application container that you
+ want to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment
+ variable present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment
+ variable's value. Cannot be used if value
+ is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the
+ ConfigMap or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema
+ the FieldPath is written in terms
+ of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to
+ select in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env
+ vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output
+ format of the exposed resources,
+ defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource
+ to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret
+ in the pod's namespace
+ properties:
+ key:
+ description: The key of the secret
+ to select from. Must be a valid
+ secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the
+ Secret or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+ description: EnvFromSource represents the source
+ of a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend
+ to each key in the ConfigMap. Must be
+ a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: |-
+ Actions that the management system should take in response to container lifecycle events.
+ Cannot be updated.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action
+ to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http
+ request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set
+ in the request. HTTP allows repeated
+ headers.
+ items:
+ description: HTTPHeader describes
+ a custom header to be used in
+ HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field
+ value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the
+ HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration
+ that the container should sleep before
+ being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number
+ of seconds to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field, and
+ lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name
+ to connect to, defaults to the pod
+ IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action
+ to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http
+ request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set
+ in the request. HTTP allows repeated
+ headers.
+ items:
+ description: HTTPHeader describes
+ a custom header to be used in
+ HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field
+ value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the
+ HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration
+ that the container should sleep before
+ being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number
+ of seconds to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field, and
+ lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name
+ to connect to, defaults to the pod
+ IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ description: |-
+ Periodic probe of container liveness.
+ Container will be restarted if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to
+ take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in
+ the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a
+ custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action
+ involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ name:
+ description: |-
+ Name of the container specified as a DNS_LABEL.
+ Each container in a pod must have a unique name (DNS_LABEL).
+ Cannot be updated.
+ type: string
+ ports:
+ description: |-
+ List of ports to expose from the container. Not specifying a port here
+ DOES NOT prevent that port from being exposed. Any port which is
+ listening on the default "0.0.0.0" address inside a container will be
+ accessible from the network.
+ Modifying this array with strategic merge patch may corrupt the data.
+ For more information See https://github.com/kubernetes/kubernetes/issues/108255.
+ Cannot be updated.
+ items:
+ description: ContainerPort represents a network
+ port in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external
+ port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ description: |-
+ Periodic probe of container service readiness.
+ Container will be removed from service endpoints if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to
+ take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in
+ the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a
+ custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action
+ involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents
+ resource resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Compute Resources required by this container.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one
+ entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ restartPolicy:
+ description: |-
+ RestartPolicy defines the restart behavior of individual containers in a pod.
+ This field may only be set for init containers, and the only allowed value is "Always".
+ For non-init containers or when this field is not specified,
+ the restart behavior is defined by the Pod's restart policy and the container type.
+ Setting the RestartPolicy as "Always" for the init container will have the following effect:
+ this init container will be continually restarted on
+ exit until all regular containers have terminated. Once all regular
+ containers have completed, all init containers with restartPolicy "Always"
+ will be shut down. This lifecycle differs from normal init containers and
+ is often referred to as a "sidecar" container. Although this init
+ container still starts in the init container sequence, it does not wait
+ for the container to complete before proceeding to the next init
+ container. Instead, the next init container starts immediately after this
+ init container is started, or after any startupProbe has successfully
+ completed.
+ type: string
+ securityContext:
+ description: |-
+ SecurityContext defines the security options the container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is always true when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by this container. If set, this profile
+ overrides the pod's appArmorProfile.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represents a POSIX
+ capability type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represents a POSIX
+ capability type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default value is Default which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label
+ that applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label
+ that applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label
+ that applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label
+ that applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is
+ the name of the GMSA credential spec
+ to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ description: |-
+ StartupProbe indicates that the Pod has successfully initialized.
+ If specified, no other probes are executed until this completes successfully.
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+ when it might take a long time to load data or warm a cache, than during steady-state operation.
+ This cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to
+ take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in
+ the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a
+ custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action
+ involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block
+ devices to be used by the container.
+ items:
+ description: volumeDevice describes a mapping
+ of a raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside
+ of the container that the device will
+ be mapped to.
+ type: string
+ name:
+ description: name must match the name of
+ a persistentVolumeClaim in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting
+ of a Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of
+ a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - mountPath
+ x-kubernetes-list-type: map
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ dnsConfig:
+ description: |-
+ Specifies the DNS parameters of a pod.
+ Parameters specified here will be merged into the generated DNS
+ configuration based on DNSPolicy.
+ properties:
+ nameservers:
+ description: |-
+ A list of DNS name server IP addresses.
+ This will be appended to the base nameservers generated from DNSPolicy.
+ Duplicated nameservers will be removed.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ options:
+ description: |-
+ A list of DNS resolver options.
+ This will be merged with the base options generated from DNSPolicy.
+ Duplicated entries will be removed. Resolution options given in Options
+ will override those that appear in the base DNSPolicy.
+ items:
+ description: PodDNSConfigOption defines DNS resolver
+ options of a pod.
+ properties:
+ name:
+ description: Required.
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ searches:
+ description: |-
+ A list of DNS search domains for host-name lookup.
+ This will be appended to the base search paths generated from DNSPolicy.
+ Duplicated search paths will be removed.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ dnsPolicy:
+ description: |-
+ Set DNS policy for the pod.
+ Defaults to "ClusterFirst".
+ Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+ DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+ To have DNS options set along with hostNetwork, you have to specify DNS policy
+ explicitly to 'ClusterFirstWithHostNet'.
+ type: string
+ enableServiceLinks:
+ description: |-
+ EnableServiceLinks indicates whether information about services should be injected into pod's
+ environment variables, matching the syntax of Docker links.
+ Optional: Defaults to true.
+ type: boolean
+ ephemeralContainers:
+ description: |-
+ List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing
+ pod to perform user-initiated actions such as debugging. This list cannot be specified when
+ creating a pod, and it cannot be modified by updating the pod spec. In order to add an
+ ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
+ items:
+ description: |-
+ An EphemeralContainer is a temporary container that you may add to an existing Pod for
+ user-initiated activities such as debugging. Ephemeral containers have no resource or
+ scheduling guarantees, and they will not be restarted when they exit or when a Pod is
+ removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the
+ Pod to exceed its resource allocation.
+
+ To add an ephemeral container, use the ephemeralcontainers subresource of an existing
+ Pod. Ephemeral containers may not be removed or restarted.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment
+ variable present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment
+ variable's value. Cannot be used if value
+ is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the
+ ConfigMap or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema
+ the FieldPath is written in terms
+ of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to
+ select in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env
+ vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output
+ format of the exposed resources,
+ defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource
+ to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret
+ in the pod's namespace
+ properties:
+ key:
+ description: The key of the secret
+ to select from. Must be a valid
+ secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the
+ Secret or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+ description: EnvFromSource represents the source
+ of a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend
+ to each key in the ConfigMap. Must be
+ a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: Lifecycle is not allowed for ephemeral
+ containers.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action
+ to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http
+ request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set
+ in the request. HTTP allows repeated
+ headers.
+ items:
+ description: HTTPHeader describes
+ a custom header to be used in
+ HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field
+ value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the
+ HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration
+ that the container should sleep before
+ being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number
+ of seconds to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when a TCP handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name
+ to connect to, defaults to the pod
+ IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action
+ to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http
+ request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set
+ in the request. HTTP allows repeated
+ headers.
+ items:
+ description: HTTPHeader describes
+ a custom header to be used in
+ HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field
+ value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the
+ HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration
+ that the container should sleep before
+ being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number
+ of seconds to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when a TCP handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name
+ to connect to, defaults to the pod
+ IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ description: Probes are not allowed for ephemeral
+ containers.
+ properties:
+ exec:
+ description: Exec specifies the action to
+ take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in
+ the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a
+ custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action
+ involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ name:
+ description: |-
+ Name of the ephemeral container specified as a DNS_LABEL.
+ This name must be unique among all containers, init containers and ephemeral containers.
+ type: string
+ ports:
+ description: Ports are not allowed for ephemeral
+ containers.
+ items:
+ description: ContainerPort represents a network
+ port in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external
+ port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ description: Probes are not allowed for ephemeral
+ containers.
+ properties:
+ exec:
+ description: Exec specifies the action to
+ take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in
+ the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a
+ custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action
+ involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents
+ resource resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources
+ already allocated to the pod.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one
+ entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ restartPolicy:
+ description: |-
+ Restart policy for the container to manage the restart behavior of each
+ container within a pod.
+ This may only be set for init containers. You cannot set this field on
+ ephemeral containers.
+ type: string
+ securityContext:
+ description: |-
+ Optional: SecurityContext defines the security options the ephemeral container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is true always when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by this container. If set, this profile
+ overrides the pod's appArmorProfile.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represents a POSIX
+ capability type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represents a POSIX
+ capability type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default value is Default which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and will fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label
+ that applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label
+ that applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label
+ that applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label
+ that applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is
+ the name of the GMSA credential spec
+ to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ description: Probes are not allowed for ephemeral
+ containers.
+ properties:
+ exec:
+ description: Exec specifies the action to
+ take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in
+ the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a
+ custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action
+ involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ targetContainerName:
+ description: |-
+ If set, the name of the container from PodSpec that this ephemeral container targets.
+ The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+ If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+ The container runtime must implement support for this feature. If the runtime does not
+ support namespace targeting then the result of setting this field is undefined.
+ type: string
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicates how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block
+ devices to be used by the container.
+ items:
+ description: volumeDevice describes a mapping
+ of a raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside
+ of the container that the device will
+ be mapped to.
+ type: string
+ name:
+ description: name must match the name of
+ a persistentVolumeClaim in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting
+ of a Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of
+ a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - mountPath
+ x-kubernetes-list-type: map
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ hostAliases:
+ description: |-
+ HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+ file if specified.
+ items:
+ description: |-
+ HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
+ pod's hosts file.
+ properties:
+ hostnames:
+ description: Hostnames for the above IP address.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ip:
+ description: IP address of the host file entry.
+ type: string
+ required:
+ - ip
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - ip
+ x-kubernetes-list-type: map
+ hostIPC:
+ description: |-
+ Use the host's ipc namespace.
+ Optional: Defaults to false.
+ type: boolean
+ hostNetwork:
+ description: |-
+ Host networking requested for this pod. Use the host's network namespace.
+ If this option is set, the ports that will be used must be specified.
+ Defaults to false.
+ type: boolean
+ hostPID:
+ description: |-
+ Use the host's pid namespace.
+ Optional: Defaults to false.
+ type: boolean
+ hostUsers:
+ description: |-
+ Use the host's user namespace.
+ Optional: Defaults to true.
+ If set to true or not present, the pod will be run in the host user namespace, useful
+ for when the pod needs a feature only available to the host user namespace, such as
+ loading a kernel module with CAP_SYS_MODULE.
+ When set to false, a new userns is created for the pod. Setting false is useful for
+ mitigating container breakout vulnerabilities, and it even allows users to run their
+ containers as root without actually having root privileges on the host.
+ This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+ type: boolean
+ hostname:
+ description: |-
+ Specifies the hostname of the Pod.
+ If not specified, the pod's hostname will be set to a system-defined value.
+ type: string
+ imagePullSecrets:
+ description: |-
+ ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ If specified, these secrets will be passed to individual puller implementations for them to use.
+ More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ initContainers:
+ description: |-
+ List of initialization containers belonging to the pod.
+ Init containers are executed in order prior to containers being started. If any
+ init container fails, the pod is considered to have failed and is handled according
+ to its restartPolicy. The name for an init container or normal container must be
+ unique among all containers.
+ Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+ The resourceRequirements of an init container are taken into account during scheduling
+ by finding the highest request/limit for each resource type, and then using the max of
+ that value or the sum of the normal containers. Limits are applied to init containers
+ in a similar fashion.
+ Init containers cannot currently be added or removed.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ items:
+ description: A single application container that you
+ want to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment
+ variable present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment
+ variable's value. Cannot be used if value
+ is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the
+ ConfigMap or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema
+ the FieldPath is written in terms
+ of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to
+ select in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env
+ vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output
+ format of the exposed resources,
+ defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource
+ to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret
+ in the pod's namespace
+ properties:
+ key:
+ description: The key of the secret
+ to select from. Must be a valid
+ secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the
+ Secret or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+ description: EnvFromSource represents the source
+ of a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend
+ to each key in the ConfigMap. Must be
+ a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: |-
+ Actions that the management system should take in response to container lifecycle events.
+ Cannot be updated.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action
+ to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http
+ request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set
+ in the request. HTTP allows repeated
+ headers.
+ items:
+ description: HTTPHeader describes
+ a custom header to be used in
+ HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field
+ value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the
+ HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration
+ that the container should sleep before
+ being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number
+ of seconds to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name
+ to connect to, defaults to the pod
+ IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action
+ to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http
+ request to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set
+ in the request. HTTP allows repeated
+ headers.
+ items:
+ description: HTTPHeader describes
+ a custom header to be used in
+ HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field
+ value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the
+ HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration
+ that the container should sleep before
+ being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number
+ of seconds to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept
+ for backward compatibility. There is no validation of this field and
+ lifecycle hooks will fail at runtime when a tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name
+ to connect to, defaults to the pod
+ IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ description: |-
+ Periodic probe of container liveness.
+ Container will be restarted if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to
+ take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in
+ the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a
+ custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action
+ involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ name:
+ description: |-
+ Name of the container specified as a DNS_LABEL.
+ Each container in a pod must have a unique name (DNS_LABEL).
+ Cannot be updated.
+ type: string
+ ports:
+ description: |-
+ List of ports to expose from the container. Not specifying a port here
+ DOES NOT prevent that port from being exposed. Any port which is
+ listening on the default "0.0.0.0" address inside a container will be
+ accessible from the network.
+ Modifying this array with strategic merge patch may corrupt the data.
+ For more information, see https://github.com/kubernetes/kubernetes/issues/108255.
+ Cannot be updated.
+ items:
+ description: ContainerPort represents a network
+ port in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of the port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external
+ port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of the port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ description: |-
+ Periodic probe of container service readiness.
+ Container will be removed from service endpoints if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to
+ take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in
+ the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a
+ custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action
+ involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents
+ resource resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Compute Resources required by this container.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one
+ entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ restartPolicy:
+ description: |-
+ RestartPolicy defines the restart behavior of individual containers in a pod.
+ This field may only be set for init containers, and the only allowed value is "Always".
+ For non-init containers or when this field is not specified,
+ the restart behavior is defined by the Pod's restart policy and the container type.
+ Setting the RestartPolicy as "Always" for the init container will have the following effect:
+ this init container will be continually restarted on
+ exit until all regular containers have terminated. Once all regular
+ containers have completed, all init containers with restartPolicy "Always"
+ will be shut down. This lifecycle differs from normal init containers and
+ is often referred to as a "sidecar" container. Although this init
+ container still starts in the init container sequence, it does not wait
+ for the container to complete before proceeding to the next init
+ container. Instead, the next init container starts immediately after this
+ init container is started, or after any startupProbe has successfully
+ completed.
+ type: string
+ securityContext:
+ description: |-
+ SecurityContext defines the security options the container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is true always when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by this container. If set, this profile
+ overrides the pod's appArmorProfile.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represents a POSIX
+ capability type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represents a POSIX
+ capability type
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default value is Default which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label
+ that applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label
+ that applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label
+ that applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label
+ that applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is
+ the name of the GMSA credential spec
+ to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ description: |-
+ StartupProbe indicates that the Pod has successfully initialized.
+ If specified, no other probes are executed until this completes successfully.
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+ when it might take a long time to load data or warm a cache, than during steady-state operation.
+ This cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to
+ take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ default: ""
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in
+ the request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a
+ custom header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action
+ involving a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block
+ devices to be used by the container.
+ items:
+ description: volumeDevice describes a mapping
+ of a raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside
+ of the container that the device will
+ be mapped to.
+ type: string
+ name:
+ description: name must match the name of
+ a persistentVolumeClaim in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting
+ of a Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of
+ a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - mountPath
+ x-kubernetes-list-type: map
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ nodeName:
+ description: |-
+ NodeName indicates in which node this pod is scheduled.
+ If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName.
+ Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod.
+ This field should not be used to express a desire for the pod to be scheduled on a specific node.
+ https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+ NodeSelector is a selector which must be true for the pod to fit on a node.
+ Selector which must match a node's labels for the pod to be scheduled on that node.
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ x-kubernetes-map-type: atomic
+ os:
+ description: |-
+ Specifies the OS of the containers in the pod.
+ Some pod and container fields are restricted if this is set.
+
+ If the OS field is set to linux, the following fields must be unset:
+ - securityContext.windowsOptions
+
+ If the OS field is set to windows, the following fields must be unset:
+ - spec.hostPID
+ - spec.hostIPC
+ - spec.hostUsers
+ - spec.securityContext.appArmorProfile
+ - spec.securityContext.seLinuxOptions
+ - spec.securityContext.seccompProfile
+ - spec.securityContext.fsGroup
+ - spec.securityContext.fsGroupChangePolicy
+ - spec.securityContext.sysctls
+ - spec.shareProcessNamespace
+ - spec.securityContext.runAsUser
+ - spec.securityContext.runAsGroup
+ - spec.securityContext.supplementalGroups
+ - spec.securityContext.supplementalGroupsPolicy
+ - spec.containers[*].securityContext.appArmorProfile
+ - spec.containers[*].securityContext.seLinuxOptions
+ - spec.containers[*].securityContext.seccompProfile
+ - spec.containers[*].securityContext.capabilities
+ - spec.containers[*].securityContext.readOnlyRootFilesystem
+ - spec.containers[*].securityContext.privileged
+ - spec.containers[*].securityContext.allowPrivilegeEscalation
+ - spec.containers[*].securityContext.procMount
+ - spec.containers[*].securityContext.runAsUser
+ - spec.containers[*].securityContext.runAsGroup
+ properties:
+ name:
+ description: |-
+ Name is the name of the operating system. The currently supported values are linux and windows.
+ Additional values may be defined in the future and can be one of:
+ https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+ Clients should expect to handle additional values and treat unrecognized values in this field as os: null
+ type: string
+ required:
+ - name
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+ This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+ the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+ The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+ set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+ defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+ More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ type: object
+ preemptionPolicy:
+ description: |-
+ PreemptionPolicy is the Policy for preempting pods with lower priority.
+ One of Never, PreemptLowerPriority.
+ Defaults to PreemptLowerPriority if unset.
+ type: string
+ priority:
+ description: |-
+ The priority value. Various system components use this field to find the
+ priority of the pod. When Priority Admission Controller is enabled, it
+ prevents users from setting this field. The admission controller populates
+ this field from PriorityClassName.
+ The higher the value, the higher the priority.
+ format: int32
+ type: integer
+ priorityClassName:
+ description: |-
+ If specified, indicates the pod's priority. "system-node-critical" and
+ "system-cluster-critical" are two special keywords which indicate the
+ highest priorities with the former being the highest priority. Any other
+ name must be defined by creating a PriorityClass object with that name.
+ If not specified, the pod priority will be default or zero if there is no
+ default.
+ type: string
+ readinessGates:
+ description: |-
+ If specified, all readiness gates will be evaluated for pod readiness.
+ A pod is ready when all its containers are ready AND
+ all conditions specified in the readiness gates have status equal to "True"
+ More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates
+ items:
+ description: PodReadinessGate contains the reference
+ to a pod condition
+ properties:
+ conditionType:
+ description: ConditionType refers to a condition
+ in the pod's condition list with matching type.
+ type: string
+ required:
+ - conditionType
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resourceClaims:
+ description: |-
+ ResourceClaims defines which ResourceClaims must be allocated
+ and reserved before the Pod is allowed to start. The resources
+ will be made available to those containers which consume them
+ by name.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable.
+ items:
+ description: |-
+ PodResourceClaim references exactly one ResourceClaim, either directly
+ or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim
+ for the pod.
+
+ It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
+ Containers that need access to the ResourceClaim reference it with this name.
+ properties:
+ name:
+ description: |-
+ Name uniquely identifies this resource claim inside the pod.
+ This must be a DNS_LABEL.
+ type: string
+ resourceClaimName:
+ description: |-
+ ResourceClaimName is the name of a ResourceClaim object in the same
+ namespace as this pod.
+
+ Exactly one of ResourceClaimName and ResourceClaimTemplateName must
+ be set.
+ type: string
+ resourceClaimTemplateName:
+ description: |-
+ ResourceClaimTemplateName is the name of a ResourceClaimTemplate
+ object in the same namespace as this pod.
+
+ The template will be used to create a new ResourceClaim, which will
+ be bound to this pod. When this pod is deleted, the ResourceClaim
+ will also be deleted. The pod name and resource name, along with a
+ generated component, will be used to form a unique name for the
+ ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.
+
+ This field is immutable and no changes will be made to the
+ corresponding ResourceClaim by the control plane after creating the
+ ResourceClaim.
+
+ Exactly one of ResourceClaimName and ResourceClaimTemplateName must
+ be set.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ restartPolicy:
+ description: |-
+ Restart policy for all containers within the pod.
+ One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted.
+ Default to Always.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
+ type: string
+ runtimeClassName:
+ description: |-
+ RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+ to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
+ If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+ empty definition that uses the default runtime handler.
+ More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
+ type: string
+ schedulerName:
+ description: |-
+ If specified, the pod will be dispatched by specified scheduler.
+ If not specified, the pod will be dispatched by default scheduler.
+ type: string
+ schedulingGates:
+ description: |-
+ SchedulingGates is an opaque list of values that if specified will block scheduling the pod.
+ If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the
+ scheduler will not attempt to schedule the pod.
+
+ SchedulingGates can only be set at pod creation time, and be removed only afterwards.
+ items:
+ description: PodSchedulingGate is associated to a
+ Pod to guard its scheduling.
+ properties:
+ name:
+ description: |-
+ Name of the scheduling gate.
+ Each scheduling gate must have a unique name field.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ securityContext:
+ description: |-
+ SecurityContext holds pod-level security attributes and common container settings.
+ Optional: Defaults to empty. See type description for default values of each field.
+ properties:
+ appArmorProfile:
+ description: |-
+ appArmorProfile is the AppArmor options to use by the containers in this pod.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile loaded on the node that should be used.
+ The profile must be preconfigured on the node to work.
+ Must match the loaded name of the profile.
+ Must be set if and only if type is "Localhost".
+ type: string
+ type:
+ description: |-
+ type indicates which kind of AppArmor profile will be applied.
+ Valid options are:
+ Localhost - a profile pre-loaded on the node.
+ RuntimeDefault - the container runtime's default profile.
+ Unconfined - no AppArmor enforcement.
+ type: string
+ required:
+ - type
+ type: object
+ fsGroup:
+ description: |-
+ A special supplemental group that applies to all containers in a pod.
+ Some volume types allow the Kubelet to change the ownership of that volume
+ to be owned by the pod:
+
+ 1. The owning GID will be the FSGroup
+ 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+ 3. The permission bits are OR'd with rw-rw----
+
+ If unset, the Kubelet will not modify the ownership and permissions of any volume.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ fsGroupChangePolicy:
+ description: |-
+ fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
+ before being exposed inside Pod. This field will only apply to
+ volume types which support fsGroup based ownership(and permissions).
+ It will have no effect on ephemeral volume types such as: secret, configmaps
+ and emptydir.
+ Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence
+ for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence
+ for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to all containers.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in SecurityContext. If set in
+ both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ takes precedence for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that
+ applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that
+ applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that
+ applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that
+ applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by the containers in this pod.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ supplementalGroups:
+ description: |-
+ A list of groups applied to the first process run in each container, in
+ addition to the container's primary GID and fsGroup (if specified). If
+ the SupplementalGroupsPolicy feature is enabled, the
+ supplementalGroupsPolicy field determines whether these are in addition
+ to or instead of any group memberships defined in the container image.
+ If unspecified, no additional groups are added, though group memberships
+ defined in the container image may still be used, depending on the
+ supplementalGroupsPolicy field.
+ Note that this field cannot be set when spec.os.name is windows.
+ items:
+ format: int64
+ type: integer
+ type: array
+ x-kubernetes-list-type: atomic
+ supplementalGroupsPolicy:
+ description: |-
+ Defines how supplemental groups of the first container processes are calculated.
+ Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
+ (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled
+ and the container runtime must implement support for this feature.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ sysctls:
+ description: |-
+ Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+ sysctls (by the container runtime) might fail to launch.
+ Note that this field cannot be set when spec.os.name is windows.
+ items:
+ description: Sysctl defines a kernel parameter
+ to be set
+ properties:
+ name:
+ description: Name of a property to set
+ type: string
+ value:
+ description: Value of a property to set
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options within a container's SecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name
+ of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ serviceAccount:
+ description: |-
+ DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+ Deprecated: Use serviceAccountName instead.
+ type: string
+ serviceAccountName:
+ description: |-
+ ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ type: string
+ setHostnameAsFQDN:
+ description: |-
+ If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+ In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+ In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+ If a pod does not have FQDN, this has no effect.
+ Default to false.
+ type: boolean
+ shareProcessNamespace:
+ description: |-
+ Share a single process namespace between all of the containers in a pod.
+ When this is set containers will be able to view and signal processes from other containers
+ in the same pod, and the first process in each container will not be assigned PID 1.
+ HostPID and ShareProcessNamespace cannot both be set.
+ Optional: Default to false.
+ type: boolean
+ subdomain:
+ description: |-
+ If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ If not specified, the pod will not have a domainname at all.
+ type: string
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ If this value is nil, the default grace period will be used instead.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ Defaults to 30 seconds.
+ format: int64
+ type: integer
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ topologySpreadConstraints:
+ description: |-
+ TopologySpreadConstraints describes how a group of pods ought to spread across topology
+ domains. Scheduler will schedule pods in a way which abides by the constraints.
+ All topologySpreadConstraints are ANDed.
+ items:
+ description: TopologySpreadConstraint specifies how
+ to spread matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of
+ label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select the pods over which
+ spreading will be calculated. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are ANDed with labelSelector
+ to select the group of existing pods over which spreading will be calculated
+ for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ MatchLabelKeys cannot be set when LabelSelector isn't set.
+ Keys that don't exist in the incoming pod labels will
+ be ignored. A null or empty list means only match against labelSelector.
+
+ This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ maxSkew:
+ description: |-
+ MaxSkew describes the degree to which pods may be unevenly distributed.
+ When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+ between the number of matching pods in the target topology and the global minimum.
+ The global minimum is the minimum number of matching pods in an eligible domain
+ or zero if the number of eligible domains is less than MinDomains.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 2/2/1:
+ In this case, the global minimum is 1.
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys is greater than or equal to minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, new pod with the same labelSelector cannot be scheduled,
+ because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ it will violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each <key, value> as a "bucket", and try to put balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: |-
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ the spread constraint.
+ - DoNotSchedule (default) tells the scheduler not to schedule it.
+ - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+ but giving higher precedence to topologies that would help reduce the
+ skew.
+ A constraint is considered "Unsatisfiable" for an incoming pod
+ if and only if every possible node assignment for that pod would violate
+ "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 3/1/1:
+ | zone1 | zone2 | zone3 |
+ | P P P | P | P |
+ If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+ to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+ MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+ won't make it *more* imbalanced.
+ It's a required field.
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - topologyKey
+ - whenUnsatisfiable
+ x-kubernetes-list-type: map
+ volumes:
+ description: |-
+ List of volumes that can be mounted by containers belonging to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes
+ items:
+ description: Volume represents a named volume in a
+ pod that may be accessed by any container in the
+ pod.
+ properties:
+ awsElasticBlockStore:
+ description: |-
+ awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly value true will force the readOnly setting in VolumeMounts.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: boolean
+ volumeID:
+ description: |-
+ volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ description: azureDisk represents an Azure Data
+ Disk mount on the host and bind mount to the
+ pod.
+ properties:
+ cachingMode:
+ description: 'cachingMode is the Host Caching
+ mode: None, Read Only, Read Write.'
+ type: string
+ diskName:
+ description: diskName is the Name of the data
+ disk in the blob storage
+ type: string
+ diskURI:
+ description: diskURI is the URI of data disk
+ in the blob storage
+ type: string
+ fsType:
+ default: ext4
+ description: |-
+ fsType is Filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ kind:
+ description: 'kind expected values are Shared:
+ multiple blob disks per storage account Dedicated:
+ single blob disk per storage account Managed:
+ azure managed data disk (only in managed
+ availability set). defaults to shared'
+ type: string
+ readOnly:
+ default: false
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ description: azureFile represents an Azure File
+ Service mount on the host and bind mount to
+ the pod.
+ properties:
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretName:
+ description: secretName is the name of secret
+ that contains Azure Storage Account Name
+ and Key
+ type: string
+ shareName:
+ description: shareName is the azure share
+ Name
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ description: cephFS represents a Ceph FS mount
+ on the host that shares a pod's lifetime
+ properties:
+ monitors:
+ description: |-
+ monitors is Required: Monitors is a collection of Ceph monitors
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description: 'path is Optional: Used as the
+ mounted root, rather than the full Ceph
+ tree, default is /'
+ type: string
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: boolean
+ secretFile:
+ description: |-
+ secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ secretRef:
+ description: |-
+ secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is optional: User is the rados user name, default is admin
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ description: |-
+ cinder represents a cinder volume attached and mounted on kubelets host machine.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is optional: points to a secret object containing parameters used to connect
+ to OpenStack.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeID:
+ description: |-
+ volumeID used to identify the volume in cinder.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ description: configMap represents a configMap
+ that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path
+ within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specify whether the
+ ConfigMap or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface)
+ represents ephemeral storage that is handled
+ by certain external CSI drivers (Beta feature).
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API
+ about the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+                            Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: Items is a list of downward API
+ volume file
+ items:
+ description: DownwardAPIVolumeFile represents
+ information to create the file containing
+ the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field
+ of the pod: only annotations, labels,
+ name, namespace and uid are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema
+ the FieldPath is written in terms
+ of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to
+ select in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the
+ relative path name of the file to
+ be created. Must not be absolute or
+ contain the ''..'' path. Must be utf-8
+ encoded. The first item of the relative
+ path must not start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env
+ vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output
+ format of the exposed resources,
+ defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource
+ to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ emptyDir:
+ description: |-
+ emptyDir represents a temporary directory that shares a pod's lifetime.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ properties:
+ medium:
+ description: |-
+ medium represents what type of storage medium should back this directory.
+ The default is "" which means to use the node's default medium.
+ Must be an empty string (default) or Memory.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ sizeLimit is the total amount of local storage required for this EmptyDir volume.
+ The size limit is also applicable for memory medium.
+ The maximum usage on memory medium EmptyDir would be the minimum value between
+ the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ The default is nil which means that the limit is undefined.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
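+                      # Illustrative sketch (not part of the generated schema): how a pod-level
+                      # volumes entry using the emptyDir fields above might look; the volume
+                      # name "scratch" and the 1Gi limit are hypothetical.
+                      #   volumes:
+                      #   - name: scratch
+                      #     emptyDir:
+                      #       medium: Memory
+                      #       sizeLimit: 1Gi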
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+                            pod. The name of the PVC will be `<pod name>-<volume name>` where
+                            `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ finalizers:
+ items:
+ type: string
+ type: array
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ name:
+ type: string
+ namespace:
+ type: string
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type
+ of resource being referenced
+ type: string
+ name:
+ description: Name is the name
+ of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type
+ of resource being referenced
+ type: string
+ name:
+ description: Name is the name
+ of resource being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query
+ over volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions
+ is a list of label selector
+ requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the
+ label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                              set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding
+ reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
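+                      # Illustrative sketch (not part of the generated schema): an ephemeral
+                      # volume backed by a volumeClaimTemplate as described above; the storage
+                      # class name "standard-rwo" and the 10Gi request are hypothetical.
+                      #   volumes:
+                      #   - name: scratch-pvc
+                      #     ephemeral:
+                      #       volumeClaimTemplate:
+                      #         spec:
+                      #           accessModes: ["ReadWriteOnce"]
+                      #           storageClassName: standard-rwo
+                      #           resources:
+                      #             requests:
+                      #               storage: 10Gi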
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine
+ and then exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun
+ number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target
+ worldwide names (WWNs)'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ properties:
+ driver:
+ description: driver is the name of the driver
+ to use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field
+ holds extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - driver
+ type: object
+ flocker:
+ description: flocker represents a Flocker volume
+ attached to a kubelet's host machine. This depends
+ on the Flocker control service being running
+ properties:
+ datasetName:
+ description: |-
+                            datasetName is the name of the dataset stored as metadata -> name on the dataset for Flocker;
+                            it should be considered deprecated.
+ type: string
+ datasetUUID:
+ description: datasetUUID is the UUID of the
+ dataset. This is unique identifier of a
+ Flocker dataset
+ type: string
+ type: object
+ gcePersistentDisk:
+ description: |-
+ gcePersistentDisk represents a GCE Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ format: int32
+ type: integer
+ pdName:
+ description: |-
+ pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ description: |-
+ gitRepo represents a git repository at a particular revision.
+ DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ into the Pod's container.
+ properties:
+ directory:
+ description: |-
+ directory is the target directory name.
+ Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ git repository. Otherwise, if specified, the volume will contain the git repository in
+ the subdirectory with the given name.
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description: revision is the commit hash for
+ the specified revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ image:
+ description: |-
+ image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+                            - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+ The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ The volume will be mounted read-only (ro) and non-executable files (noexec).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+ Policy for pulling OCI objects. Possible values are:
+                            Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
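+                      # Illustrative sketch (not part of the generated schema): an image volume
+                      # mounting an OCI artifact read-only as described above; the reference
+                      # is hypothetical.
+                      #   volumes:
+                      #   - name: models
+                      #     image:
+                      #       reference: registry.example.com/models/bert:v1
+                      #       pullPolicy: IfNotPresent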
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether
+ support iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether
+ support iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+                            <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified
+ Name.
+ type: string
+ iscsiInterface:
+ default: default
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun
+ number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret
+ for iSCSI target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ description: |-
+ name of the volume.
+ Must be a DNS_LABEL and unique within the pod.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ nfs:
+ description: |-
+ nfs represents an NFS mount on the host that shares a pod's lifetime
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ properties:
+ path:
+ description: |-
+ path that is exported by the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the NFS export to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: boolean
+ server:
+ description: |-
+ server is the hostname or IP address of the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ description: |-
+ persistentVolumeClaimVolumeSource represents a reference to a
+ PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ properties:
+ claimName:
+ description: |-
+ claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ type: string
+ readOnly:
+ description: |-
+                            readOnly will force the ReadOnly setting in VolumeMounts.
+ Default false.
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ description: photonPersistentDisk represents a
+ PhotonController persistent disk attached and
+ mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ pdID:
+ description: pdID is the ID that identifies
+ Photon Controller persistent disk
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ description: portworxVolume represents a portworx
+ volume attached and mounted on kubelets host
+ machine
+ properties:
+ fsType:
+ description: |-
+                            fsType represents the filesystem type to mount
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ volumeID:
+ description: volumeID uniquely identifies
+ a Portworx volume
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ description: projected items for all in one resources
+ secrets, configmaps, and downward API
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: |-
+ sources is the list of volume projections. Each entry in this list
+ handles one source.
+ items:
+ description: |-
+ Projection that may be projected along with other supported volume types.
+ Exactly one of these fields must be set.
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+
+ Kubelet performs aggressive normalization of the PEM contents written
+ into the pod filesystem. Esoteric PEM features such as inter-block
+ comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet
+ may change the order over time.
+ properties:
+ labelSelector:
+ description: |-
+ Select all ClusterTrustBundles that match this label selector. Only has
+ effect if signerName is set. Mutually-exclusive with name. If unset,
+ interpreted as "match nothing". If set but empty, interpreted as "match
+ everything".
+ properties:
+ matchExpressions:
+ description: matchExpressions
+ is a list of label selector
+ requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the
+ label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ name:
+ description: |-
+ Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ with signerName and labelSelector.
+ type: string
+ optional:
+ description: |-
+ If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ aren't available. If using name, then the named ClusterTrustBundle is
+ allowed not to exist. If using signerName, then the combination of
+ signerName and labelSelector is allowed to match zero
+ ClusterTrustBundles.
+ type: boolean
+ path:
+ description: Relative path from
+ the volume root to write the bundle.
+ type: string
+ signerName:
+ description: |-
+ Select all ClusterTrustBundles that match this signer name.
+ Mutually-exclusive with name. The contents of all selected
+ ClusterTrustBundles will be unified and deduplicated.
+ type: string
+ required:
+ - path
+ type: object
+ configMap:
+ description: configMap information about
+ the configMap data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key
+ to a path within a volume.
+ properties:
+ key:
+ description: key is the key
+ to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specify whether
+ the ConfigMap or its keys must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ downwardAPI:
+ description: downwardAPI information
+ about the downwardAPI data to project
+ properties:
+ items:
+ description: Items is a list of
+ DownwardAPIVolume file
+ items:
+ description: DownwardAPIVolumeFile
+ represents information to create
+ the file containing the pod
+ field
+ properties:
+ fieldRef:
+ description: 'Required: Selects
+ a field of the pod: only
+ annotations, labels, name,
+ namespace and uid are supported.'
+ properties:
+ apiVersion:
+ description: Version of
+ the schema the FieldPath
+ is written in terms
+ of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the
+ field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path
+ is the relative path name
+ of the file to be created.
+ Must not be absolute or
+ contain the ''..'' path.
+ Must be utf-8 encoded. The
+ first item of the relative
+ path must not start with
+ ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container
+ name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies
+ the output format of
+ the exposed resources,
+ defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required:
+ resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ secret:
+ description: secret information about
+ the secret data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key
+ to a path within a volume.
+ properties:
+ key:
+ description: key is the key
+ to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional field specify
+ whether the Secret or its key
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is
+ information about the serviceAccountToken
+ data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+                                    its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
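+                      # Illustrative sketch (not part of the generated schema): a projected volume
+                      # combining a configMap source and a serviceAccountToken source as described
+                      # above; the names and audience are hypothetical (defaultMode 420 is 0644).
+                      #   volumes:
+                      #   - name: combined
+                      #     projected:
+                      #       defaultMode: 420
+                      #       sources:
+                      #       - configMap:
+                      #           name: app-config
+                      #       - serviceAccountToken:
+                      #           audience: vault
+                      #           expirationSeconds: 3600
+                      #           path: token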
+ quobyte:
+ description: quobyte represents a Quobyte mount
+ on the host that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+                            Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ default: /etc/ceph/keyring
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ pool:
+ default: rbd
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is name of the authentication secret for RBDUser. If provided
+ overrides keyring.
+ Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ default: admin
+ description: |-
+ user is the rados user name.
+ Default is admin.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ description: scaleIO represents a ScaleIO persistent
+ volume attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ default: xfs
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs".
+ Default is "xfs".
+ type: string
+ gateway:
+ description: gateway is the host address of
+ the ScaleIO API Gateway.
+ type: string
+ protectionDomain:
+ description: protectionDomain is the name
+ of the ScaleIO Protection Domain for the
+ configured storage.
+ type: string
+ readOnly:
+ description: |-
+                            readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef references to the secret for ScaleIO user and other
+ sensitive information. If this is not provided, Login operation will fail.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ sslEnabled:
+                        description: sslEnabled flag enables/disables
+                          SSL communication with the Gateway; default
+                          false
+ type: boolean
+ storageMode:
+ default: ThinProvisioned
+ description: |-
+ storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ Default is ThinProvisioned.
+ type: string
+ storagePool:
+ description: storagePool is the ScaleIO Storage
+ Pool associated with the protection domain.
+ type: string
+ system:
+ description: system is the name of the storage
+ system as configured in ScaleIO.
+ type: string
+ volumeName:
+ description: |-
+ volumeName is the name of a volume already created in the ScaleIO system
+ that is associated with this volume source.
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ description: |-
+ secret represents a secret that should populate this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values
+ for mode bits. Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+                            items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path
+ within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ optional:
+ description: optional field specify whether
+ the Secret or its keys must be defined
+ type: boolean
+ secretName:
+ description: |-
+ secretName is the name of the secret in the pod's namespace to use.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ type: string
+ type: object
+ storageos:
+ description: storageOS represents a StorageOS
+ volume attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef specifies the secret to use for obtaining the StorageOS API
+ credentials. If not specified, default values will be attempted.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeName:
+ description: |-
+ volumeName is the human-readable name of the StorageOS volume. Volume
+ names are only unique within a namespace.
+ type: string
+ volumeNamespace:
+ description: |-
+ volumeNamespace specifies the scope of the volume within StorageOS. If no
+ namespace is specified then the Pod's namespace will be used. This allows the
+ Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ Set VolumeName to any name to override the default behaviour.
+ Set to "default" if you are not using namespaces within StorageOS.
+ Namespaces that do not pre-exist within StorageOS will be created.
+ type: string
+ type: object
+ vsphereVolume:
+ description: vsphereVolume represents a vSphere
+ volume attached and mounted on kubelets host
+ machine
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ storagePolicyID:
+ description: storagePolicyID is the storage
+ Policy Based Management (SPBM) profile ID
+ associated with the StoragePolicyName.
+ type: string
+ storagePolicyName:
+ description: storagePolicyName is the storage
+ Policy Based Management (SPBM) profile name.
+ type: string
+ volumePath:
+ description: volumePath is the path that identifies
+ vSphere volume vmdk
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - containers
+ type: object
+ type: object
+ topologyRequest:
+ description: topologyRequest defines the topology request for
+ the PodSet.
+ properties:
+ preferred:
+ description: |-
+ preferred indicates the topology level preferred by the PodSet, as
+ indicated by the `kueue.x-k8s.io/podset-preferred-topology` PodSet
+ annotation.
+ type: string
+ required:
+ description: |-
+ required indicates the topology level required by the PodSet, as
+ indicated by the `kueue.x-k8s.io/podset-required-topology` PodSet
+ annotation.
+ type: string
+ type: object
+ required:
+ - count
+ - template
+ type: object
+ x-kubernetes-validations:
+ - message: minCount should be positive and less or equal to count
+ rule: 'has(self.minCount) ? self.minCount <= self.count : true'
+ maxItems: 8
+ minItems: 1
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
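+            # Illustrative sketch (not part of the generated schema): the PodSet annotations
+            # that populate topologyRequest, set on the job's pod template; the topology level
+            # mirrors the example used elsewhere in this CRD.
+            #   metadata:
+            #     annotations:
+            #       kueue.x-k8s.io/podset-preferred-topology: cloud.provider.com/topology-rack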
+ priority:
+ description: |-
+ Priority determines the order of access to the resources managed by the
+ ClusterQueue where the workload is queued.
+ The priority value is populated from PriorityClassName.
+ The higher the value, the higher the priority.
+ If priorityClassName is specified, priority must not be null.
+ format: int32
+ type: integer
+ priorityClassName:
+ description: |-
+ If specified, indicates the workload's priority.
+ "system-node-critical" and "system-cluster-critical" are two special
+ keywords which indicate the highest priorities with the former being
+ the highest priority. Any other name must be defined by creating a
+ PriorityClass object with that name. If not specified, the workload
+ priority will be default or zero if there is no default.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ priorityClassSource:
+ default: ""
+ description: |-
+ priorityClassSource determines whether the priorityClass field refers to a pod PriorityClass or kueue.x-k8s.io/workloadpriorityclass.
+ Workload's PriorityClass can accept the name of a pod priorityClass or a workloadPriorityClass.
+ When using pod PriorityClass, a priorityClassSource field has the scheduling.k8s.io/priorityclass value.
+ enum:
+ - kueue.x-k8s.io/workloadpriorityclass
+ - scheduling.k8s.io/priorityclass
+ - ""
+ type: string
+ queueName:
+ description: |-
+ queueName is the name of the LocalQueue the Workload is associated with.
+ queueName cannot be changed while .status.admission is not null.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ required:
+ - podSets
+ type: object
+ x-kubernetes-validations:
+ - message: priority should not be nil when priorityClassName is set
+ rule: 'has(self.priorityClassName) ? has(self.priority) : true'
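+        # Illustrative sketch (not part of the generated schema): a minimal Workload spec
+        # exercising the queueName, priority, and podSets fields defined above, assuming the
+        # v1beta1 API version served by this CRD; the names, queue, and image are hypothetical.
+        # Workloads are normally created by Kueue's job integrations rather than by hand.
+        #   apiVersion: kueue.x-k8s.io/v1beta1
+        #   kind: Workload
+        #   metadata:
+        #     name: sample-workload
+        #   spec:
+        #     queueName: team-a-queue
+        #     priorityClassName: high
+        #     priorityClassSource: kueue.x-k8s.io/workloadpriorityclass
+        #     priority: 100
+        #     podSets:
+        #     - name: main
+        #       count: 2
+        #       template:
+        #         spec:
+        #           containers:
+        #           - name: worker
+        #             image: busybox
+        #             resources:
+        #               requests:
+        #                 cpu: "1"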
+ status:
+ description: WorkloadStatus defines the observed state of Workload
+ properties:
+ accumulatedPastExexcutionTimeSeconds:
+ description: |-
+ accumulatedPastExexcutionTimeSeconds holds the total time, in seconds, the workload spent
+ in Admitted state, in the previous `Admit` - `Evict` cycles.
+ format: int32
+ type: integer
+ admission:
+ description: |-
+ admission holds the parameters of the admission of the workload by a
+ ClusterQueue. admission can be set back to null, but its fields cannot be
+ changed once set.
+ properties:
+ clusterQueue:
+ description: clusterQueue is the name of the ClusterQueue that
+ admitted this workload.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ podSetAssignments:
+ description: PodSetAssignments hold the admission results for
+ each of the .spec.podSets entries.
+ items:
+ properties:
+ count:
+ description: |-
+ count is the number of pods taken into account at admission time.
+ This field will not change in case of quota reclaim.
+ Value could be missing for Workloads created before this field was added,
+ in that case spec.podSets[*].count value will be used.
+ format: int32
+ minimum: 0
+ type: integer
+ flavors:
+ additionalProperties:
+ description: ResourceFlavorReference is the name of the
+ ResourceFlavor.
+ maxLength: 253
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+ type: string
+ description: Flavors are the flavors assigned to the workload
+ for each resource.
+ type: object
+ name:
+ default: main
+ description: Name is the name of the podSet. It should match
+ one of the names in .spec.podSets.
+ maxLength: 63
+ pattern: ^(?i)[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+ type: string
+ resourceUsage:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ resourceUsage keeps track of the total resources all the pods in the podset need to run.
+
+ Beside what is provided in podSet's specs, this calculation takes into account
+ the LimitRange defaults and RuntimeClass overheads at the moment of admission.
+ This field will not change in case of quota reclaim.
+ type: object
+ topologyAssignment:
+ description: |-
+ topologyAssignment indicates the topology assignment divided into
+ topology domains corresponding to the lowest level of the topology.
+ The assignment specifies the number of Pods to be scheduled per topology
+ domain and specifies the node selectors for each topology domain, in the
+ following way: the node selector keys are specified by the levels field
+ (same for all domains), and the corresponding node selector value is
+ specified by the domains.values subfield.
+
+ Example:
+
+ topologyAssignment:
+ levels:
+ - cloud.provider.com/topology-block
+ - cloud.provider.com/topology-rack
+ domains:
+ - values: [block-1, rack-1]
+ count: 4
+ - values: [block-1, rack-2]
+ count: 2
+
+ Here:
+ - 4 Pods are to be scheduled on nodes matching the node selector:
+ cloud.provider.com/topology-block: block-1
+ cloud.provider.com/topology-rack: rack-1
+ - 2 Pods are to be scheduled on nodes matching the node selector:
+ cloud.provider.com/topology-block: block-1
+ cloud.provider.com/topology-rack: rack-2
+ properties:
+ domains:
+ description: |-
+ domains is a list of topology assignments split by topology domains at
+ the lowest level of the topology.
+ items:
+ properties:
+ count:
+ description: |-
+ count indicates the number of Pods to be scheduled in the topology
+ domain indicated by the values field.
+ format: int32
+ minimum: 1
+ type: integer
+ values:
+ description: |-
+ values is an ordered list of node selector values describing a topology
+ domain. The values correspond to the consecutive topology levels, from
+ the highest to the lowest.
+ items:
+ type: string
+ maxItems: 8
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - count
+ - values
+ type: object
+ type: array
+ levels:
+ description: |-
+ levels is an ordered list of keys denoting the levels of the assigned
+ topology (i.e. node label keys), from the highest to the lowest level of
+ the topology.
+ items:
+ type: string
+ maxItems: 8
+ minItems: 1
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - domains
+ - levels
+ type: object
+ required:
+ - name
+ type: object
+ maxItems: 8
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - clusterQueue
+ - podSetAssignments
+ type: object
+ admissionChecks:
+ description: admissionChecks lists all the admission checks required
+ by the workload and their current status
+ items:
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ name:
+ description: name identifies the admission check.
+ maxLength: 316
+ type: string
+ podSetUpdates:
+ items:
+ description: |-
+ PodSetUpdate contains a list of pod set modifications suggested by AdmissionChecks.
+ The modifications should be additive only - modifications of already existing keys
+ or having the same key provided by multiple AdmissionChecks is not allowed and will
+ result in failure during workload admission.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ name:
+ description: Name of the PodSet to modify. Should match
+ to one of the Workload's PodSets.
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ type: object
+ tolerations:
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple using the matching operator .
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ maxItems: 8
+ type: array
+ x-kubernetes-validations:
+ - message: operator must be Exists when 'key' is empty,
+ which means 'match all values and all keys'
+ rule: 'self.all(x, !has(x.key) ? x.operator == ''Exists''
+ : true)'
+ - message: effect must be 'NoExecute' when 'tolerationSeconds'
+ is set
+ rule: 'self.all(x, has(x.tolerationSeconds) ? x.effect
+ == ''NoExecute'' : true)'
+ - message: 'supported toleration values: ''Equal''(default),
+ ''Exists'''
+ rule: self.all(x, !has(x.operator) || x.operator in
+ ['Equal', 'Exists'])
+ - message: a value must be empty when 'operator' is 'Exists'
+ rule: 'self.all(x, has(x.operator) && x.operator ==
+ ''Exists'' ? !has(x.value) : true)'
+ - message: 'supported taint effect values: ''NoSchedule'',
+ ''PreferNoSchedule'', ''NoExecute'''
+ rule: self.all(x, !has(x.effect) || x.effect in ['NoSchedule',
+ 'PreferNoSchedule', 'NoExecute'])
+ required:
+ - name
+ type: object
+ maxItems: 8
+ type: array
+ x-kubernetes-list-type: atomic
+ state:
+ description: state of the admissionCheck, one of Pending, Ready,
+ Retry, Rejected
+ enum:
+ - Pending
+ - Ready
+ - Retry
+ - Rejected
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - name
+ - state
+ type: object
+ maxItems: 8
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ conditions:
+ description: |-
+ conditions hold the latest available observations of the Workload
+ current state.
+
+ The type of the condition could be:
+
+ - Admitted: the Workload was admitted through a ClusterQueue.
+ - Finished: the associated workload finished running (failed or succeeded).
+ - PodsReady: at least `.spec.podSets[*].count` Pods are ready or have
+ succeeded.
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ reclaimablePods:
+ description: |-
+ reclaimablePods keeps track of the number of pods within a podset for which
+ the resource reservation is no longer needed.
+ items:
+ properties:
+ count:
+ description: count is the number of pods for which the requested
+ resources are no longer needed.
+ format: int32
+ minimum: 0
+ type: integer
+ name:
+ description: name is the PodSet name.
+ type: string
+ required:
+ - count
+ - name
+ type: object
+ maxItems: 8
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ requeueState:
+ description: |-
+ requeueState holds the re-queue state
+ when a workload meets Eviction with PodsReadyTimeout reason.
+ properties:
+ count:
+ description: |-
+ count records the number of times a workload has been re-queued
+ When a deactivated (`.spec.activate`=`false`) workload is reactivated (`.spec.activate`=`true`),
+ this count would be reset to null.
+ format: int32
+ minimum: 0
+ type: integer
+ requeueAt:
+ description: |-
+ requeueAt records the time when a workload will be re-queued.
+ When a deactivated (`.spec.activate`=`false`) workload is reactivated (`.spec.activate`=`true`),
+ this time would be reset to null.
+ format: date-time
+ type: string
+ type: object
+ resourceRequests:
+ description: |-
+ resourceRequests provides a detailed view of the resources that were
+ requested by a non-admitted workload when it was considered for admission.
+ If admission is non-null, resourceRequests will be empty because
+ admission.resourceUsage contains the detailed information.
+ items:
+ properties:
+ name:
+ default: main
+ description: name is the name of the podSet. It should match
+ one of the names in .spec.podSets.
+ maxLength: 63
+ pattern: ^(?i)[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+ type: string
+ resources:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ resources is the total resources all the pods in the podset need to run.
+
+ Beside what is provided in podSet's specs, this value also takes into account
+ the LimitRange defaults and RuntimeClass overheads at the moment of consideration
+ and the application of resource.excludeResourcePrefixes and resource.transformations.
+ type: object
+ required:
+ - name
+ type: object
+ maxItems: 8
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ type: object
+ type: object
+ x-kubernetes-validations:
+ - message: podSetAssignments must have the same number of podSets as the spec
+ rule: 'has(self.status) && has(self.status.conditions) && self.status.conditions.exists(c,
+ c.type == ''QuotaReserved'' && c.status == ''True'') && has(self.status.admission)
+ ? size(self.spec.podSets) == size(self.status.admission.podSetAssignments)
+ : true'
+ - message: field is immutable
+ rule: '(has(oldSelf.status) && has(oldSelf.status.conditions) && oldSelf.status.conditions.exists(c,
+ c.type == ''QuotaReserved'' && c.status == ''True'')) ? (oldSelf.spec.priorityClassSource
+ == self.spec.priorityClassSource) : true'
+ - message: field is immutable
+ rule: '(has(oldSelf.status) && has(oldSelf.status.conditions) && oldSelf.status.conditions.exists(c,
+ c.type == ''QuotaReserved'' && c.status == ''True'') && has(oldSelf.spec.priorityClassName)
+ && has(self.spec.priorityClassName)) ? (oldSelf.spec.priorityClassName
+ == self.spec.priorityClassName) : true'
+ - message: field is immutable
+ rule: '(has(oldSelf.status) && has(oldSelf.status.conditions) && oldSelf.status.conditions.exists(c,
+ c.type == ''QuotaReserved'' && c.status == ''True'')) && (has(self.status)
+ && has(self.status.conditions) && self.status.conditions.exists(c, c.type
+ == ''QuotaReserved'' && c.status == ''True'')) && has(oldSelf.spec.queueName)
+ && has(self.spec.queueName) ? oldSelf.spec.queueName == self.spec.queueName
+ : true'
+ - message: maximumExecutionTimeSeconds is immutable while admitted
+ rule: ((has(oldSelf.status) && has(oldSelf.status.conditions) && oldSelf.status.conditions.exists(c,
+ c.type == 'Admitted' && c.status == 'True')) && (has(self.status) && has(self.status.conditions)
+ && self.status.conditions.exists(c, c.type == 'Admitted' && c.status ==
+ 'True')))?((has(oldSelf.spec.maximumExecutionTimeSeconds)?oldSelf.spec.maximumExecutionTimeSeconds:0)
+ == (has(self.spec.maximumExecutionTimeSeconds)?self.spec.maximumExecutionTimeSeconds:0)):true
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-controller-manager
+ namespace: kueue-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-leader-election-role
+ namespace: kueue-system
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+---
+aggregationRule:
+ clusterRoleSelectors:
+ - matchLabels:
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-batch-admin-role
+---
+aggregationRule:
+ clusterRoleSelectors:
+ - matchLabels:
+ rbac.kueue.x-k8s.io/batch-user: "true"
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-batch-user-role
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ name: kueue-clusterqueue-editor-role
+rules:
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - clusterqueues
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - clusterqueues/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ name: kueue-clusterqueue-viewer-role
+rules:
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - clusterqueues
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - clusterqueues/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-job-editor-role
+rules:
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-job-viewer-role
+rules:
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-jobset-editor-role
+rules:
+- apiGroups:
+ - jobset.x-k8s.io
+ resources:
+ - jobsets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - jobset.x-k8s.io
+ resources:
+ - jobsets/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-jobset-viewer-role
+rules:
+- apiGroups:
+ - jobset.x-k8s.io
+ resources:
+ - jobsets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - jobset.x-k8s.io
+ resources:
+ - jobsets/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ name: kueue-localqueue-editor-role
+rules:
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - localqueues
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - localqueues/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-localqueue-viewer-role
+rules:
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - localqueues
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - localqueues/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-manager-role
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - limitranges
+ - namespaces
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - pods/finalizers
+ verbs:
+ - get
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - pods/status
+ verbs:
+ - get
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - podtemplates
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - mutatingwebhookconfigurations
+ - validatingwebhookconfigurations
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - validatingadmissionpolicies
+ - validatingadmissionpolicybindings
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - autoscaling.x-k8s.io
+ resources:
+ - provisioningrequests
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - autoscaling.x-k8s.io
+ resources:
+ - provisioningrequests/status
+ verbs:
+ - get
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs/finalizers
+ - jobs/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - flowcontrol.apiserver.k8s.io
+ resources:
+ - flowschemas
+ - prioritylevelconfigurations
+ verbs:
+ - list
+ - watch
+- apiGroups:
+ - flowcontrol.apiserver.k8s.io
+ resources:
+ - flowschemas/status
+ verbs:
+ - patch
+- apiGroups:
+ - jobset.x-k8s.io
+ resources:
+ - jobsets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - jobset.x-k8s.io
+ resources:
+ - jobsets/finalizers
+ verbs:
+ - get
+ - update
+- apiGroups:
+ - jobset.x-k8s.io
+ resources:
+ - jobsets/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - mpijobs
+ - mxjobs
+ - paddlejobs
+ - pytorchjobs
+ - tfjobs
+ - xgboostjobs
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - mpijobs/finalizers
+ - mxjobs/finalizers
+ - mxjobs/status
+ - paddlejobs/finalizers
+ - pytorchjobs/finalizers
+ - tfjobs/finalizers
+ - xgboostjobs/finalizers
+ verbs:
+ - get
+ - update
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - mpijobs/status
+ - paddlejobs/status
+ - pytorchjobs/status
+ - tfjobs/status
+ - xgboostjobs/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - admissionchecks
+ - clusterqueues
+ - cohorts
+ - localqueues
+ - workloads
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - admissionchecks/finalizers
+ - clusterqueues/finalizers
+ - localqueues/finalizers
+ - resourceflavors/finalizers
+ - workloads/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - admissionchecks/status
+ - clusterqueues/status
+ - localqueues/status
+ - multikueueclusters/status
+ - workloads/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - multikueueclusters
+ - multikueueconfigs
+ - provisioningrequestconfigs
+ - topologies
+ - workloadpriorityclasses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - resourceflavors
+ verbs:
+ - delete
+ - get
+ - list
+ - update
+ - watch
+- apiGroups:
+ - node.k8s.io
+ resources:
+ - runtimeclasses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ray.io
+ resources:
+ - rayclusters
+ - rayjobs
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ray.io
+ resources:
+ - rayclusters/finalizers
+ - rayclusters/status
+ - rayjobs/finalizers
+ - rayjobs/status
+ verbs:
+ - get
+ - update
+- apiGroups:
+ - scheduling.k8s.io
+ resources:
+ - priorityclasses
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-metrics-reader
+rules:
+- nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-mpijob-editor-role
+rules:
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - mpijobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - mpijobs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-mpijob-viewer-role
+rules:
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - mpijobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - mpijobs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-mxjob-editor-role
+rules:
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - mxjobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - mxjobs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-mxjob-viewer-role
+rules:
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - mxjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - mxjobs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-paddlejob-editor-role
+rules:
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - paddlejobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - paddlejobs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-paddlejob-viewer-role
+rules:
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - paddlejobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - paddlejobs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ name: kueue-pending-workloads-cq-viewer-role
+rules:
+- apiGroups:
+ - visibility.kueue.x-k8s.io
+ resources:
+ - clusterqueues/pendingworkloads
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-pending-workloads-lq-viewer-role
+rules:
+- apiGroups:
+ - visibility.kueue.x-k8s.io
+ resources:
+ - localqueues/pendingworkloads
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-proxy-role
+rules:
+- apiGroups:
+ - authentication.k8s.io
+ resources:
+ - tokenreviews
+ verbs:
+ - create
+- apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-pytorchjob-editor-role
+rules:
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - pytorchjobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - pytorchjobs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-pytorchjob-viewer-role
+rules:
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - pytorchjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - pytorchjobs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-raycluster-editor-role
+rules:
+- apiGroups:
+ - ray.io
+ resources:
+ - rayclusters
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ray.io
+ resources:
+ - rayclusters/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ name: kueue-raycluster-viewer-role
+rules:
+- apiGroups:
+ - ray.io
+ resources:
+ - rayclusters
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ray.io
+ resources:
+ - rayclusters/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-rayjob-editor-role
+rules:
+- apiGroups:
+ - ray.io
+ resources:
+ - rayjobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ray.io
+ resources:
+ - rayjobs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-rayjob-viewer-role
+rules:
+- apiGroups:
+ - ray.io
+ resources:
+ - rayjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ray.io
+ resources:
+ - rayjobs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ name: kueue-resourceflavor-editor-role
+rules:
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - resourceflavors
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ name: kueue-resourceflavor-viewer-role
+rules:
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - resourceflavors
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-tfjob-editor-role
+rules:
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - tfjobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - tfjobs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-tfjob-viewer-role
+rules:
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - tfjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - tfjobs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ name: kueue-workload-editor-role
+rules:
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - workloads
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - workloads/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-workload-viewer-role
+rules:
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - workloads
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - kueue.x-k8s.io
+ resources:
+ - workloads/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-xgboostjob-editor-role
+rules:
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - xgboostjobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - xgboostjobs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ rbac.kueue.x-k8s.io/batch-admin: "true"
+ rbac.kueue.x-k8s.io/batch-user: "true"
+ name: kueue-xgboostjob-viewer-role
+rules:
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - xgboostjobs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - kubeflow.org
+ resources:
+ - xgboostjobs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-visibility-server-auth-reader
+ namespace: kube-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+subjects:
+- kind: ServiceAccount
+ name: kueue-controller-manager
+ namespace: kueue-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-leader-election-rolebinding
+ namespace: kueue-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: kueue-leader-election-role
+subjects:
+- kind: ServiceAccount
+ name: kueue-controller-manager
+ namespace: kueue-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kueue-manager-role
+subjects:
+- kind: ServiceAccount
+ name: kueue-controller-manager
+ namespace: kueue-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-proxy-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kueue-proxy-role
+subjects:
+- kind: ServiceAccount
+ name: kueue-controller-manager
+ namespace: kueue-system
+---
+apiVersion: v1
+data:
+ controller_manager_config.yaml: |
+ apiVersion: config.kueue.x-k8s.io/v1beta1
+ kind: Configuration
+ health:
+ healthProbeBindAddress: :8081
+ metrics:
+ bindAddress: :8080
+ # enableClusterQueueResources: true
+ webhook:
+ port: 9443
+ leaderElection:
+ leaderElect: true
+ resourceName: c1f6bfd2.kueue.x-k8s.io
+ controller:
+ groupKindConcurrency:
+ Job.batch: 5
+ Pod: 5
+ Workload.kueue.x-k8s.io: 5
+ LocalQueue.kueue.x-k8s.io: 1
+ Cohort.kueue.x-k8s.io: 1
+ ClusterQueue.kueue.x-k8s.io: 1
+ ResourceFlavor.kueue.x-k8s.io: 1
+ clientConnection:
+ qps: 50
+ burst: 100
+ #pprofBindAddress: :8083
+ #waitForPodsReady:
+ # enable: false
+ # timeout: 5m
+ # blockAdmission: false
+ # requeuingStrategy:
+ # timestamp: Eviction
+ # backoffLimitCount: null # null indicates infinite requeuing
+ # backoffBaseSeconds: 60
+ # backoffMaxSeconds: 3600
+ #manageJobsWithoutQueueName: true
+ #internalCertManagement:
+ # enable: false
+ # webhookServiceName: ""
+ # webhookSecretName: ""
+ integrations:
+ frameworks:
+ - "batch/job"
+ - "kubeflow.org/mpijob"
+ - "ray.io/rayjob"
+ - "ray.io/raycluster"
+ - "jobset.x-k8s.io/jobset"
+ - "kubeflow.org/mxjob"
+ - "kubeflow.org/paddlejob"
+ - "kubeflow.org/pytorchjob"
+ - "kubeflow.org/tfjob"
+ - "kubeflow.org/xgboostjob"
+ # - "pod"
+ # - "deployment" # requires enabling pod integration
+ # - "statefulset" # requires enabling pod integration
+ # externalFrameworks:
+ # - "Foo.v1.example.com"
+ # podOptions:
+ # namespaceSelector:
+ # matchExpressions:
+ # - key: kubernetes.io/metadata.name
+ # operator: NotIn
+ # values: [ kube-system, kueue-system ]
+ #fairSharing:
+ # enable: true
+ # preemptionStrategies: [LessThanOrEqualToFinalShare, LessThanInitialShare]
+ #resources:
+ # excludeResourcePrefixes: []
+ # transformations:
+ # - input: nvidia.com/mig-4g.5gb
+ # strategy: Replace | Retain
+ # outputs:
+ # example.com/accelerator-memory: 5Gi
+ # example.com/accelerator-gpc: 4
+kind: ConfigMap
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-manager-config
+ namespace: kueue-system
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-webhook-server-cert
+ namespace: kueue-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-controller-manager-metrics-service
+ namespace: kueue-system
+spec:
+ ports:
+ - name: https
+ port: 8443
+ protocol: TCP
+ targetPort: https
+ selector:
+ control-plane: controller-manager
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-visibility-server
+ namespace: kueue-system
+spec:
+ ports:
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 8082
+ selector:
+ control-plane: controller-manager
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-webhook-service
+ namespace: kueue-system
+spec:
+ ports:
+ - port: 443
+ protocol: TCP
+ targetPort: 9443
+ selector:
+ control-plane: controller-manager
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-controller-manager
+ namespace: kueue-system
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ control-plane: controller-manager
+ template:
+ metadata:
+ annotations:
+ kubectl.kubernetes.io/default-container: manager
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ spec:
+ containers:
+ - args:
+ - --config=/controller_manager_config.yaml
+ - --zap-log-level=2
+ - --feature-gates=TopologyAwareScheduling=true
+ command:
+ - /manager
+ image: registry.k8s.io/kueue/kueue:v0.9.1
+ imagePullPolicy: Always
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 8081
+ initialDelaySeconds: 15
+ periodSeconds: 20
+ name: manager
+ ports:
+ - containerPort: 8082
+ name: visibility
+ protocol: TCP
+ - containerPort: 9443
+ name: webhook-server
+ protocol: TCP
+ readinessProbe:
+ httpGet:
+ path: /readyz
+ port: 8081
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ resources:
+ limits:
+ cpu: 500m
+ memory: 512Mi
+ requests:
+ cpu: 500m
+ memory: 512Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ volumeMounts:
+ - mountPath: /tmp/k8s-webhook-server/serving-certs
+ name: cert
+ readOnly: true
+ - mountPath: /controller_manager_config.yaml
+ name: manager-config
+ subPath: controller_manager_config.yaml
+ - args:
+ - --secure-listen-address=0.0.0.0:8443
+ - --upstream=http://127.0.0.1:8080/
+ - --logtostderr=true
+ - --v=10
+ image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
+ name: kube-rbac-proxy
+ ports:
+ - containerPort: 8443
+ name: https
+ protocol: TCP
+ securityContext:
+ runAsNonRoot: true
+ serviceAccountName: kueue-controller-manager
+ terminationGracePeriodSeconds: 10
+ volumes:
+ - name: cert
+ secret:
+ defaultMode: 420
+ secretName: kueue-webhook-server-cert
+ - configMap:
+ name: kueue-manager-config
+ name: manager-config
+ tolerations:
+ - effect: NoSchedule
+ key: components.gke.io/gke-managed-components
+ operator: Equal
+ value: "true"
+---
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: v1alpha1.visibility.kueue.x-k8s.io
+spec:
+ group: visibility.kueue.x-k8s.io
+ groupPriorityMinimum: 100
+ insecureSkipTLSVerify: true
+ service:
+ name: kueue-visibility-server
+ namespace: kueue-system
+ version: v1alpha1
+ versionPriority: 100
+---
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: v1beta1.visibility.kueue.x-k8s.io
+spec:
+ group: visibility.kueue.x-k8s.io
+ groupPriorityMinimum: 100
+ insecureSkipTLSVerify: true
+ service:
+ name: kueue-visibility-server
+ namespace: kueue-system
+ version: v1beta1
+ versionPriority: 100
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-mutating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /mutate--v1-pod
+ failurePolicy: Fail
+ name: mpod.kb.io
+ namespaceSelector:
+ matchExpressions:
+ - key: kubernetes.io/metadata.name
+ operator: NotIn
+ values:
+ - kube-system
+ - kueue-system
+ rules:
+ - apiGroups:
+ - ""
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ resources:
+ - pods
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /mutate-apps-v1-deployment
+ failurePolicy: Fail
+ name: mdeployment.kb.io
+ namespaceSelector:
+ matchExpressions:
+ - key: kubernetes.io/metadata.name
+ operator: NotIn
+ values:
+ - kube-system
+ - kueue-system
+ rules:
+ - apiGroups:
+ - apps
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ resources:
+ - deployments
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /mutate-batch-v1-job
+ failurePolicy: Fail
+ name: mjob.kb.io
+ rules:
+ - apiGroups:
+ - batch
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ resources:
+ - jobs
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /mutate-jobset-x-k8s-io-v1alpha2-jobset
+ failurePolicy: Fail
+ name: mjobset.kb.io
+ rules:
+ - apiGroups:
+ - jobset.x-k8s.io
+ apiVersions:
+ - v1alpha2
+ operations:
+ - CREATE
+ resources:
+ - jobsets
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /mutate-kubeflow-org-v1-mxjob
+ failurePolicy: Fail
+ name: mmxjob.kb.io
+ rules:
+ - apiGroups:
+ - kubeflow.org
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ resources:
+ - mxjobs
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /mutate-kubeflow-org-v1-paddlejob
+ failurePolicy: Fail
+ name: mpaddlejob.kb.io
+ rules:
+ - apiGroups:
+ - kubeflow.org
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ resources:
+ - paddlejobs
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /mutate-kubeflow-org-v1-pytorchjob
+ failurePolicy: Fail
+ name: mpytorchjob.kb.io
+ rules:
+ - apiGroups:
+ - kubeflow.org
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ resources:
+ - pytorchjobs
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /mutate-kubeflow-org-v1-tfjob
+ failurePolicy: Fail
+ name: mtfjob.kb.io
+ rules:
+ - apiGroups:
+ - kubeflow.org
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ resources:
+ - tfjobs
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /mutate-kubeflow-org-v1-xgboostjob
+ failurePolicy: Fail
+ name: mxgboostjob.kb.io
+ rules:
+ - apiGroups:
+ - kubeflow.org
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ resources:
+ - xgboostjobs
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /mutate-kubeflow-org-v2beta1-mpijob
+ failurePolicy: Fail
+ name: mmpijob.kb.io
+ rules:
+ - apiGroups:
+ - kubeflow.org
+ apiVersions:
+ - v2beta1
+ operations:
+ - CREATE
+ resources:
+ - mpijobs
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /mutate-ray-io-v1-raycluster
+ failurePolicy: Fail
+ name: mraycluster.kb.io
+ rules:
+ - apiGroups:
+ - ray.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ resources:
+ - rayclusters
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /mutate-ray-io-v1-rayjob
+ failurePolicy: Fail
+ name: mrayjob.kb.io
+ rules:
+ - apiGroups:
+ - ray.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ resources:
+ - rayjobs
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /mutate-apps-v1-statefulset
+ failurePolicy: Fail
+ name: mstatefulset.kb.io
+ rules:
+ - apiGroups:
+ - apps
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ resources:
+ - statefulsets
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /mutate-kueue-x-k8s-io-v1beta1-clusterqueue
+ failurePolicy: Fail
+ name: mclusterqueue.kb.io
+ rules:
+ - apiGroups:
+ - kueue.x-k8s.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ resources:
+ - clusterqueues
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /mutate-kueue-x-k8s-io-v1beta1-resourceflavor
+ failurePolicy: Fail
+ name: mresourceflavor.kb.io
+ rules:
+ - apiGroups:
+ - kueue.x-k8s.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ resources:
+ - resourceflavors
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /mutate-kueue-x-k8s-io-v1beta1-workload
+ failurePolicy: Fail
+ name: mworkload.kb.io
+ rules:
+ - apiGroups:
+ - kueue.x-k8s.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ resources:
+ - workloads
+ sideEffects: None
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ labels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: kueue
+ control-plane: controller-manager
+ name: kueue-validating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate--v1-pod
+ failurePolicy: Fail
+ name: vpod.kb.io
+ namespaceSelector:
+ matchExpressions:
+ - key: kubernetes.io/metadata.name
+ operator: NotIn
+ values:
+ - kube-system
+ - kueue-system
+ rules:
+ - apiGroups:
+ - ""
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - pods
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate-apps-v1-deployment
+ failurePolicy: Fail
+ name: vdeployment.kb.io
+ namespaceSelector:
+ matchExpressions:
+ - key: kubernetes.io/metadata.name
+ operator: NotIn
+ values:
+ - kube-system
+ - kueue-system
+ rules:
+ - apiGroups:
+ - apps
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - deployments
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate-batch-v1-job
+ failurePolicy: Fail
+ name: vjob.kb.io
+ rules:
+ - apiGroups:
+ - batch
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - jobs
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate-jobset-x-k8s-io-v1alpha2-jobset
+ failurePolicy: Fail
+ name: vjobset.kb.io
+ rules:
+ - apiGroups:
+ - jobset.x-k8s.io
+ apiVersions:
+ - v1alpha2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - jobsets
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate-kubeflow-org-v1-mxjob
+ failurePolicy: Fail
+ name: vmxjob.kb.io
+ rules:
+ - apiGroups:
+ - kubeflow.org
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - mxjobs
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate-kubeflow-org-v1-paddlejob
+ failurePolicy: Fail
+ name: vpaddlejob.kb.io
+ rules:
+ - apiGroups:
+ - kubeflow.org
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - paddlejobs
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate-kubeflow-org-v1-pytorchjob
+ failurePolicy: Fail
+ name: vpytorchjob.kb.io
+ rules:
+ - apiGroups:
+ - kubeflow.org
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - pytorchjobs
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate-kubeflow-org-v1-tfjob
+ failurePolicy: Fail
+ name: vtfjob.kb.io
+ rules:
+ - apiGroups:
+ - kubeflow.org
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - tfjobs
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate-kubeflow-org-v1-xgboostjob
+ failurePolicy: Fail
+ name: vxgboostjob.kb.io
+ rules:
+ - apiGroups:
+ - kubeflow.org
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - xgboostjobs
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate-kubeflow-org-v2beta1-mpijob
+ failurePolicy: Fail
+ name: vmpijob.kb.io
+ rules:
+ - apiGroups:
+ - kubeflow.org
+ apiVersions:
+ - v2beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - mpijobs
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate-ray-io-v1-raycluster
+ failurePolicy: Fail
+ name: vraycluster.kb.io
+ rules:
+ - apiGroups:
+ - ray.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - rayclusters
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate-ray-io-v1-rayjob
+ failurePolicy: Fail
+ name: vrayjob.kb.io
+ rules:
+ - apiGroups:
+ - ray.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - rayjobs
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate-apps-v1-statefulset
+ failurePolicy: Fail
+ name: vstatefulset.kb.io
+ rules:
+ - apiGroups:
+ - apps
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - statefulsets
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate-kueue-x-k8s-io-v1beta1-clusterqueue
+ failurePolicy: Fail
+ name: vclusterqueue.kb.io
+ rules:
+ - apiGroups:
+ - kueue.x-k8s.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - clusterqueues
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate-kueue-x-k8s-io-v1alpha1-cohort
+ failurePolicy: Fail
+ name: vcohort.kb.io
+ rules:
+ - apiGroups:
+ - kueue.x-k8s.io
+ apiVersions:
+ - v1alpha1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - cohorts
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate-kueue-x-k8s-io-v1beta1-resourceflavor
+ failurePolicy: Fail
+ name: vresourceflavor.kb.io
+ rules:
+ - apiGroups:
+ - kueue.x-k8s.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - resourceflavors
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: kueue-webhook-service
+ namespace: kueue-system
+ path: /validate-kueue-x-k8s-io-v1beta1-workload
+ failurePolicy: Fail
+ name: vworkload.kb.io
+ rules:
+ - apiGroups:
+ - kueue.x-k8s.io
+ apiVersions:
+ - v1beta1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - workloads
+ - workloads/status
+ sideEffects: None
diff --git a/modules/management/kubectl-apply/variables.tf b/modules/management/kubectl-apply/variables.tf
index 5f78c81927..c493332e7c 100644
--- a/modules/management/kubectl-apply/variables.tf
+++ b/modules/management/kubectl-apply/variables.tf
@@ -15,14 +15,24 @@
*/
locals {
- supported_versions = ["v0.9.0", "v0.8.1"]
+ kueue_supported_versions = ["v0.9.1", "v0.9.0", "v0.8.1"]
+ jobset_supported_versions = ["v0.7.1", "v0.5.2"]
}
resource "terraform_data" "kueue_validations" {
lifecycle {
precondition {
- condition = !var.kueue.install || contains(local.supported_versions, var.kueue.version)
- error_message = "Supported version of Kueue are ${join(", ", local.supported_versions)}"
+ condition = !var.kueue.install || contains(local.kueue_supported_versions, var.kueue.version)
+ error_message = "Supported version of Kueue are ${join(", ", local.kueue_supported_versions)}"
+ }
+ }
+}
+
+resource "terraform_data" "jobset_validations" {
+ lifecycle {
+ precondition {
+ condition = !var.jobset.install || contains(local.jobset_supported_versions, var.jobset.version)
+ error_message = "Supported version of Jobset are ${join(", ", local.jobset_supported_versions)}"
}
}
}
@@ -60,7 +70,6 @@ variable "kueue" {
config_template_vars = optional(map(any), null)
})
default = {}
-
}
variable "jobset" {
@@ -70,9 +79,4 @@ variable "jobset" {
version = optional(string, "v0.5.2")
})
default = {}
-
- validation {
- condition = !var.jobset.install || contains(["v0.5.2"], var.jobset.version)
- error_message = "Supported version of Jobset is v0.5.2"
- }
}
diff --git a/modules/monitoring/dashboard/versions.tf b/modules/monitoring/dashboard/versions.tf
index 74189a3b70..ad1eb07abf 100644
--- a/modules/monitoring/dashboard/versions.tf
+++ b/modules/monitoring/dashboard/versions.tf
@@ -22,7 +22,7 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:dashboard/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:dashboard/v1.44.0"
}
required_version = ">= 0.14.0"
diff --git a/modules/network/firewall-rules/versions.tf b/modules/network/firewall-rules/versions.tf
index 7d7b6bd037..fefc244b8f 100644
--- a/modules/network/firewall-rules/versions.tf
+++ b/modules/network/firewall-rules/versions.tf
@@ -22,7 +22,7 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:firewall-rules/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:firewall-rules/v1.44.0"
}
required_version = ">= 1.3"
diff --git a/modules/network/gpu-rdma-vpc/README.md b/modules/network/gpu-rdma-vpc/README.md
new file mode 100644
index 0000000000..d5b8f5c894
--- /dev/null
+++ b/modules/network/gpu-rdma-vpc/README.md
@@ -0,0 +1,143 @@
+## Description
+
+This module accomplishes the following:
+
+* Creates one [VPC network][cft-network]
+ * Each VPC contains a variable number of subnetworks as specified in the
+ `subnetworks_template` variable
+ * Each subnetwork contains distinct IP address ranges
+* Outputs the following unique parameters
+ * `subnetwork_interfaces` which is compatible with Slurm and vm-instance
+ modules
+ * `subnetwork_interfaces_gke` which is compatible with GKE modules
+
+This module is a simplified version of the VPC module. Its main difference is
+the `subnetworks_template` variable, which serves as the template for all
+subnetworks created within the network. This template contains the following
+values:
+
+1. `count`: The number of subnetworks to be created
+1. `name_prefix`: The prefix for the subnetwork names
+1. `ip_range`: [CIDR-formatted IP range][cidr]
+1. `region`: The region where the subnetwork will be deployed
+
+> [!WARNING]
+> The `ip_range` should always be large enough to be split into `count`
+> subnetworks, each with room for the number of required connections within.
+
+[cft-network]: https://github.com/terraform-google-modules/terraform-google-network/tree/v10.0.0
+[cidr]: https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation
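+
+As a rough sketch of the arithmetic (this mirrors the module's internal logic
+and is not part of its interface), the per-subnetwork CIDRs are derived from
+the template with Terraform's `cidrsubnet` function, adding
+`ceil(log(count, 2))` bits to the prefix:
+
+```hcl
+# Illustrative only: splitting the default template (count = 8,
+# ip_range = 192.168.0.0/16) into eight /19 subnetworks.
+locals {
+  ip_range     = "192.168.0.0/16"
+  subnet_count = 8
+  new_bits     = ceil(log(local.subnet_count, 2)) # 3 extra prefix bits -> /19
+
+  subnet_cidrs = [
+    for i in range(local.subnet_count) : cidrsubnet(local.ip_range, local.new_bits, i)
+  ]
+  # => ["192.168.0.0/19", "192.168.32.0/19", ..., "192.168.224.0/19"]
+}
+```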
+
+### Example
+
+This snippet uses the gpu-rdma-vpc module to create a new VPC network named
+`test-rdma-net` with 8 subnetworks named `test-mrdma-sub-#`, where `#` ranges
+from 0 to 7. The subnetworks split the `ip_range` evenly, starting from bit 16
+(0-indexed). The resulting subnetworks are consumed by the Slurm nodeset via
+its `additional_networks` setting.
+
+```yaml
+ - id: rdma-net
+ source: modules/network/gpu-rdma-vpc
+ settings:
+ network_name: test-rdma-net
+ network_profile: https://www.googleapis.com/compute/beta/projects/$(vars.project_id)/global/networkProfiles/$(vars.zone)-vpc-roce
+ network_routing_mode: REGIONAL
+ subnetworks_template:
+ name_prefix: test-mrdma-sub
+ count: 8
+ ip_range: 192.168.0.0/16
+ region: $(vars.region)
+
+ - id: a3_nodeset
+ source: community/modules/compute/schedmd-slurm-gcp-v6-nodeset
+ use: [network0]
+ settings:
+ machine_type: a3-ultragpu-8g
+ additional_networks:
+ $(concat(
+ [{
+ network=null,
+ subnetwork=network1.subnetwork_self_link,
+ subnetwork_project=vars.project_id,
+ nic_type="GVNIC",
+ queue_count=null,
+ network_ip="",
+ stack_type=null,
+ access_config=[],
+ ipv6_access_config=[],
+ alias_ip_range=[]
+ }],
+ rdma-net.subnetwork_interfaces
+ ))
+ ...
+```
+
+## License
+
+
+Copyright 2022 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.15.0 |
+
+## Providers
+
+No providers.
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [vpc](#module\_vpc) | terraform-google-modules/network/google | ~> 10.0 |
+
+## Resources
+
+No resources.
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [delete\_default\_internet\_gateway\_routes](#input\_delete\_default\_internet\_gateway\_routes) | If set, ensure that all routes within the network specified whose names begin with 'default-route' and with a next hop of 'default-internet-gateway' are deleted | `bool` | `false` | no |
+| [deployment\_name](#input\_deployment\_name) | The name of the current deployment | `string` | n/a | yes |
+| [enable\_internal\_traffic](#input\_enable\_internal\_traffic) | Enable a firewall rule to allow all internal TCP, UDP, and ICMP traffic within the network | `bool` | `true` | no |
+| [firewall\_log\_config](#input\_firewall\_log\_config) | Firewall log configuration for Toolkit firewall rules (var.enable\_iap\_ssh\_ingress and others) | `string` | `"DISABLE_LOGGING"` | no |
+| [firewall\_rules](#input\_firewall\_rules) | List of firewall rules | `any` | `[]` | no |
+| [mtu](#input\_mtu) | The network MTU (default: 8896). Recommended values: 0 (use Compute Engine default), 1460 (default outside HPC environments), 1500 (Internet default), or 8896 (for Jumbo packets). Allowed are all values in the range 1300 to 8896, inclusively. | `number` | `8896` | no |
+| [network\_description](#input\_network\_description) | An optional description of this resource (changes will trigger resource destroy/create) | `string` | `""` | no |
+| [network\_name](#input\_network\_name) | The name of the network to be created (if unsupplied, will default to "{deployment\_name}-net") | `string` | `null` | no |
+| [network\_profile](#input\_network\_profile) | A full or partial URL of the network profile to apply to this network.<br>This field can be set only at resource creation time. For example, the<br>following are valid URLs:<br>- https://www.googleapis.com/compute/beta/projects/{projectId}/global/networkProfiles/{network_profile_name}<br>- projects/{projectId}/global/networkProfiles/{network\_profile\_name} | `string` | n/a | yes |
+| [network\_routing\_mode](#input\_network\_routing\_mode) | The network routing mode (default "REGIONAL") | `string` | `"REGIONAL"` | no |
+| [nic\_type](#input\_nic\_type) | NIC type for use in modules that use the output | `string` | `"MRDMA"` | no |
+| [project\_id](#input\_project\_id) | Project in which the HPC deployment will be created | `string` | n/a | yes |
+| [region](#input\_region) | The default region for Cloud resources | `string` | n/a | yes |
+| [shared\_vpc\_host](#input\_shared\_vpc\_host) | Makes this project a Shared VPC host if 'true' (default 'false') | `bool` | `false` | no |
+| [subnetworks\_template](#input\_subnetworks\_template) | Specifications for the subnetworks that will be created within this VPC.<br><br>count       (number, required, number of subnets to create, default is 8)<br>name\_prefix (string, required, subnet name prefix, default is deployment name)<br>ip\_range    (string, required, range of IPs for all subnets to share (CIDR format), default is 192.168.0.0/16)<br>region      (string, optional, region to deploy subnets to, defaults to vars.region) | <pre>object({<br>    count       = number<br>    name_prefix = string<br>    ip_range    = string<br>    region      = optional(string)<br>  })</pre> | <pre>{<br>  "count": 8,<br>  "ip_range": "192.168.0.0/16",<br>  "name_prefix": null,<br>  "region": null<br>}</pre> | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [network\_id](#output\_network\_id) | ID of the new VPC network |
+| [network\_name](#output\_network\_name) | Name of the new VPC network |
+| [network\_self\_link](#output\_network\_self\_link) | Self link of the new VPC network |
+| [subnetwork\_interfaces](#output\_subnetwork\_interfaces) | Full list of subnetwork objects belonging to the new VPC network (compatible with vm-instance and Slurm modules) |
+| [subnetwork\_interfaces\_gke](#output\_subnetwork\_interfaces\_gke) | Full list of subnetwork objects belonging to the new VPC network (compatible with gke-node-pool) |
+| [subnetwork\_name\_prefix](#output\_subnetwork\_name\_prefix) | Prefix of the RDMA subnetwork names |
+| [subnetworks](#output\_subnetworks) | Full list of subnetwork objects belonging to the new VPC network |
+
diff --git a/modules/network/gpu-rdma-vpc/main.tf b/modules/network/gpu-rdma-vpc/main.tf
new file mode 100644
index 0000000000..93f4fb9dd3
--- /dev/null
+++ b/modules/network/gpu-rdma-vpc/main.tf
@@ -0,0 +1,114 @@
+/**
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+locals {
+ autoname = replace(var.deployment_name, "_", "-")
+ network_name = var.network_name == null ? "${local.autoname}-net" : var.network_name
+ subnet_prefix = var.subnetworks_template.name_prefix == null ? "${local.autoname}-subnet" : var.subnetworks_template.name_prefix
+
+ new_bits = ceil(log(var.subnetworks_template.count, 2))
+ template_subnetworks = [for i in range(var.subnetworks_template.count) :
+ {
+ subnet_name = "${local.subnet_prefix}-${i}"
+ subnet_region = coalesce(var.subnetworks_template.region, var.region)
+ subnet_ip = cidrsubnet(var.subnetworks_template.ip_range, local.new_bits, i)
+ }
+ ]
+
+ firewall_log_api_values = {
+ "DISABLE_LOGGING" = null
+ "INCLUDE_ALL_METADATA" = { metadata = "INCLUDE_ALL_METADATA" },
+ "EXCLUDE_ALL_METADATA" = { metadata = "EXCLUDE_ALL_METADATA" },
+ }
+ firewall_log_config = lookup(local.firewall_log_api_values, var.firewall_log_config, null)
+
+ allow_internal_traffic = {
+ name = "${local.network_name}-fw-allow-internal-traffic"
+ priority = null
+ description = "allow traffic between nodes of this VPC"
+ direction = "INGRESS"
+ ranges = [var.subnetworks_template.ip_range]
+ source_tags = null
+ source_service_accounts = null
+ target_tags = null
+ target_service_accounts = null
+ allow = [{
+ protocol = "tcp"
+ ports = ["0-65535"]
+ }, {
+ protocol = "udp"
+ ports = ["0-65535"]
+ }, {
+ protocol = "icmp"
+ ports = null
+ },
+ ]
+ deny = []
+ log_config = local.firewall_log_config
+ }
+
+ firewall_rules = concat(
+ var.firewall_rules,
+ var.enable_internal_traffic ? [local.allow_internal_traffic] : [],
+ )
+
+ output_subnets = [
+ for subnet in module.vpc.subnets : {
+ network = null
+ subnetwork = subnet.self_link
+ subnetwork_project = null # will populate from subnetwork_self_link
+ network_ip = null
+ nic_type = var.nic_type
+ stack_type = null
+ queue_count = null
+ access_config = []
+ ipv6_access_config = []
+ alias_ip_range = []
+ }
+ ]
+
+ output_subnets_gke = [
+ for i in range(length(module.vpc.subnets)) : {
+ network = local.network_name
+ subnetwork = local.template_subnetworks[i].subnet_name
+ subnetwork_project = var.project_id
+ network_ip = null
+ nic_type = var.nic_type
+ stack_type = null
+ queue_count = null
+ access_config = []
+ ipv6_access_config = []
+ alias_ip_range = []
+ }
+ ]
+}
+
+module "vpc" {
+ source = "terraform-google-modules/network/google"
+ version = "~> 10.0"
+
+ network_name = local.network_name
+ project_id = var.project_id
+ auto_create_subnetworks = false
+ subnets = local.template_subnetworks
+ routing_mode = var.network_routing_mode
+ mtu = var.mtu
+ description = var.network_description
+ shared_vpc_host = var.shared_vpc_host
+ delete_default_internet_gateway_routes = var.delete_default_internet_gateway_routes
+ firewall_rules = local.firewall_rules
+ network_profile = var.network_profile
+}
diff --git a/modules/network/gpu-rdma-vpc/metadata.yaml b/modules/network/gpu-rdma-vpc/metadata.yaml
new file mode 100644
index 0000000000..4c2f23a8d7
--- /dev/null
+++ b/modules/network/gpu-rdma-vpc/metadata.yaml
@@ -0,0 +1,19 @@
+# Copyright 2023 "Google LLC"
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+spec:
+ requirements:
+ services:
+ - compute.googleapis.com
diff --git a/modules/network/gpu-rdma-vpc/outputs.tf b/modules/network/gpu-rdma-vpc/outputs.tf
new file mode 100644
index 0000000000..0a21f1d3f2
--- /dev/null
+++ b/modules/network/gpu-rdma-vpc/outputs.tf
@@ -0,0 +1,59 @@
+/**
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+output "network_name" {
+ description = "Name of the new VPC network"
+ value = module.vpc.network_name
+ depends_on = [module.vpc]
+}
+
+output "network_id" {
+ description = "ID of the new VPC network"
+ value = module.vpc.network_id
+ depends_on = [module.vpc]
+}
+
+output "network_self_link" {
+ description = "Self link of the new VPC network"
+ value = module.vpc.network_self_link
+ depends_on = [module.vpc]
+}
+
+output "subnetworks" {
+ description = "Full list of subnetwork objects belonging to the new VPC network"
+ value = module.vpc.subnets
+ depends_on = [module.vpc]
+}
+
+output "subnetwork_interfaces" {
+ description = "Full list of subnetwork objects belonging to the new VPC network (compatible with vm-instance and Slurm modules)"
+ value = local.output_subnets
+ depends_on = [module.vpc]
+}
+
+# The output subnetwork_interfaces is compatible with vm-instance module but not with gke-node-pool
+# See https://github.com/GoogleCloudPlatform/cluster-toolkit/blob/99493df21cecf6a092c45298bf7a45e0343cf622/modules/compute/vm-instance/variables.tf#L220
+# So, we need a separate output that makes the network and subnetwork names available
+output "subnetwork_interfaces_gke" {
+ description = "Full list of subnetwork objects belonging to the new VPC network (compatible with gke-node-pool)"
+ value = local.output_subnets_gke
+ depends_on = [module.vpc]
+}
+
+output "subnetwork_name_prefix" {
+ description = "Prefix of the RDMA subnetwork names"
+ value = var.subnetworks_template.name_prefix
+}
diff --git a/modules/network/gpu-rdma-vpc/variables.tf b/modules/network/gpu-rdma-vpc/variables.tf
new file mode 100644
index 0000000000..6c9be8ee9a
--- /dev/null
+++ b/modules/network/gpu-rdma-vpc/variables.tf
@@ -0,0 +1,162 @@
+/**
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+variable "project_id" {
+ description = "Project in which the HPC deployment will be created"
+ type = string
+}
+
+variable "network_name" {
+ description = "The name of the network to be created (if unsupplied, will default to \"{deployment_name}-net\")"
+ type = string
+ default = null
+}
+
+variable "region" {
+ description = "The default region for Cloud resources"
+ type = string
+}
+
+variable "deployment_name" {
+ description = "The name of the current deployment"
+ type = string
+}
+
+variable "mtu" {
+ type = number
+ description = "The network MTU (default: 8896). Recommended values: 0 (use Compute Engine default), 1460 (default outside HPC environments), 1500 (Internet default), or 8896 (for Jumbo packets). Allowed are all values in the range 1300 to 8896, inclusively."
+ default = 8896
+}
+
+variable "subnetworks_template" {
+ description = <<-EOT
+ Specifications for the subnetworks that will be created within this VPC.
+
+ count (number, required, number of subnets to create, default is 8)
+ name_prefix (string, required, subnet name prefix, default is deployment name)
+ ip_range (string, required, range of IPs for all subnets to share (CIDR format), default is 192.168.0.0/16)
+ region (string, optional, region to deploy subnets to, defaults to vars.region)
+ EOT
+ nullable = false
+ type = object({
+ count = number
+ name_prefix = string
+ ip_range = string
+ region = optional(string)
+ })
+ default = {
+ count = 8
+ name_prefix = null
+ ip_range = "192.168.0.0/16"
+ region = null
+ }
+
+ validation {
+ condition = var.subnetworks_template.count > 0
+ error_message = "Number of subnetworks must be greater than 0"
+ }
+
+ validation {
+ condition = can(cidrhost(var.subnetworks_template.ip_range, 0))
+ error_message = "IP address range must be in CIDR format."
+ }
+}
+
+variable "network_routing_mode" {
+ type = string
+ default = "REGIONAL"
+ description = "The network routing mode (default \"REGIONAL\")"
+
+ validation {
+ condition = contains(["GLOBAL", "REGIONAL"], var.network_routing_mode)
+ error_message = "The network routing mode must either be \"GLOBAL\" or \"REGIONAL\"."
+ }
+}
+
+variable "network_description" {
+ type = string
+ description = "An optional description of this resource (changes will trigger resource destroy/create)"
+ default = ""
+}
+
+variable "shared_vpc_host" {
+ type = bool
+ description = "Makes this project a Shared VPC host if 'true' (default 'false')"
+ default = false
+}
+
+variable "delete_default_internet_gateway_routes" {
+ type = bool
+ description = "If set, ensure that all routes within the network specified whose names begin with 'default-route' and with a next hop of 'default-internet-gateway' are deleted"
+ default = false
+}
+
+variable "enable_internal_traffic" {
+ type = bool
+ description = "Enable a firewall rule to allow all internal TCP, UDP, and ICMP traffic within the network"
+ default = true
+}
+
+variable "firewall_rules" {
+ type = any
+ description = "List of firewall rules"
+ default = []
+}
+
+variable "firewall_log_config" {
+ type = string
+ description = "Firewall log configuration for Toolkit firewall rules (var.enable_iap_ssh_ingress and others)"
+ default = "DISABLE_LOGGING"
+ nullable = false
+
+ validation {
+ condition = contains([
+ "INCLUDE_ALL_METADATA",
+ "EXCLUDE_ALL_METADATA",
+ "DISABLE_LOGGING",
+ ], var.firewall_log_config)
+ error_message = "var.firewall_log_config must be set to \"DISABLE_LOGGING\", or enable logging with \"INCLUDE_ALL_METADATA\" or \"EXCLUDE_ALL_METADATA\""
+ }
+}
+
+variable "network_profile" {
+ description = <<-EOT
+ A full or partial URL of the network profile to apply to this network.
+ This field can be set only at resource creation time. For example, the
+ following are valid URLs:
+ - https://www.googleapis.com/compute/beta/projects/{projectId}/global/networkProfiles/{network_profile_name}
+ - projects/{projectId}/global/networkProfiles/{network_profile_name}
+ EOT
+ type = string
+ nullable = false
+
+ validation {
+ condition = can(coalesce(var.network_profile))
+ error_message = "var.network_profile must be specified and not an empty string"
+ }
+}
+
+variable "nic_type" {
+ description = "NIC type for use in modules that use the output"
+ type = string
+ nullable = true
+ default = "MRDMA"
+
+ validation {
+ condition = contains(["MRDMA"], var.nic_type)
+ error_message = "The nic_type must be \"MRDMA\"."
+ }
+}
diff --git a/modules/network/gpu-rdma-vpc/versions.tf b/modules/network/gpu-rdma-vpc/versions.tf
new file mode 100644
index 0000000000..71b7106734
--- /dev/null
+++ b/modules/network/gpu-rdma-vpc/versions.tf
@@ -0,0 +1,19 @@
+/**
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+terraform {
+ required_version = ">= 0.15.0"
+}
diff --git a/modules/network/multivpc/README.md b/modules/network/multivpc/README.md
index 605a6a3603..9f65c2b8ab 100644
--- a/modules/network/multivpc/README.md
+++ b/modules/network/multivpc/README.md
@@ -116,6 +116,7 @@ limitations under the License.
| [network\_description](#input\_network\_description) | An optional description of this resource (changes will trigger resource destroy/create) | `string` | `""` | no |
| [network\_interface\_defaults](#input\_network\_interface\_defaults) | The template of the network settings to be used on all vpcs. | <pre>object({<br>    network            = optional(string)<br>    subnetwork         = optional(string)<br>    subnetwork_project = optional(string)<br>    network_ip         = optional(string, "")<br>    nic_type           = optional(string, "GVNIC")<br>    stack_type         = optional(string, "IPV4_ONLY")<br>    queue_count        = optional(string)<br>    access_config = optional(list(object({<br>      nat_ip                 = string<br>      network_tier           = string<br>      public_ptr_domain_name = string<br>    })), [])<br>    ipv6_access_config = optional(list(object({<br>      network_tier           = string<br>      public_ptr_domain_name = string<br>    })), [])<br>    alias_ip_range = optional(list(object({<br>      ip_cidr_range         = string<br>      subnetwork_range_name = string<br>    })), [])<br>  })</pre> | <pre>{<br>  "access_config": [],<br>  "alias_ip_range": [],<br>  "ipv6_access_config": [],<br>  "network": null,<br>  "network_ip": "",<br>  "nic_type": "GVNIC",<br>  "queue_count": null,<br>  "stack_type": "IPV4_ONLY",<br>  "subnetwork": null,<br>  "subnetwork_project": null<br>}</pre> | no |
| [network\_name\_prefix](#input\_network\_name\_prefix) | The base name of the vpcs and their subnets, will be appended with a sequence number | `string` | `""` | no |
+| [network\_profile](#input\_network\_profile) | A full or partial URL of the network profile to apply to this network.<br>This field can be set only at resource creation time. For example, the<br>following are valid URLs:<br>- https://www.googleapis.com/compute/beta/projects/{projectId}/global/networkProfiles/{network_profile_name}<br>- projects/{projectId}/global/networkProfiles/{network\_profile\_name} | `string` | `null` | no |
| [network\_routing\_mode](#input\_network\_routing\_mode) | The network dynamic routing mode | `string` | `"REGIONAL"` | no |
| [project\_id](#input\_project\_id) | Project in which the HPC deployment will be created | `string` | n/a | yes |
| [region](#input\_region) | The default region for Cloud resources | `string` | n/a | yes |
diff --git a/modules/network/multivpc/main.tf b/modules/network/multivpc/main.tf
index 3b04195f8a..ad06e793c1 100644
--- a/modules/network/multivpc/main.tf
+++ b/modules/network/multivpc/main.tf
@@ -74,4 +74,5 @@ module "vpcs" {
mtu = var.mtu
network_description = var.network_description
network_routing_mode = var.network_routing_mode
+ network_profile = var.network_profile
}
diff --git a/modules/network/multivpc/variables.tf b/modules/network/multivpc/variables.tf
index 84d301e954..f51bbde58c 100644
--- a/modules/network/multivpc/variables.tf
+++ b/modules/network/multivpc/variables.tf
@@ -186,3 +186,15 @@ variable "network_interface_defaults" {
alias_ip_range = []
}
}
+
+variable "network_profile" {
+ type = string
+ description = <<-EOT
+ A full or partial URL of the network profile to apply to this network.
+ This field can be set only at resource creation time. For example, the
+ following are valid URLs:
+ - https://www.googleapis.com/compute/beta/projects/{projectId}/global/networkProfiles/{network_profile_name}
+ - projects/{projectId}/global/networkProfiles/{network_profile_name}
+ EOT
+ default = null
+}
diff --git a/modules/network/pre-existing-subnetwork/versions.tf b/modules/network/pre-existing-subnetwork/versions.tf
index f5693ac098..bed38076f2 100644
--- a/modules/network/pre-existing-subnetwork/versions.tf
+++ b/modules/network/pre-existing-subnetwork/versions.tf
@@ -22,7 +22,7 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:pre-existing-subnetwork/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:pre-existing-subnetwork/v1.44.0"
}
required_version = ">= 0.14.0"
diff --git a/modules/network/pre-existing-vpc/versions.tf b/modules/network/pre-existing-vpc/versions.tf
index 6825dec0b0..00e8dcf8bc 100644
--- a/modules/network/pre-existing-vpc/versions.tf
+++ b/modules/network/pre-existing-vpc/versions.tf
@@ -22,7 +22,7 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:pre-existing-vpc/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:pre-existing-vpc/v1.44.0"
}
required_version = ">= 0.14.0"
diff --git a/modules/network/vpc/README.md b/modules/network/vpc/README.md
index 97e28d548a..ff0fd46c95 100644
--- a/modules/network/vpc/README.md
+++ b/modules/network/vpc/README.md
@@ -165,7 +165,9 @@ limitations under the License.
## Providers
-No providers.
+| Name | Version |
+|------|---------|
+| [terraform](#provider\_terraform) | n/a |
## Modules
@@ -173,11 +175,13 @@ No providers.
|------|--------|---------|
| [cloud\_router](#module\_cloud\_router) | terraform-google-modules/cloud-router/google | ~> 6.0 |
| [nat\_ip\_addresses](#module\_nat\_ip\_addresses) | terraform-google-modules/address/google | ~> 4.1 |
-| [vpc](#module\_vpc) | terraform-google-modules/network/google | ~> 9.0 |
+| [vpc](#module\_vpc) | terraform-google-modules/network/google | ~> 10.0 |
## Resources
-No resources.
+| Name | Type |
+|------|------|
+| [terraform_data.secondary_ranges_validation](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/resources/data) | resource |
## Inputs
@@ -201,11 +205,13 @@ No resources.
| [network\_address\_range](#input\_network\_address\_range) | IP address range (CIDR) for global network | `string` | `"10.0.0.0/9"` | no |
| [network\_description](#input\_network\_description) | An optional description of this resource (changes will trigger resource destroy/create) | `string` | `""` | no |
| [network\_name](#input\_network\_name) | The name of the network to be created (if unsupplied, will default to "{deployment\_name}-net") | `string` | `null` | no |
+| [network\_profile](#input\_network\_profile) | A full or partial URL of the network profile to apply to this network.<br>This field can be set only at resource creation time. For example, the<br>following are valid URLs:<br>- https://www.googleapis.com/compute/beta/projects/{projectId}/global/networkProfiles/{network_profile_name}<br>- projects/{projectId}/global/networkProfiles/{network\_profile\_name} | `string` | `null` | no |
| [network\_routing\_mode](#input\_network\_routing\_mode) | The network routing mode (default "GLOBAL") | `string` | `"GLOBAL"` | no |
| [primary\_subnetwork](#input\_primary\_subnetwork) | DEPRECATED: please see https://goo.gle/hpc-toolkit-vpc-deprecation for migration instructions | `map(string)` | `null` | no |
| [project\_id](#input\_project\_id) | Project in which the HPC deployment will be created | `string` | n/a | yes |
| [region](#input\_region) | The default region for Cloud resources | `string` | n/a | yes |
-| [secondary\_ranges](#input\_secondary\_ranges) | Secondary ranges that will be used in some of the subnets. Please see https://goo.gle/hpc-toolkit-vpc-deprecation for migration instructions. | `map(list(object({ range_name = string, ip_cidr_range = string })))` | `{}` | no |
+| [secondary\_ranges](#input\_secondary\_ranges) | Secondary ranges associated with the subnets.<br>This will be deprecated in favour of secondary\_ranges\_list at a later date.<br>Please migrate to using the same. | `map(list(object({ range_name = string, ip_cidr_range = string })))` | `{}` | no |
+| [secondary\_ranges\_list](#input\_secondary\_ranges\_list) | List of secondary ranges associated with the subnets. | <pre>list(object({<br>    subnetwork_name = string,<br>    ranges = list(object({<br>      range_name    = string,<br>      ip_cidr_range = string<br>    }))<br>  }))</pre> | `[]` | no |
| [shared\_vpc\_host](#input\_shared\_vpc\_host) | Makes this project a Shared VPC host if 'true' (default 'false') | `bool` | `false` | no |
| [subnetwork\_name](#input\_subnetwork\_name) | The name of the network to be created (if unsupplied, will default to "{deployment\_name}-primary-subnet") | `string` | `null` | no |
| [subnetwork\_size](#input\_subnetwork\_size) | DEPRECATED: please see https://goo.gle/hpc-toolkit-vpc-deprecation for migration instructions | `number` | `null` | no |
diff --git a/modules/network/vpc/main.tf b/modules/network/vpc/main.tf
index 3c1ceff0d2..3ad533a957 100644
--- a/modules/network/vpc/main.tf
+++ b/modules/network/vpc/main.tf
@@ -152,23 +152,29 @@ locals {
var.enable_internal_traffic ? [local.allow_internal_traffic] : [],
length(local.iap_ports) > 0 ? [local.allow_iap_ingress] : []
)
+
+ secondary_ranges_map = {
+ for secondary_range in var.secondary_ranges_list :
+ secondary_range.subnetwork_name => secondary_range.ranges
+ }
}
module "vpc" {
source = "terraform-google-modules/network/google"
- version = "~> 9.0"
+ version = "~> 10.0"
network_name = local.network_name
project_id = var.project_id
auto_create_subnetworks = false
subnets = local.subnetworks
- secondary_ranges = var.secondary_ranges
+ secondary_ranges = length(local.secondary_ranges_map) > 0 ? local.secondary_ranges_map : var.secondary_ranges
routing_mode = var.network_routing_mode
mtu = var.mtu
description = var.network_description
shared_vpc_host = var.shared_vpc_host
delete_default_internet_gateway_routes = var.delete_default_internet_gateway_routes
firewall_rules = local.firewall_rules
+ network_profile = var.network_profile
}
# This use of the module may appear odd when var.ips_per_nat = 0. The module
diff --git a/modules/network/vpc/variables.tf b/modules/network/vpc/variables.tf
index 12495b6770..f4e7321784 100644
--- a/modules/network/vpc/variables.tf
+++ b/modules/network/vpc/variables.tf
@@ -149,10 +149,26 @@ variable "additional_subnetworks" {
variable "secondary_ranges" {
type = map(list(object({ range_name = string, ip_cidr_range = string })))
- description = "Secondary ranges that will be used in some of the subnets. Please see https://goo.gle/hpc-toolkit-vpc-deprecation for migration instructions."
+ description = <<-EOT
+ "Secondary ranges associated with the subnets.
+ This will be deprecated in favour of secondary_ranges_list at a later date.
+ Please migrate to using the same."
+ EOT
default = {}
}
+variable "secondary_ranges_list" {
+ type = list(object({
+ subnetwork_name = string,
+ ranges = list(object({
+ range_name = string,
+ ip_cidr_range = string
+ }))
+ }))
+ description = "List of secondary ranges associated with the subnets."
+ default = []
+}
+
variable "network_routing_mode" {
type = string
default = "GLOBAL"
@@ -250,3 +266,24 @@ variable "firewall_log_config" {
error_message = "var.firewall_log_config must be set to \"DISABLE_LOGGING\", or enable logging with \"INCLUDE_ALL_METADATA\" or \"EXCLUDE_ALL_METADATA\""
}
}
+
+resource "terraform_data" "secondary_ranges_validation" {
+ lifecycle {
+ precondition {
+ condition = length(var.secondary_ranges) == 0 || length(var.secondary_ranges_list) == 0
+ error_message = "Only one of var.secondary_ranges or var.secondary_ranges_list should be specified"
+ }
+ }
+}
+
+variable "network_profile" {
+ type = string
+ description = <<-EOT
+ A full or partial URL of the network profile to apply to this network.
+ This field can be set only at resource creation time. For example, the
+ following are valid URLs:
+ - https://www.googleapis.com/compute/beta/projects/{projectId}/global/networkProfiles/{network_profile_name}
+ - projects/{projectId}/global/networkProfiles/{network_profile_name}
+ EOT
+ default = null
+}
diff --git a/modules/scheduler/batch-job-template/README.md b/modules/scheduler/batch-job-template/README.md
index d4068a93c3..4dc73e8309 100644
--- a/modules/scheduler/batch-job-template/README.md
+++ b/modules/scheduler/batch-job-template/README.md
@@ -141,7 +141,7 @@ limitations under the License.
| Name | Source | Version |
|------|--------|---------|
| [instance\_template](#module\_instance\_template) | terraform-google-modules/vm/google//modules/instance_template | ~> 12.1 |
-| [netstorage\_startup\_script](#module\_netstorage\_startup\_script) | github.com/GoogleCloudPlatform/hpc-toolkit//modules/scripts/startup-script | v1.39.0 |
+| [netstorage\_startup\_script](#module\_netstorage\_startup\_script) | ../../scripts/startup-script | n/a |
## Resources
diff --git a/modules/scheduler/batch-job-template/startup_from_network_storage.tf b/modules/scheduler/batch-job-template/startup_from_network_storage.tf
index 070b0b8c33..02bc58e4f7 100644
--- a/modules/scheduler/batch-job-template/startup_from_network_storage.tf
+++ b/modules/scheduler/batch-job-template/startup_from_network_storage.tf
@@ -55,7 +55,7 @@ locals {
}
module "netstorage_startup_script" {
- source = "github.com/GoogleCloudPlatform/hpc-toolkit//modules/scripts/startup-script?ref=v1.39.0"
+ source = "../../scripts/startup-script"
labels = local.labels
project_id = var.project_id
diff --git a/modules/scheduler/batch-login-node/versions.tf b/modules/scheduler/batch-login-node/versions.tf
index 05f674c89d..16c1b02494 100644
--- a/modules/scheduler/batch-login-node/versions.tf
+++ b/modules/scheduler/batch-login-node/versions.tf
@@ -22,7 +22,7 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:batch-login-node/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:batch-login-node/v1.44.0"
}
required_version = ">= 0.14.0"
diff --git a/modules/scheduler/gke-cluster/README.md b/modules/scheduler/gke-cluster/README.md
index 93140e9191..74f1ac0ba3 100644
--- a/modules/scheduler/gke-cluster/README.md
+++ b/modules/scheduler/gke-cluster/README.md
@@ -110,7 +110,6 @@ limitations under the License.
| [google](#requirement\_google) | > 5.0 |
| [google-beta](#requirement\_google-beta) | > 5.0 |
| [kubernetes](#requirement\_kubernetes) | ~> 2.23 |
-| [null](#requirement\_null) | ~> 3.0 |
## Providers
@@ -118,7 +117,6 @@ limitations under the License.
|------|---------|
| [google](#provider\_google) | > 5.0 |
| [google-beta](#provider\_google-beta) | > 5.0 |
-| [null](#provider\_null) | ~> 3.0 |
## Modules
@@ -133,7 +131,6 @@ limitations under the License.
|------|------|
| [google-beta_google_container_cluster.gke_cluster](https://registry.terraform.io/providers/hashicorp/google-beta/latest/docs/resources/google_container_cluster) | resource |
| [google-beta_google_container_node_pool.system_node_pools](https://registry.terraform.io/providers/hashicorp/google-beta/latest/docs/resources/google_container_node_pool) | resource |
-| [null_resource.enable_parallelstore_csi](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [google_client_config.default](https://registry.terraform.io/providers/hashicorp/google/latest/docs/data-sources/client_config) | data source |
| [google_project.project](https://registry.terraform.io/providers/hashicorp/google/latest/docs/data-sources/project) | data source |
diff --git a/modules/scheduler/gke-cluster/main.tf b/modules/scheduler/gke-cluster/main.tf
index 6a5bcfb786..48a225d5e8 100644
--- a/modules/scheduler/gke-cluster/main.tf
+++ b/modules/scheduler/gke-cluster/main.tf
@@ -185,6 +185,9 @@ resource "google_container_cluster" "gke_cluster" {
dns_cache_config {
enabled = var.enable_node_local_dns_cache
}
+ parallelstore_csi_driver_config {
+ enabled = var.enable_parallelstore_csi
+ }
}
timeouts {
@@ -305,17 +308,6 @@ resource "google_container_node_pool" "system_node_pools" {
}
}
-### TODO: remove this after Terraform support for GKE Parallelstore CSI is added. ###
-### Instead use addons_config above to enable the CSI ###
-resource "null_resource" "enable_parallelstore_csi" {
- count = var.enable_parallelstore_csi == true ? 1 : 0
-
- provisioner "local-exec" {
- command = "gcloud container clusters update ${local.name} --location=${var.region} --project=${var.project_id} --update-addons=ParallelstoreCsiDriver=ENABLED"
- }
- depends_on = [google_container_node_pool.system_node_pools] # avoid cluster operation conflict
-}
-
data "google_client_config" "default" {}
provider "kubernetes" {
diff --git a/modules/scheduler/gke-cluster/versions.tf b/modules/scheduler/gke-cluster/versions.tf
index 28bf526fec..1f327efbe8 100644
--- a/modules/scheduler/gke-cluster/versions.tf
+++ b/modules/scheduler/gke-cluster/versions.tf
@@ -28,12 +28,8 @@ terraform {
source = "hashicorp/kubernetes"
version = "~> 2.23"
}
- null = {
- source = "hashicorp/null"
- version = "~> 3.0"
- }
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:gke-cluster/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:gke-cluster/v1.44.0"
}
}
diff --git a/modules/scheduler/pre-existing-gke-cluster/versions.tf b/modules/scheduler/pre-existing-gke-cluster/versions.tf
index b0808d9acf..e1a9453e83 100644
--- a/modules/scheduler/pre-existing-gke-cluster/versions.tf
+++ b/modules/scheduler/pre-existing-gke-cluster/versions.tf
@@ -23,7 +23,7 @@ terraform {
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:pre-existing-gke-cluster/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:pre-existing-gke-cluster/v1.44.0"
}
required_version = ">= 1.3"
diff --git a/modules/scripts/startup-script/versions.tf b/modules/scripts/startup-script/versions.tf
index b2bdb0e875..90b1dd202c 100644
--- a/modules/scripts/startup-script/versions.tf
+++ b/modules/scripts/startup-script/versions.tf
@@ -30,7 +30,7 @@ terraform {
}
}
provider_meta "google" {
- module_name = "blueprints/terraform/hpc-toolkit:startup-script/v1.43.0"
+ module_name = "blueprints/terraform/hpc-toolkit:startup-script/v1.44.0"
}
required_version = ">= 1.3"
diff --git a/pkg/config/expand.go b/pkg/config/expand.go
index 9bad4dd2d1..ae5c30a328 100644
--- a/pkg/config/expand.go
+++ b/pkg/config/expand.go
@@ -199,11 +199,11 @@ func getDefaultGoogleProviders(bp Blueprint) map[string]TerraformProvider {
return map[string]TerraformProvider{
"google": {
Source: "hashicorp/google",
- Version: "~> 6.10.0",
+ Version: "~> 6.13.0",
Configuration: gglConf},
"google-beta": {
Source: "hashicorp/google-beta",
- Version: "~> 6.10.0",
+ Version: "~> 6.13.0",
Configuration: gglConf}}
}
diff --git a/pkg/config/expand_test.go b/pkg/config/expand_test.go
index ad00218133..e1ad008407 100644
--- a/pkg/config/expand_test.go
+++ b/pkg/config/expand_test.go
@@ -93,10 +93,10 @@ func (s *zeroSuite) TestExpandProviders(c *C) {
c.Check(g.TerraformProviders, DeepEquals, map[string]PR{
"google": TerraformProvider{
Source: "hashicorp/google",
- Version: "~> 6.10.0"},
+ Version: "~> 6.13.0"},
"google-beta": TerraformProvider{
Source: "hashicorp/google-beta",
- Version: "~> 6.10.0"}})
+ Version: "~> 6.13.0"}})
}
{ // no def PR, group PR
diff --git a/pkg/inspect/modules_test.go b/pkg/inspect/modules_test.go
index 03e9d570b7..1fdc2bd4fc 100644
--- a/pkg/inspect/modules_test.go
+++ b/pkg/inspect/modules_test.go
@@ -20,6 +20,7 @@ import (
"hpc-toolkit/pkg/modulereader"
"log"
"path/filepath"
+ "strings"
"testing"
"github.com/hashicorp/hcl/v2/ext/typeexpr"
@@ -61,6 +62,11 @@ func getModules() []modInfo {
}
allMods = []modInfo{}
for _, sk := range sks {
+ if strings.Contains(sk.Source, "/internal/") {
+ continue // skip internal modules
+ // TODO: remove skipping internal modules
+ }
+
info, err := modulereader.GetModuleInfo(modPath(sk.Source), sk.Kind)
if err != nil {
log.Fatal(err)
diff --git a/tools/cloud-build/daily-tests/ansible_playbooks/test-validation/test-gke-kueue.yml b/tools/cloud-build/daily-tests/ansible_playbooks/test-validation/test-gke-kueue.yml
new file mode 100644
index 0000000000..39aac2dfd5
--- /dev/null
+++ b/tools/cloud-build/daily-tests/ansible_playbooks/test-validation/test-gke-kueue.yml
@@ -0,0 +1,125 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+- name: Assert variables are defined
+ ansible.builtin.assert:
+ that:
+ - region is defined
+ - custom_vars.project is defined
+
+- name: Get cluster credentials for kubectl
+ delegate_to: localhost
+ ansible.builtin.command: gcloud container clusters get-credentials {{ deployment_name }} --region {{ region }} --project {{ custom_vars.project }}
+
+- name: Create the topology kueue
+ delegate_to: localhost
+ ansible.builtin.shell: |
+ array=({{ workspace }}/tools/cloud-build/daily-tests/blueprints/kueue-config-files/tas-queues.yaml)
+ kubectl create -f ${array[0]}
+ echo ${array[0]}
+ args:
+ executable: /bin/bash
+ changed_when: False
+
+- name: Create the host topology kueue job
+ delegate_to: localhost
+ ansible.builtin.shell: |
+ array=({{ workspace }}/tools/cloud-build/daily-tests/blueprints/kueue-config-files/host-topology-tas-small-job.yaml)
+ kubectl create -f ${array[0]}
+ echo ${array[0]}
+ args:
+ executable: /bin/bash
+ changed_when: False
+
+- name: Ensure all pods are on the same host
+ delegate_to: localhost
+ ansible.builtin.shell: |
+ kubectl get pods \
+ -o custom-columns="Name:.metadata.name,Host:.spec.nodeSelector.cloud\.google\.com/gce-topology-host" | \
+ sort -k2 | uniq -f 1 | wc -l
+ register: unique_host_count
+ until: unique_host_count.stdout | int == 2
+ retries: 10
+ delay: 10
+
+- name: Delete the host topology kueue job
+ delegate_to: localhost
+ ansible.builtin.shell: |
+ kubectl delete --all jobs
+ args:
+ executable: /bin/bash
+ changed_when: False
+
+- name: Create the rack topology kueue job
+ delegate_to: localhost
+ ansible.builtin.shell: |
+ array=({{ workspace }}/tools/cloud-build/daily-tests/blueprints/kueue-config-files/rack-topology-tas-small-job.yaml)
+ kubectl create -f ${array[0]}
+ echo ${array[0]}
+ args:
+ executable: /bin/bash
+ changed_when: False
+
+- name: Ensure all pods are on the same rack
+ delegate_to: localhost
+ ansible.builtin.shell: |
+ kubectl get pods \
+ -o custom-columns="Name:.metadata.name,Host:.spec.nodeSelector.cloud\.google\.com/gce-topology-subblock" | \
+ sort -k2 | uniq -f 1 | wc -l
+ register: unique_host_count
+ until: unique_host_count.stdout | int == 2
+ retries: 10
+ delay: 10
+
+- name: Delete the rack topology kueue job
+ delegate_to: localhost
+ ansible.builtin.shell: |
+ kubectl delete --all jobs
+ args:
+ executable: /bin/bash
+ changed_when: False
+
+- name: Create the block topology kueue job
+ delegate_to: localhost
+ ansible.builtin.shell: |
+ array=({{ workspace }}/tools/cloud-build/daily-tests/blueprints/kueue-config-files/block-topology-tas-small-job.yaml)
+ kubectl create -f ${array[0]}
+ echo ${array[0]}
+ args:
+ executable: /bin/bash
+ changed_when: False
+
+- name: Ensure all pods are on the same block
+ delegate_to: localhost
+ ansible.builtin.shell: |
+ kubectl get pods \
+ -o custom-columns="Name:.metadata.name,Host:.spec.nodeSelector.cloud\.google\.com/gce-topology-block" | \
+ sort -k2 | uniq -f 1 | wc -l
+ register: unique_host_count
+ until: unique_host_count.stdout | int == 2
+ retries: 10
+ delay: 10
+
+- name: Wait for job to complete
+ delegate_to: localhost
+ ansible.builtin.command: |
+ kubectl get job --field-selector status.successful=2
+ register: job_completion
+ until: job_completion.stdout_lines | length > 1
+ retries: 10
+ delay: 5
+
+- name: Print job_completion debug output
+ ansible.builtin.debug:
+ var: job_completion.stdout_lines
diff --git a/tools/cloud-build/daily-tests/ansible_playbooks/test-validation/test-gke-storage-parallelstore.yml b/tools/cloud-build/daily-tests/ansible_playbooks/test-validation/test-gke-managed-parallelstore.yml
similarity index 100%
rename from tools/cloud-build/daily-tests/ansible_playbooks/test-validation/test-gke-storage-parallelstore.yml
rename to tools/cloud-build/daily-tests/ansible_playbooks/test-validation/test-gke-managed-parallelstore.yml
diff --git a/tools/cloud-build/daily-tests/blueprints/gke-a2-highgpu.yaml b/tools/cloud-build/daily-tests/blueprints/gke-a2-highgpu.yaml
new file mode 100644
index 0000000000..b715448e65
--- /dev/null
+++ b/tools/cloud-build/daily-tests/blueprints/gke-a2-highgpu.yaml
@@ -0,0 +1,99 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+
+blueprint_name: gke-a2-highgpu
+
+vars:
+ project_id: hpc-toolkit-dev ## Set GCP Project ID Here ##
+ deployment_name: gke-a2-highgpu
+ region: us-central1
+ zone: us-central1-f
+
+ # Cidr block containing the IP of the machine calling terraform.
+ # The following line must be updated for this example to work.
+ authorized_cidr: 0.0.0.0/0
+
+deployment_groups:
+- group: primary
+ modules:
+ - id: network1
+ source: modules/network/vpc
+ settings:
+ subnetwork_name: $(vars.deployment_name)-subnet
+ mtu: 8244
+ secondary_ranges_list:
+ - subnetwork_name: $(vars.deployment_name)-subnet
+ ranges:
+ - range_name: pods
+ ip_cidr_range: 10.4.0.0/14
+ - range_name: services
+ ip_cidr_range: 10.0.32.0/20
+
+ - id: gke_service_account
+ source: community/modules/project/service-account
+ settings:
+ name: gke-sa
+ project_roles:
+ - logging.logWriter
+ - monitoring.metricWriter
+ - monitoring.viewer
+ - stackdriver.resourceMetadata.writer
+ - storage.objectViewer
+ - artifactregistry.reader
+
+ - id: gpunets
+ source: modules/network/multivpc
+ settings:
+ network_name_prefix: $(vars.deployment_name)-gpunet
+ global_ip_address_range: 192.169.0.0/16
+ network_count: 4
+ subnetwork_cidr_suffix: 24
+ mtu: 8244
+
+ - id: gke_cluster
+ source: modules/scheduler/gke-cluster
+ use: [network1, gpunets, gke_service_account]
+ settings:
+ enable_private_endpoint: false # Allows for access from authorized public IPs
+ release_channel: 'RAPID'
+ master_authorized_networks:
+ - cidr_block: $(vars.authorized_cidr) # Allows your machine run kubectl command. It's required for the multi-network setup.
+ display_name: "kubectl-access-network"
+ outputs: [instructions]
+
+ - id: a2_highgpu_pool
+ source: modules/compute/gke-node-pool
+ use: [gke_cluster, gpunets, gke_service_account]
+ settings:
+ auto_upgrade: true
+ machine_type: a2-highgpu-2g
+ static_node_count: 6
+ zones: [$(vars.zone)]
+ image_type: UBUNTU_CONTAINERD
+ placement_policy:
+ name: a2-highgpu-compact
+ type: "COMPACT"
+ outputs: [instructions]
+
+ - id: workload_component_install
+ source: modules/management/kubectl-apply
+ use: [gke_cluster]
+ settings:
+ kueue:
+ install: true
+ version: v0.9.0
+ jobset:
+ install: true
diff --git a/tools/cloud-build/daily-tests/blueprints/kueue-config-files/block-topology-tas-small-job.yaml b/tools/cloud-build/daily-tests/blueprints/kueue-config-files/block-topology-tas-small-job.yaml
new file mode 100644
index 0000000000..aefbc20679
--- /dev/null
+++ b/tools/cloud-build/daily-tests/blueprints/kueue-config-files/block-topology-tas-small-job.yaml
@@ -0,0 +1,43 @@
+# Copyright 2024 "Google LLC"
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: batch/v1
+kind: Job
+metadata:
+ generateName: tas-sample-small-preferred-block-
+ labels:
+ kueue.x-k8s.io/queue-name: tas-user-queue
+spec:
+ parallelism: 2
+ completions: 2
+ completionMode: Indexed
+ template:
+ metadata:
+ annotations:
+ kueue.x-k8s.io/podset-required-topology: "cloud.google.com/gce-topology-block"
+ spec:
+ tolerations:
+ - key: "nvidia.com/gpu"
+ operator: "Exists"
+ effect: NoSchedule
+ containers:
+ - name: dummy-job
+ image: gcr.io/k8s-staging-perf-tests/sleep:v0.1.0
+ args: ["10s"]
+ resources:
+ requests:
+ nvidia.com/gpu: 1
+ limits:
+ nvidia.com/gpu: 1
+ restartPolicy: Never
diff --git a/tools/cloud-build/daily-tests/blueprints/kueue-config-files/host-topology-tas-small-job.yaml b/tools/cloud-build/daily-tests/blueprints/kueue-config-files/host-topology-tas-small-job.yaml
new file mode 100644
index 0000000000..92e45a8e43
--- /dev/null
+++ b/tools/cloud-build/daily-tests/blueprints/kueue-config-files/host-topology-tas-small-job.yaml
@@ -0,0 +1,43 @@
+# Copyright 2024 "Google LLC"
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: batch/v1
+kind: Job
+metadata:
+ generateName: tas-sample-small-required-host-
+ labels:
+ kueue.x-k8s.io/queue-name: tas-user-queue
+spec:
+ parallelism: 2
+ completions: 2
+ completionMode: Indexed
+ template:
+ metadata:
+ annotations:
+ kueue.x-k8s.io/podset-required-topology: "cloud.google.com/gce-topology-host"
+ spec:
+ tolerations:
+ - key: "nvidia.com/gpu"
+ operator: "Exists"
+ effect: NoSchedule
+ containers:
+ - name: dummy-job
+ image: gcr.io/k8s-staging-perf-tests/sleep:v0.1.0
+ args: ["10s"]
+ resources:
+ requests:
+ nvidia.com/gpu: 1
+ limits:
+ nvidia.com/gpu: 1
+ restartPolicy: Never
diff --git a/tools/cloud-build/daily-tests/blueprints/kueue-config-files/kueue-configuration.yaml.tftpl b/tools/cloud-build/daily-tests/blueprints/kueue-config-files/kueue-configuration.yaml.tftpl
new file mode 100644
index 0000000000..ea2db87ace
--- /dev/null
+++ b/tools/cloud-build/daily-tests/blueprints/kueue-config-files/kueue-configuration.yaml.tftpl
@@ -0,0 +1,73 @@
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ResourceFlavor
+metadata:
+ name: gke-1xh100-mega-80gb-8
+spec:
+ nodeLabels:
+ cloud.google.com/gke-accelerator: system
+---
+
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ClusterQueue
+metadata:
+ name: cluster-queue
+spec:
+ preemption:
+ reclaimWithinCohort: Never # Don't preempt other queues in the cohort.
+ withinClusterQueue: LowerPriority
+ namespaceSelector: {} # match all.
+ resourceGroups:
+ - coveredResources: ["nvidia.com/gpu"]
+ flavors:
+ - name: gke-1xh100-mega-80gb-8
+ resources:
+ - name: "nvidia.com/gpu"
+ nominalQuota: ${num_chips}
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: LocalQueue
+metadata:
+ namespace: default
+ name: multislice-queue
+spec:
+ clusterQueue: cluster-queue # Point to the ClusterQueue
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+ name: very-low
+value: 100
+globalDefault: false
+description: "Very Low"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+ name: low
+value: 250
+globalDefault: false
+description: "Low"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+ name: medium
+value: 500
+globalDefault: false
+description: "Medium"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+ name: high
+value: 750
+globalDefault: false
+description: "High"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+ name: very-high
+value: 1000
+globalDefault: false
+description: "Very High"
diff --git a/tools/cloud-build/daily-tests/blueprints/kueue-config-files/rack-topology-tas-small-job.yaml b/tools/cloud-build/daily-tests/blueprints/kueue-config-files/rack-topology-tas-small-job.yaml
new file mode 100644
index 0000000000..039eeec136
--- /dev/null
+++ b/tools/cloud-build/daily-tests/blueprints/kueue-config-files/rack-topology-tas-small-job.yaml
@@ -0,0 +1,43 @@
+# Copyright 2024 "Google LLC"
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: batch/v1
+kind: Job
+metadata:
+ generateName: tas-sample-small-required-rack-
+ labels:
+ kueue.x-k8s.io/queue-name: tas-user-queue
+spec:
+ parallelism: 2
+ completions: 2
+ completionMode: Indexed
+ template:
+ metadata:
+ annotations:
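+        # Require all pods of this PodSet to land within a single gce-topology-subblock (rack-level) domain.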
+ kueue.x-k8s.io/podset-required-topology: "cloud.google.com/gce-topology-subblock"
+ spec:
+ tolerations:
+ - key: "nvidia.com/gpu"
+ operator: "Exists"
+ effect: NoSchedule
+ containers:
+ - name: dummy-job
+ image: gcr.io/k8s-staging-perf-tests/sleep:v0.1.0
+ args: ["10s"]
+ resources:
+ requests:
+ nvidia.com/gpu: 1
+ limits:
+ nvidia.com/gpu: 1
+ restartPolicy: Never
diff --git a/tools/cloud-build/daily-tests/blueprints/kueue-config-files/sample-kueue-job.yaml b/tools/cloud-build/daily-tests/blueprints/kueue-config-files/sample-kueue-job.yaml
new file mode 100644
index 0000000000..d84b9830c5
--- /dev/null
+++ b/tools/cloud-build/daily-tests/blueprints/kueue-config-files/sample-kueue-job.yaml
@@ -0,0 +1,47 @@
+# Copyright 2024 "Google LLC"
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: batch/v1
+kind: Job
+metadata:
+ namespace: default
+ generateName: sample-kueue-job
+ annotations:
+ kueue.x-k8s.io/queue-name: multislice-queue
+spec:
+ ttlSecondsAfterFinished: 90 # Job will be deleted after 90 seconds
+ parallelism: 3 # This Job will have 3 replicas running at the same time
+ completions: 3 # This Job requires 3 completions
+ suspend: true # Set to true to allow Kueue to control the Job when it starts
+ template:
+ spec:
+ tolerations:
+ - key: "components.gke.io/gke-managed-components"
+ operator: "Equal"
+ value: "true"
+ - key: "user-workload"
+ operator: "Equal"
+ value: "true"
+ effect: "NoSchedule"
+ - key: "nvidia.com/gpu"
+ operator: "Equal"
+ value: "present"
+ effect: "NoSchedule"
+ nodeSelector:
+ cloud.google.com/gke-accelerator: nvidia-h100-80gb # Specify the GPU hardware
+ containers:
+ - name: dummy-job
+ image: ubuntu
+ command: ["sh", "-c", "echo Hello world!"]
+ restartPolicy: Never
diff --git a/tools/cloud-build/daily-tests/blueprints/kueue-config-files/tas-queues.yaml b/tools/cloud-build/daily-tests/blueprints/kueue-config-files/tas-queues.yaml
new file mode 100644
index 0000000000..adaae65769
--- /dev/null
+++ b/tools/cloud-build/daily-tests/blueprints/kueue-config-files/tas-queues.yaml
@@ -0,0 +1,55 @@
+# Copyright 2024 "Google LLC"
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: kueue.x-k8s.io/v1alpha1
+kind: Topology
+metadata:
+ name: "gke-default"
+spec:
+ levels:
+ - nodeLabel: "cloud.google.com/gce-topology-block"
+ - nodeLabel: "cloud.google.com/gce-topology-subblock"
+ - nodeLabel: "cloud.google.com/gce-topology-host"
+ - nodeLabel: "kubernetes.io/hostname"
+---
+kind: ResourceFlavor
+apiVersion: kueue.x-k8s.io/v1beta1
+metadata:
+ name: "tas-flavor"
+spec:
+ nodeLabels:
+ cloud.google.com/gke-nodepool: "a2-highgpu-2g-a2highgpupool"
+ topologyName: "gke-default"
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ClusterQueue
+metadata:
+ name: "tas-cluster-queue"
+spec:
+ namespaceSelector: {} # match all.
+ resourceGroups:
+ - coveredResources: ["nvidia.com/gpu"]
+ flavors:
+ - name: "tas-flavor"
+ resources:
+ - name: "nvidia.com/gpu"
+ nominalQuota: 12 # 6 nodes, 2 GPU each
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: LocalQueue
+metadata:
+ namespace: "default"
+ name: "tas-user-queue"
+spec:
+ clusterQueue: "tas-cluster-queue"
diff --git a/tools/cloud-build/daily-tests/blueprints/ml-gke-e2e.yaml b/tools/cloud-build/daily-tests/blueprints/ml-gke-e2e.yaml
index a2cec04396..9183dcb50b 100644
--- a/tools/cloud-build/daily-tests/blueprints/ml-gke-e2e.yaml
+++ b/tools/cloud-build/daily-tests/blueprints/ml-gke-e2e.yaml
@@ -21,11 +21,9 @@ vars:
region: asia-southeast1
zones:
- asia-southeast1-b # g2 machine has better availability in this zone
-
# Cidr block containing the IP of the machine calling terraform.
# The following line must be updated for this example to work.
authorized_cidr: /32
-
gcp_public_cidrs_access_enabled: false
deployment_groups:
@@ -34,9 +32,10 @@ deployment_groups:
- id: network1
source: modules/network/vpc
settings:
- subnetwork_name: gke-subnet1
- secondary_ranges:
- gke-subnet1:
+ subnetwork_name: $(vars.deployment_name)-subnet
+ secondary_ranges_list:
+ - subnetwork_name: $(vars.deployment_name)-subnet
+ ranges:
- range_name: pods
ip_cidr_range: 10.4.0.0/14
- range_name: services
@@ -93,6 +92,7 @@ deployment_groups:
"value": "g2-latest-driver"
}
]
+ requested_gpu_per_pod: 1
outputs: [instructions]
- id: n1_pool_default
@@ -154,6 +154,7 @@ deployment_groups:
"value": "n1-pool-full-spec"
}
]
+ requested_gpu_per_pod: 1
outputs: [instructions]
- id: default_settings_pool
diff --git a/tools/cloud-build/daily-tests/builds/gke-a2-highgpu-kueue.yaml b/tools/cloud-build/daily-tests/builds/gke-a2-highgpu-kueue.yaml
new file mode 100644
index 0000000000..413e7926aa
--- /dev/null
+++ b/tools/cloud-build/daily-tests/builds/gke-a2-highgpu-kueue.yaml
@@ -0,0 +1,53 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+tags:
+- m.gke-cluster
+- m.gke-node-pool
+- m.service-account
+- m.vpc
+- m.multivpc
+- m.kubectl-apply
+- gke
+
+timeout: 14400s # 4hr
+steps:
+- id: gke-a2-highgpu-kueue-test
+ name: us-central1-docker.pkg.dev/$PROJECT_ID/hpc-toolkit-repo/test-runner
+ entrypoint: /bin/bash
+ env:
+ - "ANSIBLE_HOST_KEY_CHECKING=false"
+ - "ANSIBLE_CONFIG=/workspace/tools/cloud-build/ansible.cfg"
+ args:
+ - -c
+ - |
+ set -x -e
+ cd /workspace && make
+ BUILD_ID_FULL=$BUILD_ID
+ BUILD_ID_SHORT=$${BUILD_ID_FULL:0:6}
+ EXAMPLE_BP=tools/cloud-build/daily-tests/blueprints/gke-a2-highgpu.yaml
+
+ # Replacing the static subnet name to prevent collisions
+ sed -i "s/gke-subnet-a2-high/gke-subnet-a2-high-$${BUILD_ID_SHORT}/" $${EXAMPLE_BP}
+ echo ' - id: remote-node' >> $${EXAMPLE_BP}
+ echo ' source: modules/compute/vm-instance' >> $${EXAMPLE_BP}
+ echo ' use: [network1]' >> $${EXAMPLE_BP}
+ echo ' settings:' >> $${EXAMPLE_BP}
+ echo ' machine_type: e2-standard-2' >> $${EXAMPLE_BP}
+ echo ' name_prefix: remote-node' >> $${EXAMPLE_BP}
+ echo ' add_deployment_name_before_prefix: true' >> $${EXAMPLE_BP}
+ ansible-playbook tools/cloud-build/daily-tests/ansible_playbooks/base-integration-test.yml \
+ --user=sa_106486320838376751393 --extra-vars="project=${PROJECT_ID} build=$${BUILD_ID_SHORT}" \
+ --extra-vars="@tools/cloud-build/daily-tests/tests/gke-a2-highgpu-kueue.yml"
diff --git a/tools/cloud-build/daily-tests/builds/gke-storage-parallelstore.yaml b/tools/cloud-build/daily-tests/builds/gke-managed-parallelstore.yaml
similarity index 91%
rename from tools/cloud-build/daily-tests/builds/gke-storage-parallelstore.yaml
rename to tools/cloud-build/daily-tests/builds/gke-managed-parallelstore.yaml
index a51c8cebab..01010a0435 100644
--- a/tools/cloud-build/daily-tests/builds/gke-storage-parallelstore.yaml
+++ b/tools/cloud-build/daily-tests/builds/gke-managed-parallelstore.yaml
@@ -27,7 +27,7 @@ timeout: 14400s # 4hr
steps:
## Test GKE
-- id: gke-storage-parallelstore
+- id: gke-managed-parallelstore
name: us-central1-docker.pkg.dev/$PROJECT_ID/hpc-toolkit-repo/test-runner
entrypoint: /bin/bash
env:
@@ -40,7 +40,7 @@ steps:
cd /workspace && make
BUILD_ID_FULL=$BUILD_ID
BUILD_ID_SHORT=$${BUILD_ID_FULL:0:6}
- SG_EXAMPLE=examples/gke-storage-parallelstore.yaml
+ SG_EXAMPLE=examples/gke-managed-parallelstore.yaml
# adding vm to act as remote node
echo ' - id: remote-node' >> $${SG_EXAMPLE}
@@ -58,4 +58,4 @@ steps:
ansible-playbook tools/cloud-build/daily-tests/ansible_playbooks/base-integration-test.yml \
--user=sa_106486320838376751393 --extra-vars="project=${PROJECT_ID} build=$${BUILD_ID_SHORT}" \
- --extra-vars="@tools/cloud-build/daily-tests/tests/gke-storage-parallelstore.yml"
+ --extra-vars="@tools/cloud-build/daily-tests/tests/gke-managed-parallelstore.yml"
diff --git a/tools/cloud-build/daily-tests/builds/slurm-gcp-v6-reconfig-size.yaml b/tools/cloud-build/daily-tests/builds/slurm-gcp-v6-reconfig-size.yaml
new file mode 100644
index 0000000000..8d6e390ebe
--- /dev/null
+++ b/tools/cloud-build/daily-tests/builds/slurm-gcp-v6-reconfig-size.yaml
@@ -0,0 +1,34 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+tags:
+- m.pre-existing-vpc
+- m.schedmd-slurm-gcp-v6-controller
+- m.schedmd-slurm-gcp-v6-login
+- m.schedmd-slurm-gcp-v6-nodeset
+- m.schedmd-slurm-gcp-v6-partition
+- slurm6
+
+timeout: 14400s # 4hr
+steps:
+- id: slurm-reconfig-size
+ name: us-central1-docker.pkg.dev/$PROJECT_ID/hpc-toolkit-repo/test-runner
+ entrypoint: /bin/bash
+ args:
+ - -c
+ - |
+ set -x -e
+ cd /workspace && make
+ python3 tools/python-integration-tests/slurm_reconfig_size.py
diff --git a/tools/cloud-build/daily-tests/builds/slurm-gcp-v6-simple-job-completion.yaml b/tools/cloud-build/daily-tests/builds/slurm-gcp-v6-simple-job-completion.yaml
new file mode 100644
index 0000000000..7acd7bdc11
--- /dev/null
+++ b/tools/cloud-build/daily-tests/builds/slurm-gcp-v6-simple-job-completion.yaml
@@ -0,0 +1,34 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+tags:
+- m.pre-existing-vpc
+- m.schedmd-slurm-gcp-v6-controller
+- m.schedmd-slurm-gcp-v6-login
+- m.schedmd-slurm-gcp-v6-nodeset
+- m.schedmd-slurm-gcp-v6-partition
+- slurm6
+
+timeout: 14400s # 4hr
+steps:
+- id: slurm-simple-job-completion
+ name: us-central1-docker.pkg.dev/$PROJECT_ID/hpc-toolkit-repo/test-runner
+ entrypoint: /bin/bash
+ args:
+ - -c
+ - |
+ set -x -e
+ cd /workspace && make
+ python3 tools/python-integration-tests/slurm_simple_job_completion.py
diff --git a/tools/cloud-build/daily-tests/builds/slurm-gcp-v6-topology.yaml b/tools/cloud-build/daily-tests/builds/slurm-gcp-v6-topology.yaml
new file mode 100644
index 0000000000..51bfa17c71
--- /dev/null
+++ b/tools/cloud-build/daily-tests/builds/slurm-gcp-v6-topology.yaml
@@ -0,0 +1,34 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+tags:
+- m.pre-existing-vpc
+- m.schedmd-slurm-gcp-v6-controller
+- m.schedmd-slurm-gcp-v6-login
+- m.schedmd-slurm-gcp-v6-nodeset
+- m.schedmd-slurm-gcp-v6-partition
+- slurm6
+
+timeout: 14400s # 4hr
+steps:
+- id: slurm-topology
+ name: us-central1-docker.pkg.dev/$PROJECT_ID/hpc-toolkit-repo/test-runner
+ entrypoint: /bin/bash
+ args:
+ - -c
+ - |
+ set -x -e
+ cd /workspace && make
+ python3 tools/python-integration-tests/slurm_topology.py
diff --git a/tools/cloud-build/daily-tests/tests/gke-a2-highgpu-kueue.yml b/tools/cloud-build/daily-tests/tests/gke-a2-highgpu-kueue.yml
new file mode 100644
index 0000000000..0735f4f970
--- /dev/null
+++ b/tools/cloud-build/daily-tests/tests/gke-a2-highgpu-kueue.yml
@@ -0,0 +1,41 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+
+# region, zone must be defined
+# in build file with --extra-vars flag!
+test_name: gke-a2high-kueue
+deployment_name: gke-a2high-kueue-{{ build }}
+workspace: /workspace
+blueprint_yaml: "{{ workspace }}/tools/cloud-build/daily-tests/blueprints/gke-a2-highgpu.yaml"
+network: "gke-a2high-net-{{ build }}"
+region: us-central1
+zone: us-central1-f
+remote_node: "{{ deployment_name }}-remote-node-0"
+reservation_affinity:
+ consume_reservation_type: SPECIFIC_RESERVATION
+ specific_reservations:
+ - name: a2-reservation-0
+ project: "{{ project }}"
+cli_deployment_vars:
+ region: "{{ region }}"
+ zone: "{{ zone }}"
+ network_name: "{{ network }}"
+ reservation_affinity: "{{ reservation_affinity }}"
+ local_ssd_count_nvme_block: 2
+custom_vars:
+ project: "{{ project }}"
+post_deploy_tests:
+- test-validation/test-gke-kueue.yml
diff --git a/tools/cloud-build/daily-tests/tests/gke-a3-highgpu.yml b/tools/cloud-build/daily-tests/tests/gke-a3-highgpu.yml
index ea7e105141..dc88ac46d2 100644
--- a/tools/cloud-build/daily-tests/tests/gke-a3-highgpu.yml
+++ b/tools/cloud-build/daily-tests/tests/gke-a3-highgpu.yml
@@ -20,7 +20,7 @@ test_name: gke-a3high
deployment_name: gke-a3high-{{ build }}
workspace: /workspace
blueprint_yaml: "{{ workspace }}/examples/gke-a3-highgpu.yaml"
-network: "gke-a3high-net-{{ build }}"
+network: "{{ deployment_name }}-net"
region: us-west1
zone: us-west1-a
remote_node: "{{ deployment_name }}-remote-node-0"
diff --git a/tools/cloud-build/daily-tests/tests/gke-a3-megagpu.yml b/tools/cloud-build/daily-tests/tests/gke-a3-megagpu.yml
index f24facfe68..dec2cad59e 100644
--- a/tools/cloud-build/daily-tests/tests/gke-a3-megagpu.yml
+++ b/tools/cloud-build/daily-tests/tests/gke-a3-megagpu.yml
@@ -20,7 +20,7 @@ test_name: gke-a3mega
deployment_name: gke-a3mega-{{ build }}
workspace: /workspace
blueprint_yaml: "{{ workspace }}/examples/gke-a3-megagpu.yaml"
-network: "gke-a3mega-net-{{ build }}"
+network: "{{ deployment_name }}-net"
region: us-west4
zone: us-west4-a
remote_node: "{{ deployment_name }}-remote-node-0"
diff --git a/tools/cloud-build/daily-tests/tests/gke-storage-parallelstore.yml b/tools/cloud-build/daily-tests/tests/gke-managed-parallelstore.yml
similarity index 81%
rename from tools/cloud-build/daily-tests/tests/gke-storage-parallelstore.yml
rename to tools/cloud-build/daily-tests/tests/gke-managed-parallelstore.yml
index a6de4bf239..cd9e7f712b 100644
--- a/tools/cloud-build/daily-tests/tests/gke-storage-parallelstore.yml
+++ b/tools/cloud-build/daily-tests/tests/gke-managed-parallelstore.yml
@@ -12,16 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
-test_name: gke-storage-parallelstore
-deployment_name: gke-storage-parallelstore-{{ build }}
+test_name: gke-managed-parallelstore
+deployment_name: gke-managed-parallelstore-{{ build }}
zone: us-central1-a # for remote node
region: us-central1
workspace: /workspace
-blueprint_yaml: "{{ workspace }}/examples/gke-storage-parallelstore.yaml"
+blueprint_yaml: "{{ workspace }}/examples/gke-managed-parallelstore.yaml"
network: "{{ deployment_name }}-net"
remote_node: "{{ deployment_name }}-0"
post_deploy_tests:
-- test-validation/test-gke-storage-parallelstore.yml
+- test-validation/test-gke-managed-parallelstore.yml
custom_vars:
project: "{{ project }}"
cli_deployment_vars:
diff --git a/tools/cloud-build/daily-tests/tests/gke-storage.yml b/tools/cloud-build/daily-tests/tests/gke-storage.yml
index f2addf9432..3e37d04a9e 100644
--- a/tools/cloud-build/daily-tests/tests/gke-storage.yml
+++ b/tools/cloud-build/daily-tests/tests/gke-storage.yml
@@ -17,7 +17,7 @@ deployment_name: gke-storage-{{ build }}
zone: us-central1-a # for remote node
workspace: /workspace
blueprint_yaml: "{{ workspace }}/examples/storage-gke.yaml"
-network: "{{ test_name }}-net"
+network: "{{ deployment_name }}-net"
remote_node: "{{ deployment_name }}-0"
post_deploy_tests: []
cli_deployment_vars:
diff --git a/tools/cloud-build/daily-tests/tests/hcls-v5-legacy.yml b/tools/cloud-build/daily-tests/tests/hcls-v5-legacy.yml
index 2699508885..073e773d2c 100644
--- a/tools/cloud-build/daily-tests/tests/hcls-v5-legacy.yml
+++ b/tools/cloud-build/daily-tests/tests/hcls-v5-legacy.yml
@@ -22,7 +22,7 @@ slurm_cluster_name: "hcls{{ build[0:6] }}"
zone: europe-west1-c
workspace: /workspace
blueprint_yaml: "{{ workspace }}/docs/videos/healthcare-and-life-sciences/hcls-blueprint-v5-legacy.yaml"
-network: "{{ test_name }}-net"
+network: "{{ deployment_name }}-net"
login_node: "{{ slurm_cluster_name }}-login-*"
controller_node: "{{ slurm_cluster_name }}-controller"
cli_deployment_vars:
diff --git a/tools/cloud-build/daily-tests/tests/hpc-enterprise-slurm.yml b/tools/cloud-build/daily-tests/tests/hpc-enterprise-slurm.yml
index d8793672f7..26a2343334 100644
--- a/tools/cloud-build/daily-tests/tests/hpc-enterprise-slurm.yml
+++ b/tools/cloud-build/daily-tests/tests/hpc-enterprise-slurm.yml
@@ -26,7 +26,7 @@ cli_deployment_vars:
gpu_zones: "[europe-west4-a,europe-west4-b,europe-west4-c]"
workspace: /workspace
blueprint_yaml: "{{ workspace }}/examples/hpc-enterprise-slurm.yaml"
-network: "{{ test_name }}-net"
+network: "{{ deployment_name }}-net"
# Note: Pattern matching in gcloud only supports 1 wildcard.
login_node: "{{ slurm_cluster_name }}-slurm-login-*"
diff --git a/tools/cloud-build/daily-tests/tests/hpc-slurm-chromedesktop.yml b/tools/cloud-build/daily-tests/tests/hpc-slurm-chromedesktop.yml
index 59e7e20b5e..700a3a1807 100644
--- a/tools/cloud-build/daily-tests/tests/hpc-slurm-chromedesktop.yml
+++ b/tools/cloud-build/daily-tests/tests/hpc-slurm-chromedesktop.yml
@@ -26,7 +26,7 @@ cli_deployment_vars:
zone: "{{ zone }}"
workspace: /workspace
blueprint_yaml: "{{ workspace }}/community/examples/hpc-slurm-chromedesktop-v5-legacy.yaml"
-network: "{{ test_name }}-net"
+network: "{{ deployment_name }}-net"
# Note: Pattern matching in gcloud only supports 1 wildcard.
login_node: "{{ slurm_cluster_name }}-login-*"
controller_node: "{{ slurm_cluster_name }}-controller"
diff --git a/tools/cloud-build/daily-tests/tests/htc-slurm.yml b/tools/cloud-build/daily-tests/tests/htc-slurm.yml
index a34fe41c8b..819df638bf 100644
--- a/tools/cloud-build/daily-tests/tests/htc-slurm.yml
+++ b/tools/cloud-build/daily-tests/tests/htc-slurm.yml
@@ -15,7 +15,7 @@
---
test_name: htc-slurm-v6
-deployment_name: htcv6{{ build }}
+deployment_name: htc-v6-{{ build }}
slurm_cluster_name: "htcv6{{ build[0:5] }}"
zone: us-west4-c
@@ -26,7 +26,7 @@ cli_deployment_vars:
workspace: /workspace
blueprint_yaml: "{{ workspace }}/community/examples/htc-slurm.yaml"
-network: "{{ test_name }}-net"
+network: "{{ deployment_name }}-net"
# Note: Pattern matching in gcloud only supports 1 wildcard, a*-login-* won't work.
login_node: "{{ slurm_cluster_name }}-slurm-login-*"
controller_node: "{{ slurm_cluster_name }}-controller"
diff --git a/tools/cloud-build/daily-tests/tests/ml-a3-highgpu-slurm-image-legacy.yml b/tools/cloud-build/daily-tests/tests/ml-a3-highgpu-slurm-image-legacy.yml
index dfda8f955d..1c36c4f792 100644
--- a/tools/cloud-build/daily-tests/tests/ml-a3-highgpu-slurm-image-legacy.yml
+++ b/tools/cloud-build/daily-tests/tests/ml-a3-highgpu-slurm-image-legacy.yml
@@ -27,4 +27,4 @@ cli_deployment_vars:
region: us-west1
zone: us-west1-a
source_image_project_id: deeplearning-platform
- source_image: dlvm-tcpd-cu120-648491853-ubuntu-2004-py310
+ source_image: dlvm-tcpd-cu120-677850957-ubuntu-2004-py310
diff --git a/tools/cloud-build/daily-tests/tests/ml-a3-highgpu-slurm-image.yml b/tools/cloud-build/daily-tests/tests/ml-a3-highgpu-slurm-image.yml
index 4f55da7b40..fbce6a6029 100644
--- a/tools/cloud-build/daily-tests/tests/ml-a3-highgpu-slurm-image.yml
+++ b/tools/cloud-build/daily-tests/tests/ml-a3-highgpu-slurm-image.yml
@@ -27,4 +27,4 @@ cli_deployment_vars:
region: us-west1
zone: us-west1-a
source_image_project_id: deeplearning-platform
- source_image: dlvm-tcpd-cu120-648491853-ubuntu-2004-py310
+ source_image: dlvm-tcpd-cu120-677850957-ubuntu-2004-py310
diff --git a/tools/cloud-build/daily-tests/tests/ml-slurm.yml b/tools/cloud-build/daily-tests/tests/ml-slurm.yml
index 06fbdaa012..62ce6587b5 100644
--- a/tools/cloud-build/daily-tests/tests/ml-slurm.yml
+++ b/tools/cloud-build/daily-tests/tests/ml-slurm.yml
@@ -16,7 +16,7 @@
test_name: ml-slurm-v6
deployment_name: ml-slurm-v6-{{ build }}
-network: "{{ test_name }}-net"
+network: "{{ deployment_name }}-net"
workspace: /workspace
blueprint_yaml: "{{ workspace }}/examples/ml-slurm.yaml"
packer_group_name: packer
diff --git a/tools/cloud-build/daily-tests/tests/slurm-v5-debian.yml b/tools/cloud-build/daily-tests/tests/slurm-v5-debian.yml
index 002730c1ae..2a06c30571 100644
--- a/tools/cloud-build/daily-tests/tests/slurm-v5-debian.yml
+++ b/tools/cloud-build/daily-tests/tests/slurm-v5-debian.yml
@@ -29,7 +29,7 @@ cli_deployment_vars:
zone: us-west4-c
workspace: /workspace
blueprint_yaml: "{{ workspace }}/community/examples/hpc-slurm-ubuntu2004-v5-legacy.yaml"
-network: "{{ test_name }}-net"
+network: "{{ deployment_name }}-net"
# Note: Pattern matching in gcloud only supports 1 wildcard, centv5*-login-* won't work.
login_node: "{{ slurm_cluster_name }}-login-*"
controller_node: "{{ slurm_cluster_name }}-controller"
diff --git a/tools/cloud-build/daily-tests/tests/slurm-v5-hpc-centos7.yml b/tools/cloud-build/daily-tests/tests/slurm-v5-hpc-centos7.yml
index d40b21d566..52400b9e66 100644
--- a/tools/cloud-build/daily-tests/tests/slurm-v5-hpc-centos7.yml
+++ b/tools/cloud-build/daily-tests/tests/slurm-v5-hpc-centos7.yml
@@ -28,7 +28,7 @@ cli_deployment_vars:
zones: "[us-west4-a,us-west4-b,us-west4-c]"
workspace: /workspace
blueprint_yaml: "{{ workspace }}/examples/hpc-slurm-v5-legacy.yaml"
-network: "{{ test_name }}-net"
+network: "{{ deployment_name }}-net"
# Note: Pattern matching in gcloud only supports 1 wildcard, centv5*-login-* won't work.
login_node: "{{ slurm_cluster_name }}-login-*"
controller_node: "{{ slurm_cluster_name }}-controller"
diff --git a/tools/cloud-build/daily-tests/tests/slurm-v5-rocky8.yml b/tools/cloud-build/daily-tests/tests/slurm-v5-rocky8.yml
index 7120a56973..cb76a571b4 100644
--- a/tools/cloud-build/daily-tests/tests/slurm-v5-rocky8.yml
+++ b/tools/cloud-build/daily-tests/tests/slurm-v5-rocky8.yml
@@ -29,7 +29,7 @@ cli_deployment_vars:
zone: us-west4-c
workspace: /workspace
blueprint_yaml: "{{ workspace }}/community/examples/hpc-slurm-ubuntu2004-v5-legacy.yaml"
-network: "{{ test_name }}-net"
+network: "{{ deployment_name }}-net"
# Note: Pattern matching in gcloud only supports 1 wildcard, centv5*-login-* won't work.
login_node: "{{ slurm_cluster_name }}-login-*"
controller_node: "{{ slurm_cluster_name }}-controller"
diff --git a/tools/cloud-build/daily-tests/tests/slurm-v5-ubuntu.yml b/tools/cloud-build/daily-tests/tests/slurm-v5-ubuntu.yml
index b54b5ad21d..e104f5ede2 100644
--- a/tools/cloud-build/daily-tests/tests/slurm-v5-ubuntu.yml
+++ b/tools/cloud-build/daily-tests/tests/slurm-v5-ubuntu.yml
@@ -22,7 +22,7 @@ slurm_cluster_name: "ubunv5{{ build[0:4] }}"
zone: us-west4-c
workspace: /workspace
blueprint_yaml: "{{ workspace }}/community/examples/hpc-slurm-ubuntu2004-v5-legacy.yaml"
-network: "{{ test_name }}-net"
+network: "{{ deployment_name }}-net"
# Note: Pattern matching in gcloud only supports 1 wildcard, centv5*-login-* won't work.
login_node: "{{ slurm_cluster_name }}-login-*"
controller_node: "{{ slurm_cluster_name }}-controller"
diff --git a/tools/cloud-build/daily-tests/tests/slurm-v6-debian.yml b/tools/cloud-build/daily-tests/tests/slurm-v6-debian.yml
index 90338aabd9..4c34464538 100644
--- a/tools/cloud-build/daily-tests/tests/slurm-v6-debian.yml
+++ b/tools/cloud-build/daily-tests/tests/slurm-v6-debian.yml
@@ -29,7 +29,7 @@ cli_deployment_vars:
zone: us-west4-c
workspace: /workspace
blueprint_yaml: "{{ workspace }}/community/examples/hpc-slurm-ubuntu2004.yaml"
-network: "{{ test_name }}-net"
+network: "{{ deployment_name }}-net"
# Note: Pattern matching in gcloud only supports 1 wildcard, centv5*-login-* won't work.
login_node: "{{ slurm_cluster_name }}-slurm-login-*"
controller_node: "{{ slurm_cluster_name }}-controller"
diff --git a/tools/cloud-build/daily-tests/tests/slurm-v6-rocky8.yml b/tools/cloud-build/daily-tests/tests/slurm-v6-rocky8.yml
index 3a9e27d5b8..688deb86db 100644
--- a/tools/cloud-build/daily-tests/tests/slurm-v6-rocky8.yml
+++ b/tools/cloud-build/daily-tests/tests/slurm-v6-rocky8.yml
@@ -28,7 +28,7 @@ cli_deployment_vars:
zone: us-central1-a
workspace: /workspace
blueprint_yaml: "{{ workspace }}/examples/hpc-slurm.yaml"
-network: "{{ test_name }}-net"
+network: "{{ deployment_name }}-net"
# Note: Pattern matching in gcloud only supports 1 wildcard, a*-login-* won't work.
login_node: "{{ slurm_cluster_name }}-slurm-login-*"
controller_node: "{{ slurm_cluster_name }}-controller"
diff --git a/tools/cloud-build/daily-tests/tests/slurm-v6-ssd.yml b/tools/cloud-build/daily-tests/tests/slurm-v6-ssd.yml
index acf2ecb013..fdeea5a603 100644
--- a/tools/cloud-build/daily-tests/tests/slurm-v6-ssd.yml
+++ b/tools/cloud-build/daily-tests/tests/slurm-v6-ssd.yml
@@ -22,7 +22,7 @@ slurm_cluster_name: "ssdv6{{ build[0:5] }}"
zone: us-central1-a
workspace: /workspace
blueprint_yaml: "{{ workspace }}/community/examples/hpc-slurm-local-ssd.yaml"
-network: "{{ test_name }}-net"
+network: "{{ deployment_name }}-net"
# Note: Pattern matching in gcloud only supports 1 wildcard, centv5*-login-* won't work.
login_node: "{{ slurm_cluster_name }}-slurm-login-*"
controller_node: "{{ slurm_cluster_name }}-controller"
diff --git a/tools/cloud-build/daily-tests/tests/slurm-v6-ubuntu.yml b/tools/cloud-build/daily-tests/tests/slurm-v6-ubuntu.yml
index 904866b3a5..768efcfadf 100644
--- a/tools/cloud-build/daily-tests/tests/slurm-v6-ubuntu.yml
+++ b/tools/cloud-build/daily-tests/tests/slurm-v6-ubuntu.yml
@@ -22,7 +22,7 @@ slurm_cluster_name: "ubunv6{{ build[0:4] }}"
zone: us-west4-c
workspace: /workspace
blueprint_yaml: "{{ workspace }}/community/examples/hpc-slurm-ubuntu2004.yaml"
-network: "{{ test_name }}-net"
+network: "{{ deployment_name }}-net"
# Note: Pattern matching in gcloud only supports 1 wildcard, centv5*-login-* won't work.
login_node: "{{ slurm_cluster_name }}-slurm-login-*"
controller_node: "{{ slurm_cluster_name }}-controller"
diff --git a/tools/cloud-build/daily-tests/tests/spack-gromacs.yml b/tools/cloud-build/daily-tests/tests/spack-gromacs.yml
index c86e74eb81..75c2e7d558 100644
--- a/tools/cloud-build/daily-tests/tests/spack-gromacs.yml
+++ b/tools/cloud-build/daily-tests/tests/spack-gromacs.yml
@@ -20,7 +20,7 @@ slurm_cluster_name: "groma{{ build[0:5] }}"
zone: us-central1-c
workspace: /workspace
blueprint_yaml: "{{ workspace }}/community/examples/hpc-slurm-gromacs.yaml"
-network: "{{ test_name }}-net"
+network: "{{ deployment_name }}-net"
login_node: "{{ slurm_cluster_name }}-slurm-login-*"
# Image name to be used to filter logs from /var/log/messages for startup script.
image_name: "slurm-gcp-dev-hpc-rocky-linux-8-*"
diff --git a/tools/cloud-build/daily-tests/validate_tests_metadata.py b/tools/cloud-build/daily-tests/validate_tests_metadata.py
index c6e11fd881..16905a1f72 100644
--- a/tools/cloud-build/daily-tests/validate_tests_metadata.py
+++ b/tools/cloud-build/daily-tests/validate_tests_metadata.py
@@ -73,6 +73,9 @@ def get_blueprint(build_path: str) -> Optional[str]:
f"{BUILDS_DIR}/chrome-remote-desktop.yaml": "tools/cloud-build/daily-tests/blueprints/crd-default.yaml",
f"{BUILDS_DIR}/chrome-remote-desktop-ubuntu.yaml": "tools/cloud-build/daily-tests/blueprints/crd-ubuntu.yaml",
f"{BUILDS_DIR}/gcluster-dockerfile.yaml": "tools/cloud-build/daily-tests/blueprints/e2e.yaml",
+ f"{BUILDS_DIR}/slurm-gcp-v6-reconfig-size.yaml": "tools/python-integration-tests/blueprints/slurm-simple-reconfig.yaml",
+ f"{BUILDS_DIR}/slurm-gcp-v6-simple-job-completion.yaml": "tools/python-integration-tests/blueprints/slurm-simple.yaml",
+ f"{BUILDS_DIR}/slurm-gcp-v6-topology.yaml": "tools/python-integration-tests/blueprints/topology-test.yaml",
}
if build_path in SPECIAL_CASES:
return SPECIAL_CASES[build_path]
diff --git a/tools/cloud-build/images/cluster-toolkit-dockerfile/Dockerfile b/tools/cloud-build/images/cluster-toolkit-dockerfile/Dockerfile
index 0276bfce29..fe80cc75b1 100644
--- a/tools/cloud-build/images/cluster-toolkit-dockerfile/Dockerfile
+++ b/tools/cloud-build/images/cluster-toolkit-dockerfile/Dockerfile
@@ -28,7 +28,8 @@ RUN apt-get update && \
git \
make \
unzip \
- wget
+ wget \
+ python3-pip
# Install Terraform
ARG TERRAFORM_VERSION=1.5.2
@@ -70,6 +71,9 @@ RUN echo $PATH
RUN mkdir /out
WORKDIR /out
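+# Allow pip to install packages system-wide on images whose Python is marked externally managed (PEP 668).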
+RUN echo "[global]" >> /etc/pip.conf
+RUN echo "break-system-packages = true" >> /etc/pip.conf
+
# Command to execute when running the container (placeholder)
ENTRYPOINT ["gcluster"]
CMD ["--help"]
diff --git a/tools/cloud-build/images/test-runner/Dockerfile b/tools/cloud-build/images/test-runner/Dockerfile
index 0baa889e83..9139d9f4cf 100644
--- a/tools/cloud-build/images/test-runner/Dockerfile
+++ b/tools/cloud-build/images/test-runner/Dockerfile
@@ -44,6 +44,7 @@ RUN curl -fsSL https://apt.releases.hashicorp.com/gpg | apt-key add - && \
pip install --no-cache-dir --upgrade pip && \
pip install --no-cache-dir -r https://raw.githubusercontent.com/GoogleCloudPlatform/slurm-gcp/v5/scripts/requirements.txt && \
pip install --no-cache-dir ansible && \
+ pip install --no-cache-dir paramiko && \
rm -rf ~/.cache/pip/* && \
cd /workspace && \
# Add .terraformrc to $HOME to allow use of google-private
diff --git a/tools/python-integration-tests/blueprints/slurm-simple-reconfig.yaml b/tools/python-integration-tests/blueprints/slurm-simple-reconfig.yaml
new file mode 100644
index 0000000000..a9ac6d891f
--- /dev/null
+++ b/tools/python-integration-tests/blueprints/slurm-simple-reconfig.yaml
@@ -0,0 +1,58 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+blueprint_name: slurm-test
+
+vars:
+ project_id: ## Set GCP Project ID Here ##
+ deployment_name: slurm-test
+ region: us-central1
+ zone: us-central1-a
+
+deployment_groups:
+- group: primary
+ modules:
+ - id: network
+ source: modules/network/pre-existing-vpc
+
+ - id: nodeset
+ source: community/modules/compute/schedmd-slurm-gcp-v6-nodeset
+ use: [network]
+ settings:
+ bandwidth_tier: gvnic_enabled
+ machine_type: c2-standard-4
+ node_count_dynamic_max: 3
+ allow_automatic_updates: false
+
+ - id: partition
+ source: community/modules/compute/schedmd-slurm-gcp-v6-partition
+ use: [nodeset]
+ settings:
+ is_default: true
+ partition_name: compute
+
+ - id: slurm_login
+ source: community/modules/scheduler/schedmd-slurm-gcp-v6-login
+ use: [network]
+ settings:
+ machine_type: n1-standard-4
+ enable_login_public_ips: true
+
+ - id: slurm_controller
+ source: community/modules/scheduler/schedmd-slurm-gcp-v6-controller
+ use: [network, slurm_login, partition]
+ settings:
+ machine_type: n1-standard-4
+ enable_controller_public_ips: true
diff --git a/tools/python-integration-tests/blueprints/slurm-simple.yaml b/tools/python-integration-tests/blueprints/slurm-simple.yaml
new file mode 100644
index 0000000000..235674c4d2
--- /dev/null
+++ b/tools/python-integration-tests/blueprints/slurm-simple.yaml
@@ -0,0 +1,58 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+blueprint_name: slurm-test
+
+vars:
+ project_id: ## Set GCP Project ID Here ##
+ deployment_name: slurm-test
+ region: us-central1
+ zone: us-central1-a
+
+deployment_groups:
+- group: primary
+ modules:
+ - id: network
+ source: modules/network/pre-existing-vpc
+
+ - id: nodeset
+ source: community/modules/compute/schedmd-slurm-gcp-v6-nodeset
+ use: [network]
+ settings:
+ bandwidth_tier: gvnic_enabled
+ machine_type: c2-standard-4
+ node_count_dynamic_max: 5
+ allow_automatic_updates: false
+
+ - id: partition
+ source: community/modules/compute/schedmd-slurm-gcp-v6-partition
+ use: [nodeset]
+ settings:
+ is_default: true
+ partition_name: compute
+
+ - id: slurm_login
+ source: community/modules/scheduler/schedmd-slurm-gcp-v6-login
+ use: [network]
+ settings:
+ machine_type: n1-standard-4
+ enable_login_public_ips: true
+
+ - id: slurm_controller
+ source: community/modules/scheduler/schedmd-slurm-gcp-v6-controller
+ use: [network, slurm_login, partition]
+ settings:
+ machine_type: n1-standard-4
+ enable_controller_public_ips: true
diff --git a/tools/python-integration-tests/blueprints/topology-test.yaml b/tools/python-integration-tests/blueprints/topology-test.yaml
new file mode 100644
index 0000000000..acb494c801
--- /dev/null
+++ b/tools/python-integration-tests/blueprints/topology-test.yaml
@@ -0,0 +1,61 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+blueprint_name: topology-test
+
+vars:
+ project_id: ## Set GCP Project ID Here ##
+ deployment_name: topology-test
+ region: us-central1
+ zone: us-central1-a
+
+deployment_groups:
+- group: primary
+ modules:
+ - id: network
+ source: modules/network/pre-existing-vpc
+
+ - id: nodeset
+ source: community/modules/compute/schedmd-slurm-gcp-v6-nodeset
+ use: [network]
+ settings:
+ bandwidth_tier: gvnic_enabled
+ machine_type: n2-standard-4
+ node_count_dynamic_max: 0
+ node_count_static: 5
+ allow_automatic_updates: false
+ enable_placement: true
+
+ - id: partition
+ source: community/modules/compute/schedmd-slurm-gcp-v6-partition
+ use: [nodeset]
+ settings:
+ is_default: true
+ partition_name: topo
+ exclusive: false
+
+ - id: slurm_login
+ source: community/modules/scheduler/schedmd-slurm-gcp-v6-login
+ use: [network]
+ settings:
+ machine_type: n1-standard-4
+ enable_login_public_ips: true
+
+ - id: slurm_controller
+ source: community/modules/scheduler/schedmd-slurm-gcp-v6-controller
+ use: [network, slurm_login, partition]
+ settings:
+ machine_type: n1-standard-4
+ enable_controller_public_ips: true
diff --git a/tools/python-integration-tests/deployment.py b/tools/python-integration-tests/deployment.py
new file mode 100644
index 0000000000..3ed43361b9
--- /dev/null
+++ b/tools/python-integration-tests/deployment.py
@@ -0,0 +1,129 @@
+# Copyright 2024 "Google LLC"
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import shutil
+import os
+import subprocess
+import yaml
+
+class Deployment:
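+    # Thin wrapper around gcluster: creates a deployment directory from a blueprint, uploads it to GCS, and deploys/destroys it.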
+ def __init__(self, blueprint: str):
+ self.blueprint_yaml = blueprint
+ self.state_bucket = "daily-tests-tf-state"
+ self.project_id = None
+ self.workspace = None
+ self.instance_name = None
+ self.username = None
+ self.deployment_name = None
+ self.zone = None
+
+    def run_command(self, cmd: str) -> subprocess.CompletedProcess:
+        # check=True raises CalledProcessError (with stderr attached) on failure.
+ res = subprocess.run(cmd, shell=True, universal_newlines=True, check=True,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ return res
+
+ def parse_blueprint(self, file_path: str):
+ with open(file_path, 'r') as file:
+ content = yaml.safe_load(file)
+ self.deployment_name = content["vars"]["deployment_name"]
+ self.zone = content["vars"]["zone"]
+
+ def get_posixAccount_info(self):
+ # Extract the username from posixAccounts
+ result = self.run_command(f"gcloud compute os-login describe-profile --format=json").stdout
+ posixAccounts = json.loads(result)
+
+ for account in posixAccounts.get('posixAccounts', []):
+ if 'accountId' in account:
+ self.project_id = account['accountId']
+ self.username = account['username']
+
+ def set_deployment_variables(self):
+ self.workspace = os.path.abspath(os.getcwd().strip())
+ self.parse_blueprint(self.blueprint_yaml)
+ self.get_posixAccount_info()
+ self.instance_name = self.deployment_name.replace("-", "")[:10] + "-slurm-login-001"
+
+ def create_blueprint(self):
+ cmd = [
+ "./gcluster",
+ "create",
+ "-l",
+ "ERROR",
+ self.blueprint_yaml,
+ "--backend-config",
+ f"bucket={self.state_bucket}",
+ "--vars",
+ f"project_id={self.project_id}",
+ "--vars",
+ f"deployment_name={self.deployment_name}",
+ "-w"
+ ]
+
+ subprocess.run(cmd, check=True, cwd=self.workspace)
+
+ def compress_blueprint(self):
+ cmd = [
+ "tar",
+ "-czf",
+ "%s.tgz" % (self.deployment_name),
+ "%s" % (self.deployment_name),
+ ]
+
+ subprocess.run(cmd, check=True, cwd=self.workspace)
+
+ def upload_deployment(self):
+ cmd = [
+ "gsutil",
+ "cp",
+ "%s.tgz" % (self.deployment_name),
+ "gs://%s/%s/" % (self.state_bucket, self.deployment_name)
+ ]
+
+ subprocess.run(cmd, check=True, cwd=self.workspace)
+
+ def print_download_command(self):
+ print("gcloud storage cp gs://%s/%s/%s.tgz ." % (self.state_bucket, self.deployment_name, self.deployment_name))
+
+ def create_deployment_directory(self):
+ self.set_deployment_variables()
+ self.create_blueprint()
+ self.compress_blueprint()
+ self.upload_deployment()
+ self.print_download_command()
+
+ def deploy(self):
+ # Create deployment directory
+ self.create_deployment_directory()
+ cmd = [
+ "./gcluster",
+ "deploy",
+ self.deployment_name,
+ "--auto-approve"
+ ]
+
+ subprocess.run(cmd, check=True, cwd=self.workspace)
+
+ def destroy(self):
+ cmd = [
+ "./gcluster",
+ "destroy",
+ self.deployment_name,
+ "--auto-approve"
+ ]
+
+ subprocess.run(cmd, check=True, cwd=self.workspace)
+ os.remove(f"{self.deployment_name}.tgz")
+ shutil.rmtree(self.deployment_name)
diff --git a/tools/python-integration-tests/slurm_reconfig_size.py b/tools/python-integration-tests/slurm_reconfig_size.py
new file mode 100644
index 0000000000..597eac1756
--- /dev/null
+++ b/tools/python-integration-tests/slurm_reconfig_size.py
@@ -0,0 +1,41 @@
+# Copyright 2024 "Google LLC"
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ssh import SSHManager
+from deployment import Deployment
+from test import SlurmTest
+import unittest
+import time
+
+class SlurmReconfigureSize(SlurmTest):
+ # Class to test simple reconfiguration
+ def __init__(self, deployment):
+ super().__init__(Deployment("tools/python-integration-tests/blueprints/slurm-simple.yaml"))
+ self.reconfig_blueprint = "tools/python-integration-tests/blueprints/slurm-simple-reconfig.yaml"
+
+ def runTest(self):
+ # Check 5 nodes are available
+ self.assert_equal(len(self.get_nodes()), 5)
+
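+        # Re-deploying under the same deployment name with the 3-node blueprint shrinks the nodeset and triggers a reconfiguration.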
+ self.deployment = Deployment(self.reconfig_blueprint)
+ self.deployment.deploy()
+
+ # Wait 90 seconds for reconfig
+ time.sleep(90)
+
+ # Check 3 nodes are available
+ self.assert_equal(len(self.get_nodes()), 3)
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tools/python-integration-tests/slurm_simple_job_completion.py b/tools/python-integration-tests/slurm_simple_job_completion.py
new file mode 100644
index 0000000000..702f337e96
--- /dev/null
+++ b/tools/python-integration-tests/slurm_simple_job_completion.py
@@ -0,0 +1,78 @@
+# Copyright 2024 "Google LLC"
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ssh import SSHManager
+from deployment import Deployment
+from test import SlurmTest
+import unittest
+import time
+import json
+
+class SlurmSimpleJobCompletionTest(SlurmTest):
+ # Class to test simple slurm job completion
+ def __init__(self, deployment):
+ super().__init__(Deployment("tools/python-integration-tests/blueprints/slurm-simple.yaml"))
+ self.job_list = {}
+
+ def runTest(self):
+ # Submits 5 jobs and checks if they are successful.
+ for i in range(5):
+ self.submit_job('sbatch -N 1 --wrap "sleep 20"')
+ self.monitor_squeue()
+
+ for job_id in self.job_list.keys():
+ result = self.is_job_complete(job_id)
+ self.assert_equal(True, result, f"Something went wrong with JobID:{job_id}.")
+ print(f"JobID {job_id} finished successfully.")
+
+ def monitor_squeue(self):
+ # Monitors squeue and updates self.job_list until all running jobs are complete.
+ lines = []
+
+ while True:
+ stdin, stdout, stderr = self.ssh_client.exec_command('squeue')
+
+ lines = stdout.read().decode().splitlines()[1:] # Skip header
+
+ if not lines:
+ break
+            for line in lines:
+                job_id, partition, _, _, state, times, nodes, nodelist = line.split()
+
+ if job_id not in self.job_list:
+ print(f"Job id {job_id} is not recognized.")
+ else:
+ self.job_list[job_id].update({
+ "partition": partition,
+ "state": state,
+ "time": times,
+ "nodes": nodes,
+ "nodelist": nodelist,
+ })
+ time.sleep(5)
+
+ def is_job_complete(self, job_id: str):
+ # Checks if a job successfully completed.
+ stdin, stdout, stderr = self.ssh_client.exec_command(f'scontrol show job {job_id} --json')
+ content = json.load(stdout)
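+        # scontrol's JSON output reports job_state as a list (e.g. ["COMPLETED"]); only the first entry is checked.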
+ return content["jobs"][0]["job_state"][0] == "COMPLETED"
+
+ def submit_job(self, cmd: str):
+ stdin, stdout, stderr = self.ssh_client.exec_command(cmd)
+ jobID = stdout.read().decode().split()[-1]
+ self.job_list[jobID] = {}
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tools/python-integration-tests/slurm_topology.py b/tools/python-integration-tests/slurm_topology.py
new file mode 100644
index 0000000000..6b1499d2fd
--- /dev/null
+++ b/tools/python-integration-tests/slurm_topology.py
@@ -0,0 +1,59 @@
+# Copyright 2024 "Google LLC"
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ssh import SSHManager
+from deployment import Deployment
+from test import SlurmTest
+from collections import defaultdict
+import unittest
+import logging
+
+class SlurmTopologyTest(SlurmTest):
+ # Class to test Slurm topology
+ def __init__(self, deployment):
+ super().__init__(Deployment("tools/python-integration-tests/blueprints/topology-test.yaml"))
+
+ def runTest(self):
+ # Checks isomorphism of last layer of nodes to determine topology.
+ r_rack, s_rack = defaultdict(set), defaultdict(set)
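+        # r_rack groups nodes by a component of GCE's reported physicalHost; s_rack groups them by their deepest Slurm topology switch.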
+ nodes = self.get_nodes()
+
+ for node in nodes:
+ r_rack[self.get_real_rack(node)].add(node)
+ s_rack[self.get_slurm_rack(node)].add(node)
+
+ r_rack_set = [set(v) for v in r_rack.values()]
+ s_rack_set = [set(v) for v in s_rack.values()]
+
+ self.assert_equal(r_rack_set, s_rack_set, "The two sets did not match.")
+
+ def get_slurm_topology(self):
+ stdin, stdout, stderr = self.ssh_client.exec_command("scontrol show topo")
+ return stdout.read().decode()
+
+ def get_node_depth(self, switch_name: str):
+ return switch_name.count("_")
+
+ def get_real_rack(self, node: str):
+ result = self.run_command(f"gcloud compute instances describe {node} --zone={self.deployment.zone} --project={self.deployment.project_id} --format='value(resourceStatus.physicalHost)'")
+ return result.stdout.split("/")[1]
+
+ def get_slurm_rack(self, node: str):
+ stdin, stdout, stderr = self.ssh_client.exec_command(f"scontrol show topology {node} | tail -1 | cut -d' ' -f1")
+        switch_name = stdout.read().decode().strip()
+        self.assert_equal(self.get_node_depth(switch_name), 2, f"{node} does not have the expected topology depth of 2.")
+ return switch_name
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tools/python-integration-tests/ssh.py b/tools/python-integration-tests/ssh.py
new file mode 100644
index 0000000000..b051020ad6
--- /dev/null
+++ b/tools/python-integration-tests/ssh.py
@@ -0,0 +1,79 @@
+# Copyright 2024 "Google LLC"
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import subprocess
+import time
+import paramiko
+
+class SSHManager:
+ # Manages tunnel and SSH connection.
+ _instance = None
+
+ def __new__(cls, *args, **kwargs):
+ if not cls._instance:
+ cls._instance = super(SSHManager, cls).__new__(cls)
+ return cls._instance
+
+ def __init__(self):
+ if not hasattr(self, 'ssh_client'):
+ self.tunnel = None
+ self.key = None
+ self.ssh_client = None
+
+    def run_command(self, cmd: list) -> subprocess.CompletedProcess:
+        res = subprocess.run(cmd, text=True, check=True, capture_output=True)
+        return res
+
+ def create_tunnel(self, instance_name, port, project_id, zone):
+ iap_tunnel_cmd = [
+ "gcloud", "compute", "start-iap-tunnel", instance_name,
+ "22", "--project", project_id, "--zone", zone,
+ f"--local-host-port=localhost:{port}"
+ ]
+
+ self.tunnel = subprocess.Popen(iap_tunnel_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ # Sleep to give the tunnel a few seconds to set up
+ time.sleep(3)
+
+ def get_keypath(self):
+ key_path = os.path.expanduser("~/.ssh/google_compute_engine")
+ os.makedirs(os.path.dirname(key_path), exist_ok=True)
+
+ self.run_command(["ssh-keygen", "-t", "rsa", "-f", key_path, "-N", ""])
+
+ # Add the public key to OS Login
+ public_key_path = key_path + ".pub"
+ self.run_command(["gcloud", "compute", "os-login", "ssh-keys", "add", "--key-file", public_key_path, "--ttl", "60m"])
+
+ return key_path
+
+ def setup_connection(self, instance_name, port, project_id, zone):
+ self.ssh_client = paramiko.SSHClient()
+ self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ self.key = paramiko.RSAKey.from_private_key_file(self.get_keypath())
+ self.create_tunnel(instance_name, port, project_id, zone)
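+        # The caller connects the paramiko client to localhost:<port>, which the IAP tunnel forwards to the instance's SSH port.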
+
+ def close(self):
+ # Closes existing SSH connection and tunnel
+ if self.ssh_client:
+ self.ssh_client.close()
+ if self.tunnel:
+ self.tunnel.terminate()
+ time.sleep(1) # give a second to terminate
+ if self.tunnel.poll() is None:
+ self.tunnel.kill() # kill leftover process if still running
+ self.tunnel.stdout.close()
+ self.tunnel.stderr.close()
+ self.tunnel = None
diff --git a/tools/python-integration-tests/test.py b/tools/python-integration-tests/test.py
index 00412f7d7b..a02fa6ed65 100644
--- a/tools/python-integration-tests/test.py
+++ b/tools/python-integration-tests/test.py
@@ -13,280 +13,57 @@
# limitations under the License.
import json
-import logging
-import shutil
-import os
-import re
-import signal
-import socket
import subprocess
-import sys
import time
-import paramiko
-from collections import defaultdict
-import argparse
-import yaml
+import unittest
+from ssh import SSHManager
+from deployment import Deployment
-def run_command(cmd: str, err_msg: str = None) -> subprocess.CompletedProcess:
- res = subprocess.run(cmd, shell=True, universal_newlines=True, check=True,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- if res.returncode != 0:
- raise subprocess.SubprocessError(f"{err_msg}:\n{res.stderr}")
-
- return res
-
-def parse_blueprint(file_path: str):
- with open(file_path, 'r') as file:
- content = yaml.safe_load(file)
- return content["vars"]["deployment_name"], content["vars"]["zone"]
-
-def get_account_info():
- # Extract the username from posixAccounts
- result = run_command(f"gcloud compute os-login describe-profile --format=json").stdout
- posixAccounts = json.loads(result)
-
- for account in posixAccounts.get('posixAccounts', []):
- if 'accountId' in account:
- project_id = account['accountId']
- username = account['username']
- return project_id, username
-
-def create_deployment(blueprint: str):
- project_id, username = get_account_info()
- deployment_name, zone = parse_blueprint(blueprint)
- return Deployment(blueprint, project_id, username, deployment_name, zone)
-
-def test_simple_job_completion(blueprint: str):
- deployment = create_deployment(blueprint)
- deployment.deploy()
- try:
- # Waiting to let the login node finish set up or ssh will fail.
- print("Wait 60 seconds")
- time.sleep(60)
-
- ssh = deployment.ssh()
- test = Test(ssh, deployment)
- test.check_simple_job_completion()
- finally:
- deployment.close_tunnel()
- deployment.destroy()
-
-def test_topology(blueprint: str):
- deployment = create_deployment(blueprint)
- deployment.deploy()
- try:
- # Waiting to let the login node finish set up or ssh will fail.
- print("Wait 60 seconds")
- time.sleep(60)
- ssh = deployment.ssh()
- test = Test(ssh, deployment)
- test.check_topology()
- finally:
- deployment.close_tunnel()
- deployment.destroy()
-
-class Deployment:
- def __init__(self, blueprint: str, project_id: str, username: str, deployment_name: str, zone: str):
- self.blueprint_yaml = blueprint
- self.project_id = project_id
- self.state_bucket = "daily-tests-tf-state"
- self.workspace = ""
- self.username = username
- self.deployment_name = deployment_name
- self.zone = zone
- self.test_name = deployment_name
- self.tunnel = None
-
- def get_workspace(self):
- return os.path.abspath(os.getcwd().strip())
-
- def create_blueprint(self):
- self.workspace = self.get_workspace()
-
- cmd = [
- "./gcluster",
- "create",
- "-l",
- "ERROR",
- self.blueprint_yaml,
- "--backend-config",
- f"bucket={self.state_bucket}",
- "--vars",
- f"project_id={self.project_id}",
- "--vars",
- f"deployment_name={self.deployment_name}"
- ]
-
- subprocess.run(cmd, check=True, cwd=self.workspace)
-
- def compress_blueprint(self):
- cmd = [
- "tar",
- "-czf",
- "%s.tgz" % (self.deployment_name),
- "%s" % (self.deployment_name),
- ]
-
- subprocess.run(cmd, check=True, cwd=self.workspace)
-
- def upload_deployment(self):
- cmd = [
- "gsutil",
- "cp",
- "%s.tgz" % (self.deployment_name),
- "gs://%s/%s/" % (self.state_bucket, self.test_name)
- ]
-
- subprocess.run(cmd, check=True, cwd=self.workspace)
-
- def print_download_command(self):
- print("gcloud storage cp gs://%s/%s/%s.tgz ." % (self.state_bucket, self.test_name, self.deployment_name))
-
- def create_deployment_directory(self):
- self.create_blueprint()
- self.compress_blueprint()
- self.upload_deployment()
- self.print_download_command()
-
- def deploy(self):
- # Create deployment directory
- self.create_deployment_directory()
- cmd = [
- "./gcluster",
- "deploy",
- self.deployment_name,
- "--auto-approve"
- ]
-
- subprocess.run(cmd, check=True, cwd=self.workspace)
-
- def ssh(self) -> paramiko.SSHClient:
- instance_name = self.deployment_name.replace("-", "")[:10] + "-slurm-login-001"
-
- # Use existing SSH key pair (assuming it's already in ~/.ssh/google_compute_engine)
- key_path = os.path.expanduser("~/.ssh/google_compute_engine")
-
- # Add the public key to OS Login
- public_key_path = key_path + ".pub"
- subprocess.run(
- [
- "gcloud", "compute", "os-login", "ssh-keys", "describe",
- "--key-file", public_key_path
- ],
- check=True, capture_output=True
- )
-
- # Construct the gcloud command to create the IAP tunnel
- iap_tunnel_cmd = [
- "gcloud", "compute", "start-iap-tunnel", instance_name,
- "22", "--project", self.project_id, "--zone", self.zone,
- "--local-host-port=localhost:10022"
- ]
-
- # Create the IAP tunnel process
- self.tunnel = subprocess.Popen(iap_tunnel_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- # Sleep to give the tunnel a few seconds to set up
- time.sleep(3)
-
- # Create an SSH client
- ssh = paramiko.SSHClient()
- ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
- # Load the private key
- key = paramiko.RSAKey.from_private_key_file(key_path)
-
- # Connect to the VM
- ssh.connect("localhost", port=10022, username=self.username, pkey=key)
-
- return ssh
-
- def close_tunnel(self):
- if self.tunnel:
- self.tunnel.terminate()
- self.tunnel.wait()
- self.tunnel = None
-
- def destroy(self):
- cmd = [
- "./gcluster",
- "destroy",
- self.deployment_name,
- "--auto-approve"
- ]
-
- subprocess.run(cmd, check=True, cwd=self.workspace)
- os.remove(f"{self.deployment_name}.tgz")
- shutil.rmtree(self.deployment_name)
-
-
-class Test:
- def __init__(self, ssh, deployment):
- self.ssh = ssh
+class Test(unittest.TestCase): # Inherit from unittest.TestCase
+ def __init__(self, deployment):
+ super().__init__() # Call the superclass constructor
self.deployment = deployment
- self.job_list = {}
-
- def get_slurm_topology(self):
- stdin, stdout, stderr = self.ssh.exec_command("scontrol show topo")
- return stdout.read().decode()
-
- def monitor_squeue(self):
- # Monitors squeue and updates self.job_list until all running jobs are complete.
- lines = []
+ self.ssh_manager = None
+ self.ssh_client = None
- while True:
- stdin, stdout, stderr = self.ssh.exec_command('squeue')
-
- lines = stdout.read().decode().splitlines()[1:] # Skip header
+ def run_command(self, cmd: str) -> subprocess.CompletedProcess:
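+        # Run a shell command locally; check=True raises CalledProcessError on a non-zero exit.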
+ res = subprocess.run(cmd, shell=True, text=True, check=True,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ return res
- if not lines:
- break
- for line in lines:
- parts = line.split()
- job_id, partition, _, _, state, times, nodes, nodelist = line.split()
+ def setUp(self):
+ self.addCleanup(self.clean_up)
+ self.deployment.deploy()
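+        # Give the login node time to finish its startup scripts before the first SSH attempt.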
+ time.sleep(90)
- if job_id not in self.job_list:
- print(f"Job id {job_id} is not recognized.")
- else:
- self.job_list[job_id].update({
- "partition": partition,
- "state": state,
- "time": times,
- "nodes": nodes,
- "nodelist": nodelist,
- })
- time.sleep(5)
+ def clean_up(self):
+ self.deployment.destroy()
- def is_job_complete(self, job_id: str):
- # Checks if a job successfully completed.
- stdin, stdout, stderr = self.ssh.exec_command(f'scontrol show job {job_id} --json')
- content = json.load(stdout)
- return content["jobs"][0]["job_state"][0] == "COMPLETED"
+class SlurmTest(Test):
+ # Base class for Slurm-specific tests.
+ def ssh(self, hostname):
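+        # Connect to the login node over the tunnel that SSHManager forwards to localhost:10022.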
+ self.ssh_manager = SSHManager()
+ self.ssh_manager.setup_connection(hostname, 10022, self.deployment.project_id, self.deployment.zone)
+ self.ssh_client = self.ssh_manager.ssh_client
+ self.ssh_client.connect("localhost", 10022, username=self.deployment.username, pkey=self.ssh_manager.key)
- def submit_job(self, cmd: str):
- stdin, stdout, stderr = self.ssh.exec_command(cmd)
- jobID = stdout.read().decode().split()[-1]
- self.job_list[jobID] = {}
+ def close_ssh(self):
+ self.ssh_manager.close()
- def get_node_depth(self, switch_name: str):
- return switch_name.count("_")
+ def setUp(self):
+ try:
+ super().setUp()
+ hostname = self.get_login_node()
+ self.ssh(hostname)
+ except Exception as err:
+            self.fail(f"Unexpected error encountered: {getattr(err, 'stderr', err)}")
- def get_real_rack(self, node: str):
- result = run_command(f"gcloud compute instances describe {node} --zone={self.deployment.zone} --project={self.deployment.project_id} --format='value(resourceStatus.physicalHost)'")
- return result.stdout.split("/")[1]
-
- def get_slurm_rack(self, node: str):
- stdin, stdout, stderr = self.ssh.exec_command(f"scontrol show topology {node} | tail -1 | cut -d' ' -f1")
- switch_name = stdout.read().decode()
- self.assert_equal(self.get_node_depth(switch_name), 2, f"{node} does not have the expected topology depth of 2."),
- return switch_name
+ def clean_up(self):
+ super().clean_up()
+ self.close_ssh()
- def get_nodes(self):
- nodes = []
- stdin, stdout, stderr = self.ssh.exec_command("scontrol show node| grep NodeName")
- for line in stdout.read().decode().splitlines():
- nodes.append(line.split()[0].split("=")[1])
- return nodes
+ def get_login_node(self):
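+        # Login node name: deployment name with hyphens removed, truncated to 10 characters, plus "-slurm-login-001".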
+ return self.deployment.deployment_name.replace("-", "")[:10] + "-slurm-login-001"
def assert_equal(self, value1, value2, message=None):
if value1 != value2:
@@ -294,51 +71,9 @@ def assert_equal(self, value1, value2, message=None):
message = f"Assertion failed: {value1} != {value2}"
raise AssertionError(message)
- def check_simple_job_completion(self):
- # Submits 5 jobs and checks if they are successful.
- for i in range(5):
- self.submit_job('sbatch -N 1 --wrap "sleep 20"')
- self.monitor_squeue()
-
- for job_id in self.job_list.keys():
- result = self.is_job_complete(job_id)
- self.assert_equal(True, result, f"Something went wrong with JobID:{job_id}.")
- print(f"JobID {job_id} finished successfully.")
-
- def check_topology(self):
- # Checks isomorphism of last layer of nodes to determine topology.
- r_rack, s_rack = defaultdict(set), defaultdict(set)
- nodes = self.get_nodes()
-
- for node in nodes:
- r_rack[self.get_real_rack(node)].add(node)
- s_rack[self.get_slurm_rack(node)].add(node)
-
- r_rack_set = [set(v) for v in r_rack.values()]
- s_rack_set = [set(v) for v in s_rack.values()]
-
- self.assert_equal(r_rack_set, s_rack_set, "The two sets did not match.")
-
-def main(simple_test_blueprints, topo_test_blueprints) -> None:
- if simple_test_blueprints:
- for blueprint in simple_test_blueprints:
- test_simple_job_completion(blueprint)
- print(f'{blueprint} passed simple slurm test.')
-
- if topo_test_blueprints:
- for blueprint in topo_test_blueprints:
- test_topology(blueprint)
- print(f'{blueprint} passed topology test.')
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(
- prog='test.py',
- description="",
- formatter_class=argparse.RawTextHelpFormatter
- )
- parser.add_argument("--simple", nargs="+", help="File path(s) to blueprint(s) to do the simple slurm test on.")
- parser.add_argument("--topo", nargs="+", help="File path(s) to blueprint(s) to do the topology test on.")
-
- args = parser.parse_args()
-
- main(args.simple, args.topo)
+ def get_nodes(self):
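+        # Parse node names from `scontrol show node` output.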
+ nodes = []
+        stdin, stdout, stderr = self.ssh_client.exec_command("scontrol show node | grep NodeName")
+ for line in stdout.read().decode().splitlines():
+ nodes.append(line.split()[0].split("=")[1])
+ return nodes
diff --git a/tools/validate_configs/golden_copies/expectations/igc_pkr/.ghpc/artifacts/expanded_blueprint.yaml b/tools/validate_configs/golden_copies/expectations/igc_pkr/.ghpc/artifacts/expanded_blueprint.yaml
index 1db9c66495..dd66cf7aa1 100644
--- a/tools/validate_configs/golden_copies/expectations/igc_pkr/.ghpc/artifacts/expanded_blueprint.yaml
+++ b/tools/validate_configs/golden_copies/expectations/igc_pkr/.ghpc/artifacts/expanded_blueprint.yaml
@@ -38,14 +38,14 @@ deployment_groups:
terraform_providers:
google:
source: hashicorp/google
- version: ~> 6.10.0
+ version: ~> 6.13.0
configuration:
project: ((var.project_id))
region: ((var.region))
zone: ((var.zone))
google-beta:
source: hashicorp/google-beta
- version: ~> 6.10.0
+ version: ~> 6.13.0
configuration:
project: ((var.project_id))
region: ((var.region))
diff --git a/tools/validate_configs/golden_copies/expectations/igc_pkr/zero/versions.tf b/tools/validate_configs/golden_copies/expectations/igc_pkr/zero/versions.tf
index ed7b1bb3ba..fab3c44cd0 100644
--- a/tools/validate_configs/golden_copies/expectations/igc_pkr/zero/versions.tf
+++ b/tools/validate_configs/golden_copies/expectations/igc_pkr/zero/versions.tf
@@ -20,11 +20,11 @@ terraform {
required_providers {
google = {
source = "hashicorp/google"
- version = "~> 6.10.0"
+ version = "~> 6.13.0"
}
google-beta = {
source = "hashicorp/google-beta"
- version = "~> 6.10.0"
+ version = "~> 6.13.0"
}
}
}
diff --git a/tools/validate_configs/golden_copies/expectations/igc_tf/.ghpc/artifacts/expanded_blueprint.yaml b/tools/validate_configs/golden_copies/expectations/igc_tf/.ghpc/artifacts/expanded_blueprint.yaml
index fd6bd3e490..1906e9a832 100644
--- a/tools/validate_configs/golden_copies/expectations/igc_tf/.ghpc/artifacts/expanded_blueprint.yaml
+++ b/tools/validate_configs/golden_copies/expectations/igc_tf/.ghpc/artifacts/expanded_blueprint.yaml
@@ -44,14 +44,14 @@ deployment_groups:
terraform_providers:
google:
source: hashicorp/google
- version: ~> 6.10.0
+ version: ~> 6.13.0
configuration:
project: ((var.project_id))
region: ((var.region))
zone: ((var.zone))
google-beta:
source: hashicorp/google-beta
- version: ~> 6.10.0
+ version: ~> 6.13.0
configuration:
project: ((var.project_id))
region: ((var.region))
@@ -80,14 +80,14 @@ deployment_groups:
terraform_providers:
google:
source: hashicorp/google
- version: ~> 6.10.0
+ version: ~> 6.13.0
configuration:
project: ((var.project_id))
region: ((var.region))
zone: ((var.zone))
google-beta:
source: hashicorp/google-beta
- version: ~> 6.10.0
+ version: ~> 6.13.0
configuration:
project: ((var.project_id))
region: ((var.region))
diff --git a/tools/validate_configs/golden_copies/expectations/igc_tf/one/versions.tf b/tools/validate_configs/golden_copies/expectations/igc_tf/one/versions.tf
index ed7b1bb3ba..fab3c44cd0 100644
--- a/tools/validate_configs/golden_copies/expectations/igc_tf/one/versions.tf
+++ b/tools/validate_configs/golden_copies/expectations/igc_tf/one/versions.tf
@@ -20,11 +20,11 @@ terraform {
required_providers {
google = {
source = "hashicorp/google"
- version = "~> 6.10.0"
+ version = "~> 6.13.0"
}
google-beta = {
source = "hashicorp/google-beta"
- version = "~> 6.10.0"
+ version = "~> 6.13.0"
}
}
}
diff --git a/tools/validate_configs/golden_copies/expectations/igc_tf/zero/versions.tf b/tools/validate_configs/golden_copies/expectations/igc_tf/zero/versions.tf
index ed7b1bb3ba..fab3c44cd0 100644
--- a/tools/validate_configs/golden_copies/expectations/igc_tf/zero/versions.tf
+++ b/tools/validate_configs/golden_copies/expectations/igc_tf/zero/versions.tf
@@ -20,11 +20,11 @@ terraform {
required_providers {
google = {
source = "hashicorp/google"
- version = "~> 6.10.0"
+ version = "~> 6.13.0"
}
google-beta = {
source = "hashicorp/google-beta"
- version = "~> 6.10.0"
+ version = "~> 6.13.0"
}
}
}
diff --git a/tools/validate_configs/golden_copies/expectations/merge_flatten/.ghpc/artifacts/expanded_blueprint.yaml b/tools/validate_configs/golden_copies/expectations/merge_flatten/.ghpc/artifacts/expanded_blueprint.yaml
index 208cdde2ac..15a203a4b5 100644
--- a/tools/validate_configs/golden_copies/expectations/merge_flatten/.ghpc/artifacts/expanded_blueprint.yaml
+++ b/tools/validate_configs/golden_copies/expectations/merge_flatten/.ghpc/artifacts/expanded_blueprint.yaml
@@ -39,14 +39,14 @@ deployment_groups:
terraform_providers:
google:
source: hashicorp/google
- version: ~> 6.10.0
+ version: ~> 6.13.0
configuration:
project: ((var.project_id))
region: ((var.region))
zone: ((var.zone))
google-beta:
source: hashicorp/google-beta
- version: ~> 6.10.0
+ version: ~> 6.13.0
configuration:
project: ((var.project_id))
region: ((var.region))
diff --git a/tools/validate_configs/golden_copies/expectations/merge_flatten/zero/versions.tf b/tools/validate_configs/golden_copies/expectations/merge_flatten/zero/versions.tf
index ed7b1bb3ba..fab3c44cd0 100644
--- a/tools/validate_configs/golden_copies/expectations/merge_flatten/zero/versions.tf
+++ b/tools/validate_configs/golden_copies/expectations/merge_flatten/zero/versions.tf
@@ -20,11 +20,11 @@ terraform {
required_providers {
google = {
source = "hashicorp/google"
- version = "~> 6.10.0"
+ version = "~> 6.13.0"
}
google-beta = {
source = "hashicorp/google-beta"
- version = "~> 6.10.0"
+ version = "~> 6.13.0"
}
}
}
diff --git a/tools/validate_configs/golden_copies/expectations/versioned_blueprint/.ghpc/artifacts/expanded_blueprint.yaml b/tools/validate_configs/golden_copies/expectations/versioned_blueprint/.ghpc/artifacts/expanded_blueprint.yaml
index d8414f6db3..0a51078be5 100644
--- a/tools/validate_configs/golden_copies/expectations/versioned_blueprint/.ghpc/artifacts/expanded_blueprint.yaml
+++ b/tools/validate_configs/golden_copies/expectations/versioned_blueprint/.ghpc/artifacts/expanded_blueprint.yaml
@@ -47,14 +47,14 @@ deployment_groups:
terraform_providers:
google:
source: hashicorp/google
- version: ~> 6.10.0
+ version: ~> 6.13.0
configuration:
project: ((var.project_id))
region: ((var.region))
zone: ((var.zone))
google-beta:
source: hashicorp/google-beta
- version: ~> 6.10.0
+ version: ~> 6.13.0
configuration:
project: ((var.project_id))
region: ((var.region))
diff --git a/tools/validate_configs/golden_copies/expectations/versioned_blueprint/primary/versions.tf b/tools/validate_configs/golden_copies/expectations/versioned_blueprint/primary/versions.tf
index ed7b1bb3ba..fab3c44cd0 100644
--- a/tools/validate_configs/golden_copies/expectations/versioned_blueprint/primary/versions.tf
+++ b/tools/validate_configs/golden_copies/expectations/versioned_blueprint/primary/versions.tf
@@ -20,11 +20,11 @@ terraform {
required_providers {
google = {
source = "hashicorp/google"
- version = "~> 6.10.0"
+ version = "~> 6.13.0"
}
google-beta = {
source = "hashicorp/google-beta"
- version = "~> 6.10.0"
+ version = "~> 6.13.0"
}
}
}