diff --git a/manifests/crd.yaml b/manifests/crd.yaml index 0bfa3af076..4c5e4106bc 100644 --- a/manifests/crd.yaml +++ b/manifests/crd.yaml @@ -74,13 +74,647 @@ spec: description: TidbClusterSpec describes the attributes that a user creates on a tidb cluster properties: + affinity: + description: Affinity is a group of affinity scheduling rules. + properties: + nodeAffinity: + description: Node affinity is a group of node affinity scheduling + rules. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all + objects with implicit weight 0 (i.e. it's a no-op). A null + preferred scheduling term matches no objects (i.e. is also + a no-op). + properties: + preference: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. 
+ type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents the union of the results + of one or more label queries over a set of nodes; that is, + it represents the OR of the selectors represented by the node + selector terms. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The + terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. 
+ type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Pod affinity is a group of inter pod affinity scheduling + rules. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node has pods which matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located is + defined as running on a node whose value of the label + with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". 
+ The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may not + try to eventually evict the pod from its node. When there + are multiple elements, the lists of nodes corresponding to + each podAffinityTerm are intersected, i.e. all terms must + be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label selector is a label query over a + set of resources. 
The result of matchLabels and matchExpressions + are ANDed. An empty label selector matches all objects. + A null label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Pod anti affinity is a group of inter pod anti affinity + scheduling rules. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the anti-affinity expressions specified by this + field, but it may choose a node that violates one or more + of the expressions. The node that is most preferred is the + one with the greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource request, + requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field + and adding "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; the node(s) with + the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located is + defined as running on a node whose value of the label + with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. 
+ items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will not + be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms must + be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label selector is a label query over a + set of resources. The result of matchLabels and matchExpressions + are ANDed. An empty label selector matches all objects. + A null label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. 
If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + description: Base annotations of TiDB cluster Pods, components may add + or override selectors upon this respectively + type: object enablePVReclaim: + description: Whether enable PVC reclaim for orphan PVC left by statefulset + scale-in type: boolean enableTLSCluster: description: Enable TLS connection between TiDB server components type: boolean + helper: + description: HelperSpec contains details of helper component + properties: + image: + description: Image used to tail slow log and set kernel parameters + if necessary, must have `tail` and `sysctl` installed + type: string + imagePullPolicy: + description: ImagePullPolicy of the component. 
Override the cluster-level + imagePullPolicy if present + type: string + type: object + hostNetwork: + description: Whether Hostnetwork is enabled for TiDB cluster Pods + type: boolean + imagePullPolicy: + description: ImagePullPolicy of TiDB cluster Pods + type: string + nodeSelector: + description: Base node selectors of TiDB cluster Pods, components may + add or override selectors upon this respectively + type: object pd: description: PDSpec contains details of PD members + properties: + service: + properties: + annotations: + description: Additional annotations of the kubernetes service + object + type: object + loadBalancerIP: + description: LoadBalancerIP is the loadBalancerIP of service + type: string + type: + description: Type of the real kubernetes service, e.g. ClusterIP + type: string + type: object + storageClassName: + type: string + type: object + priorityClassName: + description: PriorityClassName of TiDB cluster Pods + type: string + pump: + description: PumpSpec contains details of Pump members properties: replicas: format: int32 @@ -91,13 +725,17 @@ spec: - replicas type: object pvReclaimPolicy: + description: Persistent volume reclaim policy applied to the PVs that + consumed by TiDB cluster type: string schedulerName: + description: SchedulerName of TiDB cluster Pods type: string services: description: Services list non-headless services type used in TidbCluster + Deprecated items: - description: Service represent service type used in TidbCluster + description: Deprecated Service represent service type used in TidbCluster properties: name: type: string @@ -115,14 +753,40 @@ spec: maxFailoverCount: format: int32 type: integer + plugins: + description: Plugins is a list of plugins that are loaded by TiDB + server, empty means plugin disabled + items: + type: string + type: array replicas: format: int32 type: integer separateSlowLog: type: boolean - slowLogTailer: - description: TiDBSlowLogTailerSpec represents an optional log tailer - sidecar 
with TiDB + service: + properties: + ServiceSpec: + properties: + annotations: + description: Additional annotations of the kubernetes service + object + type: object + loadBalancerIP: + description: LoadBalancerIP is the loadBalancerIP of service + type: string + type: + description: Type of the real kubernetes service, e.g. ClusterIP + type: string + type: object + exposeStatus: + description: Whether expose the status port + type: boolean + externalTrafficPolicy: + description: ExternalTrafficPolicy of the service + type: string + required: + - ServiceSpec type: object storageClassName: type: string @@ -137,15 +801,68 @@ spec: type: integer privileged: type: boolean - replicas: - format: int32 - type: integer + service: + properties: + annotations: + description: Additional annotations of the kubernetes service + object + type: object + loadBalancerIP: + description: LoadBalancerIP is the loadBalancerIP of service + type: string + type: + description: Type of the real kubernetes service, e.g. ClusterIP + type: string + type: object storageClassName: type: string - required: - - replicas type: object timezone: + description: Time zone of TiDB cluster Pods + type: string + tolerations: + description: Base tolerations of TiDB cluster Pods, components may add + more tolreations upon this respectively + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, operator + must be Exists; this combination means to match all values and + all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. 
+ Valid operators are Exists and Equal. Defaults to Equal. Exists + is equivalent to wildcard for value, so that a pod can tolerate + all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the + toleration (which must be of effect NoExecute, otherwise this + field is ignored) tolerates the taint. By default, it is not + set, which means tolerate the taint forever (do not evict). + Zero and negative values will be treated as 0 (evict immediately) + by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise + just a regular string. + type: string + type: object + type: array + version: + description: TiDB cluster version type: string type: object type: object diff --git a/pkg/apis/pingcap/v1alpha1/openapi_generated.go b/pkg/apis/pingcap/v1alpha1/openapi_generated.go index 6ee090fcbb..295c4347b2 100644 --- a/pkg/apis/pingcap/v1alpha1/openapi_generated.go +++ b/pkg/apis/pingcap/v1alpha1/openapi_generated.go @@ -33,15 +33,21 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupScheduleList": schema_pkg_apis_pingcap_v1alpha1_BackupScheduleList(ref), "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupScheduleSpec": schema_pkg_apis_pingcap_v1alpha1_BackupScheduleSpec(ref), "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupSpec": schema_pkg_apis_pingcap_v1alpha1_BackupSpec(ref), + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ComponentSpec": schema_pkg_apis_pingcap_v1alpha1_ComponentSpec(ref), "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.GcsStorageProvider": schema_pkg_apis_pingcap_v1alpha1_GcsStorageProvider(ref), + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.HelperSpec": 
schema_pkg_apis_pingcap_v1alpha1_HelperSpec(ref), "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSpec": schema_pkg_apis_pingcap_v1alpha1_PDSpec(ref), + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PumpSpec": schema_pkg_apis_pingcap_v1alpha1_PumpSpec(ref), "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ResourceRequirement": schema_pkg_apis_pingcap_v1alpha1_ResourceRequirement(ref), + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Resources": schema_pkg_apis_pingcap_v1alpha1_Resources(ref), "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Restore": schema_pkg_apis_pingcap_v1alpha1_Restore(ref), "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.RestoreList": schema_pkg_apis_pingcap_v1alpha1_RestoreList(ref), "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.RestoreSpec": schema_pkg_apis_pingcap_v1alpha1_RestoreSpec(ref), "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.S3StorageProvider": schema_pkg_apis_pingcap_v1alpha1_S3StorageProvider(ref), "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Service": schema_pkg_apis_pingcap_v1alpha1_Service(ref), + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ServiceSpec": schema_pkg_apis_pingcap_v1alpha1_ServiceSpec(ref), "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StorageProvider": schema_pkg_apis_pingcap_v1alpha1_StorageProvider(ref), + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBServiceSpec": schema_pkg_apis_pingcap_v1alpha1_TiDBServiceSpec(ref), "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSlowLogTailerSpec": schema_pkg_apis_pingcap_v1alpha1_TiDBSlowLogTailerSpec(ref), "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSpec": schema_pkg_apis_pingcap_v1alpha1_TiDBSpec(ref), "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSpec": schema_pkg_apis_pingcap_v1alpha1_TiKVSpec(ref), @@ -592,6 +598,125 @@ func 
schema_pkg_apis_pingcap_v1alpha1_BackupSpec(ref common.ReferenceCallback) c } } +func schema_pkg_apis_pingcap_v1alpha1_ComponentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ComponentSpec is the base spec of each component, the fields should always accessed by the BasicSpec() method to respect the cluster-level properties", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Image of the component, override baseImage and version if present Deprecated", + Type: []string{"string"}, + Format: "", + }, + }, + "baseImage": { + SchemaProps: spec.SchemaProps{ + Description: "Base image of the component, e.g. pingcap/tidb, image tag is now allowed during validation", + Type: []string{"string"}, + Format: "", + }, + }, + "version": { + SchemaProps: spec.SchemaProps{ + Description: "Version of the component. Override the cluster-level version if non-empty", + Type: []string{"string"}, + Format: "", + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present", + Type: []string{"string"}, + Format: "", + }, + }, + "hostNetwork": { + SchemaProps: spec.SchemaProps{ + Description: "Whether Hostnetwork of the component is enabled. Override the cluster-level setting if present", + Type: []string{"boolean"}, + Format: "", + }, + }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "Affinity of the component. Override the cluster-level one if present", + Ref: ref("k8s.io/api/core/v1.Affinity"), + }, + }, + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "PriorityClassName of the component. 
Override the cluster-level one if present", + Type: []string{"string"}, + Format: "", + }, + }, + "schedulerName": { + SchemaProps: spec.SchemaProps{ + Description: "SchedulerName of the component. Override the cluster-level one if present", + Type: []string{"string"}, + Format: "", + }, + }, + "nodeSelector": { + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector of the component. Merged into the cluster-level nodeSelector if non-empty", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "annotations": { + SchemaProps: spec.SchemaProps{ + Description: "Annotations of the component. Merged into the cluster-level annotations if non-empty", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "tolerations": { + SchemaProps: spec.SchemaProps{ + Description: "Tolerations of the component. 
Override the cluster-level tolerations if non-empty", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Toleration"), + }, + }, + }, + }, + }, + "podSecurityContext": { + SchemaProps: spec.SchemaProps{ + Description: "PodSecurityContext of the component", + Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"}, + } +} + func schema_pkg_apis_pingcap_v1alpha1_GcsStorageProvider(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -655,6 +780,33 @@ func schema_pkg_apis_pingcap_v1alpha1_GcsStorageProvider(ref common.ReferenceCal } } +func schema_pkg_apis_pingcap_v1alpha1_HelperSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "HelperSpec contains details of helper component", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Image used to tail slow log and set kernel parameters if necessary, must have `tail` and `sysctl` installed", + Type: []string{"string"}, + Format: "", + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "ImagePullPolicy of the component. 
Override the cluster-level imagePullPolicy if present", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_pingcap_v1alpha1_PDSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -662,10 +814,9 @@ func schema_pkg_apis_pingcap_v1alpha1_PDSpec(ref common.ReferenceCallback) commo Description: "PDSpec contains details of PD members", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "replicas": { + "service": { SchemaProps: spec.SchemaProps{ - Type: []string{"integer"}, - Format: "int32", + Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ServiceSpec"), }, }, "storageClassName": { @@ -675,6 +826,33 @@ func schema_pkg_apis_pingcap_v1alpha1_PDSpec(ref common.ReferenceCallback) commo }, }, }, + }, + }, + Dependencies: []string{ + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ServiceSpec"}, + } +} + +func schema_pkg_apis_pingcap_v1alpha1_PumpSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PumpSpec contains details of Pump members", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "storageClassName": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "replicas": { + SchemaProps: spec.SchemaProps{ + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, Required: []string{"replicas"}, }, }, @@ -715,6 +893,32 @@ func schema_pkg_apis_pingcap_v1alpha1_ResourceRequirement(ref common.ReferenceCa } } +func schema_pkg_apis_pingcap_v1alpha1_Resources(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "requests": { + SchemaProps: spec.SchemaProps{ + Description: "Resource requests of 
the component", + Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ResourceRequirement"), + }, + }, + "limits": { + SchemaProps: spec.SchemaProps{ + Description: "Resource limits of the component", + Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ResourceRequirement"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ResourceRequirement"}, + } +} + func schema_pkg_apis_pingcap_v1alpha1_Restore(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -915,7 +1119,7 @@ func schema_pkg_apis_pingcap_v1alpha1_Service(ref common.ReferenceCallback) comm return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Service represent service type used in TidbCluster", + Description: "Deprecated Service represent service type used in TidbCluster", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { @@ -936,6 +1140,47 @@ func schema_pkg_apis_pingcap_v1alpha1_Service(ref common.ReferenceCallback) comm } } +func schema_pkg_apis_pingcap_v1alpha1_ServiceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of the real kubernetes service, e.g. 
ClusterIP", + Type: []string{"string"}, + Format: "", + }, + }, + "annotations": { + SchemaProps: spec.SchemaProps{ + Description: "Additional annotations of the kubernetes service object", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "loadBalancerIP": { + SchemaProps: spec.SchemaProps{ + Description: "LoadBalancerIP is the loadBalancerIP of service", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_pingcap_v1alpha1_StorageProvider(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -961,12 +1206,62 @@ func schema_pkg_apis_pingcap_v1alpha1_StorageProvider(ref common.ReferenceCallba } } +func schema_pkg_apis_pingcap_v1alpha1_TiDBServiceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "ServiceSpec": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ServiceSpec"), + }, + }, + "externalTrafficPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "ExternalTrafficPolicy of the service", + Type: []string{"string"}, + Format: "", + }, + }, + "exposeStatus": { + SchemaProps: spec.SchemaProps{ + Description: "Whether expose the status port", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"ServiceSpec"}, + }, + }, + Dependencies: []string{ + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ServiceSpec"}, + } +} + func schema_pkg_apis_pingcap_v1alpha1_TiDBSlowLogTailerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Description: 
"TiDBSlowLogTailerSpec represents an optional log tailer sidecar with TiDB", Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Image used for slowlog tailer Deprecated, use TidbCluster.HelperImage instead", + Type: []string{"string"}, + Format: "", + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present Deprecated, use TidbCluster.HelperImagePullPolicy instead", + Type: []string{"string"}, + Format: "", + }, + }, + }, }, }, } @@ -985,10 +1280,9 @@ func schema_pkg_apis_pingcap_v1alpha1_TiDBSpec(ref common.ReferenceCallback) com Format: "int32", }, }, - "storageClassName": { + "service": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBServiceSpec"), }, }, "binlogEnabled": { @@ -1009,9 +1303,10 @@ func schema_pkg_apis_pingcap_v1alpha1_TiDBSpec(ref common.ReferenceCallback) com Format: "", }, }, - "slowLogTailer": { + "storageClassName": { SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSlowLogTailerSpec"), + Type: []string{"string"}, + Format: "", }, }, "enableTLSClient": { @@ -1020,12 +1315,26 @@ func schema_pkg_apis_pingcap_v1alpha1_TiDBSpec(ref common.ReferenceCallback) com Format: "", }, }, + "plugins": { + SchemaProps: spec.SchemaProps{ + Description: "Plugins is a list of plugins that are loaded by TiDB server, empty means plugin disabled", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, Required: []string{"replicas"}, }, }, Dependencies: []string{ - "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSlowLogTailerSpec"}, + 
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBServiceSpec"}, } } @@ -1036,10 +1345,9 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVSpec(ref common.ReferenceCallback) com Description: "TiKVSpec contains details of TiKV members", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "replicas": { + "service": { SchemaProps: spec.SchemaProps{ - Type: []string{"integer"}, - Format: "int32", + Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ServiceSpec"), }, }, "privileged": { @@ -1061,9 +1369,10 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVSpec(ref common.ReferenceCallback) com }, }, }, - Required: []string{"replicas"}, }, }, + Dependencies: []string{ + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ServiceSpec"}, } } @@ -1152,30 +1461,39 @@ func schema_pkg_apis_pingcap_v1alpha1_TidbClusterSpec(ref common.ReferenceCallba Description: "TidbClusterSpec describes the attributes that a user creates on a tidb cluster", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "schedulerName": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, "pd": { SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSpec"), + Description: "PD cluster spec", + Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSpec"), }, }, "tidb": { SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSpec"), + Description: "TiDB cluster spec", + Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSpec"), }, }, "tikv": { SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSpec"), + Description: "TiKV cluster spec", + Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSpec"), + }, + }, + "pump": { + SchemaProps: spec.SchemaProps{ + Description: "Pump cluster spec", + Ref: 
ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PumpSpec"), + }, + }, + "helper": { + SchemaProps: spec.SchemaProps{ + Description: "Helper spec", + Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.HelperSpec"), }, }, "services": { SchemaProps: spec.SchemaProps{ - Description: "Services list non-headless services type used in TidbCluster", + Description: "Services list non-headless services type used in TidbCluster Deprecated", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -1186,36 +1504,123 @@ func schema_pkg_apis_pingcap_v1alpha1_TidbClusterSpec(ref common.ReferenceCallba }, }, }, + "version": { + SchemaProps: spec.SchemaProps{ + Description: "TiDB cluster version", + Type: []string{"string"}, + Format: "", + }, + }, + "schedulerName": { + SchemaProps: spec.SchemaProps{ + Description: "SchedulerName of TiDB cluster Pods", + Type: []string{"string"}, + Format: "", + }, + }, "pvReclaimPolicy": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Description: "Persistent volume reclaim policy applied to the PVs that consumed by TiDB cluster", + Type: []string{"string"}, + Format: "", }, }, "enablePVReclaim": { SchemaProps: spec.SchemaProps{ - Type: []string{"boolean"}, - Format: "", + Description: "Whether enable PVC reclaim for orphan PVC left by statefulset scale-in", + Type: []string{"boolean"}, + Format: "", + }, + }, + "enableTLSCluster": { + SchemaProps: spec.SchemaProps{ + Description: "Enable TLS connection between TiDB server components", + Type: []string{"boolean"}, + Format: "", }, }, "timezone": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Description: "Time zone of TiDB cluster Pods", + Type: []string{"string"}, + Format: "", }, }, - "enableTLSCluster": { + "imagePullPolicy": { SchemaProps: spec.SchemaProps{ - Description: "Enable TLS connection between TiDB server components", + Description: "ImagePullPolicy of TiDB cluster Pods", 
+ Type: []string{"string"}, + Format: "", + }, + }, + "hostNetwork": { + SchemaProps: spec.SchemaProps{ + Description: "Whether Hostnetwork is enabled for TiDB cluster Pods", Type: []string{"boolean"}, Format: "", }, }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "Affinity of TiDB cluster Pods", + Ref: ref("k8s.io/api/core/v1.Affinity"), + }, + }, + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "PriorityClassName of TiDB cluster Pods", + Type: []string{"string"}, + Format: "", + }, + }, + "nodeSelector": { + SchemaProps: spec.SchemaProps{ + Description: "Base node selectors of TiDB cluster Pods, components may add or override selectors upon this respectively", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "annotations": { + SchemaProps: spec.SchemaProps{ + Description: "Base annotations of TiDB cluster Pods, components may add or override selectors upon this respectively", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "tolerations": { + SchemaProps: spec.SchemaProps{ + Description: "Base tolerations of TiDB cluster Pods, components may add more tolreations upon this respectively", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Toleration"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Service", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSpec"}, + 
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.HelperSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PumpSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Service", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSpec", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Toleration"}, } } diff --git a/pkg/apis/pingcap/v1alpha1/tidbcluster.go b/pkg/apis/pingcap/v1alpha1/tidbcluster.go index 9f6b08ede1..5ea94ceada 100644 --- a/pkg/apis/pingcap/v1alpha1/tidbcluster.go +++ b/pkg/apis/pingcap/v1alpha1/tidbcluster.go @@ -13,6 +13,177 @@ package v1alpha1 +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" +) + +const ( + // defaultHelperImage is default image of helper + defaultHelperImage = "busybox:1.26.2" +) + +// ComponentAccessor is the interface to access component details, which respects the cluster-level properties +// and component-level overrides +type ComponentAccessor interface { + Image() string + ImagePullPolicy() corev1.PullPolicy + HostNetwork() bool + Affinity() *corev1.Affinity + PriorityClassName() string + NodeSelector() map[string]string + Annotations() map[string]string + Tolerations() []corev1.Toleration + PodSecurityContext() *corev1.PodSecurityContext + SchedulerName() string +} + +type componentAccessorImpl struct { + // Cluster is the TidbCluster Spec + ClusterSpec *TidbClusterSpec + + // Cluster is the Component Spec + ComponentSpec *ComponentSpec +} + +func (a *componentAccessorImpl) Image() string { + image := a.ComponentSpec.Image + baseImage := a.ComponentSpec.BaseImage + // base image takes higher priority + if baseImage != "" { + version := a.ComponentSpec.Version + if version == "" { + version = a.ClusterSpec.Version + } + image = fmt.Sprintf("%s:%s", baseImage, version) + } + return image +} + +func (a *componentAccessorImpl) 
PodSecurityContext() *corev1.PodSecurityContext { + return a.ComponentSpec.PodSecurityContext +} + +func (a *componentAccessorImpl) ImagePullPolicy() corev1.PullPolicy { + pp := a.ComponentSpec.ImagePullPolicy + if pp == nil { + pp = &a.ClusterSpec.ImagePullPolicy + } + return *pp +} + +func (a *componentAccessorImpl) HostNetwork() bool { + hostNetwork := a.ComponentSpec.HostNetwork + if hostNetwork == nil { + hostNetwork = &a.ClusterSpec.HostNetwork + } + return *hostNetwork +} + +func (a *componentAccessorImpl) Affinity() *corev1.Affinity { + affi := a.ComponentSpec.Affinity + if affi == nil { + affi = a.ClusterSpec.Affinity + } + return affi +} + +func (a *componentAccessorImpl) PriorityClassName() string { + pcn := a.ComponentSpec.PriorityClassName + if pcn == "" { + pcn = a.ClusterSpec.PriorityClassName + } + return pcn +} + +func (a *componentAccessorImpl) SchedulerName() string { + pcn := a.ComponentSpec.SchedulerName + if pcn == "" { + pcn = a.ClusterSpec.SchedulerName + } + return pcn +} + +func (a *componentAccessorImpl) NodeSelector() map[string]string { + sel := map[string]string{} + for k, v := range a.ClusterSpec.NodeSelector { + sel[k] = v + } + for k, v := range a.ComponentSpec.NodeSelector { + sel[k] = v + } + return sel +} + +func (a *componentAccessorImpl) Annotations() map[string]string { + anno := map[string]string{} + for k, v := range a.ClusterSpec.Annotations { + anno[k] = v + } + for k, v := range a.ComponentSpec.Annotations { + anno[k] = v + } + return anno +} + +func (a *componentAccessorImpl) Tolerations() []corev1.Toleration { + tols := a.ComponentSpec.Tolerations + if len(tols) == 0 { + tols = a.ClusterSpec.Tolerations + } + return tols +} + +// BaseTiDBSpec returns the base spec of TiDB servers +func (tc *TidbCluster) BaseTiDBSpec() ComponentAccessor { + return &componentAccessorImpl{&tc.Spec, &tc.Spec.TiDB.ComponentSpec} +} + +// BaseTiKVSpec returns the base spec of TiKV servers +func (tc *TidbCluster) BaseTiKVSpec() 
ComponentAccessor { + return &componentAccessorImpl{&tc.Spec, &tc.Spec.TiKV.ComponentSpec} +} + +// BasePDSpec returns the base spec of PD servers +func (tc *TidbCluster) BasePDSpec() ComponentAccessor { + return &componentAccessorImpl{&tc.Spec, &tc.Spec.PD.ComponentSpec} +} + +// BasePumpSpec returns two results: +// 1. the base pump spec, if exists. +// 2. whether the base pump spec exists. +func (tc *TidbCluster) BasePumpSpec() (ComponentAccessor, bool) { + if tc.Spec.Pump == nil { + return nil, false + } + return &componentAccessorImpl{&tc.Spec, &tc.Spec.Pump.ComponentSpec}, true +} + +func (tc *TidbCluster) HelperImage() string { + image := tc.Spec.Helper.Image + if image == "" { + // for backward compatibility + image = tc.Spec.TiDB.SlowLogTailer.Image + } + if image == "" { + image = defaultHelperImage + } + return image +} + +func (tc *TidbCluster) HelperImagePullPolicy() corev1.PullPolicy { + pp := tc.Spec.Helper.ImagePullPolicy + if pp == nil { + // for backward compatibility + pp = tc.Spec.TiDB.SlowLogTailer.ImagePullPolicy + } + if pp == nil { + pp = &tc.Spec.ImagePullPolicy + } + return *pp +} + func (mt MemberType) String() string { return string(mt) } diff --git a/pkg/apis/pingcap/v1alpha1/tidbcluster_test.go b/pkg/apis/pingcap/v1alpha1/tidbcluster_test.go index b78d17b119..afc0ec3f95 100644 --- a/pkg/apis/pingcap/v1alpha1/tidbcluster_test.go +++ b/pkg/apis/pingcap/v1alpha1/tidbcluster_test.go @@ -164,6 +164,312 @@ func TestTiKVIsAvailable(t *testing.T) { } } +func TestComponentAccessor(t *testing.T) { + g := NewGomegaWithT(t) + + type testcase struct { + name string + cluster *TidbClusterSpec + component *ComponentSpec + expectFn func(*GomegaWithT, ComponentAccessor) + } + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + + accessor := &componentAccessorImpl{test.cluster, test.component} + test.expectFn(g, accessor) + } + affinity := &corev1.Affinity{ + PodAffinity: &corev1.PodAffinity{ + 
RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{ + TopologyKey: "rack", + }}, + }, + } + toleration1 := corev1.Toleration{ + Key: "k1", + } + toleration2 := corev1.Toleration{ + Key: "k2", + } + tests := []testcase{ + { + name: "use cluster-level defaults", + cluster: &TidbClusterSpec{ + ImagePullPolicy: corev1.PullNever, + HostNetwork: true, + Affinity: affinity, + PriorityClassName: "test", + SchedulerName: "test", + }, + component: &ComponentSpec{}, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.ImagePullPolicy()).Should(Equal(corev1.PullNever)) + g.Expect(a.HostNetwork()).Should(Equal(true)) + g.Expect(a.Affinity()).Should(Equal(affinity)) + g.Expect(a.PriorityClassName()).Should(Equal("test")) + g.Expect(a.SchedulerName()).Should(Equal("test")) + }, + }, + { + name: "override at component-level", + cluster: &TidbClusterSpec{ + ImagePullPolicy: corev1.PullNever, + HostNetwork: true, + Affinity: nil, + PriorityClassName: "test", + SchedulerName: "test", + }, + component: &ComponentSpec{ + ImagePullPolicy: func() *corev1.PullPolicy { a := corev1.PullAlways; return &a }(), + HostNetwork: func() *bool { a := false; return &a }(), + Affinity: affinity, + PriorityClassName: "override", + SchedulerName: "override", + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.ImagePullPolicy()).Should(Equal(corev1.PullAlways)) + g.Expect(a.HostNetwork()).Should(Equal(false)) + g.Expect(a.Affinity()).Should(Equal(affinity)) + g.Expect(a.PriorityClassName()).Should(Equal("override")) + g.Expect(a.SchedulerName()).Should(Equal("override")) + }, + }, + { + name: "baseImage and cluster version", + cluster: &TidbClusterSpec{ + Version: "v1.1.0", + }, + component: &ComponentSpec{ + BaseImage: "pingcap/tidb", + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.Image()).Should(Equal("pingcap/tidb:v1.1.0")) + }, + }, + { + name: "baseImage and component-level version override", + cluster: 
&TidbClusterSpec{ + Version: "v1.1.0", + }, + component: &ComponentSpec{ + BaseImage: "pingcap/tidb", + Version: "v1.2.0", + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.Image()).Should(Equal("pingcap/tidb:v1.2.0")) + }, + }, + { + name: "backward compatibility of .spec..image", + cluster: &TidbClusterSpec{ + Version: "v1.1.0", + }, + component: &ComponentSpec{ + Image: "pingcap/tidb:v1.0.0", + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.Image()).Should(Equal("pingcap/tidb:v1.0.0")) + }, + }, + { + name: "baseImage shadows the deprecated image field", + cluster: &TidbClusterSpec{ + Version: "v1.1.0", + }, + component: &ComponentSpec{ + BaseImage: "pingcap/tidb", + Image: "pingcap/tidb:v1.0.0", + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.Image()).Should(Equal("pingcap/tidb:v1.1.0")) + }, + }, + { + name: "node selector merge", + cluster: &TidbClusterSpec{ + NodeSelector: map[string]string{ + "k1": "v1", + }, + }, + component: &ComponentSpec{ + NodeSelector: map[string]string{ + "k1": "v2", + "k3": "v3", + }, + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.NodeSelector()).Should(Equal(map[string]string{ + "k1": "v2", + "k3": "v3", + })) + }, + }, + { + name: "annotations merge", + cluster: &TidbClusterSpec{ + Annotations: map[string]string{ + "k1": "v1", + }, + }, + component: &ComponentSpec{ + Annotations: map[string]string{ + "k1": "v2", + "k3": "v3", + }, + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.Annotations()).Should(Equal(map[string]string{ + "k1": "v2", + "k3": "v3", + })) + }, + }, + { + name: "annotations merge", + cluster: &TidbClusterSpec{ + Annotations: map[string]string{ + "k1": "v1", + }, + }, + component: &ComponentSpec{ + Annotations: map[string]string{ + "k1": "v2", + "k3": "v3", + }, + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.Annotations()).Should(Equal(map[string]string{ + "k1": 
"v2", + "k3": "v3", + })) + }, + }, + { + name: "tolerations merge", + cluster: &TidbClusterSpec{ + Tolerations: []corev1.Toleration{toleration1}, + }, + component: &ComponentSpec{ + Tolerations: []corev1.Toleration{toleration2}, + }, + expectFn: func(g *GomegaWithT, a ComponentAccessor) { + g.Expect(a.Tolerations()).Should(ConsistOf(toleration2)) + }, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + +func TestHelperImage(t *testing.T) { + g := NewGomegaWithT(t) + + type testcase struct { + name string + update func(*TidbCluster) + expectFn func(*GomegaWithT, string) + } + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + + tc := newTidbCluster() + test.update(tc) + test.expectFn(g, tc.HelperImage()) + } + tests := []testcase{ + { + name: "helper image has defaults", + update: func(tc *TidbCluster) {}, + expectFn: func(g *GomegaWithT, s string) { + g.Expect(s).ShouldNot(BeEmpty()) + }, + }, + { + name: "helper image use .spec.helper.image first", + update: func(tc *TidbCluster) { + tc.Spec.Helper.Image = "helper1" + tc.Spec.TiDB.SlowLogTailer.Image = "helper2" + }, + expectFn: func(g *GomegaWithT, s string) { + g.Expect(s).Should(Equal("helper1")) + }, + }, + { + name: "pick .spec.tidb.slowLogTailer.image as helper for backward compatibility", + update: func(tc *TidbCluster) { + tc.Spec.Helper.Image = "" + tc.Spec.TiDB.SlowLogTailer.Image = "helper2" + }, + expectFn: func(g *GomegaWithT, s string) { + g.Expect(s).Should(Equal("helper2")) + }, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + +func TestHelperImagePullPolicy(t *testing.T) { + g := NewGomegaWithT(t) + + type testcase struct { + name string + update func(*TidbCluster) + expectFn func(*GomegaWithT, corev1.PullPolicy) + } + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) + + tc := newTidbCluster() + test.update(tc) + test.expectFn(g, tc.HelperImagePullPolicy()) + } + tests := []testcase{ + { + name: "use .spec.helper.imagePullPolicy 
first", + update: func(tc *TidbCluster) { + tc.Spec.Helper.ImagePullPolicy = func() *corev1.PullPolicy { a := corev1.PullAlways; return &a }() + tc.Spec.TiDB.SlowLogTailer.ImagePullPolicy = func() *corev1.PullPolicy { a := corev1.PullIfNotPresent; return &a }() + tc.Spec.ImagePullPolicy = corev1.PullNever + }, + expectFn: func(g *GomegaWithT, p corev1.PullPolicy) { + g.Expect(p).Should(Equal(corev1.PullAlways)) + }, + }, + { + name: "pick .spec.tidb.slowLogTailer.imagePullPolicy when .spec.helper.imagePullPolicy is nil", + update: func(tc *TidbCluster) { + tc.Spec.Helper.ImagePullPolicy = nil + tc.Spec.TiDB.SlowLogTailer.ImagePullPolicy = func() *corev1.PullPolicy { a := corev1.PullIfNotPresent; return &a }() + tc.Spec.ImagePullPolicy = corev1.PullNever + }, + expectFn: func(g *GomegaWithT, p corev1.PullPolicy) { + g.Expect(p).Should(Equal(corev1.PullIfNotPresent)) + }, + }, + { + name: "pick cluster one if both .spec.tidb.slowLogTailer.imagePullPolicy and .spec.helper.imagePullPolicy are nil", + update: func(tc *TidbCluster) { + tc.Spec.Helper.ImagePullPolicy = nil + tc.Spec.TiDB.SlowLogTailer.ImagePullPolicy = nil + tc.Spec.ImagePullPolicy = corev1.PullNever + }, + expectFn: func(g *GomegaWithT, p corev1.PullPolicy) { + g.Expect(p).Should(Equal(corev1.PullNever)) + }, + }, + } + + for i := range tests { + testFn(&tests[i], t) + } +} + func newTidbCluster() *TidbCluster { return &TidbCluster{ TypeMeta: metav1.TypeMeta{ diff --git a/pkg/apis/pingcap/v1alpha1/types.go b/pkg/apis/pingcap/v1alpha1/types.go index e9400df139..a298171b96 100644 --- a/pkg/apis/pingcap/v1alpha1/types.go +++ b/pkg/apis/pingcap/v1alpha1/types.go @@ -14,6 +14,7 @@ package v1alpha1 import ( + "github.com/pingcap/tidb-operator/pkg/util/json" apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -90,19 +91,64 @@ type TidbClusterList struct { // +k8s:openapi-gen=true // TidbClusterSpec describes the attributes that a user creates on a tidb cluster 
type TidbClusterSpec struct { - SchedulerName string `json:"schedulerName,omitempty"` - PD PDSpec `json:"pd,omitempty"` - TiDB TiDBSpec `json:"tidb,omitempty"` - TiKV TiKVSpec `json:"tikv,omitempty"` - // +k8s:openapi-gen=false - TiKVPromGateway TiKVPromGatewaySpec `json:"tikvPromGateway,omitempty"` + + // PD cluster spec + PD PDSpec `json:"pd,omitempty"` + + // TiDB cluster spec + TiDB TiDBSpec `json:"tidb,omitempty"` + + // TiKV cluster spec + TiKV TiKVSpec `json:"tikv,omitempty"` + + // Pump cluster spec + Pump *PumpSpec `json:"pump,omitempty"` + + // Helper spec + Helper HelperSpec `json:"helper,omitempty"` + // Services list non-headless services type used in TidbCluster - Services []Service `json:"services,omitempty"` + // Deprecated + Services []Service `json:"services,omitempty"` + + // TiDB cluster version + Version string `json:"version,omitempty"` + + // SchedulerName of TiDB cluster Pods + SchedulerName string `json:"schedulerName,omitempty"` + + // Persistent volume reclaim policy applied to the PVs that consumed by TiDB cluster PVReclaimPolicy corev1.PersistentVolumeReclaimPolicy `json:"pvReclaimPolicy,omitempty"` - EnablePVReclaim bool `json:"enablePVReclaim,omitempty"` - Timezone string `json:"timezone,omitempty"` + + // Whether enable PVC reclaim for orphan PVC left by statefulset scale-in + EnablePVReclaim bool `json:"enablePVReclaim,omitempty"` + // Enable TLS connection between TiDB server components EnableTLSCluster bool `json:"enableTLSCluster,omitempty"` + + // Time zone of TiDB cluster Pods + Timezone string `json:"timezone,omitempty"` + + // ImagePullPolicy of TiDB cluster Pods + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + + // Whether Hostnetwork is enabled for TiDB cluster Pods + HostNetwork bool `json:"hostNetwork,omitempty"` + + // Affinity of TiDB cluster Pods + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // PriorityClassName of TiDB cluster Pods + PriorityClassName string 
`json:"priorityClassName,omitempty"` + + // Base node selectors of TiDB cluster Pods, components may add or override selectors upon this respectively + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // Base annotations of TiDB cluster Pods, components may add or override annotations upon this respectively + Annotations map[string]string `json:"annotations,omitempty"` + + // Base tolerations of TiDB cluster Pods, components may add more tolerations upon this respectively + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` } // TidbClusterStatus represents the current status of a tidb cluster. @@ -117,77 +163,180 @@ type TidbClusterStatus struct { // PDSpec contains details of PD members type PDSpec struct { // +k8s:openapi-gen=false - ContainerSpec + ComponentSpec // +k8s:openapi-gen=false - PodAttributesSpec - Replicas int32 `json:"replicas"` - StorageClassName string `json:"storageClassName,omitempty"` + Resources + // +k8s:openapi-gen=false + Replicas int32 `json:"replicas"` + Service *ServiceSpec `json:"service,omitempty"` + StorageClassName string `json:"storageClassName,omitempty"` + + // +k8s:openapi-gen=false + // TODO: add schema + Config map[string]json.JsonObject `json:"config,omitempty"` } // +k8s:openapi-gen=true -// TiDBSpec contains details of TiDB members -type TiDBSpec struct { +// TiKVSpec contains details of TiKV members +type TiKVSpec struct { + // +k8s:openapi-gen=false + ComponentSpec + // +k8s:openapi-gen=false + Resources + // +k8s:openapi-gen=false - ContainerSpec + Replicas int32 `json:"replicas"` + Service *ServiceSpec `json:"service,omitempty"` + Privileged bool `json:"privileged,omitempty"` + StorageClassName string `json:"storageClassName,omitempty"` + MaxFailoverCount int32 `json:"maxFailoverCount,omitempty"` + + // +k8s:openapi-gen=false - PodAttributesSpec - Replicas int32 `json:"replicas"` - StorageClassName string `json:"storageClassName,omitempty"` - BinlogEnabled bool `json:"binlogEnabled,omitempty"` - 
MaxFailoverCount int32 `json:"maxFailoverCount,omitempty"` - SeparateSlowLog bool `json:"separateSlowLog,omitempty"` - SlowLogTailer TiDBSlowLogTailerSpec `json:"slowLogTailer,omitempty"` - EnableTLSClient bool `json:"enableTLSClient,omitempty"` + // TODO: add schema + Config map[string]json.JsonObject `json:"config,omitempty"` } // +k8s:openapi-gen=true -// TiDBSlowLogTailerSpec represents an optional log tailer sidecar with TiDB -type TiDBSlowLogTailerSpec struct { +// TiDBSpec contains details of TiDB members +type TiDBSpec struct { + // +k8s:openapi-gen=false + ComponentSpec + // +k8s:openapi-gen=false + Resources + Replicas int32 `json:"replicas"` + Service *TiDBServiceSpec `json:"service,omitempty"` + BinlogEnabled bool `json:"binlogEnabled,omitempty"` + MaxFailoverCount int32 `json:"maxFailoverCount,omitempty"` + SeparateSlowLog bool `json:"separateSlowLog,omitempty"` + StorageClassName string `json:"storageClassName,omitempty"` + // +k8s:openapi-gen=false + SlowLogTailer TiDBSlowLogTailerSpec `json:"slowLogTailer,omitempty"` + EnableTLSClient bool `json:"enableTLSClient,omitempty"` + + // Plugins is a list of plugins that are loaded by TiDB server, empty means plugin disabled + Plugins []string `json:"plugins,omitempty"` + // +k8s:openapi-gen=false - ContainerSpec + // TODO: add schema + Config map[string]json.JsonObject `json:"config,omitempty"` } // +k8s:openapi-gen=true -// TiKVSpec contains details of TiKV members -type TiKVSpec struct { +// PumpSpec contains details of Pump members +type PumpSpec struct { // +k8s:openapi-gen=false - ContainerSpec + ComponentSpec // +k8s:openapi-gen=false - PodAttributesSpec - Replicas int32 `json:"replicas"` - Privileged bool `json:"privileged,omitempty"` + Resources + StorageClassName string `json:"storageClassName,omitempty"` - MaxFailoverCount int32 `json:"maxFailoverCount,omitempty"` + Replicas int32 `json:"replicas"` + + // +k8s:openapi-gen=false + // TODO: add schema + Config map[string]json.JsonObject 
`json:"config,omitempty"` } -// +k8s:openapi-gen=false -// TiKVPromGatewaySpec runs as a sidecar with TiKVSpec -type TiKVPromGatewaySpec struct { - ContainerSpec +// +k8s:openapi-gen=true +// HelperSpec contains details of helper component +type HelperSpec struct { + // Image used to tail slow log and set kernel parameters if necessary, must have `tail` and `sysctl` installed + Image string `json:"image,omitempty"` + + // ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present + ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` } -// +k8s:openapi-gen=false -// ContainerSpec is the container spec of a pod -type ContainerSpec struct { - Image string `json:"image"` - ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` - Requests *ResourceRequirement `json:"requests,omitempty"` - Limits *ResourceRequirement `json:"limits,omitempty"` +// +k8s:openapi-gen=true +// TiDBSlowLogTailerSpec represents an optional log tailer sidecar with TiDB +type TiDBSlowLogTailerSpec struct { + // +k8s:openapi-gen=false + Resources + + // Image used for slowlog tailer + // Deprecated, use TidbCluster.HelperImage instead + Image string `json:"image,omitempty"` + + // ImagePullPolicy of the component. 
Override the cluster-level imagePullPolicy if present + // Deprecated, use TidbCluster.HelperImagePullPolicy instead + ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` +} + +// +k8s:openapi-gen=true +type Resources struct { + // Resource requests of the component + Requests *ResourceRequirement `json:"requests,omitempty"` + + // Resource limits of the component + Limits *ResourceRequirement `json:"limits,omitempty"` } -// +k8s:openapi-gen=false -// PodAttributesControlSpec is a spec of some general attributes of TiKV, TiDB and PD Pods -type PodAttributesSpec struct { - Affinity *corev1.Affinity `json:"affinity,omitempty"` - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - Tolerations []corev1.Toleration `json:"tolerations,omitempty"` - Annotations map[string]string `json:"annotations,omitempty"` - HostNetwork bool `json:"hostNetwork,omitempty"` +// +k8s:openapi-gen=true +// ComponentSpec is the base spec of each component, the fields should always be accessed by the BasicSpec() method to respect the cluster-level properties +type ComponentSpec struct { + // Image of the component, override baseImage and version if present + // Deprecated + Image string `json:"image,omitempty"` + + // Base image of the component, e.g. pingcap/tidb, image tag is not allowed during validation + BaseImage string `json:"baseImage,omitempty"` + + // Version of the component. Override the cluster-level version if non-empty + Version string `json:"version,omitempty"` + + // ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present + ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + + // Whether Hostnetwork of the component is enabled. Override the cluster-level setting if present + HostNetwork *bool `json:"hostNetwork,omitempty"` + + // Affinity of the component. Override the cluster-level one if present + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // PriorityClassName of the component. 
Override the cluster-level one if present + PriorityClassName string `json:"priorityClassName,omitempty"` + + // SchedulerName of the component. Override the cluster-level one if present + SchedulerName string `json:"schedulerName,omitempty"` + + // NodeSelector of the component. Merged into the cluster-level nodeSelector if non-empty + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // Annotations of the component. Merged into the cluster-level annotations if non-empty + Annotations map[string]string `json:"annotations,omitempty"` + + // Tolerations of the component. Override the cluster-level tolerations if non-empty + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // PodSecurityContext of the component + // TODO: make this configurable at cluster level PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"` - PriorityClassName string `json:"priorityClassName,omitempty"` } // +k8s:openapi-gen=true +type ServiceSpec struct { + // Type of the real kubernetes service, e.g. 
ClusterIP + Type corev1.ServiceType `json:"type,omitempty"` + + // Additional annotations of the kubernetes service object + Annotations map[string]string `json:"annotations,omitempty"` + + // LoadBalancerIP is the loadBalancerIP of service + LoadBalancerIP string `json:"loadBalancerIP,omitempty"` +} + +// +k8s:openapi-gen=true +type TiDBServiceSpec struct { + ServiceSpec + + // ExternalTrafficPolicy of the service + ExternalTrafficPolicy corev1.ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty"` + + // Whether expose the status port + ExposeStatus bool `json:"exposeStatus,omitempty"` +} + +// +k8s:openapi-gen=true +// Deprecated // Service represent service type used in TidbCluster type Service struct { Name string `json:"name,omitempty"` diff --git a/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go index ed23ee7608..b4832db2cb 100644 --- a/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go @@ -18,8 +18,9 @@ package v1alpha1 import ( - v1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" + json "github.com/pingcap/tidb-operator/pkg/util/json" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -257,27 +258,58 @@ func (in *BackupStatus) DeepCopy() *BackupStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ContainerSpec) DeepCopyInto(out *ContainerSpec) { +func (in *ComponentSpec) DeepCopyInto(out *ComponentSpec) { *out = *in - if in.Requests != nil { - in, out := &in.Requests, &out.Requests - *out = new(ResourceRequirement) + if in.ImagePullPolicy != nil { + in, out := &in.ImagePullPolicy, &out.ImagePullPolicy + *out = new(v1.PullPolicy) **out = **in } - if in.Limits != nil { - in, out := &in.Limits, &out.Limits - *out = new(ResourceRequirement) + if in.HostNetwork != nil { + in, out := &in.HostNetwork, &out.HostNetwork + *out = new(bool) **out = **in } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PodSecurityContext != nil { + in, out := &in.PodSecurityContext, &out.PodSecurityContext + *out = new(v1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSpec. -func (in *ContainerSpec) DeepCopy() *ContainerSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentSpec. 
+func (in *ComponentSpec) DeepCopy() *ComponentSpec { if in == nil { return nil } - out := new(ContainerSpec) + out := new(ComponentSpec) in.DeepCopyInto(out) return out } @@ -408,6 +440,27 @@ func (in *GcsStorageProvider) DeepCopy() *GcsStorageProvider { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelperSpec) DeepCopyInto(out *HelperSpec) { + *out = *in + if in.ImagePullPolicy != nil { + in, out := &in.ImagePullPolicy, &out.ImagePullPolicy + *out = new(v1.PullPolicy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelperSpec. +func (in *HelperSpec) DeepCopy() *HelperSpec { + if in == nil { + return nil + } + out := new(HelperSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PDFailureMember) DeepCopyInto(out *PDFailureMember) { *out = *in @@ -445,8 +498,24 @@ func (in *PDMember) DeepCopy() *PDMember { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PDSpec) DeepCopyInto(out *PDSpec) { *out = *in - in.ContainerSpec.DeepCopyInto(&out.ContainerSpec) - in.PodAttributesSpec.DeepCopyInto(&out.PodAttributesSpec) + in.ComponentSpec.DeepCopyInto(&out.ComponentSpec) + in.Resources.DeepCopyInto(&out.Resources) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceSpec) + (*in).DeepCopyInto(*out) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]json.JsonObject, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = val.DeepCopyJsonObject() + } + } + } return } @@ -465,7 +534,7 @@ func (in *PDStatus) DeepCopyInto(out *PDStatus) { *out = *in if in.StatefulSet != nil { in, out := &in.StatefulSet, &out.StatefulSet - *out = new(v1.StatefulSetStatus) + *out = new(appsv1.StatefulSetStatus) (*in).DeepCopyInto(*out) } if in.Members != nil { @@ -497,48 +566,30 @@ func (in *PDStatus) DeepCopy() *PDStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodAttributesSpec) DeepCopyInto(out *PodAttributesSpec) { +func (in *PumpSpec) DeepCopyInto(out *PumpSpec) { *out = *in - if in.Affinity != nil { - in, out := &in.Affinity, &out.Affinity - *out = new(corev1.Affinity) - (*in).DeepCopyInto(*out) - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]corev1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) + in.ComponentSpec.DeepCopyInto(&out.ComponentSpec) + in.Resources.DeepCopyInto(&out.Resources) + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]json.JsonObject, len(*in)) for key, val := range *in { - (*out)[key] = val + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = val.DeepCopyJsonObject() + } } } - if in.PodSecurityContext != nil { - in, out := &in.PodSecurityContext, &out.PodSecurityContext - *out = new(corev1.PodSecurityContext) - (*in).DeepCopyInto(*out) - } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttributesSpec. -func (in *PodAttributesSpec) DeepCopy() *PodAttributesSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PumpSpec. +func (in *PumpSpec) DeepCopy() *PumpSpec { if in == nil { return nil } - out := new(PodAttributesSpec) + out := new(PumpSpec) in.DeepCopyInto(out) return out } @@ -559,6 +610,32 @@ func (in *ResourceRequirement) DeepCopy() *ResourceRequirement { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Resources) DeepCopyInto(out *Resources) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(ResourceRequirement) + **out = **in + } + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = new(ResourceRequirement) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resources. +func (in *Resources) DeepCopy() *Resources { + if in == nil { + return nil + } + out := new(Resources) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Restore) DeepCopyInto(out *Restore) { *out = *in @@ -710,6 +787,29 @@ func (in *Service) DeepCopy() *Service { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. +func (in *ServiceSpec) DeepCopy() *ServiceSpec { + if in == nil { + return nil + } + out := new(ServiceSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageProvider) DeepCopyInto(out *StorageProvider) { *out = *in @@ -770,10 +870,32 @@ func (in *TiDBMember) DeepCopy() *TiDBMember { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TiDBServiceSpec) DeepCopyInto(out *TiDBServiceSpec) { + *out = *in + in.ServiceSpec.DeepCopyInto(&out.ServiceSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBServiceSpec. +func (in *TiDBServiceSpec) DeepCopy() *TiDBServiceSpec { + if in == nil { + return nil + } + out := new(TiDBServiceSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TiDBSlowLogTailerSpec) DeepCopyInto(out *TiDBSlowLogTailerSpec) { *out = *in - in.ContainerSpec.DeepCopyInto(&out.ContainerSpec) + in.Resources.DeepCopyInto(&out.Resources) + if in.ImagePullPolicy != nil { + in, out := &in.ImagePullPolicy, &out.ImagePullPolicy + *out = new(v1.PullPolicy) + **out = **in + } return } @@ -790,9 +912,30 @@ func (in *TiDBSlowLogTailerSpec) DeepCopy() *TiDBSlowLogTailerSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TiDBSpec) DeepCopyInto(out *TiDBSpec) { *out = *in - in.ContainerSpec.DeepCopyInto(&out.ContainerSpec) - in.PodAttributesSpec.DeepCopyInto(&out.PodAttributesSpec) + in.ComponentSpec.DeepCopyInto(&out.ComponentSpec) + in.Resources.DeepCopyInto(&out.Resources) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(TiDBServiceSpec) + (*in).DeepCopyInto(*out) + } in.SlowLogTailer.DeepCopyInto(&out.SlowLogTailer) + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]json.JsonObject, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = val.DeepCopyJsonObject() + } + } + } return } @@ -811,7 +954,7 @@ func (in *TiDBStatus) DeepCopyInto(out *TiDBStatus) { *out = *in if in.StatefulSet != nil { in, out := &in.StatefulSet, &out.StatefulSet - *out = new(v1.StatefulSetStatus) + *out = new(appsv1.StatefulSetStatus) (*in).DeepCopyInto(*out) } if in.Members != nil { @@ -858,28 +1001,27 @@ func (in *TiKVFailureStore) DeepCopy() *TiKVFailureStore { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TiKVPromGatewaySpec) DeepCopyInto(out *TiKVPromGatewaySpec) { - *out = *in - in.ContainerSpec.DeepCopyInto(&out.ContainerSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiKVPromGatewaySpec. -func (in *TiKVPromGatewaySpec) DeepCopy() *TiKVPromGatewaySpec { - if in == nil { - return nil - } - out := new(TiKVPromGatewaySpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TiKVSpec) DeepCopyInto(out *TiKVSpec) { *out = *in - in.ContainerSpec.DeepCopyInto(&out.ContainerSpec) - in.PodAttributesSpec.DeepCopyInto(&out.PodAttributesSpec) + in.ComponentSpec.DeepCopyInto(&out.ComponentSpec) + in.Resources.DeepCopyInto(&out.Resources) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceSpec) + (*in).DeepCopyInto(*out) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]json.JsonObject, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = val.DeepCopyJsonObject() + } + } + } return } @@ -898,7 +1040,7 @@ func (in *TiKVStatus) DeepCopyInto(out *TiKVStatus) { *out = *in if in.StatefulSet != nil { in, out := &in.StatefulSet, &out.StatefulSet - *out = new(v1.StatefulSetStatus) + *out = new(appsv1.StatefulSetStatus) (*in).DeepCopyInto(*out) } if in.Stores != nil { @@ -1020,12 +1162,43 @@ func (in *TidbClusterSpec) DeepCopyInto(out *TidbClusterSpec) { in.PD.DeepCopyInto(&out.PD) in.TiDB.DeepCopyInto(&out.TiDB) in.TiKV.DeepCopyInto(&out.TiKV) - in.TiKVPromGateway.DeepCopyInto(&out.TiKVPromGateway) + if in.Pump != nil { + in, out := &in.Pump, &out.Pump + *out = new(PumpSpec) + (*in).DeepCopyInto(*out) + } + in.Helper.DeepCopyInto(&out.Helper) if in.Services != nil { in, out := &in.Services, &out.Services *out = make([]Service, len(*in)) copy(*out, *in) } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = 
make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index 41a4306cf3..6cecc19c31 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -201,21 +201,6 @@ func TiKVCapacity(limits *v1alpha1.ResourceRequirement) string { return fmt.Sprintf("%dMB", i/humanize.MiByte) } -// Reuse the SlowLogTailer image for TiDB -func GetUtilImage(cluster *v1alpha1.TidbCluster) string { - if img := cluster.Spec.TiDB.SlowLogTailer.Image; img != "" { - return img - } - return defaultTiDBLogTailerImage -} - -func GetSlowLogTailerImage(cluster *v1alpha1.TidbCluster) string { - if img := cluster.Spec.TiDB.SlowLogTailer.Image; img != "" { - return img - } - return defaultTiDBLogTailerImage -} - // PDMemberName returns pd member name func PDMemberName(clusterName string) string { return fmt.Sprintf("%s-pd", clusterName) diff --git a/pkg/controller/controller_utils_test.go b/pkg/controller/controller_utils_test.go index 368d206181..a4b843d141 100644 --- a/pkg/controller/controller_utils_test.go +++ b/pkg/controller/controller_utils_test.go @@ -159,15 +159,6 @@ func TestTiKVCapacity(t *testing.T) { } } -func TestGetSlowLogTailerImage(t *testing.T) { - g := NewGomegaWithT(t) - - tc := &v1alpha1.TidbCluster{} - g.Expect(GetSlowLogTailerImage(tc)).To(Equal(defaultTiDBLogTailerImage)) - tc.Spec.TiDB.SlowLogTailer.Image = "image-1" - g.Expect(GetSlowLogTailerImage(tc)).To(Equal("image-1")) -} - func TestPDMemberName(t *testing.T) { g := NewGomegaWithT(t) g.Expect(PDMemberName("demo")).To(Equal("demo-pd")) diff --git a/pkg/controller/tidbcluster/tidb_cluster_controller_test.go b/pkg/controller/tidbcluster/tidb_cluster_controller_test.go index dd0785c4c0..6ed953a00e 100644 --- a/pkg/controller/tidbcluster/tidb_cluster_controller_test.go +++ b/pkg/controller/tidbcluster/tidb_cluster_controller_test.go @@ -299,17 +299,17 @@ func 
newTidbCluster() *v1alpha1.TidbCluster { }, Spec: v1alpha1.TidbClusterSpec{ PD: v1alpha1.PDSpec{ - ContainerSpec: v1alpha1.ContainerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Image: "pd-test-image", }, }, TiKV: v1alpha1.TiKVSpec{ - ContainerSpec: v1alpha1.ContainerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Image: "tikv-test-image", }, }, TiDB: v1alpha1.TiDBSpec{ - ContainerSpec: v1alpha1.ContainerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Image: "tidb-test-image", }, }, diff --git a/pkg/manager/member/pd_member_manager.go b/pkg/manager/member/pd_member_manager.go index a890031880..106009bdb6 100644 --- a/pkg/manager/member/pd_member_manager.go +++ b/pkg/manager/member/pd_member_manager.go @@ -554,7 +554,7 @@ func getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster) (*apps.StatefulSet, err } pdLabel := label.New().Instance(instanceName).PD() setName := controller.PDMemberName(tcName) - podAnnotations := CombineAnnotations(controller.AnnProm(2379), tc.Spec.PD.Annotations) + podAnnotations := CombineAnnotations(controller.AnnProm(2379), tc.BasePDSpec().Annotations()) storageClassName := tc.Spec.PD.StorageClassName if storageClassName == "" { storageClassName = controller.DefaultStorageClassName @@ -567,7 +567,7 @@ func getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster) (*apps.StatefulSet, err } dnsPolicy := corev1.DNSClusterFirst // same as k8s defaults - if tc.Spec.PD.HostNetwork { + if tc.BasePDSpec().HostNetwork() { dnsPolicy = corev1.DNSClusterFirstWithHostNet } @@ -587,17 +587,17 @@ func getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster) (*apps.StatefulSet, err Annotations: podAnnotations, }, Spec: corev1.PodSpec{ - SchedulerName: tc.Spec.SchedulerName, - Affinity: tc.Spec.PD.Affinity, - NodeSelector: tc.Spec.PD.NodeSelector, - HostNetwork: tc.Spec.PD.HostNetwork, + SchedulerName: tc.BasePDSpec().SchedulerName(), + Affinity: tc.BasePDSpec().Affinity(), + NodeSelector: tc.BasePDSpec().NodeSelector(), + HostNetwork: tc.BasePDSpec().HostNetwork(), 
DNSPolicy: dnsPolicy, Containers: []corev1.Container{ { Name: v1alpha1.PDMemberType.String(), - Image: tc.Spec.PD.Image, + Image: tc.BasePDSpec().Image(), Command: []string{"/bin/sh", "/usr/local/bin/pd_start_script.sh"}, - ImagePullPolicy: tc.Spec.PD.ImagePullPolicy, + ImagePullPolicy: tc.BasePDSpec().ImagePullPolicy(), Ports: []corev1.ContainerPort{ { Name: "server", @@ -611,7 +611,7 @@ func getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster) (*apps.StatefulSet, err }, }, VolumeMounts: volMounts, - Resources: util.ResourceRequirement(tc.Spec.PD.ContainerSpec), + Resources: util.ResourceRequirement(tc.Spec.PD.Resources), Env: []corev1.EnvVar{ { Name: "NAMESPACE", @@ -649,10 +649,10 @@ func getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster) (*apps.StatefulSet, err }, }, RestartPolicy: corev1.RestartPolicyAlways, - Tolerations: tc.Spec.PD.Tolerations, + Tolerations: tc.BasePDSpec().Tolerations(), Volumes: vols, - SecurityContext: tc.Spec.PD.PodSecurityContext, - PriorityClassName: tc.Spec.PD.PriorityClassName, + SecurityContext: tc.BasePDSpec().PodSecurityContext(), + PriorityClassName: tc.BasePDSpec().PriorityClassName(), }, }, VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ diff --git a/pkg/manager/member/pd_member_manager_test.go b/pkg/manager/member/pd_member_manager_test.go index cf999a6510..8028bf9131 100644 --- a/pkg/manager/member/pd_member_manager_test.go +++ b/pkg/manager/member/pd_member_manager_test.go @@ -822,8 +822,10 @@ func newTidbClusterForPD() *v1alpha1.TidbCluster { }, Spec: v1alpha1.TidbClusterSpec{ PD: v1alpha1.PDSpec{ - ContainerSpec: v1alpha1.ContainerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Image: "pd-test-image", + }, + Resources: v1alpha1.Resources{ Requests: &v1alpha1.ResourceRequirement{ CPU: "1", Memory: "2Gi", @@ -834,8 +836,10 @@ func newTidbClusterForPD() *v1alpha1.TidbCluster { StorageClassName: "my-storage-class", }, TiKV: v1alpha1.TiKVSpec{ - ContainerSpec: v1alpha1.ContainerSpec{ + ComponentSpec: 
v1alpha1.ComponentSpec{ Image: "tikv-test-image", + }, + Resources: v1alpha1.Resources{ Requests: &v1alpha1.ResourceRequirement{ CPU: "1", Memory: "2Gi", @@ -937,6 +941,7 @@ func testHostNetwork(t *testing.T, hostNetwork bool, dnsPolicy v1.DNSPolicy) fun } func TestGetNewPDSetForTidbCluster(t *testing.T) { + enable := true tests := []struct { name string tc v1alpha1.TidbCluster @@ -962,8 +967,8 @@ func TestGetNewPDSetForTidbCluster(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ PD: v1alpha1.PDSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ - HostNetwork: true, + ComponentSpec: v1alpha1.ComponentSpec{ + HostNetwork: &enable, }, }, }, @@ -979,8 +984,8 @@ func TestGetNewPDSetForTidbCluster(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ TiDB: v1alpha1.TiDBSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ - HostNetwork: true, + ComponentSpec: v1alpha1.ComponentSpec{ + HostNetwork: &enable, }, }, }, @@ -996,8 +1001,8 @@ func TestGetNewPDSetForTidbCluster(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ TiKV: v1alpha1.TiKVSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ - HostNetwork: true, + ComponentSpec: v1alpha1.ComponentSpec{ + HostNetwork: &enable, }, }, }, diff --git a/pkg/manager/member/pd_upgrader_test.go b/pkg/manager/member/pd_upgrader_test.go index 2b86651d92..fe3d04f60d 100644 --- a/pkg/manager/member/pd_upgrader_test.go +++ b/pkg/manager/member/pd_upgrader_test.go @@ -311,7 +311,7 @@ func newTidbClusterForPDUpgrader() *v1alpha1.TidbCluster { }, Spec: v1alpha1.TidbClusterSpec{ PD: v1alpha1.PDSpec{ - ContainerSpec: v1alpha1.ContainerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Image: "pd-test-image", }, Replicas: 3, diff --git a/pkg/manager/member/tidb_failover_test.go b/pkg/manager/member/tidb_failover_test.go index 5dfb55e37b..dd70c38090 100644 --- a/pkg/manager/member/tidb_failover_test.go +++ b/pkg/manager/member/tidb_failover_test.go @@ -397,7 +397,7 @@ func newTidbClusterForTiDBFailover() *v1alpha1.TidbCluster { }, Spec: 
v1alpha1.TidbClusterSpec{ TiDB: v1alpha1.TiDBSpec{ - ContainerSpec: v1alpha1.ContainerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Image: "tidb-test-image", }, Replicas: 2, diff --git a/pkg/manager/member/tidb_member_manager.go b/pkg/manager/member/tidb_member_manager.go index dd37125770..b7299f53fa 100644 --- a/pkg/manager/member/tidb_member_manager.go +++ b/pkg/manager/member/tidb_member_manager.go @@ -390,17 +390,17 @@ func getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbCluster) *apps.StatefulSet { sysctls := "sysctl -w" var initContainers []corev1.Container - if tc.Spec.TiDB.Annotations != nil { - init, ok := tc.Spec.TiDB.Annotations[label.AnnSysctlInit] + if tc.BaseTiDBSpec().Annotations() != nil { + init, ok := tc.BaseTiDBSpec().Annotations()[label.AnnSysctlInit] if ok && (init == label.AnnSysctlInitVal) { - if tc.Spec.TiDB.PodSecurityContext != nil && len(tc.Spec.TiDB.PodSecurityContext.Sysctls) > 0 { - for _, sysctl := range tc.Spec.TiDB.PodSecurityContext.Sysctls { + if tc.BaseTiDBSpec().PodSecurityContext() != nil && len(tc.BaseTiDBSpec().PodSecurityContext().Sysctls) > 0 { + for _, sysctl := range tc.BaseTiDBSpec().PodSecurityContext().Sysctls { sysctls = sysctls + fmt.Sprintf(" %s=%s", sysctl.Name, sysctl.Value) } privileged := true initContainers = append(initContainers, corev1.Container{ Name: "init", - Image: controller.GetUtilImage(tc), + Image: tc.HelperImage(), Command: []string{ "sh", "-c", @@ -416,7 +416,7 @@ func getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbCluster) *apps.StatefulSet { // Init container is only used for the case where allowed-unsafe-sysctls // cannot be enabled for kubelet, so clean the sysctl in statefulset // SecurityContext if init container is enabled - podSecurityContext := tc.Spec.TiDB.PodSecurityContext.DeepCopy() + podSecurityContext := tc.BaseTiDBSpec().PodSecurityContext().DeepCopy() if len(initContainers) > 0 { podSecurityContext.Sysctls = []corev1.Sysctl{} } @@ -433,9 +433,9 @@ func getNewTiDBSetForTidbCluster(tc 
*v1alpha1.TidbCluster) *apps.StatefulSet { volMounts = append(volMounts, corev1.VolumeMount{Name: slowQueryLogVolumeName, MountPath: slowQueryLogDir}) containers = append(containers, corev1.Container{ Name: v1alpha1.SlowLogTailerMemberType.String(), - Image: controller.GetSlowLogTailerImage(tc), - ImagePullPolicy: tc.Spec.TiDB.SlowLogTailer.ImagePullPolicy, - Resources: util.ResourceRequirement(tc.Spec.TiDB.SlowLogTailer.ContainerSpec), + Image: tc.HelperImage(), + ImagePullPolicy: tc.HelperImagePullPolicy(), + Resources: util.ResourceRequirement(tc.Spec.TiDB.SlowLogTailer.Resources), VolumeMounts: []corev1.VolumeMount{ {Name: slowQueryLogVolumeName, MountPath: slowQueryLogDir}, }, @@ -476,9 +476,9 @@ func getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbCluster) *apps.StatefulSet { } containers = append(containers, corev1.Container{ Name: v1alpha1.TiDBMemberType.String(), - Image: tc.Spec.TiDB.Image, + Image: tc.BaseTiDBSpec().Image(), Command: []string{"/bin/sh", "/usr/local/bin/tidb_start_script.sh"}, - ImagePullPolicy: tc.Spec.TiDB.ImagePullPolicy, + ImagePullPolicy: tc.BaseTiDBSpec().ImagePullPolicy(), Ports: []corev1.ContainerPort{ { Name: "server", @@ -492,7 +492,7 @@ func getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbCluster) *apps.StatefulSet { }, }, VolumeMounts: volMounts, - Resources: util.ResourceRequirement(tc.Spec.TiDB.ContainerSpec), + Resources: util.ResourceRequirement(tc.Spec.TiDB.Resources), Env: envs, ReadinessProbe: &corev1.Probe{ Handler: corev1.Handler{ @@ -507,12 +507,12 @@ func getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbCluster) *apps.StatefulSet { }) dnsPolicy := corev1.DNSClusterFirst // same as k8s defaults - if tc.Spec.TiDB.HostNetwork { + if tc.BaseTiDBSpec().HostNetwork() { dnsPolicy = corev1.DNSClusterFirstWithHostNet } tidbLabel := label.New().Instance(instanceName).TiDB() - podAnnotations := CombineAnnotations(controller.AnnProm(10080), tc.Spec.TiDB.Annotations) + podAnnotations := CombineAnnotations(controller.AnnProm(10080), 
tc.BaseTiDBSpec().Annotations()) tidbSet := &apps.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: controller.TiDBMemberName(tcName), @@ -529,17 +529,17 @@ func getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbCluster) *apps.StatefulSet { Annotations: podAnnotations, }, Spec: corev1.PodSpec{ - SchedulerName: tc.Spec.SchedulerName, - Affinity: tc.Spec.TiDB.Affinity, - NodeSelector: tc.Spec.TiDB.NodeSelector, - HostNetwork: tc.Spec.TiDB.HostNetwork, + SchedulerName: tc.BaseTiDBSpec().SchedulerName(), + Affinity: tc.BaseTiDBSpec().Affinity(), + NodeSelector: tc.BaseTiDBSpec().NodeSelector(), + HostNetwork: tc.BaseTiDBSpec().HostNetwork(), DNSPolicy: dnsPolicy, Containers: containers, RestartPolicy: corev1.RestartPolicyAlways, - Tolerations: tc.Spec.TiDB.Tolerations, + Tolerations: tc.BaseTiDBSpec().Tolerations(), Volumes: vols, SecurityContext: podSecurityContext, - PriorityClassName: tc.Spec.TiDB.PriorityClassName, + PriorityClassName: tc.BaseTiDBSpec().PriorityClassName(), InitContainers: initContainers, }, }, diff --git a/pkg/manager/member/tidb_member_manager_test.go b/pkg/manager/member/tidb_member_manager_test.go index f43ba0d893..303728ccf1 100644 --- a/pkg/manager/member/tidb_member_manager_test.go +++ b/pkg/manager/member/tidb_member_manager_test.go @@ -571,8 +571,10 @@ func newTidbClusterForTiDB() *v1alpha1.TidbCluster { }, Spec: v1alpha1.TidbClusterSpec{ TiDB: v1alpha1.TiDBSpec{ - ContainerSpec: v1alpha1.ContainerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Image: v1alpha1.TiDBMemberType.String(), + }, + Resources: v1alpha1.Resources{ Requests: &v1alpha1.ResourceRequirement{ CPU: "1", Memory: "2Gi", @@ -656,6 +658,7 @@ func TestGetNewTiDBHeadlessServiceForTidbCluster(t *testing.T) { } func TestGetNewTiDBSetForTidbCluster(t *testing.T) { + enable := true tests := []struct { name string tc v1alpha1.TidbCluster @@ -681,8 +684,8 @@ func TestGetNewTiDBSetForTidbCluster(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ TiDB: v1alpha1.TiDBSpec{ - 
PodAttributesSpec: v1alpha1.PodAttributesSpec{ - HostNetwork: true, + ComponentSpec: v1alpha1.ComponentSpec{ + HostNetwork: &enable, }, }, }, @@ -698,8 +701,8 @@ func TestGetNewTiDBSetForTidbCluster(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ PD: v1alpha1.PDSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ - HostNetwork: true, + ComponentSpec: v1alpha1.ComponentSpec{ + HostNetwork: &enable, }, }, }, @@ -715,8 +718,8 @@ func TestGetNewTiDBSetForTidbCluster(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ TiKV: v1alpha1.TiKVSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ - HostNetwork: true, + ComponentSpec: v1alpha1.ComponentSpec{ + HostNetwork: &enable, }, }, }, @@ -752,7 +755,7 @@ func TestTiDBInitContainers(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ TiDB: v1alpha1.TiDBSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ PodSecurityContext: &corev1.PodSecurityContext{ RunAsNonRoot: &asRoot, Sysctls: []corev1.Sysctl{ @@ -810,7 +813,7 @@ func TestTiDBInitContainers(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ TiDB: v1alpha1.TiDBSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Annotations: map[string]string{ "tidb.pingcap.com/sysctl-init": "true", }, @@ -867,7 +870,7 @@ func TestTiDBInitContainers(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ TiDB: v1alpha1.TiDBSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Annotations: map[string]string{ "tidb.pingcap.com/sysctl-init": "true", }, @@ -892,7 +895,7 @@ func TestTiDBInitContainers(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ TiDB: v1alpha1.TiDBSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Annotations: map[string]string{ "tidb.pingcap.com/sysctl-init": "true", }, @@ -913,7 +916,7 @@ func TestTiDBInitContainers(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ TiDB: v1alpha1.TiDBSpec{ - 
PodAttributesSpec: v1alpha1.PodAttributesSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Annotations: map[string]string{ "tidb.pingcap.com/sysctl-init": "false", }, diff --git a/pkg/manager/member/tidb_upgrader_test.go b/pkg/manager/member/tidb_upgrader_test.go index 26b18ab460..ba23691a1e 100644 --- a/pkg/manager/member/tidb_upgrader_test.go +++ b/pkg/manager/member/tidb_upgrader_test.go @@ -286,21 +286,21 @@ func newTidbClusterForTiDBUpgrader() *v1alpha1.TidbCluster { }, Spec: v1alpha1.TidbClusterSpec{ PD: v1alpha1.PDSpec{ - ContainerSpec: v1alpha1.ContainerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Image: "pd-test-image", }, Replicas: 3, StorageClassName: "my-storage-class", }, TiKV: v1alpha1.TiKVSpec{ - ContainerSpec: v1alpha1.ContainerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Image: "tikv-test-image", }, Replicas: 3, StorageClassName: "my-storage-class", }, TiDB: v1alpha1.TiDBSpec{ - ContainerSpec: v1alpha1.ContainerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Image: "tidb-test-image", }, Replicas: 2, diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go index 6c18aa46fb..5e36a2c6f5 100644 --- a/pkg/manager/member/tikv_member_manager.go +++ b/pkg/manager/member/tikv_member_manager.go @@ -354,17 +354,17 @@ func getNewTiKVSetForTidbCluster(tc *v1alpha1.TidbCluster) (*apps.StatefulSet, e sysctls := "sysctl -w" var initContainers []corev1.Container - if tc.Spec.TiKV.Annotations != nil { - init, ok := tc.Spec.TiKV.Annotations[label.AnnSysctlInit] + if tc.BaseTiKVSpec().Annotations() != nil { + init, ok := tc.BaseTiKVSpec().Annotations()[label.AnnSysctlInit] if ok && (init == label.AnnSysctlInitVal) { - if tc.Spec.TiKV.PodSecurityContext != nil && len(tc.Spec.TiKV.PodSecurityContext.Sysctls) > 0 { - for _, sysctl := range tc.Spec.TiKV.PodSecurityContext.Sysctls { + if tc.BaseTiKVSpec().PodSecurityContext() != nil && len(tc.BaseTiKVSpec().PodSecurityContext().Sysctls) > 0 { + for _, sysctl := range 
tc.BaseTiKVSpec().PodSecurityContext().Sysctls { sysctls = sysctls + fmt.Sprintf(" %s=%s", sysctl.Name, sysctl.Value) } privileged := true initContainers = append(initContainers, corev1.Container{ Name: "init", - Image: controller.GetUtilImage(tc), + Image: tc.HelperImage(), Command: []string{ "sh", "-c", @@ -380,7 +380,7 @@ func getNewTiKVSetForTidbCluster(tc *v1alpha1.TidbCluster) (*apps.StatefulSet, e // Init container is only used for the case where allowed-unsafe-sysctls // cannot be enabled for kubelet, so clean the sysctl in statefulset // SecurityContext if init container is enabled - podSecurityContext := tc.Spec.TiKV.PodSecurityContext.DeepCopy() + podSecurityContext := tc.BaseTiKVSpec().PodSecurityContext().DeepCopy() if len(initContainers) > 0 { podSecurityContext.Sysctls = []corev1.Sysctl{} } @@ -398,7 +398,7 @@ func getNewTiKVSetForTidbCluster(tc *v1alpha1.TidbCluster) (*apps.StatefulSet, e tikvLabel := labelTiKV(tc) setName := controller.TiKVMemberName(tcName) - podAnnotations := CombineAnnotations(controller.AnnProm(20180), tc.Spec.TiKV.Annotations) + podAnnotations := CombineAnnotations(controller.AnnProm(20180), tc.BaseTiKVSpec().Annotations()) capacity := controller.TiKVCapacity(tc.Spec.TiKV.Limits) headlessSvcName := controller.TiKVPeerMemberName(tcName) storageClassName := tc.Spec.TiKV.StorageClassName @@ -407,7 +407,7 @@ func getNewTiKVSetForTidbCluster(tc *v1alpha1.TidbCluster) (*apps.StatefulSet, e } dnsPolicy := corev1.DNSClusterFirst // same as k8s defaults - if tc.Spec.TiKV.HostNetwork { + if tc.BaseTiKVSpec().HostNetwork() { dnsPolicy = corev1.DNSClusterFirstWithHostNet } @@ -427,17 +427,17 @@ func getNewTiKVSetForTidbCluster(tc *v1alpha1.TidbCluster) (*apps.StatefulSet, e Annotations: podAnnotations, }, Spec: corev1.PodSpec{ - SchedulerName: tc.Spec.SchedulerName, - Affinity: tc.Spec.TiKV.Affinity, - NodeSelector: tc.Spec.TiKV.NodeSelector, - HostNetwork: tc.Spec.TiKV.HostNetwork, + SchedulerName: tc.BaseTiKVSpec().SchedulerName(), + 
Affinity: tc.BaseTiKVSpec().Affinity(), + NodeSelector: tc.BaseTiKVSpec().NodeSelector(), + HostNetwork: tc.BaseTiKVSpec().HostNetwork(), DNSPolicy: dnsPolicy, Containers: []corev1.Container{ { Name: v1alpha1.TiKVMemberType.String(), - Image: tc.Spec.TiKV.Image, + Image: tc.BaseTiKVSpec().Image(), Command: []string{"/bin/sh", "/usr/local/bin/tikv_start_script.sh"}, - ImagePullPolicy: tc.Spec.TiKV.ImagePullPolicy, + ImagePullPolicy: tc.BaseTiKVSpec().ImagePullPolicy(), SecurityContext: &corev1.SecurityContext{ Privileged: &tc.Spec.TiKV.Privileged, }, @@ -449,7 +449,7 @@ func getNewTiKVSetForTidbCluster(tc *v1alpha1.TidbCluster) (*apps.StatefulSet, e }, }, VolumeMounts: volMounts, - Resources: util.ResourceRequirement(tc.Spec.TiKV.ContainerSpec), + Resources: util.ResourceRequirement(tc.Spec.TiKV.Resources), Env: []corev1.EnvVar{ { Name: "NAMESPACE", @@ -487,10 +487,10 @@ func getNewTiKVSetForTidbCluster(tc *v1alpha1.TidbCluster) (*apps.StatefulSet, e }, }, RestartPolicy: corev1.RestartPolicyAlways, - Tolerations: tc.Spec.TiKV.Tolerations, + Tolerations: tc.BaseTiKVSpec().Tolerations(), Volumes: vols, SecurityContext: podSecurityContext, - PriorityClassName: tc.Spec.TiKV.PriorityClassName, + PriorityClassName: tc.BaseTiKVSpec().PriorityClassName(), InitContainers: initContainers, }, }, diff --git a/pkg/manager/member/tikv_member_manager_test.go b/pkg/manager/member/tikv_member_manager_test.go index 1ab0d47be4..7cbd88dfdb 100644 --- a/pkg/manager/member/tikv_member_manager_test.go +++ b/pkg/manager/member/tikv_member_manager_test.go @@ -1475,6 +1475,7 @@ func TestGetNewServiceForTidbCluster(t *testing.T) { } func TestGetNewTiKVSetForTidbCluster(t *testing.T) { + enable := true tests := []struct { name string tc v1alpha1.TidbCluster @@ -1500,8 +1501,8 @@ func TestGetNewTiKVSetForTidbCluster(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ TiKV: v1alpha1.TiKVSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ - HostNetwork: true, + ComponentSpec: 
v1alpha1.ComponentSpec{ + HostNetwork: &enable, }, }, }, @@ -1517,8 +1518,8 @@ func TestGetNewTiKVSetForTidbCluster(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ PD: v1alpha1.PDSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ - HostNetwork: true, + ComponentSpec: v1alpha1.ComponentSpec{ + HostNetwork: &enable, }, }, }, @@ -1534,8 +1535,8 @@ func TestGetNewTiKVSetForTidbCluster(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ TiDB: v1alpha1.TiDBSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ - HostNetwork: true, + ComponentSpec: v1alpha1.ComponentSpec{ + HostNetwork: &enable, }, }, }, @@ -1575,7 +1576,7 @@ func TestTiKVInitContainers(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ TiKV: v1alpha1.TiKVSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ PodSecurityContext: &corev1.PodSecurityContext{ RunAsNonRoot: &asRoot, Sysctls: []corev1.Sysctl{ @@ -1633,7 +1634,7 @@ func TestTiKVInitContainers(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ TiKV: v1alpha1.TiKVSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Annotations: map[string]string{ "tidb.pingcap.com/sysctl-init": "true", }, @@ -1690,7 +1691,7 @@ func TestTiKVInitContainers(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ TiKV: v1alpha1.TiKVSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Annotations: map[string]string{ "tidb.pingcap.com/sysctl-init": "true", }, @@ -1715,7 +1716,7 @@ func TestTiKVInitContainers(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ TiKV: v1alpha1.TiKVSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Annotations: map[string]string{ "tidb.pingcap.com/sysctl-init": "true", }, @@ -1736,7 +1737,7 @@ func TestTiKVInitContainers(t *testing.T) { }, Spec: v1alpha1.TidbClusterSpec{ TiKV: v1alpha1.TiKVSpec{ - PodAttributesSpec: v1alpha1.PodAttributesSpec{ + ComponentSpec: 
v1alpha1.ComponentSpec{ Annotations: map[string]string{ "tidb.pingcap.com/sysctl-init": "false", }, diff --git a/pkg/manager/member/tikv_upgrader_test.go b/pkg/manager/member/tikv_upgrader_test.go index ef2243a3d8..cfe72cd9ad 100644 --- a/pkg/manager/member/tikv_upgrader_test.go +++ b/pkg/manager/member/tikv_upgrader_test.go @@ -596,14 +596,14 @@ func newTidbClusterForTiKVUpgrader() *v1alpha1.TidbCluster { }, Spec: v1alpha1.TidbClusterSpec{ PD: v1alpha1.PDSpec{ - ContainerSpec: v1alpha1.ContainerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Image: "pd-test-image", }, Replicas: 3, StorageClassName: "my-storage-class", }, TiKV: v1alpha1.TiKVSpec{ - ContainerSpec: v1alpha1.ContainerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Image: "tikv-test-image", }, Replicas: 3, diff --git a/pkg/tkctl/cmd/info/info.go b/pkg/tkctl/cmd/info/info.go index 8e04cfb627..bde880856f 100644 --- a/pkg/tkctl/cmd/info/info.go +++ b/pkg/tkctl/cmd/info/info.go @@ -170,7 +170,7 @@ func renderTidbCluster(tc *v1alpha1.TidbCluster, svc *v1.Service, podList *v1.Po w.Write(readable.LEVEL_0, "%s\t", tc.Spec.PD.Requests.CPU) w.Write(readable.LEVEL_0, "%s\t", tc.Spec.PD.Requests.Memory) w.Write(readable.LEVEL_0, "%s\t", tc.Spec.PD.Requests.Storage) - w.Write(readable.LEVEL_0, "%s\t\n", tc.Spec.PD.Image) + w.Write(readable.LEVEL_0, "%s\t\n", tc.BasePDSpec().Image()) } w.Write(readable.LEVEL_1, "TiKV:\t") { @@ -180,7 +180,7 @@ func renderTidbCluster(tc *v1alpha1.TidbCluster, svc *v1.Service, podList *v1.Po w.Write(readable.LEVEL_0, "%s\t", tc.Spec.TiKV.Requests.CPU) w.Write(readable.LEVEL_0, "%s\t", tc.Spec.TiKV.Requests.Memory) w.Write(readable.LEVEL_0, "%s\t", tc.Spec.TiKV.Requests.Storage) - w.Write(readable.LEVEL_0, "%s\t\n", tc.Spec.TiKV.Image) + w.Write(readable.LEVEL_0, "%s\t\n", tc.BaseTiKVSpec().Image()) } w.Write(readable.LEVEL_1, "TiDB\t") { @@ -190,7 +190,7 @@ func renderTidbCluster(tc *v1alpha1.TidbCluster, svc *v1.Service, podList *v1.Po w.Write(readable.LEVEL_0, "%s\t", 
tc.Spec.TiDB.Requests.CPU) w.Write(readable.LEVEL_0, "%s\t", tc.Spec.TiDB.Requests.Memory) w.Write(readable.LEVEL_0, "%s\t", tc.Spec.TiDB.Requests.Storage) - w.Write(readable.LEVEL_0, "%s\t\n", tc.Spec.TiDB.Image) + w.Write(readable.LEVEL_0, "%s\t\n", tc.BaseTiDBSpec().Image()) } } w.WriteLine(readable.LEVEL_0, "Endpoints(%s):", svc.Spec.Type) diff --git a/pkg/tkctl/cmd/upinfo/upinfo.go b/pkg/tkctl/cmd/upinfo/upinfo.go index 19eeabf6f6..1982cff923 100644 --- a/pkg/tkctl/cmd/upinfo/upinfo.go +++ b/pkg/tkctl/cmd/upinfo/upinfo.go @@ -207,7 +207,7 @@ func renderTCUpgradeInfo(tc *v1alpha1.TidbCluster, set *apps.StatefulSet, podLis if dbPhase == v1alpha1.UpgradePhase { if len(podList.Items) != 0 { pod := podList.Items[0] - w.WriteLine(readable.LEVEL_0, "Image:\t%s ---> %s", pod.Spec.Containers[0].Image, tc.Spec.TiDB.Image) + w.WriteLine(readable.LEVEL_0, "Image:\t%s ---> %s", pod.Spec.Containers[0].Image, tc.BaseTiDBSpec().Image()) } } { diff --git a/pkg/util/json/json.go b/pkg/util/json/json.go new file mode 100644 index 0000000000..ea93a9dbf5 --- /dev/null +++ b/pkg/util/json/json.go @@ -0,0 +1,29 @@ +// Copyright 2019. PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package json + +// JsonObject is a wrapper of go interface{} that makes deepcopy-gen happy +type JsonObject interface { + DeepCopyJsonObject() JsonObject +} + +type jsonObject struct { + o interface{} +} + +func (c *jsonObject) DeepCopyJsonObject() JsonObject { + o := &jsonObject{} + *o = *c + return o +} diff --git a/pkg/util/util.go b/pkg/util/util.go index d112a73bb3..6377eb5e33 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -26,7 +26,7 @@ import ( // ResourceRequirement creates ResourceRequirements for MemberSpec // Optionally pass in a default value -func ResourceRequirement(spec v1alpha1.ContainerSpec, defaultRequests ...corev1.ResourceRequirements) corev1.ResourceRequirements { +func ResourceRequirement(resources v1alpha1.Resources, defaultRequests ...corev1.ResourceRequirements) corev1.ResourceRequirements { rr := corev1.ResourceRequirements{} if len(defaultRequests) > 0 { defaultRequest := defaultRequests[0] @@ -37,39 +37,39 @@ func ResourceRequirement(spec v1alpha1.ContainerSpec, defaultRequests ...corev1. 
rr.Limits[corev1.ResourceCPU] = defaultRequest.Limits[corev1.ResourceCPU] rr.Limits[corev1.ResourceMemory] = defaultRequest.Limits[corev1.ResourceMemory] } - if spec.Requests != nil { + if resources.Requests != nil { if rr.Requests == nil { rr.Requests = make(map[corev1.ResourceName]resource.Quantity) } - if spec.Requests.CPU != "" { - if q, err := resource.ParseQuantity(spec.Requests.CPU); err != nil { - glog.Errorf("failed to parse CPU resource %s to quantity: %v", spec.Requests.CPU, err) + if resources.Requests.CPU != "" { + if q, err := resource.ParseQuantity(resources.Requests.CPU); err != nil { + glog.Errorf("failed to parse CPU resource %s to quantity: %v", resources.Requests.CPU, err) } else { rr.Requests[corev1.ResourceCPU] = q } } - if spec.Requests.Memory != "" { - if q, err := resource.ParseQuantity(spec.Requests.Memory); err != nil { - glog.Errorf("failed to parse memory resource %s to quantity: %v", spec.Requests.Memory, err) + if resources.Requests.Memory != "" { + if q, err := resource.ParseQuantity(resources.Requests.Memory); err != nil { + glog.Errorf("failed to parse memory resource %s to quantity: %v", resources.Requests.Memory, err) } else { rr.Requests[corev1.ResourceMemory] = q } } } - if spec.Limits != nil { + if resources.Limits != nil { if rr.Limits == nil { rr.Limits = make(map[corev1.ResourceName]resource.Quantity) } - if spec.Limits.CPU != "" { - if q, err := resource.ParseQuantity(spec.Limits.CPU); err != nil { - glog.Errorf("failed to parse CPU resource %s to quantity: %v", spec.Limits.CPU, err) + if resources.Limits.CPU != "" { + if q, err := resource.ParseQuantity(resources.Limits.CPU); err != nil { + glog.Errorf("failed to parse CPU resource %s to quantity: %v", resources.Limits.CPU, err) } else { rr.Limits[corev1.ResourceCPU] = q } } - if spec.Limits.Memory != "" { - if q, err := resource.ParseQuantity(spec.Limits.Memory); err != nil { - glog.Errorf("failed to parse memory resource %s to quantity: %v", spec.Limits.Memory, err) + 
if resources.Limits.Memory != "" { + if q, err := resource.ParseQuantity(resources.Limits.Memory); err != nil { + glog.Errorf("failed to parse memory resource %s to quantity: %v", resources.Limits.Memory, err) } else { rr.Limits[corev1.ResourceMemory] = q } diff --git a/pkg/util/utils_test.go b/pkg/util/utils_test.go index 1cbb291c40..e255b9db58 100644 --- a/pkg/util/utils_test.go +++ b/pkg/util/utils_test.go @@ -26,7 +26,7 @@ func TestResourceRequirement(t *testing.T) { g := NewGomegaWithT(t) type testcase struct { name string - spec v1alpha1.ContainerSpec + spec v1alpha1.Resources defaultRequests []corev1.ResourceRequirements expectFn func(*GomegaWithT, corev1.ResourceRequirements) } @@ -37,7 +37,7 @@ func TestResourceRequirement(t *testing.T) { tests := []testcase{ { name: "don't have spec, has one defaultRequests", - spec: v1alpha1.ContainerSpec{}, + spec: v1alpha1.Resources{}, defaultRequests: []corev1.ResourceRequirements{ { Requests: corev1.ResourceList{ @@ -65,7 +65,7 @@ func TestResourceRequirement(t *testing.T) { }, { name: "don't have spec, has two defaultRequests", - spec: v1alpha1.ContainerSpec{}, + spec: v1alpha1.Resources{}, defaultRequests: []corev1.ResourceRequirements{ { Requests: corev1.ResourceList{ @@ -103,7 +103,7 @@ func TestResourceRequirement(t *testing.T) { }, { name: "spec cover defaultRequests", - spec: v1alpha1.ContainerSpec{ + spec: v1alpha1.Resources{ Requests: &v1alpha1.ResourceRequirement{ Memory: "200Gi", CPU: "200m", @@ -140,7 +140,7 @@ func TestResourceRequirement(t *testing.T) { }, { name: "spec is not correct", - spec: v1alpha1.ContainerSpec{ + spec: v1alpha1.Resources{ Requests: &v1alpha1.ResourceRequirement{ Memory: "200xi", CPU: "200x", @@ -177,7 +177,7 @@ func TestResourceRequirement(t *testing.T) { }, { name: "Request don't have CPU", - spec: v1alpha1.ContainerSpec{ + spec: v1alpha1.Resources{ Requests: &v1alpha1.ResourceRequirement{ Memory: "100Gi", }, @@ -192,7 +192,7 @@ func TestResourceRequirement(t *testing.T) { }, { 
name: "Request don't have CPU, default has", - spec: v1alpha1.ContainerSpec{ + spec: v1alpha1.Resources{ Requests: &v1alpha1.ResourceRequirement{ Memory: "100Gi", }, @@ -211,7 +211,7 @@ func TestResourceRequirement(t *testing.T) { }, { name: "Request don't have memory", - spec: v1alpha1.ContainerSpec{ + spec: v1alpha1.Resources{ Requests: &v1alpha1.ResourceRequirement{ CPU: "100m", }, @@ -226,7 +226,7 @@ func TestResourceRequirement(t *testing.T) { }, { name: "Request don't have memory, default has", - spec: v1alpha1.ContainerSpec{ + spec: v1alpha1.Resources{ Requests: &v1alpha1.ResourceRequirement{ CPU: "100m", }, @@ -246,7 +246,7 @@ func TestResourceRequirement(t *testing.T) { { name: "Limits don't have CPU", - spec: v1alpha1.ContainerSpec{ + spec: v1alpha1.Resources{ Limits: &v1alpha1.ResourceRequirement{ Memory: "100Gi", }, @@ -261,7 +261,7 @@ func TestResourceRequirement(t *testing.T) { }, { name: "Limits don't have CPU, default has", - spec: v1alpha1.ContainerSpec{ + spec: v1alpha1.Resources{ Limits: &v1alpha1.ResourceRequirement{ Memory: "100Gi", }, @@ -280,7 +280,7 @@ func TestResourceRequirement(t *testing.T) { }, { name: "Limits don't have memory", - spec: v1alpha1.ContainerSpec{ + spec: v1alpha1.Resources{ Limits: &v1alpha1.ResourceRequirement{ CPU: "100m", }, @@ -295,7 +295,7 @@ func TestResourceRequirement(t *testing.T) { }, { name: "Limits don't have memory, default has", - spec: v1alpha1.ContainerSpec{ + spec: v1alpha1.Resources{ Limits: &v1alpha1.ResourceRequirement{ CPU: "100m", }, diff --git a/pkg/webhook/pod/pods_test.go b/pkg/webhook/pod/pods_test.go index d460fc4da5..f27e540f2e 100644 --- a/pkg/webhook/pod/pods_test.go +++ b/pkg/webhook/pod/pods_test.go @@ -168,7 +168,7 @@ func newTidbClusterForPodAdmissionControl() *v1alpha1.TidbCluster { }, Spec: v1alpha1.TidbClusterSpec{ PD: v1alpha1.PDSpec{ - ContainerSpec: v1alpha1.ContainerSpec{ + ComponentSpec: v1alpha1.ComponentSpec{ Image: "pd-test-image", }, Replicas: pdReplicas, diff --git 
a/tests/actions.go b/tests/actions.go index 6b32e73a86..3b7fa0a999 100644 --- a/tests/actions.go +++ b/tests/actions.go @@ -1330,7 +1330,7 @@ func (oa *operatorActions) pdMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, err if tc.Spec.PD.Image != c.Image { glog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=pd].image(%s) != %s", - ns, pdSetName, c.Image, tc.Spec.PD.Image) + ns, pdSetName, c.Image, tc.BasePDSpec().Image()) return false, nil } @@ -1403,7 +1403,7 @@ func (oa *operatorActions) tikvMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, e if tc.Spec.TiKV.Image != c.Image { glog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=tikv].image(%s) != %s", - ns, tikvSetName, c.Image, tc.Spec.TiKV.Image) + ns, tikvSetName, c.Image, tc.BaseTiKVSpec().Image()) return false, nil } @@ -1470,7 +1470,7 @@ func (oa *operatorActions) tidbMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, e if tc.Spec.TiDB.Image != c.Image { glog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=tidb].image(%s) != %s", - ns, tidbSetName, c.Image, tc.Spec.TiDB.Image) + ns, tidbSetName, c.Image, tc.BaseTiDBSpec().Image()) return false, nil }