string
@@ -6995,7 +6980,6 @@ TLSCluster
(Appears on:
-BRConfig,
TidbClusterSpec)
diff --git a/manifests/backup/backup-aws-s3-br.yaml b/manifests/backup/backup-aws-s3-br.yaml
index baf00fa088..a750e660c1 100644
--- a/manifests/backup/backup-aws-s3-br.yaml
+++ b/manifests/backup/backup-aws-s3-br.yaml
@@ -13,8 +13,6 @@ spec:
br:
cluster: myCluster
# clusterNamespce:
- # tlsCluster:
- # enabled: false
# logLevel: info
# statusAddr:
# concurrency: 4
@@ -28,7 +26,6 @@ spec:
# port: 4000
# user: root
# tlsClient:
- # enabled: false
# tlsSecret:
s3:
provider: aws
diff --git a/manifests/backup/backup-s3-br.yaml b/manifests/backup/backup-s3-br.yaml
index a99499337e..95fa298419 100644
--- a/manifests/backup/backup-s3-br.yaml
+++ b/manifests/backup/backup-s3-br.yaml
@@ -13,8 +13,6 @@ spec:
br:
cluster: myCluster
# clusterNamespce:
- # tlsCluster:
- # enabled: false
# logLevel: info
# statusAddr:
# concurrency: 4
@@ -28,7 +26,6 @@ spec:
# port: 4000
# user: root
# tlsClient:
- # enabled: false
# tlsSecret:
s3:
provider: ceph
diff --git a/manifests/backup/backup-schedule-aws-s3-br.yaml b/manifests/backup/backup-schedule-aws-s3-br.yaml
index c66c48a99e..978cc9b55d 100644
--- a/manifests/backup/backup-schedule-aws-s3-br.yaml
+++ b/manifests/backup/backup-schedule-aws-s3-br.yaml
@@ -18,8 +18,6 @@ spec:
br:
cluster: myCluster
# clusterNamespce: backupNamespace
- # tlsCluster:
- # enabled: false
# logLevel: info
# statusAddr:
# concurrency: 4
@@ -33,7 +31,6 @@ spec:
# port: 4000
# user: root
# tlsClient:
- # enabled: false
# tlsSecret:
s3:
provider: aws
diff --git a/manifests/backup/backup-schedule-s3-br.yaml b/manifests/backup/backup-schedule-s3-br.yaml
index 14898e84da..6880f686fe 100644
--- a/manifests/backup/backup-schedule-s3-br.yaml
+++ b/manifests/backup/backup-schedule-s3-br.yaml
@@ -18,8 +18,6 @@ spec:
br:
cluster: myCluster
# clusterNamespce: backupNamespace
- # tlsCluster:
- # enabled: false
# logLevel: info
# statusAddr:
# concurrency: 4
@@ -33,7 +31,6 @@ spec:
# port: 4000
# user: root
# tlsClient:
- # enabled: false
# tlsSecret:
s3:
provider: ceph
diff --git a/manifests/backup/restore-aws-s3-br.yaml b/manifests/backup/restore-aws-s3-br.yaml
index 1aea2fdb28..cc3880798c 100644
--- a/manifests/backup/restore-aws-s3-br.yaml
+++ b/manifests/backup/restore-aws-s3-br.yaml
@@ -13,8 +13,6 @@ spec:
br:
cluster: myCluster
# clusterNamespce:
- # tlsCluster:
- # enabled: false
# db:
# table:
# logLevel: info
@@ -30,7 +28,6 @@ spec:
# port: 4000
# user: root
# tlsClient:
- # enabled: false
# tlsSecret:
s3:
provider: aws
diff --git a/manifests/backup/restore-s3-br.yaml b/manifests/backup/restore-s3-br.yaml
index 6c86a605f6..047bcd8d64 100644
--- a/manifests/backup/restore-s3-br.yaml
+++ b/manifests/backup/restore-s3-br.yaml
@@ -13,8 +13,6 @@ spec:
br:
cluster: myCluster
# clusterNamespce:
- # tlsCluster:
- # enabled: false
# db:
# table:
# logLevel: info
@@ -30,7 +28,6 @@ spec:
# port: 4000
# user: root
# tlsClient:
- # enabled: false
# tlsSecret:
s3:
provider: ceph
diff --git a/manifests/crd.yaml b/manifests/crd.yaml
index 041f2ef936..0be52d4360 100644
--- a/manifests/crd.yaml
+++ b/manifests/crd.yaml
@@ -7286,7 +7286,6 @@ spec:
description: TimeAgo is the history version of the backup task,
e.g. 1m, 1h
type: string
- tlsCluster: {}
required:
- cluster
type: object
@@ -8129,7 +8128,6 @@ spec:
description: TimeAgo is the history version of the backup task,
e.g. 1m, 1h
type: string
- tlsCluster: {}
required:
- cluster
type: object
@@ -9015,7 +9013,6 @@ spec:
description: TimeAgo is the history version of the backup task,
e.g. 1m, 1h
type: string
- tlsCluster: {}
required:
- cluster
type: object
diff --git a/pkg/apis/pingcap/v1alpha1/openapi_generated.go b/pkg/apis/pingcap/v1alpha1/openapi_generated.go
index e59ee0bc76..f4ec0aa806 100644
--- a/pkg/apis/pingcap/v1alpha1/openapi_generated.go
+++ b/pkg/apis/pingcap/v1alpha1/openapi_generated.go
@@ -377,12 +377,6 @@ func schema_pkg_apis_pingcap_v1alpha1_BRConfig(ref common.ReferenceCallback) com
Description: "BRConfig contains config for BR",
Type: []string{"object"},
Properties: map[string]spec.Schema{
- "tlsCluster": {
- SchemaProps: spec.SchemaProps{
- Description: "Whether enable the TLS connection between TiDB server components Optional: Defaults to nil",
- Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TLSCluster"),
- },
- },
"cluster": {
SchemaProps: spec.SchemaProps{
Description: "ClusterName of backup/restore cluster",
@@ -471,8 +465,6 @@ func schema_pkg_apis_pingcap_v1alpha1_BRConfig(ref common.ReferenceCallback) com
Required: []string{"cluster"},
},
},
- Dependencies: []string{
- "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TLSCluster"},
}
}
diff --git a/pkg/apis/pingcap/v1alpha1/types.go b/pkg/apis/pingcap/v1alpha1/types.go
index 30c6967ab3..521ef1a6d7 100644
--- a/pkg/apis/pingcap/v1alpha1/types.go
+++ b/pkg/apis/pingcap/v1alpha1/types.go
@@ -855,10 +855,6 @@ type BackupSpec struct {
// +k8s:openapi-gen=true
// BRConfig contains config for BR
type BRConfig struct {
- // Whether enable the TLS connection between TiDB server components
- // Optional: Defaults to nil
- // +optional
- TLSCluster *TLSCluster `json:"tlsCluster,omitempty"`
// ClusterName of backup/restore cluster
Cluster string `json:"cluster"`
// Namespace of backup/restore cluster
diff --git a/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go
index 0eaff7a295..bd4f6f4ee7 100644
--- a/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go
@@ -28,11 +28,6 @@ import (
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BRConfig) DeepCopyInto(out *BRConfig) {
*out = *in
- if in.TLSCluster != nil {
- in, out := &in.TLSCluster, &out.TLSCluster
- *out = new(TLSCluster)
- **out = **in
- }
if in.Concurrency != nil {
in, out := &in.Concurrency, &out.Concurrency
*out = new(uint32)
diff --git a/pkg/backup/backup/backup_manager.go b/pkg/backup/backup/backup_manager.go
index bf10dd5dce..4c1eff1099 100644
--- a/pkg/backup/backup/backup_manager.go
+++ b/pkg/backup/backup/backup_manager.go
@@ -20,6 +20,7 @@ import (
"github.com/pingcap/tidb-operator/pkg/backup"
"github.com/pingcap/tidb-operator/pkg/backup/constants"
backuputil "github.com/pingcap/tidb-operator/pkg/backup/util"
+ v1alpha1listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/label"
"github.com/pingcap/tidb-operator/pkg/util"
@@ -39,6 +40,7 @@ type backupManager struct {
jobLister batchlisters.JobLister
jobControl controller.JobControlInterface
pvcLister corelisters.PersistentVolumeClaimLister
+ tcLister v1alpha1listers.TidbClusterLister
pvcControl controller.GeneralPVCControlInterface
}
@@ -50,6 +52,7 @@ func NewBackupManager(
jobLister batchlisters.JobLister,
jobControl controller.JobControlInterface,
pvcLister corelisters.PersistentVolumeClaimLister,
+ tcLister v1alpha1listers.TidbClusterLister,
pvcControl controller.GeneralPVCControlInterface,
) backup.BackupManager {
return &backupManager{
@@ -59,6 +62,7 @@ func NewBackupManager(
jobLister,
jobControl,
pvcLister,
+ tcLister,
pvcControl,
}
}
@@ -255,6 +259,14 @@ func (bm *backupManager) makeExportJob(backup *v1alpha1.Backup) (*batchv1.Job, s
func (bm *backupManager) makeBackupJob(backup *v1alpha1.Backup) (*batchv1.Job, string, error) {
ns := backup.GetNamespace()
name := backup.GetName()
+ backupNamespace := ns
+ if backup.Spec.BR.ClusterNamespace != "" {
+ backupNamespace = backup.Spec.BR.ClusterNamespace
+ }
+ tc, err := bm.tcLister.TidbClusters(backupNamespace).Get(backup.Spec.BR.Cluster)
+ if err != nil {
+ return nil, fmt.Sprintf("failed to fetch tidbcluster %s/%s", backupNamespace, backup.Spec.BR.Cluster), err
+ }
envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.secretLister)
if err != nil {
@@ -277,7 +289,8 @@ func (bm *backupManager) makeBackupJob(backup *v1alpha1.Backup) (*batchv1.Job, s
backupLabel := label.NewBackup().Instance(backup.GetInstanceName()).BackupJob().Backup(name)
volumeMounts := []corev1.VolumeMount{}
volumes := []corev1.Volume{}
- if backup.Spec.BR.TLSCluster != nil && backup.Spec.BR.TLSCluster.Enabled {
+ if tc.Spec.TLSCluster != nil && tc.Spec.TLSCluster.Enabled {
+ args = append(args, "--cluster-tls=true")
volumeMounts = append(volumeMounts, corev1.VolumeMount{
Name: "cluster-client-tls",
ReadOnly: true,
@@ -292,9 +305,10 @@ func (bm *backupManager) makeBackupJob(backup *v1alpha1.Backup) (*batchv1.Job, s
},
})
}
- if backup.Spec.From.TLSClient != nil && backup.Spec.From.TLSClient.Enabled {
+ if tc.Spec.TiDB.TLSClient != nil && tc.Spec.TiDB.TLSClient.Enabled {
+ args = append(args, "--client-tls=true")
clientSecretName := util.TiDBClientTLSSecretName(backup.Spec.BR.Cluster)
- if backup.Spec.From.TLSClient.TLSSecret != "" {
+ if backup.Spec.From.TLSClient != nil && backup.Spec.From.TLSClient.TLSSecret != "" {
clientSecretName = backup.Spec.From.TLSClient.TLSSecret
}
volumeMounts = append(volumeMounts, corev1.VolumeMount{
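
The backup-side change above boils down to: look up the referenced TidbCluster (falling back to the Backup's own namespace when spec.br.clusterNamespace is empty) and derive the BR TLS flags from that cluster's spec instead of from the removed BRConfig.tlsCluster field. A minimal sketch of that logic under those assumptions; appendTLSArgs is an illustrative helper name, not part of this patch:

package example

import (
	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
	v1alpha1listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
)

// appendTLSArgs (illustrative name) resolves the TidbCluster behind a Backup
// and appends the TLS flags introduced by this patch to the BR job arguments.
func appendTLSArgs(tcLister v1alpha1listers.TidbClusterLister, backup *v1alpha1.Backup, args []string) ([]string, error) {
	ns := backup.GetNamespace()
	if backup.Spec.BR.ClusterNamespace != "" {
		// The backed-up cluster may live in a different namespace.
		ns = backup.Spec.BR.ClusterNamespace
	}
	tc, err := tcLister.TidbClusters(ns).Get(backup.Spec.BR.Cluster)
	if err != nil {
		return nil, err
	}
	// TLS between PD/TiKV/TiDB is now read from the TidbCluster spec.
	if tc.Spec.TLSCluster != nil && tc.Spec.TLSCluster.Enabled {
		args = append(args, "--cluster-tls=true")
	}
	// MySQL-protocol TLS is likewise decided by the TidbCluster; the Backup's
	// own from.tlsClient block only overrides which client secret is mounted.
	if tc.Spec.TiDB.TLSClient != nil && tc.Spec.TiDB.TLSClient.Enabled {
		args = append(args, "--client-tls=true")
	}
	return args, nil
}
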
diff --git a/pkg/backup/restore/restore_manager.go b/pkg/backup/restore/restore_manager.go
index f1407ba3ec..ec8e63464d 100644
--- a/pkg/backup/restore/restore_manager.go
+++ b/pkg/backup/restore/restore_manager.go
@@ -21,6 +21,7 @@ import (
"github.com/pingcap/tidb-operator/pkg/backup/constants"
backuputil "github.com/pingcap/tidb-operator/pkg/backup/util"
listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
+ v1alpha1listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/label"
"github.com/pingcap/tidb-operator/pkg/util"
@@ -40,6 +41,7 @@ type restoreManager struct {
jobLister batchlisters.JobLister
jobControl controller.JobControlInterface
pvcLister corelisters.PersistentVolumeClaimLister
+ tcLister v1alpha1listers.TidbClusterLister
pvcControl controller.GeneralPVCControlInterface
}
@@ -51,6 +53,7 @@ func NewRestoreManager(
jobLister batchlisters.JobLister,
jobControl controller.JobControlInterface,
pvcLister corelisters.PersistentVolumeClaimLister,
+ tcLister v1alpha1listers.TidbClusterLister,
pvcControl controller.GeneralPVCControlInterface,
) backup.RestoreManager {
return &restoreManager{
@@ -60,6 +63,7 @@ func NewRestoreManager(
jobLister,
jobControl,
pvcLister,
+ tcLister,
pvcControl,
}
}
@@ -240,6 +244,14 @@ func (rm *restoreManager) makeImportJob(restore *v1alpha1.Restore) (*batchv1.Job
func (rm *restoreManager) makeRestoreJob(restore *v1alpha1.Restore) (*batchv1.Job, string, error) {
ns := restore.GetNamespace()
name := restore.GetName()
+ restoreNamespace := ns
+ if restore.Spec.BR.ClusterNamespace != "" {
+ restoreNamespace = restore.Spec.BR.ClusterNamespace
+ }
+ tc, err := rm.tcLister.TidbClusters(restoreNamespace).Get(restore.Spec.BR.Cluster)
+ if err != nil {
+ return nil, fmt.Sprintf("failed to fetch tidbcluster %s/%s", restoreNamespace, restore.Spec.BR.Cluster), err
+ }
envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.secretLister)
if err != nil {
@@ -261,7 +273,8 @@ func (rm *restoreManager) makeRestoreJob(restore *v1alpha1.Restore) (*batchv1.Jo
restoreLabel := label.NewBackup().Instance(restore.GetInstanceName()).RestoreJob().Restore(name)
volumeMounts := []corev1.VolumeMount{}
volumes := []corev1.Volume{}
- if restore.Spec.BR.TLSCluster != nil && restore.Spec.BR.TLSCluster.Enabled {
+ if tc.Spec.TLSCluster != nil && tc.Spec.TLSCluster.Enabled {
+ args = append(args, "--cluster-tls=true")
volumeMounts = append(volumeMounts, corev1.VolumeMount{
Name: "cluster-client-tls",
ReadOnly: true,
@@ -276,10 +289,10 @@ func (rm *restoreManager) makeRestoreJob(restore *v1alpha1.Restore) (*batchv1.Jo
},
})
}
-
- if restore.Spec.To.TLSClient != nil && restore.Spec.To.TLSClient.Enabled {
+ if tc.Spec.TiDB.TLSClient != nil && tc.Spec.TiDB.TLSClient.Enabled {
+ args = append(args, "--client-tls=true")
clientSecretName := util.TiDBClientTLSSecretName(restore.Spec.BR.Cluster)
- if restore.Spec.To.TLSClient.TLSSecret != "" {
+ if restore.Spec.To.TLSClient != nil && restore.Spec.To.TLSClient.TLSSecret != "" {
clientSecretName = restore.Spec.To.TLSClient.TLSSecret
}
volumeMounts = append(volumeMounts, corev1.VolumeMount{
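
The same rewrite on the restore side also explains the added nil-check: enabling client TLS is now decided by the TidbCluster, so spec.to.tlsClient may be omitted entirely and is only consulted as an optional secret override. A hedged sketch of that secret selection, using only identifiers that appear in the patch; clientTLSSecretName is an illustrative name:

package example

import (
	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/util"
)

// clientTLSSecretName (illustrative name) picks the secret mounted for
// MySQL-protocol TLS during restore, mirroring the logic above.
func clientTLSSecretName(restore *v1alpha1.Restore) string {
	// Default: the operator-managed client secret derived from the cluster name.
	name := util.TiDBClientTLSSecretName(restore.Spec.BR.Cluster)
	// Optional override via spec.to.tlsClient.tlsSecret; the nil-check is
	// required because the block may be absent now that enablement lives on
	// the TidbCluster.
	if restore.Spec.To.TLSClient != nil && restore.Spec.To.TLSClient.TLSSecret != "" {
		name = restore.Spec.To.TLSClient.TLSSecret
	}
	return name
}
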
diff --git a/pkg/controller/backup/backup_controller.go b/pkg/controller/backup/backup_controller.go
index 6a5cb48704..6418f931c3 100644
--- a/pkg/controller/backup/backup_controller.go
+++ b/pkg/controller/backup/backup_controller.go
@@ -68,6 +68,7 @@ func NewController(
recorder := eventBroadcaster.NewRecorder(v1alpha1.Scheme, corev1.EventSource{Component: "backup"})
backupInformer := informerFactory.Pingcap().V1alpha1().Backups()
+ tcInformer := informerFactory.Pingcap().V1alpha1().TidbClusters()
jobInformer := kubeInformerFactory.Batch().V1().Jobs()
pvcInformer := kubeInformerFactory.Core().V1().PersistentVolumeClaims()
secretInformer := kubeInformerFactory.Core().V1().Secrets()
@@ -88,6 +89,7 @@ func NewController(
jobInformer.Lister(),
jobControl,
pvcInformer.Lister(),
+ tcInformer.Lister(),
pvcControl,
),
),
diff --git a/pkg/controller/restore/restore_controller.go b/pkg/controller/restore/restore_controller.go
index d170fa493f..dd763eda90 100644
--- a/pkg/controller/restore/restore_controller.go
+++ b/pkg/controller/restore/restore_controller.go
@@ -68,6 +68,7 @@ func NewController(
recorder := eventBroadcaster.NewRecorder(v1alpha1.Scheme, corev1.EventSource{Component: "restore"})
restoreInformer := informerFactory.Pingcap().V1alpha1().Restores()
+ tcInformer := informerFactory.Pingcap().V1alpha1().TidbClusters()
backupInformer := informerFactory.Pingcap().V1alpha1().Backups()
jobInformer := kubeInformerFactory.Batch().V1().Jobs()
pvcInformer := kubeInformerFactory.Core().V1().PersistentVolumeClaims()
@@ -87,6 +88,7 @@ func NewController(
jobInformer.Lister(),
jobControl,
pvcInformer.Lister(),
+ tcInformer.Lister(),
pvcControl,
),
),