Remove unnecessary informer caches (pingcap#1504)
Aylei authored and Song Gao committed Apr 13, 2020
1 parent 832ef4a commit e9dfe89
Showing 16 changed files with 54 additions and 89 deletions.
20 changes: 7 additions & 13 deletions cmd/controller-manager/main.go
@@ -148,20 +148,14 @@ func main() {

var informerFactory informers.SharedInformerFactory
var kubeInformerFactory kubeinformers.SharedInformerFactory
if controller.ClusterScoped {
informerFactory = informers.NewSharedInformerFactory(cli, controller.ResyncDuration)
kubeInformerFactory = kubeinformers.NewSharedInformerFactory(kubeCli, controller.ResyncDuration)
} else {
options := []informers.SharedInformerOption{
informers.WithNamespace(ns),
}
informerFactory = informers.NewSharedInformerFactoryWithOptions(cli, controller.ResyncDuration, options...)

kubeoptions := []kubeinformers.SharedInformerOption{
kubeinformers.WithNamespace(ns),
}
kubeInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(kubeCli, controller.ResyncDuration, kubeoptions...)
var options []informers.SharedInformerOption
var kubeoptions []kubeinformers.SharedInformerOption
if !controller.ClusterScoped {
options = append(options, informers.WithNamespace(ns))
kubeoptions = append(kubeoptions, kubeinformers.WithNamespace(ns))
}
informerFactory = informers.NewSharedInformerFactoryWithOptions(cli, controller.ResyncDuration, options...)
kubeInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(kubeCli, controller.ResyncDuration, kubeoptions...)

rl := resourcelock.EndpointsLock{
EndpointsMeta: metav1.ObjectMeta{
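The rewritten block above folds the cluster-scoped and namespace-scoped paths into a single factory-construction call: the option slice stays empty for cluster scope and gains WithNamespace otherwise. A minimal sketch of the same pattern using only client-go — the helper name newKubeInformerFactory and the 30-second resync value are illustrative, not taken from this commit:

package sketch

import (
	"time"

	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// newKubeInformerFactory watches the whole cluster when clusterScoped is true
// and only ns otherwise. An empty option slice simply means "no extra options",
// which is what lets one NewSharedInformerFactoryWithOptions call serve both cases.
func newKubeInformerFactory(kubeCli kubernetes.Interface, clusterScoped bool, ns string) kubeinformers.SharedInformerFactory {
	var opts []kubeinformers.SharedInformerOption
	if !clusterScoped {
		opts = append(opts, kubeinformers.WithNamespace(ns))
	}
	return kubeinformers.NewSharedInformerFactoryWithOptions(kubeCli, 30*time.Second, opts...)
}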
10 changes: 5 additions & 5 deletions pkg/backup/backup/backup_cleaner.go
@@ -24,8 +24,8 @@ import (
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
batchlisters "k8s.io/client-go/listers/batch/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/klog"
)

@@ -36,20 +36,20 @@ type BackupCleaner interface {

type backupCleaner struct {
statusUpdater controller.BackupConditionUpdaterInterface
secretLister corelisters.SecretLister
kubeCli kubernetes.Interface
jobLister batchlisters.JobLister
jobControl controller.JobControlInterface
}

// NewBackupCleaner returns a BackupCleaner
func NewBackupCleaner(
statusUpdater controller.BackupConditionUpdaterInterface,
secretLister corelisters.SecretLister,
kubeCli kubernetes.Interface,
jobLister batchlisters.JobLister,
jobControl controller.JobControlInterface) BackupCleaner {
return &backupCleaner{
statusUpdater,
secretLister,
kubeCli,
jobLister,
jobControl,
}
@@ -112,7 +112,7 @@ func (bc *backupCleaner) makeCleanJob(backup *v1alpha1.Backup) (*batchv1.Job, st
ns := backup.GetNamespace()
name := backup.GetName()

storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bc.secretLister)
storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bc.kubeCli)
if err != nil {
return nil, reason, err
}
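Since NewBackupCleaner now takes a kubernetes.Interface instead of a SecretLister, unit tests can hand it a fake clientset seeded with objects rather than priming an informer indexer. A self-contained sketch of that testing pattern — the test name and secret contents are illustrative, and the Get signature matches the context-free client-go API used throughout this diff:

package sketch

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestSecretViaFakeClient seeds a Secret into a fake clientset and reads it back
// the same way the production code now does: a direct GET, no cache to sync.
func TestSecretViaFakeClient(t *testing.T) {
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "backup-secret"},
		Data:       map[string][]byte{"password": []byte("demo")},
	}
	kubeCli := fake.NewSimpleClientset(secret)

	got, err := kubeCli.CoreV1().Secrets("ns").Get("backup-secret", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if string(got.Data["password"]) != "demo" {
		t.Fatalf("unexpected password: %q", got.Data["password"])
	}
}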
15 changes: 8 additions & 7 deletions pkg/backup/backup/backup_manager.go
@@ -29,14 +29,15 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
batchlisters "k8s.io/client-go/listers/batch/v1"
corelisters "k8s.io/client-go/listers/core/v1"
)

type backupManager struct {
backupCleaner BackupCleaner
statusUpdater controller.BackupConditionUpdaterInterface
secretLister corelisters.SecretLister
kubeCli kubernetes.Interface
jobLister batchlisters.JobLister
jobControl controller.JobControlInterface
pvcLister corelisters.PersistentVolumeClaimLister
@@ -48,7 +49,7 @@ type backupManager struct {
func NewBackupManager(
backupCleaner BackupCleaner,
statusUpdater controller.BackupConditionUpdaterInterface,
secretLister corelisters.SecretLister,
kubeCli kubernetes.Interface,
jobLister batchlisters.JobLister,
jobControl controller.JobControlInterface,
pvcLister corelisters.PersistentVolumeClaimLister,
@@ -58,7 +59,7 @@ func NewBackupManager(
return &backupManager{
backupCleaner,
statusUpdater,
secretLister,
kubeCli,
jobLister,
jobControl,
pvcLister,
@@ -168,12 +169,12 @@ func (bm *backupManager) makeExportJob(backup *v1alpha1.Backup) (*batchv1.Job, s
ns := backup.GetNamespace()
name := backup.GetName()

envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.secretLister)
envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.kubeCli)
if err != nil {
return nil, reason, err
}

storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bm.secretLister)
storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bm.kubeCli)
if err != nil {
return nil, reason, fmt.Errorf("backup %s/%s, %v", ns, name, err)
}
@@ -268,12 +269,12 @@ func (bm *backupManager) makeBackupJob(backup *v1alpha1.Backup) (*batchv1.Job, s
return nil, fmt.Sprintf("failed to fetch tidbcluster %s/%s", backupNamespace, backup.Spec.BR.Cluster), err
}

envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.secretLister)
envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.kubeCli)
if err != nil {
return nil, reason, err
}

storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bm.secretLister)
storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bm.kubeCli)
if err != nil {
return nil, reason, fmt.Errorf("backup %s/%s, %v", ns, name, err)
}
15 changes: 8 additions & 7 deletions pkg/backup/restore/restore_manager.go
@@ -30,14 +30,15 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
batchlisters "k8s.io/client-go/listers/batch/v1"
corelisters "k8s.io/client-go/listers/core/v1"
)

type restoreManager struct {
backupLister listers.BackupLister
statusUpdater controller.RestoreConditionUpdaterInterface
secretLister corelisters.SecretLister
kubeCli kubernetes.Interface
jobLister batchlisters.JobLister
jobControl controller.JobControlInterface
pvcLister corelisters.PersistentVolumeClaimLister
@@ -49,7 +50,7 @@ type restoreManager struct {
func NewRestoreManager(
backupLister listers.BackupLister,
statusUpdater controller.RestoreConditionUpdaterInterface,
secretLister corelisters.SecretLister,
kubeCli kubernetes.Interface,
jobLister batchlisters.JobLister,
jobControl controller.JobControlInterface,
pvcLister corelisters.PersistentVolumeClaimLister,
@@ -59,7 +60,7 @@ func NewRestoreManager(
return &restoreManager{
backupLister,
statusUpdater,
secretLister,
kubeCli,
jobLister,
jobControl,
pvcLister,
@@ -159,12 +160,12 @@ func (rm *restoreManager) makeImportJob(restore *v1alpha1.Restore) (*batchv1.Job
ns := restore.GetNamespace()
name := restore.GetName()

envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.secretLister)
envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.kubeCli)
if err != nil {
return nil, reason, err
}

storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.UseKMS, restore.Spec.StorageProvider, rm.secretLister)
storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.UseKMS, restore.Spec.StorageProvider, rm.kubeCli)
if err != nil {
return nil, reason, fmt.Errorf("restore %s/%s, %v", ns, name, err)
}
@@ -253,12 +254,12 @@ func (rm *restoreManager) makeRestoreJob(restore *v1alpha1.Restore) (*batchv1.Jo
return nil, fmt.Sprintf("failed to fetch tidbcluster %s/%s", restoreNamespace, restore.Spec.BR.Cluster), err
}

envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.secretLister)
envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.kubeCli)
if err != nil {
return nil, reason, err
}

storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.UseKMS, restore.Spec.StorageProvider, rm.secretLister)
storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.UseKMS, restore.Spec.StorageProvider, rm.kubeCli)
if err != nil {
return nil, reason, fmt.Errorf("restore %s/%s, %v", ns, name, err)
}
13 changes: 7 additions & 6 deletions pkg/backup/util/util.go
@@ -22,7 +22,8 @@ import (
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/backup/constants"
corev1 "k8s.io/api/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)

// CheckAllKeysExistInSecret check if all keys are included in the specific secret
@@ -160,7 +161,7 @@ func GenerateGcsCertEnvVar(gcs *v1alpha1.GcsStorageProvider) ([]corev1.EnvVar, s
}

// GenerateStorageCertEnv generate the env info in order to access backend backup storage
func GenerateStorageCertEnv(ns string, useKMS bool, provider v1alpha1.StorageProvider, secretLister corelisters.SecretLister) ([]corev1.EnvVar, string, error) {
func GenerateStorageCertEnv(ns string, useKMS bool, provider v1alpha1.StorageProvider, kubeCli kubernetes.Interface) ([]corev1.EnvVar, string, error) {
var certEnv []corev1.EnvVar
var reason string
var err error
@@ -174,7 +175,7 @@ func GenerateStorageCertEnv(ns string, useKMS bool, provider v1alpha1.StoragePro

s3SecretName := provider.S3.SecretName
if s3SecretName != "" {
secret, err := secretLister.Secrets(ns).Get(s3SecretName)
secret, err := kubeCli.CoreV1().Secrets(ns).Get(s3SecretName, metav1.GetOptions{})
if err != nil {
err := fmt.Errorf("get s3 secret %s/%s failed, err: %v", ns, s3SecretName, err)
return certEnv, "GetS3SecretFailed", err
@@ -196,7 +197,7 @@ func GenerateStorageCertEnv(ns string, useKMS bool, provider v1alpha1.StoragePro
return certEnv, "GcsConfigIsEmpty", errors.New("gcs config is empty")
}
gcsSecretName := provider.Gcs.SecretName
secret, err := secretLister.Secrets(ns).Get(gcsSecretName)
secret, err := kubeCli.CoreV1().Secrets(ns).Get(gcsSecretName, metav1.GetOptions{})
if err != nil {
err := fmt.Errorf("get gcs secret %s/%s failed, err: %v", ns, gcsSecretName, err)
return certEnv, "GetGcsSecretFailed", err
@@ -221,10 +222,10 @@ func GenerateStorageCertEnv(ns string, useKMS bool, provider v1alpha1.StoragePro
}

// GenerateTidbPasswordEnv generate the password EnvVar
func GenerateTidbPasswordEnv(ns, name, tidbSecretName string, useKMS bool, secretLister corelisters.SecretLister) ([]corev1.EnvVar, string, error) {
func GenerateTidbPasswordEnv(ns, name, tidbSecretName string, useKMS bool, kubeCli kubernetes.Interface) ([]corev1.EnvVar, string, error) {
var certEnv []corev1.EnvVar
var passwordKey string
secret, err := secretLister.Secrets(ns).Get(tidbSecretName)
secret, err := kubeCli.CoreV1().Secrets(ns).Get(tidbSecretName, metav1.GetOptions{})
if err != nil {
err = fmt.Errorf("backup %s/%s get tidb secret %s failed, err: %v", ns, name, tidbSecretName, err)
return certEnv, "GetTidbSecretFailed", err
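GenerateStorageCertEnv and GenerateTidbPasswordEnv now read Secrets from the API server on demand instead of from an informer-backed lister, which is what lets the secret informer be dropped entirely. A condensed sketch of that access pattern — getSecretData is a hypothetical helper, and the Get signature matches the context-free client-go API used in this diff:

package sketch

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getSecretData fetches one key of a Secret straight from the API server.
// Compared with a lister, no shared informer cache has to be built or synced,
// at the cost of one live GET each time this code path runs.
func getSecretData(kubeCli kubernetes.Interface, ns, name, key string) (string, error) {
	secret, err := kubeCli.CoreV1().Secrets(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		return "", fmt.Errorf("get secret %s/%s failed, err: %v", ns, name, err)
	}
	v, ok := secret.Data[key]
	if !ok {
		return "", fmt.Errorf("secret %s/%s does not contain key %q", ns, name, key)
	}
	return string(v), nil
}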
5 changes: 2 additions & 3 deletions pkg/controller/backup/backup_controller.go
@@ -71,11 +71,10 @@ func NewController(
tcInformer := informerFactory.Pingcap().V1alpha1().TidbClusters()
jobInformer := kubeInformerFactory.Batch().V1().Jobs()
pvcInformer := kubeInformerFactory.Core().V1().PersistentVolumeClaims()
secretInformer := kubeInformerFactory.Core().V1().Secrets()
statusUpdater := controller.NewRealBackupConditionUpdater(cli, backupInformer.Lister(), recorder)
jobControl := controller.NewRealJobControl(kubeCli, recorder)
pvcControl := controller.NewRealGeneralPVCControl(kubeCli, recorder)
backupCleaner := backup.NewBackupCleaner(statusUpdater, secretInformer.Lister(), jobInformer.Lister(), jobControl)
backupCleaner := backup.NewBackupCleaner(statusUpdater, kubeCli, jobInformer.Lister(), jobControl)

bkc := &Controller{
kubeClient: kubeCli,
@@ -85,7 +84,7 @@
backup.NewBackupManager(
backupCleaner,
statusUpdater,
secretInformer.Lister(),
kubeCli,
jobInformer.Lister(),
jobControl,
pvcInformer.Lister(),
8 changes: 1 addition & 7 deletions pkg/controller/configmap_control.go
@@ -23,7 +23,6 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
@@ -44,19 +43,16 @@ type ConfigMapControlInterface interface {
type realConfigMapControl struct {
client client.Client
kubeCli kubernetes.Interface
cmLister corelisters.ConfigMapLister
recorder record.EventRecorder
}

// NewRealSecretControl creates a new SecretControlInterface
func NewRealConfigMapControl(
kubeCli kubernetes.Interface,
cmLister corelisters.ConfigMapLister,
recorder record.EventRecorder,
) ConfigMapControlInterface {
return &realConfigMapControl{
kubeCli: kubeCli,
cmLister: cmLister,
recorder: recorder,
}
}
@@ -81,7 +77,7 @@ func (cc *realConfigMapControl) UpdateConfigMap(owner runtime.Object, cm *corev1
return nil
}

if updated, err := cc.cmLister.ConfigMaps(cm.Namespace).Get(cmName); err != nil {
if updated, err := cc.kubeCli.CoreV1().ConfigMaps(cm.Namespace).Get(cmName, metav1.GetOptions{}); err != nil {
utilruntime.HandleError(fmt.Errorf("error getting updated ConfigMap %s/%s from lister: %v", ns, cmName, err))
} else {
cm = updated.DeepCopy()
@@ -124,7 +120,6 @@ var _ ConfigMapControlInterface = &realConfigMapControl{}
// NewFakeConfigMapControl returns a FakeConfigMapControl
func NewFakeConfigMapControl(cmInformer coreinformers.ConfigMapInformer) *FakeConfigMapControl {
return &FakeConfigMapControl{
cmInformer.Lister(),
cmInformer.Informer().GetIndexer(),
RequestTracker{},
RequestTracker{},
@@ -134,7 +129,6 @@

// FakeConfigMapControl is a fake ConfigMapControlInterface
type FakeConfigMapControl struct {
CmLister corelisters.ConfigMapLister
CmIndexer cache.Indexer
createConfigMapTracker RequestTracker
updateConfigMapTracker RequestTracker
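In UpdateConfigMap above, the conflict-retry path now re-reads the ConfigMap through kubeCli rather than the removed lister, so each retry starts from the object's latest resourceVersion. A condensed sketch of that loop — updateConfigMapData is a hypothetical helper, not the repository's method:

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// updateConfigMapData retries an update on conflict, fetching the latest copy
// of the object from the API server (not a cache) before every attempt.
func updateConfigMapData(kubeCli kubernetes.Interface, cm *corev1.ConfigMap, key, value string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		latest, err := kubeCli.CoreV1().ConfigMaps(cm.Namespace).Get(cm.Name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		updated := latest.DeepCopy()
		if updated.Data == nil {
			updated.Data = map[string]string{}
		}
		updated.Data[key] = value
		_, err = kubeCli.CoreV1().ConfigMaps(cm.Namespace).Update(updated)
		return err
	})
}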
18 changes: 6 additions & 12 deletions pkg/controller/configmap_control_test.go
@@ -23,9 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
corelisters "k8s.io/client-go/listers/core/v1"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
)

@@ -35,7 +33,7 @@ func TestConfigMapControlCreatesConfigMaps(t *testing.T) {
tc := newTidbCluster()
cm := newConfigMap()
fakeClient := &fake.Clientset{}
control := NewRealConfigMapControl(fakeClient, nil, recorder)
control := NewRealConfigMapControl(fakeClient, recorder)
fakeClient.AddReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
create := action.(core.CreateAction)
return true, create.GetObject(), nil
@@ -54,7 +52,7 @@ func TestConfigMapControlCreatesConfigMapFailed(t *testing.T) {
tc := newTidbCluster()
cm := newConfigMap()
fakeClient := &fake.Clientset{}
control := NewRealConfigMapControl(fakeClient, nil, recorder)
control := NewRealConfigMapControl(fakeClient, recorder)
fakeClient.AddReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, apierrors.NewInternalError(errors.New("API server down"))
})
@@ -73,7 +71,7 @@ func TestConfigMapControlUpdateConfigMap(t *testing.T) {
cm := newConfigMap()
cm.Data["file"] = "test"
fakeClient := &fake.Clientset{}
control := NewRealConfigMapControl(fakeClient, nil, recorder)
control := NewRealConfigMapControl(fakeClient, recorder)
fakeClient.AddReactor("update", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
update := action.(core.UpdateAction)
return true, update.GetObject(), nil
@@ -90,13 +88,9 @@ func TestConfigMapControlUpdateConfigMapConflictSuccess(t *testing.T) {
cm := newConfigMap()
cm.Data["file"] = "test"
fakeClient := &fake.Clientset{}
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
oldcm := newConfigMap()
oldcm.Data["file"] = "test2"
err := indexer.Add(oldcm)
g.Expect(err).To(Succeed())
cmLister := corelisters.NewConfigMapLister(indexer)
control := NewRealConfigMapControl(fakeClient, cmLister, recorder)
control := NewRealConfigMapControl(fakeClient, recorder)
conflict := false
fakeClient.AddReactor("update", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
update := action.(core.UpdateAction)
@@ -117,7 +111,7 @@ func TestConfigMapControlDeleteConfigMap(t *testing.T) {
tc := newTidbCluster()
cm := newConfigMap()
fakeClient := &fake.Clientset{}
control := NewRealConfigMapControl(fakeClient, nil, recorder)
control := NewRealConfigMapControl(fakeClient, recorder)
fakeClient.AddReactor("delete", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, nil
})
@@ -134,7 +128,7 @@ func TestConfigMapControlDeleteConfigMapFailed(t *testing.T) {
tc := newTidbCluster()
cm := newConfigMap()
fakeClient := &fake.Clientset{}
control := NewRealConfigMapControl(fakeClient, nil, recorder)
control := NewRealConfigMapControl(fakeClient, recorder)
fakeClient.AddReactor("delete", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, apierrors.NewInternalError(errors.New("API server down"))
})
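With the lister gone, TestConfigMapControlUpdateConfigMapConflictSuccess no longer builds an indexer; fake clientset reactors stand in for the API server, including the conflict on the first update. A self-contained sketch of that reactor pattern — the test name and object are illustrative:

package sketch

import (
	"errors"
	"testing"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

// TestUpdateConflictReactor makes the first update fail with a Conflict error
// and lets the second one succeed, mirroring how the rewritten test drives the
// retry logic without any pre-populated cache.
func TestUpdateConflictReactor(t *testing.T) {
	cli := &fake.Clientset{}
	conflicted := false
	cli.AddReactor("update", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
		update := action.(core.UpdateAction)
		if !conflicted {
			conflicted = true
			return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), "demo", errors.New("conflict"))
		}
		return true, update.GetObject(), nil
	})

	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"}}
	if _, err := cli.CoreV1().ConfigMaps("default").Update(cm); err == nil {
		t.Fatal("expected the first update to hit the conflict reactor")
	}
	if _, err := cli.CoreV1().ConfigMaps("default").Update(cm); err != nil {
		t.Fatalf("second update should have succeeded: %v", err)
	}
}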
(Diffs for the remaining 8 changed files are not shown here.)
