Skip to content

Commit

Permalink
introduce gardener-node-agent gardener#8023
Browse files Browse the repository at this point in the history
  • Loading branch information
vknabel committed Jun 27, 2023
1 parent 85f5d9d commit ceffb38
Show file tree
Hide file tree
Showing 568 changed files with 77,355 additions and 2,498 deletions.
3 changes: 3 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
REGISTRY := eu.gcr.io/gardener-project/gardener
APISERVER_IMAGE_REPOSITORY := $(REGISTRY)/apiserver
CONTROLLER_MANAGER_IMAGE_REPOSITORY := $(REGISTRY)/controller-manager
NODE_AGENT_IMAGE_REPOSITORY := $(REGISTRY)/node-agent
SCHEDULER_IMAGE_REPOSITORY := $(REGISTRY)/scheduler
ADMISSION_IMAGE_REPOSITORY := $(REGISTRY)/admission-controller
RESOURCE_MANAGER_IMAGE_REPOSITORY := $(REGISTRY)/resource-manager
Expand Down Expand Up @@ -94,6 +95,7 @@ docker-images:
@echo "Building docker images with version and tag $(EFFECTIVE_VERSION)"
@docker build --build-arg EFFECTIVE_VERSION=$(EFFECTIVE_VERSION) -t $(APISERVER_IMAGE_REPOSITORY):$(EFFECTIVE_VERSION) -t $(APISERVER_IMAGE_REPOSITORY):latest -f Dockerfile --target apiserver .
@docker build --build-arg EFFECTIVE_VERSION=$(EFFECTIVE_VERSION) -t $(CONTROLLER_MANAGER_IMAGE_REPOSITORY):$(EFFECTIVE_VERSION) -t $(CONTROLLER_MANAGER_IMAGE_REPOSITORY):latest -f Dockerfile --target controller-manager .
@docker build --build-arg EFFECTIVE_VERSION=$(EFFECTIVE_VERSION) -t $(NODE_AGENT_IMAGE_REPOSITORY):$(EFFECTIVE_VERSION) -t $(NODE_AGENT_IMAGE_REPOSITORY):latest -f Dockerfile --target node-agent .
@docker build --build-arg EFFECTIVE_VERSION=$(EFFECTIVE_VERSION) -t $(SCHEDULER_IMAGE_REPOSITORY):$(EFFECTIVE_VERSION) -t $(SCHEDULER_IMAGE_REPOSITORY):latest -f Dockerfile --target scheduler .
@docker build --build-arg EFFECTIVE_VERSION=$(EFFECTIVE_VERSION) -t $(ADMISSION_IMAGE_REPOSITORY):$(EFFECTIVE_VERSION) -t $(ADMISSION_IMAGE_REPOSITORY):latest -f Dockerfile --target admission-controller .
@docker build --build-arg EFFECTIVE_VERSION=$(EFFECTIVE_VERSION) -t $(RESOURCE_MANAGER_IMAGE_REPOSITORY):$(EFFECTIVE_VERSION) -t $(RESOURCE_MANAGER_IMAGE_REPOSITORY):latest -f Dockerfile --target resource-manager .
Expand All @@ -105,6 +107,7 @@ docker-images:
docker-push:
@if ! docker images $(APISERVER_IMAGE_REPOSITORY) | awk '{ print $$2 }' | grep -q -F $(EFFECTIVE_VERSION); then echo "$(APISERVER_IMAGE_REPOSITORY) version $(EFFECTIVE_VERSION) is not yet built. Please run 'make docker-images'"; false; fi
@if ! docker images $(CONTROLLER_MANAGER_IMAGE_REPOSITORY) | awk '{ print $$2 }' | grep -q -F $(EFFECTIVE_VERSION); then echo "$(CONTROLLER_MANAGER_IMAGE_REPOSITORY) version $(EFFECTIVE_VERSION) is not yet built. Please run 'make docker-images'"; false; fi
@if ! docker images $(NODE_AGENT_IMAGE_REPOSITORY) | awk '{ print $$2 }' | grep -q -F $(EFFECTIVE_VERSION); then echo "$(NODE_AGENT_IMAGE_REPOSITORY) version $(EFFECTIVE_VERSION) is not yet built. Please run 'make docker-images'"; false; fi
@if ! docker images $(SCHEDULER_IMAGE_REPOSITORY) | awk '{ print $$2 }' | grep -q -F $(EFFECTIVE_VERSION); then echo "$(SCHEDULER_IMAGE_REPOSITORY) version $(EFFECTIVE_VERSION) is not yet built. Please run 'make docker-images'"; false; fi
@if ! docker images $(ADMISSION_IMAGE_REPOSITORY) | awk '{ print $$2 }' | grep -q -F $(EFFECTIVE_VERSION); then echo "$(ADMISSION_IMAGE_REPOSITORY) version $(EFFECTIVE_VERSION) is not yet built. Please run 'make docker-images'"; false; fi
@if ! docker images $(RESOURCE_MANAGER_IMAGE_REPOSITORY) | awk '{ print $$2 }' | grep -q -F $(EFFECTIVE_VERSION); then echo "$(RESOURCE_MANAGER_IMAGE_REPOSITORY) version $(EFFECTIVE_VERSION) is not yet built. Please run 'make docker-images'"; false; fi
Expand Down
3 changes: 3 additions & 0 deletions charts/images.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,9 @@ images:
name: resource-manager
sourceRepository: github.com/gardener/gardener
repository: eu.gcr.io/gardener-project/gardener/resource-manager
- name: gardener-node-agent
sourceRepository: github.com/gardener/gardener
repository: eu.gcr.io/gardener-project/gardener/node-agent

# Seed bootstrap
- name: pause-container
Expand Down
294 changes: 294 additions & 0 deletions cmd/gardener-node-agent/app/app.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,294 @@
// Copyright 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package app

import (
"context"
"fmt"
"io/fs"
"os"
"path/filepath"
"time"

"github.com/go-logr/logr"
"github.com/spf13/afero"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"go.uber.org/automaxprocs/maxprocs"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
configlatest "k8s.io/client-go/tools/clientcmd/api/latest"
clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"
"k8s.io/component-base/version"
"k8s.io/component-base/version/verflag"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
controllerconfigv1alpha1 "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/healthz"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"

"github.com/gardener/gardener/pkg/client/kubernetes"
"github.com/gardener/gardener/pkg/component/extensions/operatingsystemconfig/downloader"
"github.com/gardener/gardener/pkg/component/extensions/operatingsystemconfig/original/components/kubelet"
"github.com/gardener/gardener/pkg/features"
gardenerhealthz "github.com/gardener/gardener/pkg/healthz"
"github.com/gardener/gardener/pkg/logger"
nodeagentv1alpha1 "github.com/gardener/gardener/pkg/nodeagent/apis/config/v1alpha1"
"github.com/gardener/gardener/pkg/nodeagent/bootstrap"
"github.com/gardener/gardener/pkg/nodeagent/controller"
"github.com/gardener/gardener/pkg/nodeagent/controller/common"
"github.com/gardener/gardener/pkg/nodeagent/dbus"
)

// Name is the name of this component, used for the command name,
// log messages, and usage strings.
const Name = "gardener-node-agent"

// NewCommand creates a new cobra.Command for running gardener-node-agent.
// The root command starts the agent's controller manager (see run); a
// "bootstrap" subcommand performs the one-time node bootstrap via systemd.
func NewCommand() *cobra.Command {
	opts := &options{}

	cmd := &cobra.Command{
		Use:   Name,
		Short: "Launch the " + Name,
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Handle --version before doing any other work.
			verflag.PrintAndExitIfRequested()

			// TODO: Make log level and format configurable:
			// log, err := logger.NewZapLogger(opts.config.LogLevel, opts.config.LogFormat)
			log, err := logger.NewZapLogger(logger.DebugLevel, logger.FormatJSON)
			if err != nil {
				return fmt.Errorf("error instantiating zap logger: %w", err)
			}
			if err := opts.validate(); err != nil {
				return err
			}

			// Route controller-runtime and klog output through the same logger.
			logf.SetLogger(log)
			klog.SetLogger(log)

			log.Info("Starting "+Name, "version", version.Get())
			// Log the effective value of every flag for debuggability.
			cmd.Flags().VisitAll(func(flag *pflag.Flag) {
				log.Info(fmt.Sprintf("FLAG: --%s=%s", flag.Name, flag.Value)) //nolint:logcheck
			})

			// don't output usage on further errors raised during execution
			cmd.SilenceUsage = true
			// further errors will be logged properly, don't duplicate
			cmd.SilenceErrors = true

			return run(cmd.Context(), log)
		},
	}

	// Subcommand performing the initial node bootstrap (systemd unit setup).
	bootstrapCmd := &cobra.Command{
		Use:   "bootstrap",
		Short: "bootstrap the " + Name,
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			// TODO: Make log level and format configurable:
			// log, err := logger.NewZapLogger(opts.config.LogLevel, opts.config.LogFormat)
			log, err := logger.NewZapLogger(logger.DebugLevel, logger.FormatJSON)
			if err != nil {
				return err
			}
			if err := opts.validate(); err != nil {
				return err
			}
			db := dbus.New()
			return bootstrap.Bootstrap(cmd.Context(), log, db)
		},
	}

	cmd.AddCommand(bootstrapCmd)
	flags := cmd.Flags()
	verflag.AddFlags(flags)
	opts.addFlags(flags)

	return cmd
}

// run starts the gardener-node-agent: it ensures the access token for the
// shoot API server exists on disk (fetching it with the bootstrap token if
// necessary), writes the kubelet bootstrap kubeconfig, and then runs a
// controller-runtime manager hosting the node-agent controllers until ctx is
// cancelled.
func run(ctx context.Context, log logr.Logger) error {
	log.Info("Feature Gates", "featureGates", features.DefaultFeatureGate)

	// This is like importing the automaxprocs package for its init func (it will in turn call maxprocs.Set).
	// Here we pass a custom logger, so that the result of the library gets logged to the same logger we use for the
	// component itself.
	if _, err := maxprocs.Set(maxprocs.Logger(func(s string, i ...interface{}) {
		log.Info(fmt.Sprintf(s, i...)) //nolint:logcheck
	})); err != nil {
		log.Error(err, "Failed to set GOMAXPROCS")
	}

	// Check if token is present, else use bootstrap token to fetch token
	config, err := common.ReadNodeAgentConfiguration(afero.NewOsFs())
	if err != nil {
		return err
	}

	if _, err := os.Stat(nodeagentv1alpha1.NodeAgentTokenFilePath); err != nil {
		// Any stat error other than "file does not exist" is fatal.
		if !os.IsNotExist(err) {
			return fmt.Errorf("unable to read token: %w", err)
		}

		log.Info("Token not present, fetching token from API server")

		// Fetch token with bootstrap token and store it on disk
		restConfig := &rest.Config{
			Host:        config.APIServer.URL,
			BearerToken: config.APIServer.BootstrapToken,
			TLSClientConfig: rest.TLSClientConfig{
				CAData: []byte(config.APIServer.CA),
			},
		}

		// TODO(majst01): Remove this after v1.76 has been released.
		// Migration path: prefer the legacy downloader token file if it still exists.
		if _, err := os.Stat(downloader.PathCredentialsToken); err == nil {
			restConfig = &rest.Config{
				Host:            config.APIServer.URL,
				BearerTokenFile: downloader.PathCredentialsToken,
				TLSClientConfig: rest.TLSClientConfig{
					CAData: []byte(config.APIServer.CA),
				},
			}
		}

		c, err := client.New(restConfig, client.Options{})
		if err != nil {
			return fmt.Errorf("unable to create runtime client: %w", err)
		}

		tokenSecret := &corev1.Secret{}
		if err := c.Get(ctx, client.ObjectKey{Namespace: metav1.NamespaceSystem, Name: nodeagentv1alpha1.NodeAgentTokenSecretName}, tokenSecret); err != nil {
			return fmt.Errorf("unable to fetch token from API server: %w", err)
		}

		if len(tokenSecret.Data[nodeagentv1alpha1.NodeAgentTokenSecretKey]) == 0 {
			return fmt.Errorf("secret does not contain a %s key", nodeagentv1alpha1.NodeAgentTokenSecretKey)
		}

		log.Info("Token fetched from API server, writing it to disk")
		// 0600: the token is a credential, keep it readable by root only.
		if err := os.WriteFile(nodeagentv1alpha1.NodeAgentTokenFilePath, tokenSecret.Data[nodeagentv1alpha1.NodeAgentTokenSecretKey], 0600); err != nil {
			return fmt.Errorf("unable to write token to %s: %w", nodeagentv1alpha1.NodeAgentTokenFilePath, err)
		}

		log.Info("Token written to disk")
	}

	if err := writeBootstrapKubeconfigToDisk(config); err != nil {
		return err
	}

	log.Info("Token found, getting rest config")
	// Use BearerTokenFile (not BearerToken) so client-go re-reads the token
	// from disk when it is rotated.
	restConfig := &rest.Config{
		Host:            config.APIServer.URL,
		BearerTokenFile: nodeagentv1alpha1.NodeAgentTokenFilePath,
		TLSClientConfig: rest.TLSClientConfig{
			CAData: []byte(config.APIServer.CA),
		},
	}

	log.Info("Setting up manager")
	mgr, err := manager.New(restConfig, manager.Options{
		Logger:                  log,
		Scheme:                  kubernetes.ShootScheme,
		GracefulShutdownTimeout: pointer.Duration(5 * time.Second),

		// TODO: refine cache selector to allow only access to needed secrets instead
		Namespace: metav1.NamespaceSystem,

		// Exactly one node-agent runs per node, so no leader election is needed.
		LeaderElection: false,
		Controller: controllerconfigv1alpha1.ControllerConfigurationSpec{
			RecoverPanic: pointer.Bool(true),
		},
	})
	if err != nil {
		return err
	}

	// TODO: Make debugging configurable
	// if cfg.Debugging != nil && cfg.Debugging.EnableProfiling {
	// 	if err := (routes.Profiling{}).AddToManager(mgr); err != nil {
	// 		return fmt.Errorf("failed adding profiling handlers to manager: %w", err)
	// 	}
	// 	if cfg.Debugging.EnableContentionProfiling {
	// 		goruntime.SetBlockProfileRate(1)
	// 	}
	// }

	log.Info("Setting up health check endpoints")
	if err := mgr.AddHealthzCheck("ping", healthz.Ping); err != nil {
		return err
	}
	if err := mgr.AddReadyzCheck("informer-sync", gardenerhealthz.NewCacheSyncHealthz(mgr.GetCache())); err != nil {
		return err
	}

	log.Info("Adding controllers to manager")
	if err := controller.AddToManager(mgr); err != nil {
		return fmt.Errorf("failed adding controllers to manager: %w", err)
	}

	log.Info("Starting manager")
	return mgr.Start(ctx)
}

// writeBootstrapKubeconfigToDisk writes a kubeconfig for the kubelet's TLS
// bootstrap to kubelet.PathKubeconfigBootstrap, authenticating with the
// bootstrap token from the node-agent configuration. It is a no-op if the
// file already exists.
func writeBootstrapKubeconfigToDisk(config *nodeagentv1alpha1.NodeAgentConfiguration) error {
	// Keep an existing bootstrap kubeconfig untouched (the kubelet may
	// already have consumed it).
	if _, err := os.Stat(kubelet.PathKubeconfigBootstrap); err == nil {
		return nil
	}

	kubeconfig := &clientcmdv1.Config{
		Kind:       "Config",
		APIVersion: "v1",
		Clusters: []clientcmdv1.NamedCluster{{
			Name: "default",
			Cluster: clientcmdv1.Cluster{
				Server:                   config.APIServer.URL,
				CertificateAuthorityData: []byte(config.APIServer.CA),
			},
		}},
		CurrentContext: "kubelet-bootstrap@default",
		Contexts: []clientcmdv1.NamedContext{{
			Name: "kubelet-bootstrap@default",
			Context: clientcmdv1.Context{
				Cluster:  "default",
				AuthInfo: "kubelet-bootstrap",
			},
		}},
		AuthInfos: []clientcmdv1.NamedAuthInfo{{
			Name: "kubelet-bootstrap",
			AuthInfo: clientcmdv1.AuthInfo{
				Token:                config.APIServer.BootstrapToken,
				ImpersonateUserExtra: make(map[string][]string),
			},
		}},
	}

	raw, err := runtime.Encode(configlatest.Codec, kubeconfig)
	if err != nil {
		return fmt.Errorf("unable to encode kubeconfig: %w", err)
	}

	// fs.ModeDir alone carries no permission bits, so MkdirAll would create
	// the directory with mode 0000; OR in 0o755 so the directory is usable.
	if err := os.MkdirAll(filepath.Dir(kubelet.PathKubeconfigBootstrap), fs.ModeDir|0o755); err != nil {
		return fmt.Errorf("unable to create kubelet kubeconfig directory: %w", err)
	}

	// 0600: the kubeconfig contains the bootstrap token.
	return os.WriteFile(kubelet.PathKubeconfigBootstrap, raw, 0600)
}
Loading

0 comments on commit ceffb38

Please sign in to comment.