From 78f7514492ce8ff9ecf1aaf066b282a9c70b6ed5 Mon Sep 17 00:00:00 2001 From: Angelos Kolaitis Date: Sun, 14 Jul 2024 21:09:29 +0300 Subject: [PATCH 1/7] Introduce pkg/log package with logging utilities --- src/k8s/pkg/log/config.go | 22 ++++++++++++++++++++++ src/k8s/pkg/log/context.go | 21 +++++++++++++++++++++ src/k8s/pkg/log/default.go | 10 ++++++++++ src/k8s/pkg/log/doc.go | 33 +++++++++++++++++++++++++++++++++ src/k8s/pkg/log/types.go | 6 ++++++ 5 files changed, 92 insertions(+) create mode 100644 src/k8s/pkg/log/config.go create mode 100644 src/k8s/pkg/log/context.go create mode 100644 src/k8s/pkg/log/default.go create mode 100644 src/k8s/pkg/log/doc.go create mode 100644 src/k8s/pkg/log/types.go diff --git a/src/k8s/pkg/log/config.go b/src/k8s/pkg/log/config.go new file mode 100644 index 000000000..6f01fd86d --- /dev/null +++ b/src/k8s/pkg/log/config.go @@ -0,0 +1,22 @@ +package log + +import ( + "flag" + "fmt" + + "k8s.io/klog/v2" +) + +type Options struct { + LogLevel int + AddDirHeader bool +} + +// Configure sets global logging configuration (affects all loggers). +func Configure(o Options) { + flags := flag.NewFlagSet("klog", flag.ContinueOnError) + klog.InitFlags(flags) + + flags.Set("v", fmt.Sprintf("%v", o.LogLevel)) + flags.Set("add_dir_header", fmt.Sprintf("%v", o.AddDirHeader)) +} diff --git a/src/k8s/pkg/log/context.go b/src/k8s/pkg/log/context.go new file mode 100644 index 000000000..736394b09 --- /dev/null +++ b/src/k8s/pkg/log/context.go @@ -0,0 +1,21 @@ +package log + +import ( + "context" + + "github.com/go-logr/logr" +) + +// NewContext returns a new context that contains the logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return logr.NewContext(ctx, logger) +} + +// FromContext returns the logger associated with the context. +// FromContext returns the default log.L() logger if there is no logger on the context. 
+func FromContext(ctx context.Context) Logger { + if logger, err := logr.FromContext(ctx); err == nil { + return logger + } + return L() +} diff --git a/src/k8s/pkg/log/default.go b/src/k8s/pkg/log/default.go new file mode 100644 index 000000000..a82ee3e36 --- /dev/null +++ b/src/k8s/pkg/log/default.go @@ -0,0 +1,10 @@ +package log + +import "k8s.io/klog/v2" + +var logger = klog.NewKlogr().WithName("k8sd") + +// L is the default logger to use. +func L() Logger { + return logger +} diff --git a/src/k8s/pkg/log/doc.go b/src/k8s/pkg/log/doc.go new file mode 100644 index 000000000..98cff32ef --- /dev/null +++ b/src/k8s/pkg/log/doc.go @@ -0,0 +1,33 @@ +// Package log provides logging tools and capabilities to k8s-snap. +// +// Typical usage: +// +// // When a context is available +// log := log.FromContext(ctx) +// log.Info("Info message") +// log.V(3).Info("Info message that will be printed when log level is 3 or more") +// log.Error(err, "Failed to do something") +// +// // When no context is available, you can use log.L() to get a default logger. +// log := log.L() +// +// // To add structured fields to your logs +// log.WithValues("name", "k8sd-config").Info("Reconcile configmap") +// +// // To add structured fields for multiple logs +// log = log.WithValues("name", "k8sd-config") +// log.Info("Start reconcile") +// log.Info("Looking for changes") +// +// // You can create a new context with a logger (use in controllers or components) +// ctx = log.NewContext(ctx, log.FromContext(ctx).WithValues("key", value)) +// +// Log messages include the file name and line number of the log message. 
In case +// of utility functions, you can print the line number of the caller instead with: +// +// log.FromContext(ctx).WithCallDepth(1).Error(err, "Failed to format JSON output") +// +// To configure the logger behaviour, you can use this in the main package: +// +// log.Configure(log.Options{LogLevel: 3, AddDirHeader: true}) +package log diff --git a/src/k8s/pkg/log/types.go b/src/k8s/pkg/log/types.go new file mode 100644 index 000000000..c47a6390e --- /dev/null +++ b/src/k8s/pkg/log/types.go @@ -0,0 +1,6 @@ +package log + +import "k8s.io/klog/v2" + +// Logger is the standard klog.Logger type. +type Logger = klog.Logger From c20bdbb45e228cba0000b798ddba46496addab5e Mon Sep 17 00:00:00 2001 From: Angelos Kolaitis Date: Sun, 14 Jul 2024 21:16:36 +0300 Subject: [PATCH 2/7] use pkg/log instead of standard library log --- src/k8s/cmd/k8sd/k8sd.go | 9 ++++ src/k8s/cmd/main.go | 4 ++ src/k8s/cmd/util/formatter.go | 8 ++-- src/k8s/pkg/client/helm/client.go | 11 +++-- src/k8s/pkg/client/kubernetes/configmap.go | 7 +-- src/k8s/pkg/k8sd/api/cluster_remove.go | 15 +++--- src/k8s/pkg/k8sd/app/app.go | 17 +++---- src/k8s/pkg/k8sd/app/hooks_bootstrap.go | 48 ++++++++++--------- src/k8s/pkg/k8sd/app/hooks_join.go | 34 +++++++------ .../control_plane_configuration.go | 15 +++--- src/k8s/pkg/k8sd/controllers/feature.go | 5 +- .../k8sd/controllers/node_configuration.go | 9 ++-- .../controllers/update_node_configuration.go | 14 +++--- src/k8s/pkg/k8sd/features/cilium/network.go | 4 +- src/k8s/pkg/k8sd/setup/kube_proxy.go | 4 +- 15 files changed, 121 insertions(+), 83 deletions(-) diff --git a/src/k8s/cmd/k8sd/k8sd.go b/src/k8s/cmd/k8sd/k8sd.go index bda425a8b..3b92072e5 100644 --- a/src/k8s/cmd/k8sd/k8sd.go +++ b/src/k8s/cmd/k8sd/k8sd.go @@ -3,12 +3,14 @@ package k8sd import ( cmdutil "github.com/canonical/k8s/cmd/util" "github.com/canonical/k8s/pkg/k8sd/app" + "github.com/canonical/k8s/pkg/log" "github.com/spf13/cobra" ) var rootCmdOpts struct { logDebug bool logVerbose bool + 
logLevel int stateDir string pprofAddress string disableNodeConfigController bool @@ -22,6 +24,12 @@ func NewRootCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { Use: "k8sd", Short: "Canonical Kubernetes orchestrator and clustering daemon", Run: func(cmd *cobra.Command, args []string) { + // configure logging + log.Configure(log.Options{ + LogLevel: rootCmdOpts.logLevel, + AddDirHeader: true, + }) + app, err := app.New(app.Config{ Debug: rootCmdOpts.logDebug, Verbose: rootCmdOpts.logVerbose, @@ -51,6 +59,7 @@ func NewRootCmd(env cmdutil.ExecutionEnvironment) *cobra.Command { cmd.SetOut(env.Stdout) cmd.SetErr(env.Stderr) + cmd.PersistentFlags().IntVarP(&rootCmdOpts.logLevel, "log-level", "l", 0, "k8sd log level") cmd.PersistentFlags().BoolVarP(&rootCmdOpts.logDebug, "debug", "d", false, "Show all debug messages") cmd.PersistentFlags().BoolVarP(&rootCmdOpts.logVerbose, "verbose", "v", true, "Show all information messages") cmd.PersistentFlags().StringVar(&rootCmdOpts.stateDir, "state-dir", "", "Directory with the dqlite datastore") diff --git a/src/k8s/cmd/main.go b/src/k8s/cmd/main.go index 67306fd70..3c6906af6 100644 --- a/src/k8s/cmd/main.go +++ b/src/k8s/cmd/main.go @@ -12,6 +12,7 @@ import ( k8s_apiserver_proxy "github.com/canonical/k8s/cmd/k8s-apiserver-proxy" "github.com/canonical/k8s/cmd/k8sd" cmdutil "github.com/canonical/k8s/cmd/util" + "github.com/canonical/k8s/pkg/log" "github.com/spf13/cobra" ) @@ -21,6 +22,9 @@ func main() { ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) defer cancel() + // logging + ctx = log.NewContext(ctx, log.L()) + // ensure hooks from all commands are executed cobra.EnableTraverseRunHooks = true diff --git a/src/k8s/cmd/util/formatter.go b/src/k8s/cmd/util/formatter.go index b845d21f4..05cf50d5b 100644 --- a/src/k8s/cmd/util/formatter.go +++ b/src/k8s/cmd/util/formatter.go @@ -4,8 +4,8 @@ import ( "encoding/json" "fmt" "io" - "log" + "github.com/canonical/k8s/pkg/log" 
"gopkg.in/yaml.v2" ) @@ -35,7 +35,7 @@ type plainFormatter struct { func (p plainFormatter) Print(data any) { if _, err := fmt.Fprint(p.writer, data, "\n"); err != nil { - log.Printf("Failed to format output: %v", err) + log.L().WithCallDepth(1).Error(err, "Failed to format output") } } @@ -47,7 +47,7 @@ func (j jsonFormatter) Print(data any) { encoder := json.NewEncoder(j.writer) encoder.SetIndent("", " ") if err := encoder.Encode(data); err != nil { - log.Printf("Failed to format JSON output: %v", err) + log.L().WithCallDepth(1).Error(err, "Failed to format JSON output") } } @@ -57,6 +57,6 @@ type yamlFormatter struct { func (y yamlFormatter) Print(data any) { if err := yaml.NewEncoder(y.writer).Encode(data); err != nil { - log.Printf("Failed to format YAML output: %v", err) + log.L().WithCallDepth(1).Error(err, "Failed to format YAML output") } } diff --git a/src/k8s/pkg/client/helm/client.go b/src/k8s/pkg/client/helm/client.go index d06e40add..fdea33880 100644 --- a/src/k8s/pkg/client/helm/client.go +++ b/src/k8s/pkg/client/helm/client.go @@ -5,9 +5,9 @@ import ( "context" "encoding/json" "fmt" - "log" "path" + "github.com/canonical/k8s/pkg/log" "helm.sh/helm/v3/pkg/action" "helm.sh/helm/v3/pkg/chart/loader" "helm.sh/helm/v3/pkg/storage/driver" @@ -31,10 +31,13 @@ func NewClient(manifestsBaseDir string, restClientGetter func(string) genericcli } } -func (h *client) newActionConfiguration(namespace string) (*action.Configuration, error) { +func (h *client) newActionConfiguration(ctx context.Context, namespace string) (*action.Configuration, error) { actionConfig := new(action.Configuration) - if err := actionConfig.Init(h.restClientGetter(namespace), namespace, "", log.Printf); err != nil { + log := log.FromContext(ctx).WithName("helm") + if err := actionConfig.Init(h.restClientGetter(namespace), namespace, "", func(format string, v ...interface{}) { + log.Info(fmt.Sprintf(format, v...)) + }); err != nil { return nil, fmt.Errorf("failed to initialize: %w", err) 
} return actionConfig, nil @@ -42,7 +45,7 @@ func (h *client) newActionConfiguration(namespace string) (*action.Configuration // Apply implements the Client interface. func (h *client) Apply(ctx context.Context, c InstallableChart, desired State, values map[string]any) (bool, error) { - cfg, err := h.newActionConfiguration(c.Namespace) + cfg, err := h.newActionConfiguration(ctx, c.Namespace) if err != nil { return false, fmt.Errorf("failed to create action configuration: %w", err) } diff --git a/src/k8s/pkg/client/kubernetes/configmap.go b/src/k8s/pkg/client/kubernetes/configmap.go index 13770ba69..234ddba47 100644 --- a/src/k8s/pkg/client/kubernetes/configmap.go +++ b/src/k8s/pkg/client/kubernetes/configmap.go @@ -3,17 +3,18 @@ package kubernetes import ( "context" "fmt" - "log" + "github.com/canonical/k8s/pkg/log" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" applyv1 "k8s.io/client-go/applyconfigurations/core/v1" ) func (c *Client) WatchConfigMap(ctx context.Context, namespace string, name string, reconcile func(configMap *v1.ConfigMap) error) error { + log := log.FromContext(ctx).WithValues("namespace", namespace, "name", name) w, err := c.CoreV1().ConfigMaps(namespace).Watch(ctx, metav1.SingleObject(metav1.ObjectMeta{Name: name})) if err != nil { - return fmt.Errorf("failed to watch configmap, namespace: %s name: %s: %w", namespace, name, err) + return fmt.Errorf("failed to watch configmap namespace=%s name=%s: %w", namespace, name, err) } defer w.Stop() for { @@ -30,7 +31,7 @@ func (c *Client) WatchConfigMap(ctx context.Context, namespace string, name stri } if err := reconcile(configMap); err != nil { - log.Println(fmt.Errorf("failed to reconcile configmap, namespace: %s name: %s: %w", namespace, name, err)) + log.Error(err, "Reconcile ConfigMap failed") } } } diff --git a/src/k8s/pkg/k8sd/api/cluster_remove.go b/src/k8s/pkg/k8sd/api/cluster_remove.go index f70c0fbcf..6816e53f2 100644 --- a/src/k8s/pkg/k8sd/api/cluster_remove.go +++ 
b/src/k8s/pkg/k8sd/api/cluster_remove.go @@ -4,10 +4,10 @@ import ( "context" "database/sql" "fmt" - "log" "net/http" apiv1 "github.com/canonical/k8s/api/v1" + "github.com/canonical/k8s/pkg/log" "github.com/canonical/k8s/pkg/utils" "github.com/canonical/k8s/pkg/utils/control" nodeutil "github.com/canonical/k8s/pkg/utils/node" @@ -32,29 +32,32 @@ func (e *Endpoints) postClusterRemove(s *state.State, r *http.Request) response. defer cancel() } + log := log.FromContext(s.Context).WithValues("name", req.Name) + isControlPlane, err := nodeutil.IsControlPlaneNode(ctx, s, req.Name) if err != nil { return response.InternalError(fmt.Errorf("failed to check if node is control-plane: %w", err)) } if isControlPlane { - log.Printf("Waiting for node to not be pending") + log.Info("Waiting for node to not be pending") control.WaitUntilReady(ctx, func() (bool, error) { var notPending bool if err := s.Database.Transaction(ctx, func(ctx context.Context, tx *sql.Tx) error { member, err := cluster.GetInternalClusterMember(ctx, tx, req.Name) if err != nil { - log.Printf("Failed to get member: %v", err) + log.Error(err, "Failed to get member") return nil } - log.Printf("Node %s is %s", member.Name, member.Role) + log.WithValues("role", member.Role).Info("Current node role") notPending = member.Role != cluster.Pending return nil }); err != nil { - log.Printf("Transaction to check cluster member role failed: %v", err) + log.Error(err, "Transaction to check cluster member role failed") } return notPending, nil }) - log.Printf("Starting node deletion") + + log.Info("Starting node deletion") // Remove control plane via microcluster API. // The postRemove hook will take care of cleaning up kubernetes. 
diff --git a/src/k8s/pkg/k8sd/app/app.go b/src/k8s/pkg/k8sd/app/app.go index bbc9f30f2..eafae0639 100644 --- a/src/k8s/pkg/k8sd/app/app.go +++ b/src/k8s/pkg/k8sd/app/app.go @@ -3,7 +3,6 @@ package app import ( "context" "fmt" - "log" "net/http" "net/http/pprof" "sync" @@ -12,6 +11,7 @@ import ( "github.com/canonical/k8s/pkg/k8sd/api" "github.com/canonical/k8s/pkg/k8sd/controllers" "github.com/canonical/k8s/pkg/k8sd/database" + "github.com/canonical/k8s/pkg/log" "github.com/canonical/k8s/pkg/snap" "github.com/canonical/microcluster/client" "github.com/canonical/microcluster/config" @@ -103,7 +103,7 @@ func New(cfg Config) (*App, error) { app.readyWg.Wait, ) } else { - log.Println("node-config-controller disabled via config") + log.L().Info("node-config-controller disabled via config") } if !cfg.DisableControlPlaneConfigController { @@ -113,7 +113,7 @@ func New(cfg Config) (*App, error) { time.NewTicker(10*time.Second).C, ) } else { - log.Println("control-plane-config-controller disabled via config") + log.L().Info("control-plane-config-controller disabled via config") } app.triggerUpdateNodeConfigControllerCh = make(chan struct{}, 1) @@ -125,7 +125,7 @@ func New(cfg Config) (*App, error) { app.triggerUpdateNodeConfigControllerCh, ) } else { - log.Println("update-node-config-controller disabled via config") + log.L().Info("update-node-config-controller disabled via config") } app.triggerFeatureControllerNetworkCh = make(chan struct{}, 1) @@ -149,9 +149,8 @@ func New(cfg Config) (*App, error) { TriggerMetricsServerCh: app.triggerFeatureControllerMetricsServerCh, }) } else { - log.Println("feature-controller disabled via config") + log.L().Info("feature-controller disabled via config") } - return app, nil } @@ -180,9 +179,11 @@ func (a *App) Run(ctx context.Context, customHooks *config.Hooks) error { } } + log := log.FromContext(ctx) + // start profiling server if a.profilingAddress != "" { - log.Printf("Enable pprof endpoint at http://%s", a.profilingAddress) + 
log.WithValues("address", fmt.Sprintf("http://%s", a.profilingAddress)).Info("Enable pprof endpoint") go func() { mux := http.NewServeMux() @@ -193,7 +194,7 @@ func (a *App) Run(ctx context.Context, customHooks *config.Hooks) error { mux.HandleFunc("/debug/pprof/trace", pprof.Trace) if err := http.ListenAndServe(a.profilingAddress, mux); err != nil { - log.Printf("ERROR: Failed to serve pprof endpoint: %v", err) + log.Error(err, "Failed to serve pprof endpoint") } }() } diff --git a/src/k8s/pkg/k8sd/app/hooks_bootstrap.go b/src/k8s/pkg/k8sd/app/hooks_bootstrap.go index de686fe63..f93f7bb8a 100644 --- a/src/k8s/pkg/k8sd/app/hooks_bootstrap.go +++ b/src/k8s/pkg/k8sd/app/hooks_bootstrap.go @@ -7,7 +7,6 @@ import ( "database/sql" "encoding/json" "fmt" - "log" "net" "net/http" "os" @@ -18,6 +17,7 @@ import ( "github.com/canonical/k8s/pkg/k8sd/pki" "github.com/canonical/k8s/pkg/k8sd/setup" "github.com/canonical/k8s/pkg/k8sd/types" + "github.com/canonical/k8s/pkg/log" snaputil "github.com/canonical/k8s/pkg/snap/util" "github.com/canonical/k8s/pkg/utils" "github.com/canonical/k8s/pkg/utils/experimental/snapdconfig" @@ -55,13 +55,15 @@ func (a *App) onBootstrap(s *state.State, initConfig map[string]string) error { func (a *App) onBootstrapWorkerNode(ctx context.Context, s *state.State, encodedToken string, joinConfig apiv1.WorkerNodeJoinConfig) (rerr error) { snap := a.Snap() + log := log.FromContext(ctx).WithValues("hook", "join") + // make sure to cleanup in case of errors // the code can register cleanup hooks by appending to this slice var cleanups []func(context.Context) error defer func() { // do not cleanup if bootstrap was successful if rerr == nil { - log.Println("Joined cluster successfully") + log.Info("Joined cluster successfully") return } @@ -72,18 +74,18 @@ func (a *App) onBootstrapWorkerNode(ctx context.Context, s *state.State, encoded // start goroutine to cleanup on the background and return quickly go func() { - log.Printf("Join cluster failed: %v", rerr) 
- log.Println("Cleaning up...") + log.Error(rerr, "Failed to join cluster") + log.Info("Cleaning up") for i := len(cleanups) - 1; i >= 0; i-- { // run cleanup functions in reverse order if err := cleanups[i](s.Context); err != nil { - log.Printf("Cleanup hook %d/%d failed: %v", i, len(cleanups), err) + log.Error(err, fmt.Sprintf("Cleanup hook %d/%d failed", i, len(cleanups))) } } - log.Println("All cleanup hooks finished, resetting microcluster state") + log.Info("All cleanup hooks finished, resetting microcluster state") if err := a.client.ResetClusterMember(s.Context, s.Name(), true); err != nil { - log.Printf("Failed to ResetClusterMember: %v", err) + log.Error(err, "Failed to ResetClusterMember") } }() }() @@ -195,7 +197,7 @@ func (a *App) onBootstrapWorkerNode(ctx context.Context, s *state.State, encoded return fmt.Errorf("failed to initialize worker node certificates: %w", err) } cleanups = append(cleanups, func(ctx context.Context) error { - log.Println("Cleaning up worker certificates") + log.Info("Cleaning up worker certificates") if _, err := setup.EnsureWorkerPKI(snap, &pki.WorkerNodePKI{}); err != nil { return fmt.Errorf("failed to cleanup worker certificates: %w", err) } @@ -247,7 +249,7 @@ func (a *App) onBootstrapWorkerNode(ctx context.Context, s *state.State, encoded cleanups = append(cleanups, func(ctx context.Context) error { for _, dir := range []string{snap.ServiceArgumentsDir()} { - log.Printf("Cleaning up config files from %v", dir) + log.WithValues("directory", dir).Info("Cleaning up config files") if err := os.RemoveAll(dir); err != nil { return fmt.Errorf("failed to delete %v: %w", dir, err) } @@ -273,7 +275,7 @@ func (a *App) onBootstrapWorkerNode(ctx context.Context, s *state.State, encoded } cleanups = append(cleanups, func(ctx context.Context) error { - log.Println("Removing worker node mark") + log.Info("Removing worker node mark") if err := snaputil.MarkAsWorkerNode(snap, false); err != nil { return fmt.Errorf("failed to unmark node 
as worker: %w", err) } @@ -285,7 +287,7 @@ func (a *App) onBootstrapWorkerNode(ctx context.Context, s *state.State, encoded // Start services cleanups = append(cleanups, func(ctx context.Context) error { - log.Println("Stopping worker services") + log.Info("Stopping worker services") if err := snaputil.StopWorkerServices(ctx, snap); err != nil { return fmt.Errorf("failed to start worker services: %w", err) } @@ -301,13 +303,15 @@ func (a *App) onBootstrapWorkerNode(ctx context.Context, s *state.State, encoded func (a *App) onBootstrapControlPlane(ctx context.Context, s *state.State, bootstrapConfig apiv1.BootstrapConfig) (rerr error) { snap := a.Snap() + log := log.FromContext(ctx).WithValues("hook", "bootstrap") + // make sure to cleanup in case of errors // the code can register cleanup hooks by appending to this slice var cleanups []func(context.Context) error defer func() { // do not cleanup if bootstrap was successful if rerr == nil { - log.Println("Bootstrapped cluster successfully") + log.Info("Bootstrapped cluster successfully") return } @@ -318,18 +322,18 @@ func (a *App) onBootstrapControlPlane(ctx context.Context, s *state.State, boots // start goroutine to cleanup on the background and return quickly go func() { - log.Printf("Bootstrap failed: %v", rerr) - log.Println("Cleaning up...") + log.Error(rerr, "Failed to bootstrap cluster") + log.Info("Cleaning up") for i := len(cleanups) - 1; i >= 0; i-- { // run cleanup functions in reverse order if err := cleanups[i](s.Context); err != nil { - log.Printf("Cleanup hook %d/%d failed: %v", i, len(cleanups), err) + log.Error(err, fmt.Sprintf("Cleanup hook %d/%d failed", i, len(cleanups))) } } - log.Println("All cleanup hooks finished, resetting microcluster state") + log.Info("All cleanup hooks finished, resetting microcluster state") if err := a.client.ResetClusterMember(s.Context, s.Name(), true); err != nil { - log.Printf("Failed to ResetClusterMember: %v", err) + log.Error(err, "Failed to 
ResetClusterMember") } }() }() @@ -386,7 +390,7 @@ func (a *App) onBootstrapControlPlane(ctx context.Context, s *state.State, boots return fmt.Errorf("failed to initialize external datastore certificates: %w", err) } cleanups = append(cleanups, func(ctx context.Context) error { - log.Println("Cleaning up external datastore certificates") + log.Info("Cleaning up external datastore certificates") if _, err := setup.EnsureExtDatastorePKI(snap, &pki.ExternalDatastorePKI{}); err != nil { return fmt.Errorf("failed to cleanup external datastore certificates: %w", err) } @@ -442,7 +446,7 @@ func (a *App) onBootstrapControlPlane(ctx context.Context, s *state.State, boots } cleanups = append(cleanups, func(ctx context.Context) error { - log.Println("Cleaning up control plane certificates") + log.Info("Cleaning up control plane certificates") if _, err := setup.EnsureControlPlanePKI(snap, &pki.ControlPlanePKI{}); err != nil { return fmt.Errorf("failed to cleanup control plane certificates: %w", err) } @@ -481,7 +485,7 @@ func (a *App) onBootstrapControlPlane(ctx context.Context, s *state.State, boots switch cfg.Datastore.GetType() { case "k8s-dqlite": cleanups = append(cleanups, func(ctx context.Context) error { - log.Println("Cleaning up k8s-dqlite directory") + log.Info("Cleaning up k8s-dqlite directory") if err := os.RemoveAll(snap.K8sDqliteStateDir()); err != nil { return fmt.Errorf("failed to cleanup k8s-dqlite state directory: %w", err) } @@ -496,7 +500,7 @@ func (a *App) onBootstrapControlPlane(ctx context.Context, s *state.State, boots } cleanups = append(cleanups, func(ctx context.Context) error { for _, dir := range []string{snap.ServiceArgumentsDir()} { - log.Printf("Cleaning up config files from %v", dir) + log.WithValues("directory", dir).Info("Cleaning up config files") if err := os.RemoveAll(dir); err != nil { return fmt.Errorf("failed to delete %v: %w", dir, err) } @@ -544,7 +548,7 @@ func (a *App) onBootstrapControlPlane(ctx context.Context, s 
*state.State, boots // Start services cleanups = append(cleanups, func(ctx context.Context) error { - log.Println("Stopping control plane services") + log.Info("Stopping control plane services") if err := stopControlPlaneServices(ctx, snap, cfg.Datastore.GetType()); err != nil { return fmt.Errorf("failed to stop services: %w", err) } diff --git a/src/k8s/pkg/k8sd/app/hooks_join.go b/src/k8s/pkg/k8sd/app/hooks_join.go index 788ff3285..e6bf41545 100644 --- a/src/k8s/pkg/k8sd/app/hooks_join.go +++ b/src/k8s/pkg/k8sd/app/hooks_join.go @@ -4,7 +4,6 @@ import ( "context" "database/sql" "fmt" - "log" "net" "os" @@ -12,6 +11,7 @@ import ( databaseutil "github.com/canonical/k8s/pkg/k8sd/database/util" "github.com/canonical/k8s/pkg/k8sd/pki" "github.com/canonical/k8s/pkg/k8sd/setup" + "github.com/canonical/k8s/pkg/log" "github.com/canonical/k8s/pkg/utils" "github.com/canonical/k8s/pkg/utils/control" "github.com/canonical/k8s/pkg/utils/experimental/snapdconfig" @@ -24,6 +24,8 @@ import ( func (a *App) onPostJoin(s *state.State, initConfig map[string]string) (rerr error) { snap := a.Snap() + log := log.FromContext(s.Context).WithValues("hook", "join") + // NOTE(neoaggelos): context timeout is passed over configuration, so that hook failures are propagated to the client ctx, cancel := context.WithCancel(s.Context) defer cancel() @@ -38,7 +40,7 @@ func (a *App) onPostJoin(s *state.State, initConfig map[string]string) (rerr err defer func() { // do not cleanup if joining was successful if rerr == nil { - log.Println("Joined cluster successfully") + log.Info("Joined cluster successfully") return } @@ -49,43 +51,43 @@ func (a *App) onPostJoin(s *state.State, initConfig map[string]string) (rerr err // start goroutine to cleanup on the background and return quickly go func() { - log.Printf("Join cluster failed: %v", rerr) + log.Error(rerr, "Failed to join cluster") - log.Printf("Waiting for node to finish microcluster join") + log.Info("Waiting for node to finish microcluster join 
before removing") control.WaitUntilReady(s.Context, func() (bool, error) { var notPending bool if err := s.Database.Transaction(s.Context, func(ctx context.Context, tx *sql.Tx) error { member, err := cluster.GetInternalClusterMember(ctx, tx, s.Name()) if err != nil { - log.Printf("Failed to get member: %v", err) + log.Error(err, "Failed to get member") return nil } notPending = member.Role != cluster.Pending return nil }); err != nil { - log.Printf("Transaction to check cluster member role failed: %v", err) + log.Error(err, "Failed database transaction to check cluster member role") } return notPending, nil }) - log.Println("Cleaning up...") + log.Info("Cleaning up") for i := len(cleanups) - 1; i >= 0; i-- { // run cleanup functions in reverse order if err := cleanups[i](s.Context); err != nil { - log.Printf("Cleanup hook %d/%d failed: %v", i, len(cleanups), err) + log.Error(err, fmt.Sprintf("Cleanup hook %d/%d failed", i, len(cleanups))) } } - log.Println("All cleanup hooks finished, removing node from microcluster") + log.Info("All cleanup hooks finished, removing node from microcluster") // NOTE(neoaggelos): this also runs the pre-remove hook and resets the cluster member control.WaitUntilReady(s.Context, func() (bool, error) { client, err := s.Leader() if err != nil { - log.Printf("Error: failed to create client to leader: %v", err) + log.Error(err, "Failed to create client to dqlite leader") return false, nil } if err := client.DeleteClusterMember(s.Context, s.Name(), true); err != nil { - log.Printf("Error: failed to DeleteClusterMember: %v", err) + log.Error(err, "Failed to DeleteClusterMember") return false, nil } return true, nil @@ -192,7 +194,7 @@ func (a *App) onPostJoin(s *state.State, initConfig map[string]string) (rerr err // Write certificates to disk cleanups = append(cleanups, func(ctx context.Context) error { - log.Println("Cleaning up control plane certificates") + log.Info("Cleaning up control plane certificates") if _, err := 
setup.EnsureControlPlanePKI(snap, &pki.ControlPlanePKI{}); err != nil { return fmt.Errorf("failed to cleanup control plane certificates: %w", err) } @@ -234,7 +236,7 @@ func (a *App) onPostJoin(s *state.State, initConfig map[string]string) (rerr err cleanups = append(cleanups, func(ctx context.Context) error { for _, dir := range []string{snap.ServiceArgumentsDir()} { - log.Printf("Cleaning up config files from %v", dir) + log.WithValues("directory", dir).Info("Cleaning up config files") if err := os.RemoveAll(dir); err != nil { return fmt.Errorf("failed to delete %v: %w", dir, err) } @@ -272,7 +274,7 @@ func (a *App) onPostJoin(s *state.State, initConfig map[string]string) (rerr err // Start services cleanups = append(cleanups, func(ctx context.Context) error { - log.Println("Stopping control plane services") + log.Info("Stopping control plane services") if err := stopControlPlaneServices(ctx, snap, cfg.Datastore.GetType()); err != nil { return fmt.Errorf("failed to stop services: %w", err) } @@ -293,13 +295,15 @@ func (a *App) onPostJoin(s *state.State, initConfig map[string]string) (rerr err func (a *App) onPreRemove(s *state.State, force bool) (rerr error) { snap := a.Snap() + log := log.FromContext(s.Context).WithValues("hook", "preremove") + // NOTE(neoaggelos): When the pre-remove hook fails, the microcluster node will // be removed from the cluster members, but remains in the microcluster dqlite database. // // Log the error and proceed, such that the node is in fact removed. 
defer func() { if rerr != nil { - log.Printf("WARNING: There was an error when running the pre-remove hook: %v", rerr) + log.Error(rerr, "Failure during hook") } rerr = nil }() } diff --git a/src/k8s/pkg/k8sd/controllers/control_plane_configuration.go b/src/k8s/pkg/k8sd/controllers/control_plane_configuration.go index 0ff72c5c2..4213c8ac2 100644 --- a/src/k8s/pkg/k8sd/controllers/control_plane_configuration.go +++ b/src/k8s/pkg/k8sd/controllers/control_plane_configuration.go @@ -3,12 +3,12 @@ package controllers import ( "context" "fmt" - "log" "time" "github.com/canonical/k8s/pkg/k8sd/pki" "github.com/canonical/k8s/pkg/k8sd/setup" "github.com/canonical/k8s/pkg/k8sd/types" + "github.com/canonical/k8s/pkg/log" "github.com/canonical/k8s/pkg/snap" snaputil "github.com/canonical/k8s/pkg/snap/util" "github.com/canonical/k8s/pkg/utils/experimental/snapdconfig" @@ -39,6 +39,9 @@ func NewControlPlaneConfigurationController(snap snap.Snap, waitReady func(), tr func (c *ControlPlaneConfigurationController) Run(ctx context.Context, getClusterConfig func(context.Context) (types.ClusterConfig, error)) { c.waitReady() + ctx = log.NewContext(ctx, log.FromContext(ctx).WithValues("controller", "controlplaneconfiguration")) + log := log.FromContext(ctx) + for { select { case <-ctx.Done(): @@ -47,21 +50,21 @@ func (c *ControlPlaneConfigurationController) Run(ctx context.Context, getCluste } if isWorker, err := snaputil.IsWorker(c.snap); err != nil { - log.Println(fmt.Errorf("failed to check if this is a worker node: %w", err)) + log.Error(err, "Failed to check if running on a worker node") continue } else if isWorker { - log.Println("Stopping control plane controller as this is a worker node") + log.Info("Stopping on worker node") return } config, err := getClusterConfig(ctx) if err != nil { - log.Println(fmt.Errorf("failed to retrieve cluster config: %w", err)) + log.Error(err, "Failed to retrieve cluster configuration") continue } if err := c.reconcile(ctx, config); err != nil 
{ - log.Println(fmt.Errorf("failed to reconcile control plane configuration: %w", err)) + log.Error(err, "Failed to reconcile control plane configuration") } } } @@ -111,7 +114,7 @@ func (c *ControlPlaneConfigurationController) reconcile(ctx context.Context, con // snapd if meta, _, err := snapdconfig.ParseMeta(ctx, c.snap); err == nil && meta.Orb != "none" { if err := snapdconfig.SetSnapdFromK8sd(ctx, config.ToUserFacing(), c.snap); err != nil { - log.Printf("Warning: failed to update snapd configuration: %v", err) + log.FromContext(ctx).Error(err, "Failed to update snapd configuration") } } diff --git a/src/k8s/pkg/k8sd/controllers/feature.go b/src/k8s/pkg/k8sd/controllers/feature.go index e3dadb4f1..13ea26666 100644 --- a/src/k8s/pkg/k8sd/controllers/feature.go +++ b/src/k8s/pkg/k8sd/controllers/feature.go @@ -3,11 +3,11 @@ package controllers import ( "context" "fmt" - "log" "time" "github.com/canonical/k8s/pkg/k8sd/features" "github.com/canonical/k8s/pkg/k8sd/types" + "github.com/canonical/k8s/pkg/log" "github.com/canonical/k8s/pkg/snap" "github.com/canonical/k8s/pkg/utils" ) @@ -71,6 +71,7 @@ func NewFeatureController(opts FeatureControllerOpts) *FeatureController { func (c *FeatureController) Run(ctx context.Context, getClusterConfig func(context.Context) (types.ClusterConfig, error), notifyDNSChangedIP func(ctx context.Context, dnsIP string) error) { c.waitReady() + ctx = log.NewContext(ctx, log.FromContext(ctx).WithValues("controller", "feature")) go c.reconcileLoop(ctx, getClusterConfig, "network", c.triggerNetworkCh, c.reconciledNetworkCh, func(cfg types.ClusterConfig) error { return features.Implementation.ApplyNetwork(ctx, c.snap, cfg.Network, cfg.Annotations) @@ -127,7 +128,7 @@ func (c *FeatureController) reconcileLoop(ctx context.Context, getClusterConfig return case <-triggerCh: if err := c.reconcile(ctx, getClusterConfig, apply); err != nil { - log.Printf("failed to reconcile %s configuration, will retry in 5 seconds: %v", componentName, err) + 
log.FromContext(ctx).WithValues("feature", componentName).Error(err, "Failed to apply feature configuration") // notify triggerCh after 5 seconds to retry time.AfterFunc(5*time.Second, func() { utils.MaybeNotify(triggerCh) }) diff --git a/src/k8s/pkg/k8sd/controllers/node_configuration.go b/src/k8s/pkg/k8sd/controllers/node_configuration.go index b398b41ab..0df3fa5c4 100644 --- a/src/k8s/pkg/k8sd/controllers/node_configuration.go +++ b/src/k8s/pkg/k8sd/controllers/node_configuration.go @@ -4,11 +4,11 @@ import ( "context" "crypto/rsa" "fmt" - "log" "time" "github.com/canonical/k8s/pkg/client/kubernetes" "github.com/canonical/k8s/pkg/k8sd/types" + "github.com/canonical/k8s/pkg/log" "github.com/canonical/k8s/pkg/snap" snaputil "github.com/canonical/k8s/pkg/snap/util" v1 "k8s.io/api/core/v1" @@ -45,16 +45,19 @@ func (c *NodeConfigurationController) Run(ctx context.Context, getRSAKey func(co // wait for microcluster node to be ready c.waitReady() + ctx = log.NewContext(ctx, log.FromContext(ctx).WithValues("controller", "nodeconfiguration")) + log := log.FromContext(ctx) + for { client, err := c.retryNewK8sClient(ctx) if err != nil { - log.Println(fmt.Errorf("failed to create a Kubernetes client: %w", err)) + log.Error(err, "Failed to create a Kubernetes client") } if err := client.WatchConfigMap(ctx, "kube-system", "k8sd-config", func(configMap *v1.ConfigMap) error { return c.reconcile(ctx, configMap, getRSAKey) }); err != nil { // This also can fail during bootstrapping/start up when api-server is not ready // So the watch requests get connection refused replies - log.Println(fmt.Errorf("failed to watch configmap: %w", err)) + log.WithValues("name", "k8sd-config", "namespace", "kube-system").Error(err, "Failed to watch configmap") } select { diff --git a/src/k8s/pkg/k8sd/controllers/update_node_configuration.go b/src/k8s/pkg/k8sd/controllers/update_node_configuration.go index 5f1b51a32..9b5e6a8d9 100644 --- a/src/k8s/pkg/k8sd/controllers/update_node_configuration.go 
+++ b/src/k8s/pkg/k8sd/controllers/update_node_configuration.go @@ -3,11 +3,11 @@ package controllers import ( "context" "fmt" - "log" "time" "github.com/canonical/k8s/pkg/client/kubernetes" "github.com/canonical/k8s/pkg/k8sd/types" + "github.com/canonical/k8s/pkg/log" "github.com/canonical/k8s/pkg/snap" snaputil "github.com/canonical/k8s/pkg/snap/util" pkiutil "github.com/canonical/k8s/pkg/utils/pki" @@ -58,6 +58,8 @@ func (c *UpdateNodeConfigurationController) retryNewK8sClient(ctx context.Contex func (c *UpdateNodeConfigurationController) Run(ctx context.Context, getClusterConfig func(context.Context) (types.ClusterConfig, error)) { c.waitReady() + log := log.FromContext(ctx).WithValues("controller", "updatenodeconfiguration") + for { select { case <-ctx.Done(): @@ -66,26 +68,26 @@ func (c *UpdateNodeConfigurationController) Run(ctx context.Context, getClusterC } if isWorker, err := snaputil.IsWorker(c.snap); err != nil { - log.Println(fmt.Errorf("failed to check if this is a worker node: %w", err)) + log.Error(err, "Failed to check if running on a worker node") continue } else if isWorker { - log.Println("Stopping UpdateClusterConfig controller as this is a worker node") + log.Info("Stopping on worker node") return } config, err := getClusterConfig(ctx) if err != nil { - log.Println(fmt.Errorf("failed to retrieve cluster config: %w", err)) + log.Error(err, "Failed to retrieve cluster configuration") continue } client, err := c.retryNewK8sClient(ctx) if err != nil { - log.Println(fmt.Errorf("failed to create a Kubernetes client: %w", err)) + log.Error(err, "Failed to create Kubernetes client") } if err := c.reconcile(ctx, client, config); err != nil { - log.Println(fmt.Errorf("failed to reconcile cluster configuration: %w", err)) + log.Error(err, "Failed to reconcile cluster configuration") } // notify downstream that the reconciliation loop is done. 
diff --git a/src/k8s/pkg/k8sd/features/cilium/network.go b/src/k8s/pkg/k8sd/features/cilium/network.go index 13f268f19..ade80715d 100644 --- a/src/k8s/pkg/k8sd/features/cilium/network.go +++ b/src/k8s/pkg/k8sd/features/cilium/network.go @@ -3,10 +3,10 @@ package cilium import ( "context" "fmt" - "log" "github.com/canonical/k8s/pkg/client/helm" "github.com/canonical/k8s/pkg/k8sd/types" + "github.com/canonical/k8s/pkg/log" "github.com/canonical/k8s/pkg/snap" "github.com/canonical/k8s/pkg/utils" "github.com/canonical/k8s/pkg/utils/control" @@ -102,7 +102,7 @@ func ApplyNetwork(ctx context.Context, snap snap.Snap, cfg types.Network, _ type if p == "private" { onLXD, err := snap.OnLXD(ctx) if err != nil { - log.Printf("failed to check if on LXD: %v", err) + log.FromContext(ctx).Error(err, "Failed to check if running on LXD") } if onLXD { return fmt.Errorf("/sys is not a shared mount on the LXD container, this might be resolved by updating LXD on the host to version 5.0.2 or newer") diff --git a/src/k8s/pkg/k8sd/setup/kube_proxy.go b/src/k8s/pkg/k8sd/setup/kube_proxy.go index e2307d861..2fcbabc51 100644 --- a/src/k8s/pkg/k8sd/setup/kube_proxy.go +++ b/src/k8s/pkg/k8sd/setup/kube_proxy.go @@ -3,9 +3,9 @@ package setup import ( "context" "fmt" - "log" "path" + "github.com/canonical/k8s/pkg/log" "github.com/canonical/k8s/pkg/snap" snaputil "github.com/canonical/k8s/pkg/snap/util" "github.com/canonical/k8s/pkg/utils" @@ -25,7 +25,7 @@ func KubeProxy(ctx context.Context, snap snap.Snap, hostname string, podCIDR str } onLXD, err := snap.OnLXD(ctx) if err != nil { - log.Printf("failed to check if on lxd: %v", err) + log.FromContext(ctx).Error(err, "Failed to check if running on LXD") } if onLXD { // A container cannot set this sysctl config in LXD. So, we disable it by setting it to "0". 
From d34c74a1451c88514a2b6888caf116e05f7e4d42 Mon Sep 17 00:00:00 2001 From: Angelos Kolaitis Date: Sun, 14 Jul 2024 21:17:03 +0300 Subject: [PATCH 3/7] cleanup not useful log messages --- src/k8s/pkg/k8sd/api/impl/k8sd.go | 20 ++++++-------------- src/k8s/pkg/snap/mock/runner.go | 4 ---- src/k8s/pkg/utils/certificate.go | 2 -- 3 files changed, 6 insertions(+), 20 deletions(-) diff --git a/src/k8s/pkg/k8sd/api/impl/k8sd.go b/src/k8s/pkg/k8sd/api/impl/k8sd.go index 840b9cc17..8751ddefd 100644 --- a/src/k8s/pkg/k8sd/api/impl/k8sd.go +++ b/src/k8s/pkg/k8sd/api/impl/k8sd.go @@ -3,7 +3,6 @@ package impl import ( "context" "fmt" - "log" apiv1 "github.com/canonical/k8s/api/v1" "github.com/canonical/k8s/pkg/snap" @@ -46,22 +45,15 @@ func GetLocalNodeStatus(ctx context.Context, s *state.State, snap snap.Snap) (ap if err != nil { return apiv1.NodeStatus{}, fmt.Errorf("failed to check if node is a worker: %w", err) } + if isWorker { clusterRole = apiv1.ClusterRoleWorker - } else { - node, err := nodeutil.GetControlPlaneNode(ctx, s, s.Name()) - if err != nil { - // The node is likely in a joining or leaving phase where the role is not yet settled. - // Use the unknown role but still log this incident for debugging. - log.Printf("Failed to check if node is control-plane. This is expected if the node is in a joining/leaving phase. 
%v", err) - clusterRole = apiv1.ClusterRoleUnknown - } else { - if node != nil { - return *node, nil - } - } - + } else if node, err := nodeutil.GetControlPlaneNode(ctx, s, s.Name()); err != nil { + clusterRole = apiv1.ClusterRoleUnknown + } else if node != nil { + return *node, nil } + return apiv1.NodeStatus{ Name: s.Name(), Address: s.Address().Hostname(), diff --git a/src/k8s/pkg/snap/mock/runner.go b/src/k8s/pkg/snap/mock/runner.go index ec69bdd60..3aa30ba65 100644 --- a/src/k8s/pkg/snap/mock/runner.go +++ b/src/k8s/pkg/snap/mock/runner.go @@ -2,7 +2,6 @@ package mock import ( "context" - "log" "os/exec" "strings" ) @@ -16,9 +15,6 @@ type Runner struct { // Run is a mock implementation of CommandRunner. func (m *Runner) Run(ctx context.Context, command []string, opts ...func(*exec.Cmd)) error { - if m.Log { - log.Printf("mock execute %#v", command) - } m.CalledWithCommand = append(m.CalledWithCommand, strings.Join(command, " ")) m.CalledWithCtx = ctx return m.Err diff --git a/src/k8s/pkg/utils/certificate.go b/src/k8s/pkg/utils/certificate.go index 2895c04e6..68067ea55 100644 --- a/src/k8s/pkg/utils/certificate.go +++ b/src/k8s/pkg/utils/certificate.go @@ -5,7 +5,6 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "log" "net" "net/http" ) @@ -18,7 +17,6 @@ func SplitIPAndDNSSANs(extraSANs []string) ([]net.IP, []string) { for _, san := range extraSANs { if san == "" { - log.Println("Skipping empty SAN") continue } From b6053cbc1eb23246931c45562d3716451dc52237 Mon Sep 17 00:00:00 2001 From: Angelos Kolaitis Date: Sun, 14 Jul 2024 21:18:40 +0300 Subject: [PATCH 4/7] go mod tidy --- src/k8s/go.mod | 21 +++++++++++---------- src/k8s/go.sum | 41 ++++++++++++++++++++--------------------- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/src/k8s/go.mod b/src/k8s/go.mod index 5b10785aa..62cc2b128 100644 --- a/src/k8s/go.mod +++ b/src/k8s/go.mod @@ -6,19 +6,21 @@ require ( github.com/canonical/go-dqlite v1.21.0 github.com/canonical/lxd 
v0.0.0-20240403135607-df45915ce961 github.com/canonical/microcluster v0.0.0-20240418162032-e0f837527e02 + github.com/go-logr/logr v1.4.1 github.com/mitchellh/mapstructure v1.5.0 github.com/moby/sys/mountinfo v0.7.1 - github.com/onsi/gomega v1.30.0 + github.com/onsi/gomega v1.32.0 github.com/pelletier/go-toml v1.9.5 github.com/spf13/cobra v1.8.0 golang.org/x/net v0.23.0 golang.org/x/sys v0.19.0 gopkg.in/yaml.v2 v2.4.0 helm.sh/helm/v3 v3.14.2 - k8s.io/api v0.29.0 - k8s.io/apimachinery v0.29.0 + k8s.io/api v0.30.1 + k8s.io/apimachinery v0.30.1 k8s.io/cli-runtime v0.29.0 - k8s.io/client-go v0.29.0 + k8s.io/client-go v0.30.1 + k8s.io/klog/v2 v2.120.1 ) require ( @@ -59,7 +61,6 @@ require ( github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect @@ -112,6 +113,7 @@ require ( github.com/muhlemmer/gu v0.3.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/onsi/ginkgo/v2 v2.17.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect @@ -154,11 +156,10 @@ require ( gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.1 // indirect - k8s.io/apiextensions-apiserver v0.29.0 // indirect - k8s.io/apiserver v0.29.0 // indirect - k8s.io/component-base v0.29.0 // indirect - k8s.io/klog/v2 v2.110.1 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/apiextensions-apiserver v0.30.1 // indirect + k8s.io/apiserver v0.30.1 // indirect + k8s.io/component-base v0.30.1 // 
indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/kubectl v0.29.0 // indirect k8s.io/utils v0.0.0-20240310230437-4693a0247e57 // indirect oras.land/oras-go v1.2.4 // indirect diff --git a/src/k8s/go.sum b/src/k8s/go.sum index 5536bf87e..d992e1b0e 100644 --- a/src/k8s/go.sum +++ b/src/k8s/go.sum @@ -193,7 +193,6 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -477,10 +476,10 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= 
+github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -1036,24 +1035,24 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= -k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= -k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= -k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= -k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= -k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= -k8s.io/apiserver v0.29.0 h1:Y1xEMjJkP+BIi0GSEv1BBrf1jLU9UPfAnnGGbbDdp7o= -k8s.io/apiserver v0.29.0/go.mod h1:31n78PsRKPmfpee7/l9NYEv67u6hOL6AfcE761HapDM= +k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= +k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= +k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= +k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= +k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= +k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apiserver v0.30.1 
h1:BEWEe8bzS12nMtDKXzCF5Q5ovp6LjjYkSp8qOPk8LZ8= +k8s.io/apiserver v0.30.1/go.mod h1:i87ZnQ+/PGAmSbD/iEKM68bm1D5reX8fO4Ito4B01mo= k8s.io/cli-runtime v0.29.0 h1:q2kC3cex4rOBLfPOnMSzV2BIrrQlx97gxHJs21KxKS4= k8s.io/cli-runtime v0.29.0/go.mod h1:VKudXp3X7wR45L+nER85YUzOQIru28HQpXr0mTdeCrk= -k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= -k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= -k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= -k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= +k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= +k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ= +k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI= k8s.io/kubectl v0.29.0/go.mod h1:0jMjGWIcMIQzmUaMgAzhSELv5WtHo2a8pq67DtviAJs= k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= From 831be6c1e5fc3d333af19e7f13a6fece2c73ed16 Mon Sep 17 00:00:00 2001 From: Angelos 
Kolaitis Date: Sun, 14 Jul 2024 21:25:51 +0300 Subject: [PATCH 5/7] use pkg/log in pkg/proxy --- src/k8s/pkg/proxy/apiserver.go | 17 +++++++++++------ src/k8s/pkg/proxy/proxy.go | 12 +++++++++--- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/src/k8s/pkg/proxy/apiserver.go b/src/k8s/pkg/proxy/apiserver.go index f4e2d1bf9..2774050d9 100644 --- a/src/k8s/pkg/proxy/apiserver.go +++ b/src/k8s/pkg/proxy/apiserver.go @@ -3,9 +3,10 @@ package proxy import ( "context" "fmt" - "log" "reflect" "time" + + "github.com/canonical/k8s/pkg/log" ) // APIServerProxy is a TCP proxy that forwards requests to the API Servers of the cluster. @@ -27,6 +28,8 @@ type APIServerProxy struct { // Run starts the proxy. func (p *APIServerProxy) Run(ctx context.Context) error { + ctx = log.NewContext(ctx, log.FromContext(ctx).WithName("apiserver-proxy")) + for { select { case <-ctx.Done(): @@ -48,12 +51,13 @@ func (p *APIServerProxy) Run(ctx context.Context) error { func (p *APIServerProxy) startProxy(ctx context.Context, cancel func(), endpoints []string) { if err := startProxy(ctx, p.ListenAddress, endpoints); err != nil { - log.Println(fmt.Errorf("apiserver proxy failed: %w", err)) + log.FromContext(ctx).Error(err, "Proxy failed") } cancel() } func (p *APIServerProxy) watchForNewEndpoints(ctx context.Context, cancel func(), endpoints []string) { + log := log.FromContext(ctx).WithValues("controller", "watchendpoints") if p.RefreshCh == nil { return } @@ -68,18 +72,19 @@ func (p *APIServerProxy) watchForNewEndpoints(ctx context.Context, cancel func() newEndpoints, err := getKubernetesEndpoints(ctx, p.KubeconfigFile) switch { case err != nil: - log.Println(fmt.Errorf("failed to retrieve kubernetes endpoints: %w", err)) + log.Error(err, "Failed to retrieve Kubernetes endpoints") continue case len(newEndpoints) == 0: - log.Println("warning: empty list of endpoints, skipping update") + log.Info("Warning: empty list of endpoints, skipping update") continue case len(newEndpoints) 
== len(endpoints) && reflect.DeepEqual(newEndpoints, endpoints): continue } - log.Println("updating endpoints") + log := log.WithValues("endpoints", newEndpoints) + log.Info("Updating endpoints") if err := WriteEndpointsConfig(newEndpoints, p.EndpointsConfigFile); err != nil { - log.Printf("failed to update configuration file with new endpoints: %s", err) + log.Error(err, "Failed to update configuration file with new endpoints") continue } diff --git a/src/k8s/pkg/proxy/proxy.go b/src/k8s/pkg/proxy/proxy.go index fab536bcf..afe9cedd7 100644 --- a/src/k8s/pkg/proxy/proxy.go +++ b/src/k8s/pkg/proxy/proxy.go @@ -3,11 +3,12 @@ package proxy import ( "context" "fmt" - "log" "net" "net/url" "strconv" "time" + + "github.com/canonical/k8s/pkg/log" ) func startProxy(ctx context.Context, listenURL string, endpointURLs []string) error { @@ -41,10 +42,15 @@ func startProxy(ctx context.Context, listenURL string, endpointURLs []string) er MonitorInterval: time.Minute, } - log.Println("Starting proxy at", listenURL) + log := log.FromContext(ctx).WithValues( + "controller", "proxy", + "address", listenURL, + "endpoints", endpointURLs, + ) + log.Info("Starting proxy") go func() { if err := p.Run(); err != nil { - log.Printf("proxy failed: %v\n", err) + log.Error(err, "Proxy failed") } }() From 7d421155e92612a8df5e5b82afce7e0d4cbf6c90 Mon Sep 17 00:00:00 2001 From: Angelos Kolaitis Date: Mon, 15 Jul 2024 09:17:59 +0300 Subject: [PATCH 6/7] go fmt --- src/k8s/pkg/log/doc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/k8s/pkg/log/doc.go b/src/k8s/pkg/log/doc.go index 98cff32ef..37ea6793b 100644 --- a/src/k8s/pkg/log/doc.go +++ b/src/k8s/pkg/log/doc.go @@ -25,7 +25,7 @@ // Log messages include the file name and line number of the log message. 
In case // of utility functions, you can print the line number of the caller instead with: // -// log.FromContext(ctx).WithCallDepth(1).Error(err, "Failed to format JSON output") +// log.FromContext(ctx).WithCallDepth(1).Error(err, "Failed to format JSON output") // // To configure the logger behaviour, you can use this in the main package: // From f281429decfcc17030c9621871f905c5d84243d3 Mon Sep 17 00:00:00 2001 From: Angelos Kolaitis Date: Mon, 15 Jul 2024 12:19:01 +0300 Subject: [PATCH 7/7] fixup controller names --- src/k8s/pkg/k8sd/controllers/control_plane_configuration.go | 2 +- src/k8s/pkg/k8sd/controllers/node_configuration.go | 2 +- src/k8s/pkg/k8sd/controllers/update_node_configuration.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/k8s/pkg/k8sd/controllers/control_plane_configuration.go b/src/k8s/pkg/k8sd/controllers/control_plane_configuration.go index 4213c8ac2..8e29eae83 100644 --- a/src/k8s/pkg/k8sd/controllers/control_plane_configuration.go +++ b/src/k8s/pkg/k8sd/controllers/control_plane_configuration.go @@ -39,7 +39,7 @@ func NewControlPlaneConfigurationController(snap snap.Snap, waitReady func(), tr func (c *ControlPlaneConfigurationController) Run(ctx context.Context, getClusterConfig func(context.Context) (types.ClusterConfig, error)) { c.waitReady() - ctx = log.NewContext(ctx, log.FromContext(ctx).WithValues("controller", "controlplaneconfiguration")) + ctx = log.NewContext(ctx, log.FromContext(ctx).WithValues("controller", "control-plane-configuration")) log := log.FromContext(ctx) for { diff --git a/src/k8s/pkg/k8sd/controllers/node_configuration.go b/src/k8s/pkg/k8sd/controllers/node_configuration.go index 0df3fa5c4..6157ea061 100644 --- a/src/k8s/pkg/k8sd/controllers/node_configuration.go +++ b/src/k8s/pkg/k8sd/controllers/node_configuration.go @@ -45,7 +45,7 @@ func (c *NodeConfigurationController) Run(ctx context.Context, getRSAKey func(co // wait for microcluster node to be ready c.waitReady() - ctx = 
log.NewContext(ctx, log.FromContext(ctx).WithValues("controller", "nodeconfiguration")) + ctx = log.NewContext(ctx, log.FromContext(ctx).WithValues("controller", "node-configuration")) log := log.FromContext(ctx) for { diff --git a/src/k8s/pkg/k8sd/controllers/update_node_configuration.go b/src/k8s/pkg/k8sd/controllers/update_node_configuration.go index 9b5e6a8d9..6d8046d8b 100644 --- a/src/k8s/pkg/k8sd/controllers/update_node_configuration.go +++ b/src/k8s/pkg/k8sd/controllers/update_node_configuration.go @@ -58,7 +58,7 @@ func (c *UpdateNodeConfigurationController) retryNewK8sClient(ctx context.Contex func (c *UpdateNodeConfigurationController) Run(ctx context.Context, getClusterConfig func(context.Context) (types.ClusterConfig, error)) { c.waitReady() - log := log.FromContext(ctx).WithValues("controller", "updatenodeconfiguration") + log := log.FromContext(ctx).WithValues("controller", "update-node-configuration") for { select {