Skip to content

Commit

Permalink
Fix check for initialized nodes (#599)
Browse files Browse the repository at this point in the history
* (k8sd.Client).NodeStatus() now also returns whether the node is initialized
* first check for errors, then whether the node is initialized
  • Loading branch information
neoaggelos committed Aug 13, 2024
1 parent afca21c commit 6b92ed7
Show file tree
Hide file tree
Showing 12 changed files with 54 additions and 160 deletions.
6 changes: 5 additions & 1 deletion src/k8s/cmd/k8s/k8s_bootstrap.go
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,11 @@ func newBootstrapCmd(env cmdutil.ExecutionEnvironment) *cobra.Command {
return
}

if _, err := client.NodeStatus(cmd.Context()); err == nil {
if _, initialized, err := client.NodeStatus(cmd.Context()); err != nil {
cmd.PrintErrf("Error: Failed to check the current node status.\n\nThe error was: %v\n", err)
env.Exit(1)
return
} else if initialized {
cmd.PrintErrln("Error: The node is already part of a cluster")
env.Exit(1)
return
Expand Down
8 changes: 4 additions & 4 deletions src/k8s/cmd/k8s/k8s_config.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,12 +32,12 @@ func newKubeConfigCmd(env cmdutil.ExecutionEnvironment) *cobra.Command {
return
}

if _, isBootstrapped, err := cmdutil.GetNodeStatus(cmd.Context(), client, env); !isBootstrapped {
cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
if _, initialized, err := client.NodeStatus(cmd.Context()); err != nil {
cmd.PrintErrf("Error: Failed to check the current node status.\n\nThe error was: %v\n", err)
env.Exit(1)
return
} else if err != nil {
cmd.PrintErrf("Error: Failed to retrieve the node status.\n\nThe error was: %v\n", err)
} else if !initialized {
cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
env.Exit(1)
return
}
Expand Down
8 changes: 4 additions & 4 deletions src/k8s/cmd/k8s/k8s_helm.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,12 @@ func newHelmCmd(env cmdutil.ExecutionEnvironment) *cobra.Command {
return
}

if status, isBootstrapped, err := cmdutil.GetNodeStatus(cmd.Context(), client, env); !isBootstrapped {
cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
if status, initialized, err := client.NodeStatus(cmd.Context()); err != nil {
cmd.PrintErrf("Error: Failed to check the current node status.\n\nThe error was: %v\n", err)
env.Exit(1)
return
} else if err != nil {
cmd.PrintErrf("Error: Failed to retrieve the node status.\n\nThe error was: %v\n", err)
} else if !initialized {
cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
env.Exit(1)
return
} else if status.ClusterRole == apiv1.ClusterRoleWorker {
Expand Down
6 changes: 5 additions & 1 deletion src/k8s/cmd/k8s/k8s_join_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,11 @@ func newJoinClusterCmd(env cmdutil.ExecutionEnvironment) *cobra.Command {
return
}

if _, err := client.NodeStatus(cmd.Context()); err == nil {
if _, initialized, err := client.NodeStatus(cmd.Context()); err != nil {
cmd.PrintErrf("Error: Failed to check the current node status.\n\nThe error was: %v\n", err)
env.Exit(1)
return
} else if initialized {
cmd.PrintErrln("Error: The node is already part of a cluster")
env.Exit(1)
return
Expand Down
8 changes: 4 additions & 4 deletions src/k8s/cmd/k8s/k8s_kubectl.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,12 @@ func newKubectlCmd(env cmdutil.ExecutionEnvironment) *cobra.Command {
return
}

if status, isBootstrapped, err := cmdutil.GetNodeStatus(cmd.Context(), client, env); !isBootstrapped {
cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
if status, initialized, err := client.NodeStatus(cmd.Context()); err != nil {
cmd.PrintErrf("Error: Failed to retrieve the node status.\n\nThe error was: %v\n", err)
env.Exit(1)
return
} else if err != nil {
cmd.PrintErrf("Error: Failed to retrieve the node status.\n\nThe error was: %v\n", err)
} else if !initialized {
cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
env.Exit(1)
return
} else if status.ClusterRole == apiv1.ClusterRoleWorker {
Expand Down
10 changes: 5 additions & 5 deletions src/k8s/cmd/k8s/k8s_local_node_status.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,13 @@ func newLocalNodeStatusCommand(env cmdutil.ExecutionEnvironment) *cobra.Command
return
}

status, isBootstrapped, err := cmdutil.GetNodeStatus(cmd.Context(), client, env)
if !isBootstrapped {
cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
status, initialized, err := client.NodeStatus(cmd.Context())
if err != nil {
cmd.PrintErrf("Error: Failed to check the current node status.\n\nThe error was: %v\n", err)
env.Exit(1)
return
} else if err != nil {
cmd.PrintErrf("Error: Failed to retrieve the local node status.\n\nThe error was: %v\n", err)
} else if !initialized {
cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
env.Exit(1)
return
}
Expand Down
8 changes: 4 additions & 4 deletions src/k8s/cmd/k8s/k8s_status.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,12 +36,12 @@ func newStatusCmd(env cmdutil.ExecutionEnvironment) *cobra.Command {
ctx, cancel := context.WithTimeout(cmd.Context(), opts.timeout)
cobra.OnFinalize(cancel)

if _, isBootstrapped, err := cmdutil.GetNodeStatus(cmd.Context(), client, env); !isBootstrapped {
cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
if _, initialized, err := client.NodeStatus(cmd.Context()); err != nil {
cmd.PrintErrf("Error: Failed to check the current node status.\n\nThe error was: %v\n", err)
env.Exit(1)
return
} else if err != nil {
cmd.PrintErrf("Error: Failed to retrieve the node status.\n\nThe error was: %v\n", err)
} else if !initialized {
cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
env.Exit(1)
return
}
Expand Down
37 changes: 0 additions & 37 deletions src/k8s/cmd/util/node_status.go

This file was deleted.

90 changes: 0 additions & 90 deletions src/k8s/cmd/util/node_status_test.go

This file was deleted.

3 changes: 2 additions & 1 deletion src/k8s/pkg/client/k8sd/interface.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,8 @@ type ClusterClient interface {
// StatusClient implements methods for retrieving the current status of the cluster.
type StatusClient interface {
// NodeStatus retrieves the current status of the local node.
NodeStatus(ctx context.Context) (apiv1.NodeStatus, error)
// The second return value is false if the node is not part of a cluster.
NodeStatus(ctx context.Context) (apiv1.NodeStatus, bool, error)
// ClusterStatus retrieves the current status of the Kubernetes cluster.
ClusterStatus(ctx context.Context, waitReady bool) (apiv1.ClusterStatus, error)
}
Expand Down
13 changes: 7 additions & 6 deletions src/k8s/pkg/client/k8sd/mock/mock.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,11 @@ type Mock struct {
RemoveNodeErr error

// k8sd.StatusClient
NodeStatusResult apiv1.NodeStatus
NodeStatusErr error
ClusterStatusResult apiv1.ClusterStatus
ClusterStatusErr error
NodeStatusResult apiv1.NodeStatus
NodeStatusInitialized bool
NodeStatusErr error
ClusterStatusResult apiv1.ClusterStatus
ClusterStatusErr error

// k8sd.ConfigClient
GetClusterConfigResult apiv1.UserFacingClusterConfig
Expand Down Expand Up @@ -60,8 +61,8 @@ func (m *Mock) RemoveNode(_ context.Context, request apiv1.RemoveNodeRequest) er
return m.RemoveNodeErr
}

func (m *Mock) NodeStatus(_ context.Context) (apiv1.NodeStatus, error) {
return m.NodeStatusResult, m.NodeStatusErr
func (m *Mock) NodeStatus(_ context.Context) (apiv1.NodeStatus, bool, error) {
return m.NodeStatusResult, m.NodeStatusInitialized, m.NodeStatusErr
}
func (m *Mock) ClusterStatus(_ context.Context, waitReady bool) (apiv1.ClusterStatus, error) {
return m.ClusterStatusResult, m.ClusterStatusErr
Expand Down
17 changes: 14 additions & 3 deletions src/k8s/pkg/client/k8sd/status.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,19 +2,30 @@ package k8sd

import (
"context"
"errors"
"fmt"
"net/http"

apiv1 "github.com/canonical/k8s/api/v1"
"github.com/canonical/k8s/pkg/utils/control"
"github.com/canonical/lxd/shared/api"
)

func (c *k8sd) NodeStatus(ctx context.Context) (apiv1.NodeStatus, error) {
func (c *k8sd) NodeStatus(ctx context.Context) (apiv1.NodeStatus, bool, error) {
var response apiv1.GetNodeStatusResponse
if err := c.client.Query(ctx, "GET", apiv1.K8sdAPIVersion, api.NewURL().Path("k8sd", "node"), nil, &response); err != nil {
return apiv1.NodeStatus{}, fmt.Errorf("failed to GET /k8sd/node: %w", err)

// Error 503 means the node is not initialized yet
var statusErr api.StatusError
if errors.As(err, &statusErr) {
if statusErr.Status() == http.StatusServiceUnavailable {
return apiv1.NodeStatus{}, false, nil
}
}

return apiv1.NodeStatus{}, false, fmt.Errorf("failed to GET /k8sd/node: %w", err)
}
return response.NodeStatus, nil
return response.NodeStatus, true, nil
}

func (c *k8sd) ClusterStatus(ctx context.Context, waitReady bool) (apiv1.ClusterStatus, error) {
Expand Down

0 comments on commit 6b92ed7

Please sign in to comment.