diff --git a/src/k8s/cmd/k8s/k8s_bootstrap.go b/src/k8s/cmd/k8s/k8s_bootstrap.go
index 0b02bce4a..41dddbc96 100644
--- a/src/k8s/cmd/k8s/k8s_bootstrap.go
+++ b/src/k8s/cmd/k8s/k8s_bootstrap.go
@@ -85,7 +85,11 @@ func newBootstrapCmd(env cmdutil.ExecutionEnvironment) *cobra.Command {
                 return
             }
 
-            if _, err := client.NodeStatus(cmd.Context()); err == nil {
+            if _, initialized, err := client.NodeStatus(cmd.Context()); err != nil {
+                cmd.PrintErrf("Error: Failed to check the current node status.\n\nThe error was: %v\n", err)
+                env.Exit(1)
+                return
+            } else if initialized {
                 cmd.PrintErrln("Error: The node is already part of a cluster")
                 env.Exit(1)
                 return
diff --git a/src/k8s/cmd/k8s/k8s_config.go b/src/k8s/cmd/k8s/k8s_config.go
index bd0a2b190..93ccf2499 100644
--- a/src/k8s/cmd/k8s/k8s_config.go
+++ b/src/k8s/cmd/k8s/k8s_config.go
@@ -32,12 +32,12 @@ func newKubeConfigCmd(env cmdutil.ExecutionEnvironment) *cobra.Command {
                 return
             }
 
-            if _, isBootstrapped, err := cmdutil.GetNodeStatus(cmd.Context(), client, env); !isBootstrapped {
-                cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
+            if _, initialized, err := client.NodeStatus(cmd.Context()); err != nil {
+                cmd.PrintErrf("Error: Failed to check the current node status.\n\nThe error was: %v\n", err)
                 env.Exit(1)
                 return
-            } else if err != nil {
-                cmd.PrintErrf("Error: Failed to retrieve the node status.\n\nThe error was: %v\n", err)
+            } else if !initialized {
+                cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
                 env.Exit(1)
                 return
             }
diff --git a/src/k8s/cmd/k8s/k8s_helm.go b/src/k8s/cmd/k8s/k8s_helm.go
index 383749a58..e2b7749ef 100644
--- a/src/k8s/cmd/k8s/k8s_helm.go
+++ b/src/k8s/cmd/k8s/k8s_helm.go
@@ -24,12 +24,12 @@ func newHelmCmd(env cmdutil.ExecutionEnvironment) *cobra.Command {
                 return
             }
 
-            if status, isBootstrapped, err := cmdutil.GetNodeStatus(cmd.Context(), client, env); !isBootstrapped {
-                cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
+            if status, initialized, err := client.NodeStatus(cmd.Context()); err != nil {
+                cmd.PrintErrf("Error: Failed to check the current node status.\n\nThe error was: %v\n", err)
                 env.Exit(1)
                 return
-            } else if err != nil {
-                cmd.PrintErrf("Error: Failed to retrieve the node status.\n\nThe error was: %v\n", err)
+            } else if !initialized {
+                cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
                 env.Exit(1)
                 return
             } else if status.ClusterRole == apiv1.ClusterRoleWorker {
diff --git a/src/k8s/cmd/k8s/k8s_join_cluster.go b/src/k8s/cmd/k8s/k8s_join_cluster.go
index ad7ebc52a..de7115a5a 100644
--- a/src/k8s/cmd/k8s/k8s_join_cluster.go
+++ b/src/k8s/cmd/k8s/k8s_join_cluster.go
@@ -68,7 +68,11 @@ func newJoinClusterCmd(env cmdutil.ExecutionEnvironment) *cobra.Command {
                 return
             }
 
-            if _, err := client.NodeStatus(cmd.Context()); err == nil {
+            if _, initialized, err := client.NodeStatus(cmd.Context()); err != nil {
+                cmd.PrintErrf("Error: Failed to check the current node status.\n\nThe error was: %v\n", err)
+                env.Exit(1)
+                return
+            } else if initialized {
                 cmd.PrintErrln("Error: The node is already part of a cluster")
                 env.Exit(1)
                 return
diff --git a/src/k8s/cmd/k8s/k8s_kubectl.go b/src/k8s/cmd/k8s/k8s_kubectl.go
index 93d191d7d..754d91007 100644
--- a/src/k8s/cmd/k8s/k8s_kubectl.go
+++ b/src/k8s/cmd/k8s/k8s_kubectl.go
@@ -24,12 +24,12 @@ func newKubectlCmd(env cmdutil.ExecutionEnvironment) *cobra.Command {
                 return
             }
 
-            if status, isBootstrapped, err := cmdutil.GetNodeStatus(cmd.Context(), client, env); !isBootstrapped {
-                cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
+            if status, initialized, err := client.NodeStatus(cmd.Context()); err != nil {
+                cmd.PrintErrf("Error: Failed to retrieve the node status.\n\nThe error was: %v\n", err)
                 env.Exit(1)
                 return
-            } else if err != nil {
-                cmd.PrintErrf("Error: Failed to retrieve the node status.\n\nThe error was: %v\n", err)
+            } else if !initialized {
+                cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
                 env.Exit(1)
                 return
             } else if status.ClusterRole == apiv1.ClusterRoleWorker {
diff --git a/src/k8s/cmd/k8s/k8s_local_node_status.go b/src/k8s/cmd/k8s/k8s_local_node_status.go
index 16c7ded13..71cf71268 100644
--- a/src/k8s/cmd/k8s/k8s_local_node_status.go
+++ b/src/k8s/cmd/k8s/k8s_local_node_status.go
@@ -22,13 +22,13 @@ func newLocalNodeStatusCommand(env cmdutil.ExecutionEnvironment) *cobra.Command
                 return
             }
 
-            status, isBootstrapped, err := cmdutil.GetNodeStatus(cmd.Context(), client, env)
-            if !isBootstrapped {
-                cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
+            status, initialized, err := client.NodeStatus(cmd.Context())
+            if err != nil {
+                cmd.PrintErrf("Error: Failed to check the current node status.\n\nThe error was: %v\n", err)
                 env.Exit(1)
                 return
-            } else if err != nil {
-                cmd.PrintErrf("Error: Failed to retrieve the local node status.\n\nThe error was: %v\n", err)
+            } else if !initialized {
+                cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
                 env.Exit(1)
                 return
             }
diff --git a/src/k8s/cmd/k8s/k8s_status.go b/src/k8s/cmd/k8s/k8s_status.go
index d4ce46faa..839bc4151 100644
--- a/src/k8s/cmd/k8s/k8s_status.go
+++ b/src/k8s/cmd/k8s/k8s_status.go
@@ -36,12 +36,12 @@ func newStatusCmd(env cmdutil.ExecutionEnvironment) *cobra.Command {
             ctx, cancel := context.WithTimeout(cmd.Context(), opts.timeout)
             cobra.OnFinalize(cancel)
 
-            if _, isBootstrapped, err := cmdutil.GetNodeStatus(cmd.Context(), client, env); !isBootstrapped {
-                cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
+            if _, initialized, err := client.NodeStatus(cmd.Context()); err != nil {
+                cmd.PrintErrf("Error: Failed to check the current node status.\n\nThe error was: %v\n", err)
                 env.Exit(1)
                 return
-            } else if err != nil {
-                cmd.PrintErrf("Error: Failed to retrieve the node status.\n\nThe error was: %v\n", err)
+            } else if !initialized {
+                cmd.PrintErrln("Error: The node is not part of a Kubernetes cluster. You can bootstrap a new cluster with:\n\n sudo k8s bootstrap")
                 env.Exit(1)
                 return
             }
diff --git a/src/k8s/cmd/util/node_status.go b/src/k8s/cmd/util/node_status.go
deleted file mode 100644
index e5e4892e0..000000000
--- a/src/k8s/cmd/util/node_status.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package cmdutil
-
-import (
-    "context"
-    "errors"
-    "net/http"
-
-    apiv1 "github.com/canonical/k8s/api/v1"
-    "github.com/canonical/k8s/pkg/client/k8sd"
-
-    "github.com/canonical/lxd/shared/api"
-)
-
-// GetNodeStatus retrieves the NodeStatus from k8sd server. If the daemon is not initialized, it exits with an error
-// describing that the cluster should be bootstrapped. In case of any other errors it exits and shows the error.
-func GetNodeStatus(ctx context.Context, client k8sd.Client, env ExecutionEnvironment) (status apiv1.NodeStatus, isBootstrapped bool, err error) {
-    status, err = client.NodeStatus(ctx)
-    if err == nil {
-        return status, true, nil
-    }
-
-    if errors.As(err, &api.StatusError{}) {
-        // the returned `ok` can be ignored since we're using errors.As()
-        // on the same type immediately before it
-        statusErr, _ := err.(api.StatusError)
-
-        // if we get an `http.StatusServiceUnavailable` it will be (most likely) because
-        // the handler we're trying to reach is not `AllowedBeforeInit` and hence we can understand that
-        // the daemon is not yet initialized (this statement should be available
-        // in the `statusErr.Error()` explicitly but for the sake of decoupling we don't rely on that)
-        if statusErr.Status() == http.StatusServiceUnavailable {
-            return status, false, err
-        }
-    }
-
-    return status, true, err
-}
diff --git a/src/k8s/cmd/util/node_status_test.go b/src/k8s/cmd/util/node_status_test.go
deleted file mode 100644
index c60de4d41..000000000
--- a/src/k8s/cmd/util/node_status_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package cmdutil_test
-
-import (
-    "context"
-    "errors"
-    "net/http"
-    "testing"
-
-    apiv1 "github.com/canonical/k8s/api/v1"
-    cmdutil "github.com/canonical/k8s/cmd/util"
-    k8sdmock "github.com/canonical/k8s/pkg/client/k8sd/mock"
-    snapmock "github.com/canonical/k8s/pkg/snap/mock"
-    "github.com/canonical/lxd/shared/api"
-
-    . "github.com/onsi/gomega"
-)
-
-func TestGetNodeStatusUtil(t *testing.T) {
-    type testcase struct {
-        expIsBootstrapped bool
-        name              string
-        nodeStatusErr     error
-        nodeStatusResult  apiv1.NodeStatus
-    }
-
-    tests := []testcase{
-        {
-            name:              "NoError",
-            expIsBootstrapped: true,
-            nodeStatusResult: apiv1.NodeStatus{
-                Name: "name", Address: "addr",
-                ClusterRole: apiv1.ClusterRoleControlPlane, DatastoreRole: apiv1.DatastoreRoleVoter,
-            },
-        },
-        {
-            name:              "DaemonNotInitialized",
-            nodeStatusErr:     api.StatusErrorf(http.StatusServiceUnavailable, "Daemon not yet initialized"),
-            expIsBootstrapped: false,
-        },
-        {
-            name:              "RandomError",
-            nodeStatusErr:     errors.New("something went bad"),
-            expIsBootstrapped: true,
-        },
-        {
-            name:              "ContextCanceled",
-            nodeStatusErr:     context.Canceled,
-            expIsBootstrapped: true,
-        },
-        {
-            name:              "ContextDeadlineExceeded",
-            nodeStatusErr:     context.DeadlineExceeded,
-            expIsBootstrapped: true,
-        },
-    }
-
-    for _, test := range tests {
-        t.Run(test.name, func(t *testing.T) {
-            g := NewWithT(t)
-
-            var (
-                mockClient = &k8sdmock.Mock{
-                    NodeStatusErr:    test.nodeStatusErr,
-                    NodeStatusResult: test.nodeStatusResult,
-                }
-                env = cmdutil.ExecutionEnvironment{
-                    Getuid: func() int { return 0 },
-                    Snap: &snapmock.Snap{
-                        Mock: snapmock.Mock{
-                            K8sdClient: mockClient,
-                        },
-                    },
-                }
-            )
-
-            status, isBootstrapped, err := cmdutil.GetNodeStatus(context.TODO(), mockClient, env)
-
-            g.Expect(isBootstrapped).To(Equal(test.expIsBootstrapped))
-            if test.nodeStatusErr == nil {
-                g.Expect(err).To(BeNil())
-            } else {
-                g.Expect(err).To(MatchError(test.nodeStatusErr))
-            }
-
-            if err == nil {
-                g.Expect(status).To(Equal(test.nodeStatusResult))
-            }
-        })
-    }
-}
diff --git a/src/k8s/pkg/client/k8sd/interface.go b/src/k8s/pkg/client/k8sd/interface.go
index b0858d2b6..47205a4a5 100644
--- a/src/k8s/pkg/client/k8sd/interface.go
+++ b/src/k8s/pkg/client/k8sd/interface.go
@@ -21,7 +21,8 @@ type ClusterClient interface {
 // StatusClient implements methods for retrieving the current status of the cluster.
 type StatusClient interface {
     // NodeStatus retrieves the current status of the local node.
-    NodeStatus(ctx context.Context) (apiv1.NodeStatus, error)
+    // The second return value is false if the node is not part of a cluster.
+    NodeStatus(ctx context.Context) (apiv1.NodeStatus, bool, error)
     // ClusterStatus retrieves the current status of the Kubernetes cluster.
     ClusterStatus(ctx context.Context, waitReady bool) (apiv1.ClusterStatus, error)
 }
diff --git a/src/k8s/pkg/client/k8sd/mock/mock.go b/src/k8s/pkg/client/k8sd/mock/mock.go
index bd73bf727..44d125d3f 100644
--- a/src/k8s/pkg/client/k8sd/mock/mock.go
+++ b/src/k8s/pkg/client/k8sd/mock/mock.go
@@ -22,10 +22,11 @@ type Mock struct {
     RemoveNodeErr error
 
     // k8sd.StatusClient
-    NodeStatusResult    apiv1.NodeStatus
-    NodeStatusErr       error
-    ClusterStatusResult apiv1.ClusterStatus
-    ClusterStatusErr    error
+    NodeStatusResult      apiv1.NodeStatus
+    NodeStatusInitialized bool
+    NodeStatusErr         error
+    ClusterStatusResult   apiv1.ClusterStatus
+    ClusterStatusErr      error
 
     // k8sd.ConfigClient
     GetClusterConfigResult apiv1.UserFacingClusterConfig
@@ -60,8 +61,8 @@ func (m *Mock) RemoveNode(_ context.Context, request apiv1.RemoveNodeRequest) er
     return m.RemoveNodeErr
 }
 
-func (m *Mock) NodeStatus(_ context.Context) (apiv1.NodeStatus, error) {
-    return m.NodeStatusResult, m.NodeStatusErr
+func (m *Mock) NodeStatus(_ context.Context) (apiv1.NodeStatus, bool, error) {
+    return m.NodeStatusResult, m.NodeStatusInitialized, m.NodeStatusErr
 }
 func (m *Mock) ClusterStatus(_ context.Context, waitReady bool) (apiv1.ClusterStatus, error) {
     return m.ClusterStatusResult, m.ClusterStatusErr
diff --git a/src/k8s/pkg/client/k8sd/status.go b/src/k8s/pkg/client/k8sd/status.go
index 572b3178e..0e9e85179 100644
--- a/src/k8s/pkg/client/k8sd/status.go
+++ b/src/k8s/pkg/client/k8sd/status.go
@@ -2,19 +2,30 @@ package k8sd
 
 import (
     "context"
+    "errors"
     "fmt"
+    "net/http"
 
     apiv1 "github.com/canonical/k8s/api/v1"
     "github.com/canonical/k8s/pkg/utils/control"
     "github.com/canonical/lxd/shared/api"
 )
 
-func (c *k8sd) NodeStatus(ctx context.Context) (apiv1.NodeStatus, error) {
+func (c *k8sd) NodeStatus(ctx context.Context) (apiv1.NodeStatus, bool, error) {
     var response apiv1.GetNodeStatusResponse
     if err := c.client.Query(ctx, "GET", apiv1.K8sdAPIVersion, api.NewURL().Path("k8sd", "node"), nil, &response); err != nil {
-        return apiv1.NodeStatus{}, fmt.Errorf("failed to GET /k8sd/node: %w", err)
+
+        // Error 503 means the node is not initialized yet
+        var statusErr api.StatusError
+        if errors.As(err, &statusErr) {
+            if statusErr.Status() == http.StatusServiceUnavailable {
+                return apiv1.NodeStatus{}, false, nil
+            }
+        }
+
+        return apiv1.NodeStatus{}, false, fmt.Errorf("failed to GET /k8sd/node: %w", err)
     }
-    return response.NodeStatus, nil
+    return response.NodeStatus, true, nil
 }
 
 func (c *k8sd) ClusterStatus(ctx context.Context, waitReady bool) (apiv1.ClusterStatus, error) {
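
Not part of the patch: a minimal sketch of how a caller consumes the new three-value NodeStatus contract, exercised here through the updated mock client from this change. The package layout, variable names, and printed messages are illustrative only.

package main

import (
    "context"
    "fmt"

    apiv1 "github.com/canonical/k8s/api/v1"
    k8sdmock "github.com/canonical/k8s/pkg/client/k8sd/mock"
)

func main() {
    // The mock stands in for a real k8sd client; NodeStatusInitialized is the
    // field this change adds to drive the new boolean return value.
    client := &k8sdmock.Mock{
        NodeStatusResult:      apiv1.NodeStatus{Name: "node-1"},
        NodeStatusInitialized: true,
    }

    // New contract: (status, initialized, error). With the real client, a 503
    // "daemon not initialized" response is no longer surfaced as an error; it
    // comes back as initialized == false with a nil error.
    status, initialized, err := client.NodeStatus(context.TODO())
    switch {
    case err != nil:
        fmt.Println("failed to check the current node status:", err)
    case !initialized:
        fmt.Println("the node is not part of a cluster")
    default:
        fmt.Println("node", status.Name, "is part of a cluster")
    }
}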