From 9a4b7b0ee5aae120d87adae8d6266fd6b16ce965 Mon Sep 17 00:00:00 2001 From: Gerrit Date: Tue, 16 Jan 2024 15:40:45 +0100 Subject: [PATCH] Implement worker group update. (#34) --- README.md | 4 +- cmd/api/v1/cluster.go | 171 +++++++++++++++++++++++++++++++---- cmd/cluster_test.go | 13 ++- cmd/common_test.go | 9 ++ cmd/completion/cluster.go | 24 ++++- cmd/config/config.go | 2 + cmd/root.go | 2 + docs/metal_cluster_create.md | 2 +- docs/metal_cluster_update.md | 7 ++ go.mod | 2 +- go.sum | 4 +- pkg/helpers/viper.go | 12 +++ 12 files changed, 227 insertions(+), 25 deletions(-) create mode 100644 pkg/helpers/viper.go diff --git a/README.md b/README.md index 2367494..2be8d5b 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,9 @@ [![Markdown Docs](https://img.shields.io/badge/markdown-docs-blue?link=https%3A%2F%2Fgithub.com%2Fmetal-stack-cloud%2Fcli%2Fdocs)](./docs/metal.md) -To work with this CLI, it is first necessary to create a metalstack.cloud api-token. This can be issued through the cloud console. +This is the official CLI for accessing the API of [metalstack.cloud](https://metalstack.cloud). + +To work with this CLI, it is first necessary to create an api-token. This can be issued through the [cloud console](https://console.metalstack.cloud/token). Once you got the token, you probably want to create a CLI context: diff --git a/cmd/api/v1/cluster.go b/cmd/api/v1/cluster.go index 8879958..9282d49 100644 --- a/cmd/api/v1/cluster.go +++ b/cmd/api/v1/cluster.go @@ -2,6 +2,7 @@ package v1 import ( "fmt" + "slices" "time" "connectrpc.com/connect" @@ -54,7 +55,7 @@ func newClusterCmd(c *config.Config) *cobra.Command { cmd.Flags().Int32("maintenance-minute", 0, "minute in which cluster maintenance is allowed to take place") cmd.Flags().String("maintenance-timezone", time.Local.String(), "timezone used for the maintenance time window") // nolint cmd.Flags().Duration("maintenance-duration", 2*time.Hour, "duration in which cluster maintenance is allowed to take place") - cmd.Flags().String("worker-name", "group-0", "the name of the initial worker group") + cmd.Flags().String("worker-group", "group-0", "the name of the initial worker group") cmd.Flags().Uint32("worker-min", 1, "the minimum amount of worker nodes of the worker group") cmd.Flags().Uint32("worker-max", 3, "the maximum amount of worker nodes of the worker group") cmd.Flags().Uint32("worker-max-surge", 1, "the maximum amount of new worker nodes added to the worker group during a rolling update") @@ -83,9 +84,18 @@ func newClusterCmd(c *config.Config) *cobra.Command { cmd.Flags().Uint32("maintenance-minute", 0, "minute in which cluster maintenance is allowed to take place") cmd.Flags().String("maintenance-timezone", time.Local.String(), "timezone used for the maintenance time window") // nolint cmd.Flags().Duration("maintenance-duration", 2*time.Hour, "duration in which cluster maintenance is allowed to take place") + cmd.Flags().String("worker-group", "", "the name of the worker group to add, update or remove") + cmd.Flags().Uint32("worker-min", 1, "the minimum amount of worker nodes of the worker group") + cmd.Flags().Uint32("worker-max", 3, "the maximum amount of worker nodes of the worker group") + cmd.Flags().Uint32("worker-max-surge", 1, "the maximum amount of new worker nodes added to the worker group during a rolling update") + cmd.Flags().Uint32("worker-max-unavailable", 0, "the maximum amount of worker nodes removed from the worker group during a rolling update") + cmd.Flags().String("worker-type", "", "the worker type of the 
initial worker group") + cmd.Flags().Bool("remove-worker-group", false, "if set the selected worker group is being removed") genericcli.Must(cmd.RegisterFlagCompletionFunc("project", c.Completion.ProjectListCompletion)) genericcli.Must(cmd.RegisterFlagCompletionFunc("kubernetes-version", c.Completion.KubernetesVersionAssetListCompletion)) + genericcli.Must(cmd.RegisterFlagCompletionFunc("worker-type", c.Completion.MachineTypeAssetListCompletion)) + genericcli.Must(cmd.RegisterFlagCompletionFunc("worker-group", c.Completion.ClusterWorkerGroupsCompletion)) }, UpdateRequestFromCLI: w.updateFromCLI, } @@ -173,14 +183,9 @@ func (c *cluster) createFromCLI() (*apiv1.ClusterServiceCreateRequest, error) { } } - if viper.IsSet("worker-name") || - viper.IsSet("worker-min") || - viper.IsSet("worker-max") || - viper.IsSet("worker-max-surge") || - viper.IsSet("worker-max-unavailable") || - viper.IsSet("worker-type") { + if helpers.IsAnyViperFlagSet("worker-group", "worker-min", "worker-max", "worker-max-surge", "worker-max-unavailable", "worker-type") { rq.Workers = append(rq.Workers, &apiv1.Worker{ - Name: viper.GetString("worker-name"), + Name: viper.GetString("worker-group"), MachineType: viper.GetString("worker-type"), Minsize: viper.GetUint32("worker-min"), Maxsize: viper.GetUint32("worker-max"), @@ -267,14 +272,36 @@ func ClusterResponseToCreate(r *apiv1.Cluster) *apiv1.ClusterServiceCreateReques func ClusterResponseToUpdate(r *apiv1.Cluster) *apiv1.ClusterServiceUpdateRequest { return &apiv1.ClusterServiceUpdateRequest{ - Uuid: r.Uuid, - Project: r.Project, - Kubernetes: r.Kubernetes, - // Workers: workers, // TODO + Uuid: r.Uuid, + Project: r.Project, + Kubernetes: r.Kubernetes, + Workers: clusterWorkersToWorkerUpdate(r.Workers), Maintenance: r.Maintenance, } } +func clusterWorkersToWorkerUpdate(workers []*apiv1.Worker) []*apiv1.WorkerUpdate { + var res []*apiv1.WorkerUpdate + for _, worker := range workers { + worker := worker + + res = append(res, clusterWorkerToWorkerUpdate(worker)) + } + + return res +} + +func clusterWorkerToWorkerUpdate(worker *apiv1.Worker) *apiv1.WorkerUpdate { + return &apiv1.WorkerUpdate{ + Name: worker.Name, + MachineType: pointer.Pointer(worker.MachineType), + Minsize: pointer.Pointer(worker.Minsize), + Maxsize: pointer.Pointer(worker.Maxsize), + Maxsurge: pointer.Pointer(worker.Maxsurge), + Maxunavailable: pointer.Pointer(worker.Maxunavailable), + } +} + func (c *cluster) Update(req *apiv1.ClusterServiceUpdateRequest) (*apiv1.Cluster, error) { ctx, cancel := c.c.NewRequestContext() defer cancel() @@ -299,10 +326,8 @@ func (c *cluster) updateFromCLI(args []string) (*apiv1.ClusterServiceUpdateReque } rq := &apiv1.ClusterServiceUpdateRequest{ - Uuid: uuid, - Project: c.c.GetProject(), - Kubernetes: &apiv1.KubernetesSpec{}, - Maintenance: &apiv1.Maintenance{}, + Uuid: uuid, + Project: cluster.Project, } if viper.IsSet("maintenance-hour") || viper.IsSet("maintenance-minute") || viper.IsSet("maintenance-duration") { @@ -323,9 +348,123 @@ func (c *cluster) updateFromCLI(args []string) (*apiv1.ClusterServiceUpdateReque } if viper.IsSet("kubernetes-version") { + rq.Kubernetes = cluster.Kubernetes + rq.Kubernetes.Version = viper.GetString("kubernetes-version") } + findWorkerGroup := func() (*apiv1.Worker, error) { + if viper.GetString("worker-group") == "" { + if len(cluster.Workers) != 1 { + return nil, fmt.Errorf("please specify the group to act on using the flag --worker-group") + } + + return cluster.Workers[0], nil + } + + for _, worker := range cluster.Workers { + 
worker := worker + if worker.Name == viper.GetString("worker-group") { + return worker, nil + } + } + + return nil, nil + } + + if helpers.IsAnyViperFlagSet("worker-group", "worker-min", "worker-max", "worker-max-surge", "worker-max-unavailable", "worker-type", "remove-worker-group") { + type operation string + + const ( + update operation = "Updating" + delete operation = "Deleting" + add operation = "Adding" + ) + + var ( + newWorkers []*apiv1.WorkerUpdate + showPrompt = func(op operation, name string) error { + if viper.GetBool("skip-security-prompts") { + return nil + } + + return genericcli.PromptCustom(&genericcli.PromptConfig{ + Message: fmt.Sprintf("%s worker group %q, continue?", op, name), + ShowAnswers: true, + Out: c.c.PromptOut, + In: c.c.In, + }) + } + ) + + selectedGroup, err := findWorkerGroup() + if err != nil { + return nil, err + } + + if selectedGroup == nil { + if viper.IsSet("remove-worker-group") { + return nil, fmt.Errorf("cluster has no worker group with name %q", viper.GetString("worker-group")) + } + + if err := showPrompt(add, viper.GetString("worker-group")); err != nil { + return nil, err + } + + newWorkers = append(clusterWorkersToWorkerUpdate(cluster.Workers), &apiv1.WorkerUpdate{ + Name: viper.GetString("worker-group"), + MachineType: pointer.PointerOrNil(viper.GetString("worker-type")), + Minsize: pointer.PointerOrNil(viper.GetUint32("worker-min")), + Maxsize: pointer.PointerOrNil(viper.GetUint32("worker-max")), + Maxsurge: pointer.PointerOrNil(viper.GetUint32("worker-max-surge")), + Maxunavailable: pointer.PointerOrNil(viper.GetUint32("worker-max-unavailable")), + }) + } else { + if viper.IsSet("remove-worker-group") { + if err := showPrompt(delete, selectedGroup.Name); err != nil { + return nil, err + } + + newWorkers = clusterWorkersToWorkerUpdate(cluster.Workers) + slices.DeleteFunc(newWorkers, func(w *apiv1.WorkerUpdate) bool { + return w.Name == selectedGroup.Name + }) + } else { + if err := showPrompt(update, selectedGroup.Name); err != nil { + return nil, err + } + + for _, worker := range cluster.Workers { + worker := worker + + workerUpdate := clusterWorkerToWorkerUpdate(worker) + + if worker.Name == selectedGroup.Name { + if viper.IsSet("worker-min") { + workerUpdate.Minsize = pointer.Pointer(viper.GetUint32("worker-min")) + } + if viper.IsSet("worker-max") { + workerUpdate.Maxsize = pointer.Pointer(viper.GetUint32("worker-max")) + } + if viper.IsSet("worker-max-surge") { + workerUpdate.Maxsurge = pointer.Pointer(viper.GetUint32("worker-max-surge")) + } + if viper.IsSet("worker-max-unavailable") { + workerUpdate.Maxunavailable = pointer.Pointer(viper.GetUint32("worker-max-unavailable")) + } + if viper.IsSet("worker-type") { + workerUpdate.MachineType = pointer.Pointer(viper.GetString("worker-type")) + } + } + + newWorkers = append(newWorkers, workerUpdate) + } + } + } + + rq.Workers = newWorkers + } + return rq, nil } diff --git a/cmd/cluster_test.go b/cmd/cluster_test.go index 8cff311..6d6e692 100644 --- a/cmd/cluster_test.go +++ b/cmd/cluster_test.go @@ -1,6 +1,7 @@ package cmd import ( + "bytes" "strconv" "testing" "time" @@ -324,7 +325,7 @@ ID TENANT PROJECT NAME PARTIT "--maintenance-hour", strconv.Itoa(int(want.Maintenance.TimeWindow.Begin.Hour)), "--maintenance-minute", strconv.Itoa(int(want.Maintenance.TimeWindow.Begin.Minute)), "--maintenance-timezone", want.Maintenance.TimeWindow.Begin.Timezone, - "--worker-name", want.Workers[0].Name, + "--worker-group", want.Workers[0].Name, "--worker-min", strconv.Itoa(int(want.Workers[0].Minsize)), 
"--worker-max", strconv.Itoa(int(want.Workers[0].Maxsize)), "--worker-max-surge", strconv.Itoa(int(want.Workers[0].Maxsurge)), @@ -358,10 +359,18 @@ ID TENANT PROJECT NAME PARTIT "--maintenance-hour", strconv.Itoa(int(want.Maintenance.TimeWindow.Begin.Hour)), "--maintenance-minute", strconv.Itoa(int(want.Maintenance.TimeWindow.Begin.Minute)), "--maintenance-timezone", want.Maintenance.TimeWindow.Begin.Timezone, + "--worker-group", want.Workers[0].Name, + "--worker-min", strconv.Itoa(int(want.Workers[0].Minsize)), + "--worker-max", strconv.Itoa(int(want.Workers[0].Maxsize)), + "--worker-max-surge", strconv.Itoa(int(want.Workers[0].Maxsurge)), + "--worker-max-unavailable", strconv.Itoa(int(want.Workers[0].Maxunavailable)), + "--worker-type", want.Workers[0].MachineType, } - AssertExhaustiveArgs(t, args, commonExcludedFileArgs()...) + exclude := append(commonExcludedFileArgs(), "remove-worker-group") + AssertExhaustiveArgs(t, args, exclude...) return args }, + MockStdin: bytes.NewBufferString("y"), ClientMocks: &apitests.ClientMockFns{ Apiv1Mocks: &apitests.Apiv1MockFns{ Cluster: func(m *mock.Mock) { diff --git a/cmd/common_test.go b/cmd/common_test.go index cc34716..85c2eac 100644 --- a/cmd/common_test.go +++ b/cmd/common_test.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "io" "os" "strings" "testing" @@ -40,6 +41,7 @@ type Test[R any] struct { ClientMocks *apitests.ClientMockFns FsMocks func(fs afero.Fs, want R) + MockStdin *bytes.Buffer DisableMockClient bool // can switch off mock client creation @@ -93,11 +95,18 @@ func (c *Test[R]) newMockConfig(t *testing.T) (any, *bytes.Buffer, *config.Confi c.FsMocks(fs, c.Want) } + var in io.Reader + if c.MockStdin != nil { + in = bytes.NewReader(c.MockStdin.Bytes()) + } + var ( out bytes.Buffer config = &config.Config{ Fs: fs, Out: &out, + In: in, + PromptOut: io.Discard, Completion: &completion.Completion{}, Client: mock.Client(c.ClientMocks), } diff --git a/cmd/completion/cluster.go b/cmd/completion/cluster.go index 27a7f36..1d008e1 100644 --- a/cmd/completion/cluster.go +++ b/cmd/completion/cluster.go @@ -18,8 +18,28 @@ func (c *Completion) ClusterListCompletion(cmd *cobra.Command, args []string, to return nil, cobra.ShellCompDirectiveError } var names []string - for _, s := range resp.Msg.Clusters { - names = append(names, s.Uuid+"\t"+s.Name) + for _, c := range resp.Msg.Clusters { + c := c + names = append(names, c.Uuid+"\t"+c.Name) + } + return names, cobra.ShellCompDirectiveNoFileComp +} + +func (c *Completion) ClusterWorkerGroupsCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + req := &apiv1.ClusterServiceListRequest{ + Project: c.Project, + } + resp, err := c.Client.Apiv1().Cluster().List(c.Ctx, connect.NewRequest(req)) + if err != nil { + return nil, cobra.ShellCompDirectiveError + } + var names []string + for _, c := range resp.Msg.Clusters { + c := c + for _, w := range c.Workers { + w := w + names = append(names, w.Name) + } } return names, cobra.ShellCompDirectiveNoFileComp } diff --git a/cmd/config/config.go b/cmd/config/config.go index 4af13c2..7870bef 100644 --- a/cmd/config/config.go +++ b/cmd/config/config.go @@ -25,7 +25,9 @@ const ( type Config struct { Fs afero.Fs + In io.Reader Out io.Writer + PromptOut io.Writer Client client.Client ListPrinter printers.Printer DescribePrinter printers.Printer diff --git a/cmd/root.go b/cmd/root.go index a79edbd..59ce282 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -22,6 +22,8 @@ func Execute() { cfg := &config.Config{ 
Fs: afero.NewOsFs(), Out: os.Stdout, + PromptOut: os.Stdout, + In: os.Stdin, Completion: &completion.Completion{}, } diff --git a/docs/metal_cluster_create.md b/docs/metal_cluster_create.md index 66b604a..13c13e3 100644 --- a/docs/metal_cluster_create.md +++ b/docs/metal_cluster_create.md @@ -33,11 +33,11 @@ metal cluster create [flags] -p, --project string project of the cluster --skip-security-prompts skips security prompt for bulk operations --timestamps when used with --file (bulk operation): prints timestamps in-between the operations + --worker-group string the name of the initial worker group (default "group-0") --worker-max uint32 the maximum amount of worker nodes of the worker group (default 3) --worker-max-surge uint32 the maximum amount of new worker nodes added to the worker group during a rolling update (default 1) --worker-max-unavailable uint32 the maximum amount of worker nodes removed from the worker group during a rolling update --worker-min uint32 the minimum amount of worker nodes of the worker group (default 1) - --worker-name string the name of the initial worker group (default "group-0") --worker-type string the worker type of the initial worker group ``` diff --git a/docs/metal_cluster_update.md b/docs/metal_cluster_update.md index 25c625f..524f1d3 100644 --- a/docs/metal_cluster_update.md +++ b/docs/metal_cluster_update.md @@ -29,8 +29,15 @@ metal cluster update [flags] --maintenance-minute uint32 minute in which cluster maintenance is allowed to take place --maintenance-timezone string timezone used for the maintenance time window (default "Local") -p, --project string project of the cluster + --remove-worker-group if set the selected worker group is being removed --skip-security-prompts skips security prompt for bulk operations --timestamps when used with --file (bulk operation): prints timestamps in-between the operations + --worker-group string the name of the worker group to add, update or remove + --worker-max uint32 the maximum amount of worker nodes of the worker group (default 3) + --worker-max-surge uint32 the maximum amount of new worker nodes added to the worker group during a rolling update (default 1) + --worker-max-unavailable uint32 the maximum amount of worker nodes removed from the worker group during a rolling update + --worker-min uint32 the minimum amount of worker nodes of the worker group (default 1) + --worker-type string the worker type of the initial worker group ``` ### Options inherited from parent commands diff --git a/go.mod b/go.mod index 8b08b2c..1cc51ac 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/go-openapi/strfmt v0.22.0 github.com/google/go-cmp v0.6.0 github.com/metal-stack-cloud/api v0.7.0 - github.com/metal-stack/metal-lib v0.14.2 + github.com/metal-stack/metal-lib v0.14.3 github.com/metal-stack/v v1.0.3 github.com/olekukonko/tablewriter v0.0.5 github.com/spf13/afero v1.11.0 diff --git a/go.sum b/go.sum index e09d1e5..b6e1f02 100644 --- a/go.sum +++ b/go.sum @@ -234,8 +234,8 @@ github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI= github.com/metal-stack-cloud/api v0.7.0 h1:n2c+5DE3ToKW/8OQB1xHoffmBxf8c77FBPiVjvG10bs= github.com/metal-stack-cloud/api v0.7.0/go.mod h1:woNo6De8bV/CvpuzXwedoTqRJGKXZZgGV7PXuQuWDzI= -github.com/metal-stack/metal-lib v0.14.2 h1:ntIZiV8fVoWsgPLXOy9xrObZr1NdU5caYUP0zzefUME= -github.com/metal-stack/metal-lib v0.14.2/go.mod 
h1:2wKxFXSCpA1Dr+Rq0ddpQCPKPGMWJp4cpIaVTM4lDi0= +github.com/metal-stack/metal-lib v0.14.3 h1:oHtOnGsQC/ySLXzj14mfy7/8bwmCPfD5SD6U4yh8BHU= +github.com/metal-stack/metal-lib v0.14.3/go.mod h1:2wKxFXSCpA1Dr+Rq0ddpQCPKPGMWJp4cpIaVTM4lDi0= github.com/metal-stack/v v1.0.3 h1:Sh2oBlnxrCUD+mVpzfC8HiqL045YWkxs0gpTvkjppqs= github.com/metal-stack/v v1.0.3/go.mod h1:YTahEu7/ishwpYKnp/VaW/7nf8+PInogkfGwLcGPdXg= github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= diff --git a/pkg/helpers/viper.go b/pkg/helpers/viper.go new file mode 100644 index 0000000..72dc361 --- /dev/null +++ b/pkg/helpers/viper.go @@ -0,0 +1,12 @@ +package helpers + +import "github.com/spf13/viper" + +func IsAnyViperFlagSet(names ...string) bool { + for _, name := range names { + if viper.IsSet(name) { + return true + } + } + return false +}
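
For reference, a rough sketch of how the worker-group flags introduced by this patch are meant to be used from the CLI, based on the flag descriptions added to `docs/metal_cluster_update.md`. This is a hedged example, not part of the patch: the cluster UUID, group names and machine type are placeholders, and the positional UUID argument is assumed from the usual `metal cluster update` invocation.

```bash
# add a new worker group to an existing cluster
# (<cluster-uuid> and <machine-type> are placeholders)
metal cluster update <cluster-uuid> --worker-group group-1 --worker-type <machine-type> --worker-min 1 --worker-max 3

# scale an existing worker group; if the cluster has exactly one group,
# --worker-group can be omitted
metal cluster update <cluster-uuid> --worker-group group-0 --worker-max 5

# remove a worker group (asks for confirmation unless --skip-security-prompts is set)
metal cluster update <cluster-uuid> --worker-group group-1 --remove-worker-group
```

Each of these paths goes through the confirmation prompt added in `updateFromCLI`, which asks before adding, updating or deleting the selected group.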