From 184e06949adfbd77fcdcd04bb077d99ea3a015d2 Mon Sep 17 00:00:00 2001
From: Matt Bolt
Date: Fri, 1 May 2020 17:30:41 -0400
Subject: [PATCH 1/6] Advance version to 1.2-SNAPSHOT

---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 955deda..b437068 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-VERSION=v1.1
+VERSION=v1.2-SNAPSHOT
 REGISTRY=gcr.io
 PROJECT_ID=kubecost1
 APPNAME=cluster-turndown

From d9c5db46d5775490b38747c4b61dae6138f1987b Mon Sep 17 00:00:00 2001
From: Webb Brown <298359+dwbrown2@users.noreply.github.com>
Date: Thu, 7 May 2020 22:11:18 -0700
Subject: [PATCH 2/6] Add EKS support

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index f642d72..36f8c53 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ The parameters to supply the script are as follows:
 
 ---
 
-### AWS (Kops) Setup
+### EKS & AWS Kops Setup
 
 Create a new User with **AutoScalingFullAccess** permissions. Create a new file, service-key.json, and use the access key id and secret access key to fill out the following template:
 

From a3dd75a75c963dd65b62482c0cba0873d1087dcb Mon Sep 17 00:00:00 2001
From: Matt Bolt
Date: Mon, 11 May 2020 17:59:10 -0400
Subject: [PATCH 3/6] Allow label set to be passed to the masterless strategy
 to be used in singleton node creation. Move turndown magic strings to const
 values.

---
 pkg/turndown/provider/awsprovider.go |  6 ++----
 pkg/turndown/provider/eksprovider.go |  6 ++----
 pkg/turndown/provider/gkeprovider.go |  6 ++----
 pkg/turndown/provider/provider.go    | 31 ++++++++++++++++------------
 pkg/turndown/strategy/masterless.go  | 29 +++++++++++++++++---------
 pkg/turndown/strategy/standard.go    |  4 ++--
 pkg/turndown/turndown.go             |  4 ++--
 7 files changed, 47 insertions(+), 39 deletions(-)

diff --git a/pkg/turndown/provider/awsprovider.go b/pkg/turndown/provider/awsprovider.go
index 202ae7b..cfb0575 100644
--- a/pkg/turndown/provider/awsprovider.go
+++ b/pkg/turndown/provider/awsprovider.go
@@ -42,12 +42,10 @@ func (p *AWSProvider) IsTurndownNodePool() bool {
     return p.clusterProvider.IsNodePool(AWSTurndownPoolName)
 }
 
-func (p *AWSProvider) CreateSingletonNodePool() error {
+func (p *AWSProvider) CreateSingletonNodePool(labels map[string]string) error {
     ctx := context.TODO()
 
-    return p.clusterProvider.CreateNodePool(ctx, AWSTurndownPoolName, "t2.small", 1, "gp2", 10, map[string]string{
-        TurndownNodeLabel: "true",
-    })
+    return p.clusterProvider.CreateNodePool(ctx, AWSTurndownPoolName, "t2.small", 1, "gp2", 10, toTurndownNodePoolLabels(labels))
 }
 
 func (p *AWSProvider) GetPoolID(node *v1.Node) string {
diff --git a/pkg/turndown/provider/eksprovider.go b/pkg/turndown/provider/eksprovider.go
index 9910f69..c59b949 100644
--- a/pkg/turndown/provider/eksprovider.go
+++ b/pkg/turndown/provider/eksprovider.go
@@ -38,12 +38,10 @@ func (p *EKSProvider) IsTurndownNodePool() bool {
     return p.clusterProvider.IsNodePool(EKSTurndownPoolName)
 }
 
-func (p *EKSProvider) CreateSingletonNodePool() error {
+func (p *EKSProvider) CreateSingletonNodePool(labels map[string]string) error {
     ctx := context.TODO()
 
-    return p.clusterProvider.CreateNodePool(ctx, EKSTurndownPoolName, "t2.small", 1, "gp2", 10, map[string]string{
-        TurndownNodeLabel: "true",
-    })
+    return p.clusterProvider.CreateNodePool(ctx, EKSTurndownPoolName, "t2.small", 1, "gp2", 10, toTurndownNodePoolLabels(labels))
 }
 
 func (p *EKSProvider) GetPoolID(node *v1.Node) string {
diff --git a/pkg/turndown/provider/gkeprovider.go b/pkg/turndown/provider/gkeprovider.go
index 0a03a82..02ce49b 100644
--- a/pkg/turndown/provider/gkeprovider.go
+++ b/pkg/turndown/provider/gkeprovider.go
@@ -38,12 +38,10 @@ func (p *GKEProvider) IsTurndownNodePool() bool {
     return p.clusterProvider.IsNodePool(GKETurndownPoolName)
 }
 
-func (p *GKEProvider) CreateSingletonNodePool() error {
+func (p *GKEProvider) CreateSingletonNodePool(labels map[string]string) error {
     ctx := context.TODO()
 
-    return p.clusterProvider.CreateNodePool(ctx, GKETurndownPoolName, "g1-small", 1, "pd-standard", 10, map[string]string{
-        TurndownNodeLabel: "true",
-    })
+    return p.clusterProvider.CreateNodePool(ctx, GKETurndownPoolName, "g1-small", 1, "pd-standard", 10, toTurndownNodePoolLabels(labels))
 }
 
 func (p *GKEProvider) GetPoolID(node *v1.Node) string {
diff --git a/pkg/turndown/provider/provider.go b/pkg/turndown/provider/provider.go
index de74d9b..fcf39ed 100644
--- a/pkg/turndown/provider/provider.go
+++ b/pkg/turndown/provider/provider.go
@@ -2,7 +2,6 @@ package provider
 
 import (
     "errors"
-    "net/http"
     "strings"
 
     "cloud.google.com/go/compute/metadata"
@@ -17,29 +16,21 @@ import (
 )
 
 const (
-    TurndownNodeLabel = "cluster-turndown-node"
+    TurndownNodeLabel         = "cluster-turndown-node"
+    TurndownNodeLabelSelector = "cluster-turndown-node=true"
 )
 
-type UserAgentTransport struct {
-    userAgent string
-    base      http.RoundTripper
-}
-
-func (t UserAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) {
-    req.Header.Set("User-Agent", t.userAgent)
-    return t.base.RoundTrip(req)
-}
-
 // TurndownProvider contains methods used to manage turndown
 type TurndownProvider interface {
     IsTurndownNodePool() bool
-    CreateSingletonNodePool() error
+    CreateSingletonNodePool(labels map[string]string) error
     GetNodePools() ([]cp.NodePool, error)
     GetPoolID(node *v1.Node) string
     SetNodePoolSizes(nodePools []cp.NodePool, size int32) error
     ResetNodePoolSizes(nodePools []cp.NodePool) error
 }
 
+// Creates a new TurndownProvider implementation using the kubernetes client instance and a ClusterProvider
 func NewTurndownProvider(client kubernetes.Interface, clusterProvider cp.ClusterProvider) (TurndownProvider, error) {
     if metadata.OnGCE() {
         return NewGKEProvider(client, clusterProvider), nil
@@ -67,3 +58,17 @@ func NewTurndownProvider(client kubernetes.Interface, clusterProvider cp.Cluster
         return nil, errors.New("Custom Not Supported")
     }
 }
+
+// Utility function which creates a new map[string]string containing turndown labels in addition
+// to the provided labels
+func toTurndownNodePoolLabels(labels map[string]string) map[string]string {
+    m := map[string]string{
+        TurndownNodeLabel: "true",
+    }
+
+    for k, v := range labels {
+        m[k] = v
+    }
+
+    return m
+}
diff --git a/pkg/turndown/strategy/masterless.go b/pkg/turndown/strategy/masterless.go
index 338eec2..4816181 100644
--- a/pkg/turndown/strategy/masterless.go
+++ b/pkg/turndown/strategy/masterless.go
@@ -18,17 +18,26 @@ const (
     MasterlessTaintKey = "CriticalAddonsOnly"
 )
 
+// MasterlessTurndownStrategy is a TurndownStrategy implementation used in managed kubernetes where the master control plane is
+// not available as a node to the cluster. When the turndown schedule occurs, a new node pool with a single "small" node is created.
+// Taints are added to this node to only allow specific pods to be scheduled there. We update the turndown deployment such
+// that the turndown pod is allowed to schedule on the singleton node. Once the pod is moved to the new node, it will start back up and
+// resume scaledown. This is done by cordoning all nodes in the cluster (other than our new small node), and then reducing the node pool
+// sizes to 0.
 type MasterlessTurndownStrategy struct {
-    client   kubernetes.Interface
-    provider provider.TurndownProvider
-    log      logging.NamedLogger
+    client         kubernetes.Interface
+    provider       provider.TurndownProvider
+    nodePoolLabels map[string]string
+    log            logging.NamedLogger
 }
 
-func NewMasterlessTurndownStrategy(client kubernetes.Interface, provider provider.TurndownProvider) TurndownStrategy {
+// Creates a new MasterlessTurndownStrategy instance
+func NewMasterlessTurndownStrategy(client kubernetes.Interface, provider provider.TurndownProvider, npLabels map[string]string) TurndownStrategy {
     return &MasterlessTurndownStrategy{
-        client:   client,
-        provider: provider,
-        log:      logging.NamedLogger("MasterlessStrategy"),
+        client:         client,
+        provider:       provider,
+        nodePoolLabels: npLabels,
+        log:            logging.NamedLogger("MasterlessStrategy"),
     }
 }
 
@@ -70,7 +79,7 @@ func (ktdm *MasterlessTurndownStrategy) CreateOrGetHostNode() (*v1.Node, error)
     if !ktdm.provider.IsTurndownNodePool() {
         // Create a new singleton node pool with a small instance capable of hosting the turndown
         // pod -- this implementation will create and wait for the node to exist before returning
-        err := ktdm.provider.CreateSingletonNodePool()
+        err := ktdm.provider.CreateSingletonNodePool(ktdm.nodePoolLabels)
         if err != nil {
             return nil, err
         }
@@ -78,7 +87,7 @@ func (ktdm *MasterlessTurndownStrategy) CreateOrGetHostNode() (*v1.Node, error)
     // Lookup the turndown node in the kubernetes API
     nodeList, err := ktdm.client.CoreV1().Nodes().List(metav1.ListOptions{
-        LabelSelector: "cluster-turndown-node=true",
+        LabelSelector: provider.TurndownNodeLabelSelector,
     })
     if err != nil {
         return nil, err
     }
@@ -105,7 +114,7 @@ func (ktdm *MasterlessTurndownStrategy) CreateOrGetHostNode() (*v1.Node, error)
     }
 
     // Patch and get the updated node
-    tnode, err = patcher.UpdateNodeLabel(ktdm.client, *targetNode, "cluster-turndown-node", "true")
+    tnode, err = patcher.UpdateNodeLabel(ktdm.client, *targetNode, provider.TurndownNodeLabel, "true")
     if err != nil {
         return nil, err
     }
diff --git a/pkg/turndown/strategy/standard.go b/pkg/turndown/strategy/standard.go
index 87d5c56..a056fc2 100644
--- a/pkg/turndown/strategy/standard.go
+++ b/pkg/turndown/strategy/standard.go
@@ -65,7 +65,7 @@ func (ktdm *StandardTurndownStrategy) CreateOrGetHostNode() (*v1.Node, error) {
     masterNode := &nodeList.Items[0]
 
     // Patch and get the updated node
-    return patcher.UpdateNodeLabel(ktdm.client, *masterNode, "cluster-turndown-node", "true")
+    return patcher.UpdateNodeLabel(ktdm.client, *masterNode, provider.TurndownNodeLabel, "true")
 }
 
 func (sts *StandardTurndownStrategy) UpdateDNS() error {
@@ -129,7 +129,7 @@ func (sts *StandardTurndownStrategy) ReverseHostNode() error {
     masterNode := &nodeList.Items[0]
 
     // Patch and get the updated node
-    _, err = patcher.DeleteNodeLabel(sts.client, *masterNode, "cluster-turndown-node")
+    _, err = patcher.DeleteNodeLabel(sts.client, *masterNode, provider.TurndownNodeLabel)
 
     dns, err := sts.client.AppsV1().Deployments("kube-system").Get("kube-dns", metav1.GetOptions{})
     if err != nil {
diff --git a/pkg/turndown/turndown.go b/pkg/turndown/turndown.go
index 243d8e6..99a2dbe 100644
--- a/pkg/turndown/turndown.go
+++ b/pkg/turndown/turndown.go
@@ -75,7 +75,7 @@ func (ktdm *KubernetesTurndownManager) IsScaledDown() bool {
 func (ktdm *KubernetesTurndownManager) IsRunningOnTurndownNode() (bool, error) {
     nodeList, err := ktdm.client.CoreV1().Nodes().List(metav1.ListOptions{
-        LabelSelector: "cluster-turndown-node=true",
+        LabelSelector: provider.TurndownNodeLabelSelector,
     })
     if err != nil {
         return false, err
     }
@@ -136,7 +136,7 @@ func (ktdm *KubernetesTurndownManager) PrepareTurndownEnvironment() error {
             Operator: v1.TolerationOpExists,
         })
         d.Spec.Template.Spec.NodeSelector = map[string]string{
-            "cluster-turndown-node": "true",
+            provider.TurndownNodeLabel: "true",
         }
         return nil
     })

From 232f51239a12ed65f5de827695ce3687487383be Mon Sep 17 00:00:00 2001
From: Matt Bolt
Date: Mon, 11 May 2020 18:02:21 -0400
Subject: [PATCH 4/6] Update turndown main to pass empty map for labels.

---
 cmd/turndown/main.go | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/cmd/turndown/main.go b/cmd/turndown/main.go
index d79d767..7b88e0a 100644
--- a/cmd/turndown/main.go
+++ b/cmd/turndown/main.go
@@ -93,11 +93,13 @@ func runTurndownResourceController(kubeClient kubernetes.Interface, tdClient cli
 // For now, we'll choose our strategy based on the provider, but functionally, there is
 // no dependency.
 func strategyForProvider(c kubernetes.Interface, p provider.TurndownProvider) (strategy.TurndownStrategy, error) {
+    m := make(map[string]string)
+
     switch v := p.(type) {
     case *provider.GKEProvider:
-        return strategy.NewMasterlessTurndownStrategy(c, p), nil
+        return strategy.NewMasterlessTurndownStrategy(c, p, m), nil
     case *provider.EKSProvider:
-        return strategy.NewMasterlessTurndownStrategy(c, p), nil
+        return strategy.NewMasterlessTurndownStrategy(c, p, m), nil
     case *provider.AWSProvider:
         return strategy.NewStandardTurndownStrategy(c, p), nil
     default:

From 37dd22c7aa5a2b91ec916bc065288e7d40280cd0 Mon Sep 17 00:00:00 2001
From: Matt Bolt
Date: Mon, 11 May 2020 18:25:17 -0400
Subject: [PATCH 5/6] Advance version to 1.2

---
 Makefile                             | 2 +-
 artifacts/cluster-turndown-full.yaml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index b437068..11d8862 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-VERSION=v1.2-SNAPSHOT
+VERSION=v1.2
 REGISTRY=gcr.io
 PROJECT_ID=kubecost1
 APPNAME=cluster-turndown
diff --git a/artifacts/cluster-turndown-full.yaml b/artifacts/cluster-turndown-full.yaml
index ee896b0..6a4eb9f 100644
--- a/artifacts/cluster-turndown-full.yaml
+++ b/artifacts/cluster-turndown-full.yaml
@@ -179,7 +179,7 @@ spec:
     spec:
       containers:
       - name: cluster-turndown
-        image: gcr.io/kubecost1/cluster-turndown:v1.1
+        image: gcr.io/kubecost1/cluster-turndown:v1.2
         volumeMounts:
         - name: turndown-keys
           mountPath: /var/keys

From 288c269c8c5253457bccc67de09ebd754a8cd291 Mon Sep 17 00:00:00 2001
From: Matt Bolt
Date: Mon, 11 May 2020 18:25:40 -0400
Subject: [PATCH 6/6] Update gitignore

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index 66fd13c..812fe08 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,3 +13,4 @@
 
 # Dependency directories (remove the comment below to include it)
 # vendor/
+update-version.sh