Merge branch 'develop'
mbolt35 committed May 11, 2020
2 parents 33d9ecf + 288c269 commit 4646af9
Showing 12 changed files with 55 additions and 44 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -13,3 +13,4 @@

# Dependency directories (remove the comment below to include it)
# vendor/
+update-version.sh
2 changes: 1 addition & 1 deletion Makefile
@@ -1,4 +1,4 @@
-VERSION=v1.1
+VERSION=v1.2
REGISTRY=gcr.io
PROJECT_ID=kubecost1
APPNAME=cluster-turndown
2 changes: 1 addition & 1 deletion README.md
@@ -19,7 +19,7 @@ The parameters to supply the script are as follows:

---

-### AWS (Kops) Setup
+### EKS & AWS Kops Setup

Create a new User with **AutoScalingFullAccess** permissions. Create a new file, service-key.json, and use the access key id and secret access key to fill out the following template:

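The template itself is collapsed in this diff view. As a sketch of what such a key file typically contains (field names assumed from the standard AWS access-key pattern, not copied from this commit), service-key.json would look something like:

```json
{
    "aws_access_key_id": "<ACCESS_KEY_ID>",
    "aws_secret_access_key": "<SECRET_ACCESS_KEY>"
}
```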
2 changes: 1 addition & 1 deletion artifacts/cluster-turndown-full.yaml
@@ -179,7 +179,7 @@ spec:
spec:
containers:
- name: cluster-turndown
-          image: gcr.io/kubecost1/cluster-turndown:v1.1
+          image: gcr.io/kubecost1/cluster-turndown:v1.2
volumeMounts:
- name: turndown-keys
mountPath: /var/keys
6 changes: 4 additions & 2 deletions cmd/turndown/main.go
@@ -93,11 +93,13 @@ func runTurndownResourceController(kubeClient kubernetes.Interface, tdClient cli
// For now, we'll choose our strategy based on the provider, but functionally, there is
// no dependency.
func strategyForProvider(c kubernetes.Interface, p provider.TurndownProvider) (strategy.TurndownStrategy, error) {
+	m := make(map[string]string)
+
switch v := p.(type) {
case *provider.GKEProvider:
-		return strategy.NewMasterlessTurndownStrategy(c, p), nil
+		return strategy.NewMasterlessTurndownStrategy(c, p, m), nil
case *provider.EKSProvider:
-		return strategy.NewMasterlessTurndownStrategy(c, p), nil
+		return strategy.NewMasterlessTurndownStrategy(c, p, m), nil
case *provider.AWSProvider:
return strategy.NewStandardTurndownStrategy(c, p), nil
default:
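With this change, both masterless cases thread a node-pool label map into the strategy; main.go currently passes an empty map. A hypothetical helper (not part of this commit; the function name and flag format are assumptions) sketches how a caller could populate that map from a "k1=v1,k2=v2"-style option before calling NewMasterlessTurndownStrategy:

```go
package main

import "strings"

// parseNodePoolLabels turns a "k1=v1,k2=v2" string into a map suitable for
// the new labels parameter of NewMasterlessTurndownStrategy.
// Malformed or empty-key entries are skipped.
func parseNodePoolLabels(s string) map[string]string {
	labels := map[string]string{}
	for _, kv := range strings.Split(s, ",") {
		if parts := strings.SplitN(kv, "=", 2); len(parts) == 2 && parts[0] != "" {
			labels[parts[0]] = parts[1]
		}
	}
	return labels
}
```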
6 changes: 2 additions & 4 deletions pkg/turndown/provider/awsprovider.go
@@ -42,12 +42,10 @@ func (p *AWSProvider) IsTurndownNodePool() bool {
return p.clusterProvider.IsNodePool(AWSTurndownPoolName)
}

-func (p *AWSProvider) CreateSingletonNodePool() error {
+func (p *AWSProvider) CreateSingletonNodePool(labels map[string]string) error {
ctx := context.TODO()

-	return p.clusterProvider.CreateNodePool(ctx, AWSTurndownPoolName, "t2.small", 1, "gp2", 10, map[string]string{
-		TurndownNodeLabel: "true",
-	})
+	return p.clusterProvider.CreateNodePool(ctx, AWSTurndownPoolName, "t2.small", 1, "gp2", 10, toTurndownNodePoolLabels(labels))
}

func (p *AWSProvider) GetPoolID(node *v1.Node) string {
6 changes: 2 additions & 4 deletions pkg/turndown/provider/eksprovider.go
@@ -38,12 +38,10 @@ func (p *EKSProvider) IsTurndownNodePool() bool {
return p.clusterProvider.IsNodePool(EKSTurndownPoolName)
}

-func (p *EKSProvider) CreateSingletonNodePool() error {
+func (p *EKSProvider) CreateSingletonNodePool(labels map[string]string) error {
ctx := context.TODO()

-	return p.clusterProvider.CreateNodePool(ctx, EKSTurndownPoolName, "t2.small", 1, "gp2", 10, map[string]string{
-		TurndownNodeLabel: "true",
-	})
+	return p.clusterProvider.CreateNodePool(ctx, EKSTurndownPoolName, "t2.small", 1, "gp2", 10, toTurndownNodePoolLabels(labels))
}

func (p *EKSProvider) GetPoolID(node *v1.Node) string {
6 changes: 2 additions & 4 deletions pkg/turndown/provider/gkeprovider.go
@@ -38,12 +38,10 @@ func (p *GKEProvider) IsTurndownNodePool() bool {
return p.clusterProvider.IsNodePool(GKETurndownPoolName)
}

-func (p *GKEProvider) CreateSingletonNodePool() error {
+func (p *GKEProvider) CreateSingletonNodePool(labels map[string]string) error {
ctx := context.TODO()

-	return p.clusterProvider.CreateNodePool(ctx, GKETurndownPoolName, "g1-small", 1, "pd-standard", 10, map[string]string{
-		TurndownNodeLabel: "true",
-	})
+	return p.clusterProvider.CreateNodePool(ctx, GKETurndownPoolName, "g1-small", 1, "pd-standard", 10, toTurndownNodePoolLabels(labels))
}

func (p *GKEProvider) GetPoolID(node *v1.Node) string {
31 changes: 18 additions & 13 deletions pkg/turndown/provider/provider.go
@@ -2,7 +2,6 @@ package provider

import (
"errors"
"net/http"
"strings"

"cloud.google.com/go/compute/metadata"
@@ -17,29 +16,21 @@ import (
)

const (
-	TurndownNodeLabel = "cluster-turndown-node"
+	TurndownNodeLabel         = "cluster-turndown-node"
+	TurndownNodeLabelSelector = "cluster-turndown-node=true"
)

-type UserAgentTransport struct {
-	userAgent string
-	base      http.RoundTripper
-}
-
-func (t UserAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) {
-	req.Header.Set("User-Agent", t.userAgent)
-	return t.base.RoundTrip(req)
-}

// TurndownProvider contains methods used to manage turndown
type TurndownProvider interface {
IsTurndownNodePool() bool
-	CreateSingletonNodePool() error
+	CreateSingletonNodePool(labels map[string]string) error
GetNodePools() ([]cp.NodePool, error)
GetPoolID(node *v1.Node) string
SetNodePoolSizes(nodePools []cp.NodePool, size int32) error
ResetNodePoolSizes(nodePools []cp.NodePool) error
}

// Creates a new TurndownProvider implementation using the kubernetes client instance and a ClusterProvider
func NewTurndownProvider(client kubernetes.Interface, clusterProvider cp.ClusterProvider) (TurndownProvider, error) {
if metadata.OnGCE() {
return NewGKEProvider(client, clusterProvider), nil
@@ -67,3 +58,17 @@ func NewTurndownProvider(client kubernetes.Interface, clusterProvider cp.Cluster
return nil, errors.New("Custom Not Supported")
}
}

+// Utility function which creates a new map[string]string containing turndown labels in addition
+// to the provided labels
+func toTurndownNodePoolLabels(labels map[string]string) map[string]string {
+	m := map[string]string{
+		TurndownNodeLabel: "true",
+	}
+
+	for k, v := range labels {
+		m[k] = v
+	}
+
+	return m
+}
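Worth noting about the merge order in toTurndownNodePoolLabels: caller-supplied labels are copied in after the turndown label, so a caller passing cluster-turndown-node itself would overwrite the default "true" value. A quick usage sketch (assumes it runs inside the provider package; the "team" label is a hypothetical example):

```go
// Merge a custom node-pool label with the built-in turndown label.
labels := toTurndownNodePoolLabels(map[string]string{"team": "infra"})
// labels == map[string]string{"cluster-turndown-node": "true", "team": "infra"}
```

Nodes carrying that label are exactly what the new TurndownNodeLabelSelector constant matches, i.e. the equivalent of `kubectl get nodes -l cluster-turndown-node=true`.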
29 changes: 19 additions & 10 deletions pkg/turndown/strategy/masterless.go
@@ -18,17 +18,26 @@ const (
MasterlessTaintKey = "CriticalAddonsOnly"
)

+// MasterlessTurndownStrategy is a TurndownStrategy implementation used in managed kubernetes where the master control plane is
+// not available as a node to the cluster. When the turndown schedule occurs, a new node pool with a single "small" node is created.
+// Taints are added to this node to only allow specific pods to be scheduled there. We update the turndown deployment such
+// that the turndown pod is allowed to schedule on the singleton node. Once the pod is moved to the new node, it will start back up and
+// resume scaledown. This is done by cordoning all nodes in the cluster (other than our new small node), and then reducing the node pool
+// sizes to 0.
type MasterlessTurndownStrategy struct {
-	client   kubernetes.Interface
-	provider provider.TurndownProvider
-	log      logging.NamedLogger
+	client         kubernetes.Interface
+	provider       provider.TurndownProvider
+	nodePoolLabels map[string]string
+	log            logging.NamedLogger
}

-func NewMasterlessTurndownStrategy(client kubernetes.Interface, provider provider.TurndownProvider) TurndownStrategy {
+// Creates a new MasterlessTurndownStrategy instance
+func NewMasterlessTurndownStrategy(client kubernetes.Interface, provider provider.TurndownProvider, npLabels map[string]string) TurndownStrategy {
return &MasterlessTurndownStrategy{
-		client:   client,
-		provider: provider,
-		log:      logging.NamedLogger("MasterlessStrategy"),
+		client:         client,
+		provider:       provider,
+		nodePoolLabels: npLabels,
+		log:            logging.NamedLogger("MasterlessStrategy"),
}
}

Expand Down Expand Up @@ -70,15 +79,15 @@ func (ktdm *MasterlessTurndownStrategy) CreateOrGetHostNode() (*v1.Node, error)
if !ktdm.provider.IsTurndownNodePool() {
// Create a new singleton node pool with a small instance capable of hosting the turndown
// pod -- this implementation will create and wait for the node to exist before returning
-	err := ktdm.provider.CreateSingletonNodePool()
+	err := ktdm.provider.CreateSingletonNodePool(ktdm.nodePoolLabels)
if err != nil {
return nil, err
}
}

// Lookup the turndown node in the kubernetes API
nodeList, err := ktdm.client.CoreV1().Nodes().List(metav1.ListOptions{
LabelSelector: "cluster-turndown-node=true",
LabelSelector: provider.TurndownNodeLabelSelector,
})
if err != nil {
return nil, err
@@ -105,7 +114,7 @@ func (ktdm *MasterlessTurndownStrategy) CreateOrGetHostNode() (*v1.Node, error)
}

// Patch and get the updated node
-	tnode, err = patcher.UpdateNodeLabel(ktdm.client, *targetNode, "cluster-turndown-node", "true")
+	tnode, err = patcher.UpdateNodeLabel(ktdm.client, *targetNode, provider.TurndownNodeLabel, "true")
if err != nil {
return nil, err
}
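Taken together, the masterless flow leaves the turndown deployment pinned to the singleton node. A sketch of the scheduling-relevant fragment of the patched pod spec, reconstructed from the nodeSelector and toleration changes visible in this diff (the taint key is assumed to come from MasterlessTaintKey, i.e. CriticalAddonsOnly):

```yaml
# Reconstructed illustration, not a file from this commit
spec:
  nodeSelector:
    cluster-turndown-node: "true"   # provider.TurndownNodeLabel
  tolerations:
  - key: CriticalAddonsOnly         # MasterlessTaintKey (assumed)
    operator: Exists
```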
4 changes: 2 additions & 2 deletions pkg/turndown/strategy/standard.go
@@ -65,7 +65,7 @@ func (ktdm *StandardTurndownStrategy) CreateOrGetHostNode() (*v1.Node, error) {
masterNode := &nodeList.Items[0]

// Patch and get the updated node
-	return patcher.UpdateNodeLabel(ktdm.client, *masterNode, "cluster-turndown-node", "true")
+	return patcher.UpdateNodeLabel(ktdm.client, *masterNode, provider.TurndownNodeLabel, "true")
}

func (sts *StandardTurndownStrategy) UpdateDNS() error {
@@ -129,7 +129,7 @@ func (sts *StandardTurndownStrategy) ReverseHostNode() error {
masterNode := &nodeList.Items[0]

// Patch and get the updated node
-	_, err = patcher.DeleteNodeLabel(sts.client, *masterNode, "cluster-turndown-node")
+	_, err = patcher.DeleteNodeLabel(sts.client, *masterNode, provider.TurndownNodeLabel)

dns, err := sts.client.AppsV1().Deployments("kube-system").Get("kube-dns", metav1.GetOptions{})
if err != nil {
4 changes: 2 additions & 2 deletions pkg/turndown/turndown.go
@@ -75,7 +75,7 @@ func (ktdm *KubernetesTurndownManager) IsScaledDown() bool {

func (ktdm *KubernetesTurndownManager) IsRunningOnTurndownNode() (bool, error) {
nodeList, err := ktdm.client.CoreV1().Nodes().List(metav1.ListOptions{
LabelSelector: "cluster-turndown-node=true",
LabelSelector: provider.TurndownNodeLabelSelector,
})
if err != nil {
return false, err
@@ -136,7 +136,7 @@ func (ktdm *KubernetesTurndownManager) PrepareTurndownEnvironment() error {
Operator: v1.TolerationOpExists,
})
d.Spec.Template.Spec.NodeSelector = map[string]string{
"cluster-turndown-node": "true",
provider.TurndownNodeLabel: "true",
}
return nil
})
