From c0221c36156084cd49fc35e450f7d0d341bcac62 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Thu, 26 Jan 2017 14:33:04 +0900 Subject: [PATCH 1/6] feat: Allow definiting private and public subnets to be chosen for worker/controller/etcd nodes and a controller loadbalancer --- config/config.go | 158 +++++-- config/config_test.go | 37 +- config/stack_config_test.go | 14 +- config/templates/cluster.yaml | 3 +- config/templates/stack-template.json | 224 ++-------- model/controller.go | 26 +- model/etcd.go | 13 +- model/identifier.go | 23 + model/nat_gateway.go | 73 ++++ model/subnet.go | 148 +++++-- model/worker.go | 16 +- nodepool/config/config.go | 13 +- nodepool/config/templates/stack-template.json | 53 +-- test/integration/maincluster_test.go | 413 ++++++++++++++++++ 14 files changed, 850 insertions(+), 364 deletions(-) create mode 100644 model/nat_gateway.go diff --git a/config/config.go b/config/config.go index 871afa29f..901e871f5 100644 --- a/config/config.go +++ b/config/config.go @@ -72,7 +72,7 @@ func NewDefaultCluster() *Cluster { AWSCliImageRepo: "quay.io/coreos/awscli", AWSCliTag: "master", ContainerRuntime: "docker", - Subnets: []*model.Subnet{}, + Subnets: []model.Subnet{}, EIPAllocationIDs: []string{}, MapPublicIPs: true, Experimental: experimental, @@ -168,25 +168,64 @@ func ClusterFromBytes(data []byte) (*Cluster, error) { return nil, fmt.Errorf("invalid cluster: %v", err) } + c.SetDefaults() + + return c, nil +} + +func (c *Cluster) SetDefaults() { // For backward-compatibility if len(c.Subnets) == 0 { - c.Subnets = []*model.Subnet{ - { - AvailabilityZone: c.AvailabilityZone, - InstanceCIDR: c.InstanceCIDR, - }, + c.Subnets = []model.Subnet{ + model.NewPublicSubnet(c.AvailabilityZone, c.InstanceCIDR), } } for i, s := range c.Subnets { if s.CustomName == "" { - s.CustomName = fmt.Sprintf("Subnet%d", i) + c.Subnets[i].CustomName = fmt.Sprintf("Subnet%d", i) } - // Mark top-level subnets appropriately - s.TopLevel = true } - return c, nil + for i, s 
:= range c.Worker.Subnets { + linkedSubnet := c.FindSubnetMatching(s) + c.Worker.Subnets[i] = linkedSubnet + } + + for i, s := range c.Controller.Subnets { + linkedSubnet := c.FindSubnetMatching(s) + c.Controller.Subnets[i] = linkedSubnet + } + + for i, s := range c.Controller.LoadBalancer.Subnets { + linkedSubnet := c.FindSubnetMatching(s) + c.Controller.LoadBalancer.Subnets[i] = linkedSubnet + } + + for i, s := range c.Etcd.Subnets { + linkedSubnet := c.FindSubnetMatching(s) + c.Etcd.Subnets[i] = linkedSubnet + } + + if len(c.Worker.Subnets) == 0 { + c.Worker.Subnets = c.PublicSubnets() + } + + if len(c.Controller.Subnets) == 0 { + c.Controller.Subnets = c.PublicSubnets() + } + + if len(c.Controller.LoadBalancer.Subnets) == 0 { + if c.Controller.LoadBalancer.Private == true { + c.Controller.LoadBalancer.Subnets = c.PrivateSubnets() + } else { + c.Controller.LoadBalancer.Subnets = c.PublicSubnets() + } + } + + if len(c.Etcd.Subnets) == 0 { + c.Etcd.Subnets = c.PublicSubnets() + } } func ClusterFromBytesWithEncryptService(data []byte, encryptService EncryptService) (*Cluster, error) { @@ -245,7 +284,7 @@ type DeploymentSettings struct { ContainerRuntime string `yaml:"containerRuntime,omitempty"` KMSKeyARN string `yaml:"kmsKeyArn,omitempty"` StackTags map[string]string `yaml:"stackTags,omitempty"` - Subnets []*model.Subnet `yaml:"subnets,omitempty"` + Subnets []model.Subnet `yaml:"subnets,omitempty"` EIPAllocationIDs []string `yaml:"eipAllocationIDs,omitempty"` MapPublicIPs bool `yaml:"mapPublicIPs,omitempty"` ElasticFileSystemID string `yaml:"elasticFileSystemId,omitempty"` @@ -271,15 +310,14 @@ type WorkerSettings struct { // Part of configuration which is specific to controller nodes type ControllerSettings struct { - model.Controller `yaml:"controller,omitempty"` - ControllerCount int `yaml:"controllerCount,omitempty"` - ControllerCreateTimeout string `yaml:"controllerCreateTimeout,omitempty"` - ControllerInstanceType string 
`yaml:"controllerInstanceType,omitempty"` - ControllerLoadBalancerPrivate bool `yaml:"controllerLoadBalancerPrivate,omitempty"` - ControllerRootVolumeType string `yaml:"controllerRootVolumeType,omitempty"` - ControllerRootVolumeIOPS int `yaml:"controllerRootVolumeIOPS,omitempty"` - ControllerRootVolumeSize int `yaml:"controllerRootVolumeSize,omitempty"` - ControllerTenancy string `yaml:"controllerTenancy,omitempty"` + model.Controller `yaml:"controller,omitempty"` + ControllerCount int `yaml:"controllerCount,omitempty"` + ControllerCreateTimeout string `yaml:"controllerCreateTimeout,omitempty"` + ControllerInstanceType string `yaml:"controllerInstanceType,omitempty"` + ControllerRootVolumeType string `yaml:"controllerRootVolumeType,omitempty"` + ControllerRootVolumeIOPS int `yaml:"controllerRootVolumeIOPS,omitempty"` + ControllerRootVolumeSize int `yaml:"controllerRootVolumeSize,omitempty"` + ControllerTenancy string `yaml:"controllerTenancy,omitempty"` } // Part of configuration which is specific to etcd nodes @@ -498,11 +536,8 @@ func (c Cluster) Config() (*Config, error) { for etcdIndex := 0; etcdIndex < config.EtcdCount; etcdIndex++ { //Round-robbin etcd instances across all available subnets - subnetIndex := etcdIndex % len(config.Subnets) - subnet := *config.Subnets[subnetIndex] - if config.Etcd.TopologyPrivate() { - subnet = *config.Etcd.Subnets[subnetIndex] - } + subnetIndex := etcdIndex % len(config.Etcd.Subnets) + subnet := config.Etcd.Subnets[subnetIndex] instance := model.EtcdInstance{ Subnet: subnet, @@ -516,24 +551,75 @@ func (c Cluster) Config() (*Config, error) { // Populate top-level subnets to model if len(config.Subnets) > 0 { - if config.WorkerSettings.MinWorkerCount() > 0 && config.WorkerSettings.TopologyPrivate() == false { + if config.WorkerSettings.MinWorkerCount() > 0 && len(config.WorkerSettings.Subnets) == 0 { config.WorkerSettings.Subnets = config.Subnets } - if config.ControllerSettings.MinControllerCount() > 0 && 
config.ControllerSettings.TopologyPrivate() == false { + if config.ControllerSettings.MinControllerCount() > 0 && len(config.ControllerSettings.Subnets) == 0 { config.ControllerSettings.Subnets = config.Subnets } } - config.ControllerElb.Private = config.ControllerSettings.ControllerLoadBalancerPrivate - config.ControllerElb.Subnets = config.Subnets - if config.ControllerElb.Private == true { - config.ControllerElb.Subnets = config.ControllerSettings.Subnets - } config.IsChinaRegion = strings.HasPrefix(config.Region, "cn") return &config, nil } +func (c *Cluster) FindSubnetMatching(condition model.Subnet) model.Subnet { + for _, s := range c.Subnets { + if s.CustomName == condition.CustomName { + return s + } + } + out := "" + for _, s := range c.Subnets { + out = fmt.Sprintf("%s%+v ", out, s) + } + panic(fmt.Errorf("No subnet matching %v found in %s", condition, out)) +} + +func (c *Cluster) PrivateSubnets() []model.Subnet { + result := []model.Subnet{} + for _, s := range c.Subnets { + if s.Private { + result = append(result, s) + } + } + return result +} + +func (c *Cluster) PublicSubnets() []model.Subnet { + result := []model.Subnet{} + for _, s := range c.Subnets { + if !s.Private { + result = append(result, s) + } + } + return result +} + +func (c *Cluster) NATGateways() []model.NATGateway { + ngws := []model.NATGateway{} + for _, privateSubnet := range c.PrivateSubnets() { + var publicSubnet model.Subnet + config := privateSubnet.NATGateway + if !config.HasIdentifier() { + found := false + for _, s := range c.PublicSubnets() { + if s.AvailabilityZone == privateSubnet.AvailabilityZone { + publicSubnet = s + found = true + } + } + if !found { + panic(fmt.Sprintf("No public subnet found for a NAT gateway associated to private subnet %s", privateSubnet.LogicalName())) + } + } + ngw := model.NewNATGateway(config, privateSubnet, publicSubnet) + ngws = append(ngws, ngw) + } + return ngws +} + // releaseVersionIsGreaterThan will return true if the supplied version 
is greater then // or equal to the current CoreOS release indicated by the given release // channel. @@ -612,7 +698,6 @@ func (c Cluster) StackConfig(opts StackTemplateOptions) (*StackConfig, error) { type Config struct { Cluster - ControllerElb model.ControllerElb EtcdInstances []model.EtcdInstance // Encoded TLS assets @@ -825,7 +910,7 @@ func (c DeploymentSettings) Valid() (*DeploymentValidationResult, error) { var instanceCIDRs = make([]*net.IPNet, 0) for i, subnet := range c.Subnets { - if subnet.ID != "" { + if subnet.ID != "" || subnet.IDFromStackOutput != "" { continue } if subnet.AvailabilityZone == "" { @@ -861,6 +946,11 @@ func (c DeploymentSettings) Valid() (*DeploymentValidationResult, error) { return &DeploymentValidationResult{vpcNet: vpcNet}, nil } +func (s DeploymentSettings) AllSubnets() []model.Subnet { + subnets := s.Subnets + return subnets +} + func (c WorkerSettings) Valid() error { if c.WorkerRootVolumeType == "io1" { if c.WorkerRootVolumeIOPS < 100 || c.WorkerRootVolumeIOPS > 2000 { diff --git a/config/config_test.go b/config/config_test.go index 6a5587b87..5d6982adb 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -315,7 +315,7 @@ func TestMultipleSubnets(t *testing.T) { validConfigs := []struct { conf string - subnets []*model.Subnet + subnets []model.Subnet }{ { conf: ` @@ -328,18 +328,16 @@ subnets: - availabilityZone: ap-northeast-1c instanceCIDR: 10.4.4.0/24 `, - subnets: []*model.Subnet{ + subnets: []model.Subnet{ { InstanceCIDR: "10.4.3.0/24", AvailabilityZone: "ap-northeast-1a", CustomName: "Subnet0", - TopLevel: true, }, { InstanceCIDR: "10.4.4.0/24", AvailabilityZone: "ap-northeast-1c", CustomName: "Subnet1", - TopLevel: true, }, }, }, @@ -351,12 +349,11 @@ controllerIP: 10.4.3.50 availabilityZone: ap-northeast-1a instanceCIDR: 10.4.3.0/24 `, - subnets: []*model.Subnet{ + subnets: []model.Subnet{ { AvailabilityZone: "ap-northeast-1a", InstanceCIDR: "10.4.3.0/24", CustomName: "Subnet0", - TopLevel: true, }, }, }, 
@@ -369,12 +366,11 @@ availabilityZone: ap-northeast-1a instanceCIDR: 10.4.3.0/24 subnets: [] `, - subnets: []*model.Subnet{ + subnets: []model.Subnet{ { AvailabilityZone: "ap-northeast-1a", InstanceCIDR: "10.4.3.0/24", CustomName: "Subnet0", - TopLevel: true, }, }, }, @@ -384,12 +380,11 @@ subnets: [] availabilityZone: "ap-northeast-1a" subnets: [] `, - subnets: []*model.Subnet{ + subnets: []model.Subnet{ { AvailabilityZone: "ap-northeast-1a", InstanceCIDR: "10.0.0.0/24", CustomName: "Subnet0", - TopLevel: true, }, }, }, @@ -398,12 +393,11 @@ subnets: [] # Missing subnets field fall-backs to the single subnet with the default az/cidr. availabilityZone: "ap-northeast-1a" `, - subnets: []*model.Subnet{ + subnets: []model.Subnet{ { AvailabilityZone: "ap-northeast-1a", InstanceCIDR: "10.0.0.0/24", CustomName: "Subnet0", - TopLevel: true, }, }, }, @@ -743,16 +737,11 @@ func newMinimalConfig() (*Config, error) { cluster := NewDefaultCluster() cluster.ExternalDNSName = "k8s.example.com" cluster.Region = "us-west-1" - cluster.Subnets = []*model.Subnet{ - &model.Subnet{ - AvailabilityZone: "us-west-1a", - InstanceCIDR: "10.0.0.0/24", - }, - &model.Subnet{ - AvailabilityZone: "us-west-1b", - InstanceCIDR: "10.0.1.0/24", - }, + cluster.Subnets = []model.Subnet{ + model.NewPublicSubnet("us-west-1a", "10.0.0.0/24"), + model.NewPublicSubnet("us-west-1b", "10.0.1.0/24"), } + cluster.SetDefaults() c, err := cluster.Config() if err != nil { return nil, err @@ -886,9 +875,9 @@ func TestValidateExistingVPC(t *testing.T) { cluster := NewDefaultCluster() cluster.VPCCIDR = "10.0.0.0/16" - cluster.Subnets = []*model.Subnet{ - {"ap-northeast-1a", "10.0.1.0/24", "", "", "", model.NatGateway{}, true}, - {"ap-northeast-1a", "10.0.2.0/24", "", "", "", model.NatGateway{}, true}, + cluster.Subnets = []model.Subnet{ + model.NewPublicSubnet("ap-northeast-1a", "10.0.1.0/24"), + model.NewPublicSubnet("ap-northeast-1a", "10.0.2.0/24"), } for _, testCase := range validCases { diff --git 
a/config/stack_config_test.go b/config/stack_config_test.go index f479679b8..4e3a58f54 100644 --- a/config/stack_config_test.go +++ b/config/stack_config_test.go @@ -10,10 +10,11 @@ func TestRenderStackTemplate(t *testing.T) { clusterConfig := newDefaultClusterWithDeps(&dummyEncryptService{}) clusterConfig.Region = "us-west-1" - clusterConfig.Subnets = []*model.Subnet{ - {"us-west-1a", "10.0.1.0/16", "", "", "", model.NatGateway{}, false}, - {"us-west-1b", "10.0.2.0/16", "", "", "", model.NatGateway{}, false}, + clusterConfig.Subnets = []model.Subnet{ + model.NewPublicSubnet("us-west-1a", "10.0.1.0/16"), + model.NewPublicSubnet("us-west-1b", "10.0.2.0/16"), } + clusterConfig.SetDefaults() helper.WithDummyCredentials(func(dir string) { var stackTemplateOptions = StackTemplateOptions{ @@ -44,10 +45,11 @@ func TestValidateUserData(t *testing.T) { cluster := newDefaultClusterWithDeps(&dummyEncryptService{}) cluster.Region = "us-west-1" - cluster.Subnets = []*model.Subnet{ - {"us-west-1a", "10.0.1.0/16", "", "", "", model.NatGateway{}, false}, - {"us-west-1b", "10.0.2.0/16", "", "", "", model.NatGateway{}, false}, + cluster.Subnets = []model.Subnet{ + model.NewPublicSubnet("us-west-1a", "10.0.1.0/16"), + model.NewPublicSubnet("us-west-1b", "10.0.2.0/16"), } + cluster.SetDefaults() helper.WithDummyCredentials(func(dir string) { var stackTemplateOptions = StackTemplateOptions{ diff --git a/config/templates/cluster.yaml b/config/templates/cluster.yaml index 5bdd61508..65946055b 100644 --- a/config/templates/cluster.yaml +++ b/config/templates/cluster.yaml @@ -205,7 +205,8 @@ kmsKeyArn: "{{.KMSKeyARN}}" # # id: "ngw-abcdef12" # # # Pre-allocated EIP for NAT Gateways. Used with private subnets. # # eipAllocationId: "eipalloc-abcdef12" -# subnetId: "subnet-xxxxxxxx" #optional +# # Existing subnet. Beware that `availabilityZone` can't be omitted; it must be the one in which the subnet exists. 
+# # id: "subnet-xxxxxxxx" #optional # CIDR for all service IP addresses # serviceCIDR: "10.3.0.0/24" diff --git a/config/templates/stack-template.json b/config/templates/stack-template.json index 346fc6ea3..ee8f7e362 100644 --- a/config/templates/stack-template.json +++ b/config/templates/stack-template.json @@ -47,9 +47,7 @@ "VPCZoneIdentifier": [ {{range $index, $subnet := .Worker.Subnets}} {{if gt $index 0}},{{end}} - { - "Ref": "{{$.Worker.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}" - } + {{$subnet.Ref}} {{end}} ] }, @@ -148,9 +146,7 @@ "VPCZoneIdentifier": [ {{range $index, $subnet := .Controller.Subnets}} {{if gt $index 0}},{{end}} - { - "Ref": "{{$.Controller.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}" - } + {{$subnet.Ref}} {{end}} ], "LoadBalancerNames" : [ @@ -229,7 +225,8 @@ } } } - } + }, + "DependsOn": ["InstanceEtcd0"] }, {{ if .CreateRecordSet }} "ExternalDNS": { @@ -512,9 +509,7 @@ {{range $etcdIndex, $etcdInstance := .EtcdInstances}} "InstanceEtcd{{$etcdIndex}}eni": { "Properties": { - "SubnetId": { - "Ref": "{{$etcdInstance.SubnetLogicalNamePrefix}}{{$etcdInstance.Subnet.LogicalName}}" - }, + "SubnetId": {{$etcdInstance.Subnet.Ref}}, "GroupSet": [ { "Ref": "SecurityGroupEtcd" @@ -576,6 +571,9 @@ "Tenancy": "{{$.EtcdTenancy}}", "UserData": { "Fn::FindInMap" : [ "EtcdInstanceParams", "UserData", "cloudconfig"] } }, + {{if $etcdInstance.Subnet.Private}} + "DependsOn": ["{{$etcdInstance.Subnet.NATGatewayRouteName}}"], + {{end}} "Type": "AWS::EC2::Instance" }, {{end}} @@ -723,9 +721,9 @@ "IdleTimeout" : "3600" }, "Subnets" : [ - {{range $index, $subnet := .ControllerElb.Subnets}} + {{range $index, $subnet := .Controller.LoadBalancer.Subnets}} {{if gt $index 0}},{{end}} - {"Ref": "{{$.ControllerElb.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}"} + {{$subnet.Ref}} {{end}} ], "Listeners" : [ @@ -736,7 +734,7 @@ "Protocol" : "TCP" } ], - {{if .ControllerLoadBalancerPrivate}} + {{if .Controller.LoadBalancer.Private}} "Scheme": "internal", 
{{else}} "Scheme": "internet-facing", @@ -1150,13 +1148,13 @@ {{end}} {{range $index, $subnet := .Subnets}} - {{if not $subnet.ID }} + {{if not $subnet.HasIdentifier }} , "{{$subnet.LogicalName}}": { "Properties": { "AvailabilityZone": "{{$subnet.AvailabilityZone}}", "CidrBlock": "{{$subnet.InstanceCIDR}}", - "MapPublicIpOnLaunch": {{$.MapPublicIPs}}, + "MapPublicIpOnLaunch": {{$subnet.MapPublicIPs}}, "Tags": [ { "Key": "Name", @@ -1170,7 +1168,7 @@ "VpcId": {{$.VPCRef}} }, "Type": "AWS::EC2::Subnet" - }, + } {{end}} {{if $.ElasticFileSystemID}} , @@ -1183,112 +1181,21 @@ "Type" : "AWS::EFS::MountTarget" } {{end}} + , "{{$subnet.LogicalName}}RouteTableAssociation": { "Properties": { - "RouteTableId": - {{if not $subnet.RouteTableID}} - {"Ref" : "PublicRouteTable"}, - {{else}} - "{{$subnet.RouteTableID}}", - {{end}} + "RouteTableId": {{$subnet.RouteTableRef}}, "SubnetId": {{$subnet.Ref}} }, "Type": "AWS::EC2::SubnetRouteTableAssociation" } - {{end}} - - {{if $.Etcd.TopologyPrivate}} - {{range $etcdIndex, $etcdInstance := .EtcdInstances}} - , - "{{$etcdInstance.SubnetLogicalNamePrefix}}{{$etcdInstance.Subnet.LogicalName}}": { - "Properties": { - "AvailabilityZone": "{{$etcdInstance.Subnet.AvailabilityZone}}", - "CidrBlock": "{{$etcdInstance.Subnet.InstanceCIDR}}", - "MapPublicIpOnLaunch": {{not $.Etcd.TopologyPrivate}}, - "Tags": [ - { - "Key": "Name", - "Value": "{{$.ClusterName}}-{{$etcdInstance.SubnetLogicalNamePrefix}}{{$etcdInstance.Subnet.LogicalName}}" - }, - { - "Key": "KubernetesCluster", - "Value": "{{$.ClusterName}}" - } - ], - "VpcId": {{$.VPCRef}} - }, - "Type": "AWS::EC2::Subnet" - } - , - "{{$etcdInstance.SubnetLogicalNamePrefix}}{{$etcdInstance.Subnet.LogicalName}}RouteTableAssociation": { - "Properties": { - "RouteTableId": - {{if not $etcdInstance.Subnet.RouteTableID}} - {"Ref" : "PrivateRouteTable{{$etcdInstance.Subnet.AvailabilityZoneLogicalName}}"}, - {{else}} - "{{$etcdInstance.Subnet.RouteTableID}}", - {{end}} - "SubnetId": { - "Ref": 
"{{$etcdInstance.SubnetLogicalNamePrefix}}{{$etcdInstance.Subnet.LogicalName}}" - } - }, - "Type": "AWS::EC2::SubnetRouteTableAssociation" - } - {{end}} - {{end}} - - {{if $.Controller.TopologyPrivate}} - {{range $index, $subnet := .Controller.Subnets}} - , - "{{$.Controller.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}": { - "Properties": { - "AvailabilityZone": "{{$subnet.AvailabilityZone}}", - "CidrBlock": "{{$subnet.InstanceCIDR}}", - "MapPublicIpOnLaunch": {{not $.Controller.TopologyPrivate}}, - "Tags": [ - { - "Key": "Name", - "Value": "{{$.ClusterName}}-{{$.Controller.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}" - }, - { - "Key": "KubernetesCluster", - "Value": "{{$.ClusterName}}" - } - ], - "VpcId": {{$.VPCRef}} - }, - "Type": "AWS::EC2::Subnet" - } - , - "{{$.Controller.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}RouteTableAssociation": { - "Properties": { - "RouteTableId": - {{if not $subnet.RouteTableID}} - {"Ref" : "PrivateRouteTable{{$subnet.AvailabilityZoneLogicalName}}"}, - {{else}} - "{{$subnet.RouteTableID}}", - {{end}} - "SubnetId": { - "Ref": "{{$.Controller.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}" - } - }, - "Type": "AWS::EC2::SubnetRouteTableAssociation" - } - {{end}} - {{end}} - - {{if $.Worker.TopologyPrivate}} - {{range $index, $subnet := .Worker.Subnets}} , - "{{$.Worker.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}": { + "{{$subnet.RouteTableName}}": { "Properties": { - "AvailabilityZone": "{{$subnet.AvailabilityZone}}", - "CidrBlock": "{{$subnet.InstanceCIDR}}", - "MapPublicIpOnLaunch": {{not $.Worker.TopologyPrivate}}, "Tags": [ { "Key": "Name", - "Value": "{{$.ClusterName}}-{{$.Worker.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}" + "Value": "{{$.ClusterName}}-{{$subnet.RouteTableName}}" }, { "Key": "KubernetesCluster", @@ -1297,32 +1204,16 @@ ], "VpcId": {{$.VPCRef}} }, - "Type": "AWS::EC2::Subnet" + "Type": "AWS::EC2::RouteTable" } + {{if $subnet.Public}} , - 
"{{$.Worker.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}RouteTableAssociation": { - "Properties": { - "RouteTableId": - {{if not $subnet.RouteTableID}} - {"Ref" : "PrivateRouteTable{{$subnet.AvailabilityZoneLogicalName}}"}, - {{else}} - "{{$subnet.RouteTableID}}", - {{end}} - "SubnetId": { - "Ref": "{{$.Worker.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}" - } - }, - "Type": "AWS::EC2::SubnetRouteTableAssociation" - } - {{end}} - {{end}} - , - "PublicRouteTable": { + "{{$subnet.RouteTableName}}": { "Properties": { "Tags": [ { "Key": "Name", - "Value": "{{$.ClusterName}}-PublicRouteTable" + "Value": "{{$.ClusterName}}-{{$subnet.RouteTableName}}" }, { "Key": "KubernetesCluster", @@ -1333,73 +1224,47 @@ }, "Type": "AWS::EC2::RouteTable" }, - "PublicRouteToInternet": { + "{{$subnet.RouteTableName}}ToInternet": { "Properties": { "DestinationCidrBlock": "0.0.0.0/0", "GatewayId": {{$.InternetGatewayRef}}, - "RouteTableId": {"Ref" : "PublicRouteTable"} + "RouteTableId": {{$subnet.RouteTableRef}} }, "Type": "AWS::EC2::Route" } + {{end}} + {{end}} - {{if (or .WorkerTopologyPrivate .Worker.TopologyPrivate .Etcd.TopologyPrivate .Controller.TopologyPrivate)}} - {{range $index, $subnet := .Subnets}} - {{if not $subnet.NatGateway.EIPAllocationID}} + {{range $i, $ngw := .NATGateways}} + {{if $ngw.ManageEIP}} , - "NatGateway{{$subnet.AvailabilityZoneLogicalName}}EIP": { + "{{$ngw.EIPLogicalName}}": { "Properties": { "Domain": "vpc" }, "Type": "AWS::EC2::EIP" } {{end}} - {{if not $subnet.NatGateway.ID}} + {{if $ngw.ManageNATGateway}} , - "NatGateway{{$subnet.AvailabilityZoneLogicalName}}": { + "{{$ngw.LogicalName}}": { "Properties": { - "AllocationId": - {{if not $subnet.NatGateway.EIPAllocationID}} - {"Fn::GetAtt": ["NatGateway{{$subnet.AvailabilityZoneLogicalName}}EIP", "AllocationId"]}, - {{else}} - "{{$subnet.NatGateway.EIPAllocationID}}", - {{end}} - "SubnetId": { "Ref": "{{$subnet.LogicalName}}" } + "AllocationId": {{$ngw.EIPAllocationIDRef}}, + "SubnetId": 
{{$ngw.PublicSubnetRef}} }, "Type": "AWS::EC2::NatGateway" } - {{end}} , - "PrivateRouteTable{{$subnet.AvailabilityZoneLogicalName}}": { - "Properties": { - "Tags": [ - { - "Key": "Name", - "Value": "{{$.ClusterName}}-PrivateRouteTable{{$subnet.AvailabilityZoneLogicalName}}" - }, - { - "Key": "KubernetesCluster", - "Value": "{{$.ClusterName}}" - } - ], - "VpcId": {{$.VPCRef}} - }, - "Type": "AWS::EC2::RouteTable" - }, - "PrivateRouteTable{{$subnet.AvailabilityZoneLogicalName}}RouteToNatGateway": { + {{end}} + "{{$ngw.NATGatewayRouteName}}": { "Properties": { "DestinationCidrBlock": "0.0.0.0/0", - "NatGatewayId": - {{if not $subnet.NatGateway.ID}} - {"Ref": "NatGateway{{$subnet.AvailabilityZoneLogicalName}}"}, - {{else}} - "{{$subnet.NatGateway.ID}}", - {{end}} - "RouteTableId": {"Ref" : "PrivateRouteTable{{$subnet.AvailabilityZoneLogicalName}}"} + "NatGatewayId": {{$ngw.Ref}}, + "RouteTableId": {{$ngw.PrivateSubnetRouteTableRef}} }, "Type": "AWS::EC2::Route" } {{end}} - {{end}} {{if not .InternetGatewayID}} , @@ -1459,20 +1324,13 @@ "Export" : { "Name" : {"Fn::Sub": "${AWS::StackName}-VPC" }} }, {{end}} - "PublicRouteTable" : { - "Description" : "The public route table assigned to the internet gateway for worker nodes", - "Value" : { "Ref" : "PublicRouteTable" }, - "Export" : { "Name" : {"Fn::Sub": "${AWS::StackName}-PublicRouteTable" }} - }, - {{if (or .WorkerTopologyPrivate .Worker.TopologyPrivate .Etcd.TopologyPrivate .Controller.TopologyPrivate)}} {{range $index, $subnet := .Subnets}} - "PrivateRouteTable{{$subnet.AvailabilityZoneLogicalName}}" : { - "Description" : "The private route table assigned to the nat gateway for worker nodes", - "Value" : { "Ref" : "PrivateRouteTable{{$subnet.AvailabilityZoneLogicalName}}" }, - "Export" : { "Name" : {"Fn::Sub": "${AWS::StackName}-PrivateRouteTable{{$subnet.AvailabilityZoneLogicalName}}" }} + "{{$subnet.RouteTableName}}" : { + "Description" : "The route table assigned to the subnet {{$subnet.LogicalName}}", + "Value" : 
{{$subnet.RouteTableRef}}, + "Export" : { "Name" : {"Fn::Sub": "${AWS::StackName}-{{$subnet.RouteTableName}}" }} }, {{end}} - {{end}} "WorkerSecurityGroup" : { "Description" : "The security group assigned to worker nodes", "Value" : { "Ref" : "SecurityGroupWorker" }, diff --git a/model/controller.go b/model/controller.go index 4c1a87e86..e091da197 100644 --- a/model/controller.go +++ b/model/controller.go @@ -1,36 +1,16 @@ package model type Controller struct { + LoadBalancer ControllerElb `yaml:"loadBalancer,omitempty"` AutoScalingGroup `yaml:"autoScalingGroup,omitempty"` - Subnets []*Subnet `yaml:"subnets,omitempty"` -} - -func (c Controller) TopologyPrivate() bool { - if len(c.Subnets) > 0 { - return !c.Subnets[0].TopLevel - } - return false + Subnets []Subnet `yaml:"subnets,omitempty"` } func (c Controller) LogicalName() string { return "Controllers" } -func (c Controller) SubnetLogicalNamePrefix() string { - if c.TopologyPrivate() == true { - return "Controller" - } - return "" -} - type ControllerElb struct { Private bool - Subnets []*Subnet -} - -func (c ControllerElb) SubnetLogicalNamePrefix() string { - if c.Private == true { - return "Controller" - } - return "" + Subnets []Subnet } diff --git a/model/etcd.go b/model/etcd.go index c1a5e173b..3f7654faf 100644 --- a/model/etcd.go +++ b/model/etcd.go @@ -1,20 +1,9 @@ package model type Etcd struct { - Subnets []*Subnet `yaml:"subnets,omitempty"` -} - -func (c Etcd) TopologyPrivate() bool { - return len(c.Subnets) > 0 + Subnets []Subnet `yaml:"subnets,omitempty"` } type EtcdInstance struct { Subnet Subnet } - -func (c EtcdInstance) SubnetLogicalNamePrefix() string { - if c.Subnet.TopLevel == false { - return "Etcd" - } - return "" -} diff --git a/model/identifier.go b/model/identifier.go index 8b5379070..e345eacaa 100644 --- a/model/identifier.go +++ b/model/identifier.go @@ -1 +1,24 @@ package model + +import ( + "fmt" +) + +type Identifier struct { + ID string `yaml:"id,omitempty"` + IDFromStackOutput 
string `yaml:"idFromStackOutput,omitempty"` +} + +func (i Identifier) HasIdentifier() bool { + return i.ID != "" || i.IDFromStackOutput != "" +} + +func (i Identifier) Ref(logicalName string) string { + if i.IDFromStackOutput != "" { + return fmt.Sprintf(`{ "ImportValue" : %q }`, i.IDFromStackOutput) + } else if i.ID != "" { + return fmt.Sprintf(`"%s"`, i.ID) + } else { + return fmt.Sprintf(`{ "Ref" : %q }`, logicalName) + } +} diff --git a/model/nat_gateway.go b/model/nat_gateway.go new file mode 100644 index 000000000..215701b76 --- /dev/null +++ b/model/nat_gateway.go @@ -0,0 +1,73 @@ +package model + +import "fmt" + +type NATGatewayConfig struct { + Identifier `yaml:",inline"` + EIPAllocationID string `yaml:"eipAllocationId,omitempty"` +} + +type NATGateway interface { + LogicalName() string + ManageNATGateway() bool + ManageEIP() bool + EIPLogicalName() string + EIPAllocationIDRef() string + Ref() string + PublicSubnetRef() string + PrivateSubnetRouteTableRef() string + NATGatewayRouteName() string +} + +type natGatewayImpl struct { + NATGatewayConfig + privateSubnet Subnet + publicSubnet Subnet +} + +func NewNATGateway(c NATGatewayConfig, private Subnet, public Subnet) NATGateway { + return natGatewayImpl{ + NATGatewayConfig: c, + privateSubnet: private, + publicSubnet: public, + } +} + +func (g natGatewayImpl) LogicalName() string { + return fmt.Sprintf("NatGateway%s", g.privateSubnet.AvailabilityZoneLogicalName()) +} + +func (g natGatewayImpl) ManageNATGateway() bool { + return !g.HasIdentifier() +} + +func (g natGatewayImpl) ManageEIP() bool { + return g.EIPAllocationID == "" +} + +func (g natGatewayImpl) EIPLogicalName() string { + return fmt.Sprintf("%sEIP", g.LogicalName()) +} + +func (g natGatewayImpl) EIPAllocationIDRef() string { + if g.ManageEIP() { + return fmt.Sprintf(`{"Fn::GetAtt": ["%s", "AllocationId"]}`, g.EIPLogicalName()) + } + return g.EIPAllocationID +} + +func (g natGatewayImpl) Ref() string { + return g.Identifier.Ref(g.LogicalName()) 
+} + +func (g natGatewayImpl) PublicSubnetRef() string { + return g.publicSubnet.Ref() +} + +func (g natGatewayImpl) PrivateSubnetRouteTableRef() string { + return g.privateSubnet.RouteTableRef() +} + +func (g natGatewayImpl) NATGatewayRouteName() string { + return g.privateSubnet.NATGatewayRouteName() +} diff --git a/model/subnet.go b/model/subnet.go index e99487e27..7e9e7066a 100644 --- a/model/subnet.go +++ b/model/subnet.go @@ -1,44 +1,140 @@ package model import ( - "fmt" "strings" ) type Subnet struct { - //ID string `yaml:"id,omitempty"` - AvailabilityZone string `yaml:"availabilityZone,omitempty"` - InstanceCIDR string `yaml:"instanceCIDR,omitempty"` - RouteTableID string `yaml:"routeTableId,omitempty"` - ID string `yaml:"id,omitempty"` - CustomName string `yaml:"name"` - NatGateway NatGateway `yaml:"natGateway,omitempty"` - TopLevel bool + Identifier `yaml:",inline"` + CustomName string `yaml:"name,omitempty"` + AvailabilityZone string `yaml:"availabilityZone,omitempty"` + InstanceCIDR string `yaml:"instanceCIDR,omitempty"` + RouteTable RouteTable `yaml:"routeTable,omitempty"` + NATGateway NATGatewayConfig `yaml:"natGateway,omitempty"` + InternetGateway InternetGateway `yaml:"internetGateway,omitempty"` + Private bool } -func (c Subnet) AvailabilityZoneLogicalName() string { - return strings.Replace(strings.Title(c.AvailabilityZone), "-", "", -1) +func NewPublicSubnet(az string, cidr string) Subnet { + return Subnet{ + AvailabilityZone: az, + InstanceCIDR: cidr, + Private: false, + } } -func (c Subnet) LogicalName() string { - if c.TopLevel == true { - if c.CustomName != "" { - return c.CustomName - } - return "Subnet" + c.AvailabilityZoneLogicalName() +func NewPrivateSubnet(az string, cidr string) Subnet { + return Subnet{ + AvailabilityZone: az, + InstanceCIDR: cidr, + Private: true, } - return "PrivateSubnet" + c.AvailabilityZoneLogicalName() } -// Ref returns ID or ref to newly created resource -func (s Subnet) Ref() string { - if s.ID != "" { - return 
fmt.Sprintf("%q", s.ID) +func NewExistingPrivateSubnet(az string, id string) Subnet { + return Subnet{ + Identifier: Identifier{ + ID: id, + }, + AvailabilityZone: az, + Private: true, + } +} + +func NewImportedPrivateSubnet(az string, name string) Subnet { + return Subnet{ + Identifier: Identifier{ + IDFromStackOutput: name, + }, + AvailabilityZone: az, + Private: true, + } +} + +func NewExistingPublicSubnet(az string, id string) Subnet { + return Subnet{ + Identifier: Identifier{ + ID: id, + }, + AvailabilityZone: az, + Private: false, + } +} + +func NewImportedPublicSubnet(az string, name string) Subnet { + return Subnet{ + Identifier: Identifier{ + IDFromStackOutput: name, + }, + AvailabilityZone: az, + Private: false, + } +} + +func (s *Subnet) Public() bool { + return !s.Private +} + +func (s *Subnet) AvailabilityZoneLogicalName() string { + return strings.Replace(strings.Title(s.AvailabilityZone), "-", "", -1) +} + +func (s *Subnet) MapPublicIPs() bool { + return !s.Private +} + +func (s *Subnet) ResourcePrefix() string { + var t string + if s.Private { + t = "Private" + } else { + t = "Public" } - return fmt.Sprintf(`{"Ref" : "%s"}`, s.LogicalName()) + return t +} + +func (s *Subnet) LogicalName() string { + if s.CustomName != "" { + return s.CustomName + } + return s.ResourcePrefix() + "Subnet" + s.AvailabilityZoneLogicalName() +} + +func (s *Subnet) RouteTableID() string { + return s.RouteTable.ID +} + +func (s *Subnet) ManageRouteTable() bool { + return !s.RouteTable.HasIdentifier() +} + +func (s *Subnet) ManageInternetGateway() bool { + return !s.InternetGateway.HasIdentifier() +} + +func (s *Subnet) NATGatewayRouteName() string { + return s.RouteTableName() + "RouteToNatGateway" +} + +// Ref returns ID or ref to newly created resource +func (s *Subnet) Ref() string { + return s.Identifier.Ref(s.LogicalName()) +} + +// RouteTableName represents the name of the route table to which this subnet is associated. 
+func (s *Subnet) RouteTableName() string { + return s.ResourcePrefix() + "RouteTable" + s.AvailabilityZoneLogicalName() +} + +func (s *Subnet) RouteTableRef() string { + logicalName := s.RouteTableName() + return s.RouteTable.Ref(logicalName) +} + +type InternetGateway struct { + Identifier `yaml:",inline"` } -type NatGateway struct { - ID string `yaml:"id,omitempty"` - EIPAllocationID string `yaml:"eipAllocationId,omitempty"` +type RouteTable struct { + Identifier `yaml:",inline"` } diff --git a/model/worker.go b/model/worker.go index 2bc91b98b..192f5e2ce 100644 --- a/model/worker.go +++ b/model/worker.go @@ -6,21 +6,7 @@ type Worker struct { AutoScalingGroup `yaml:"autoScalingGroup,omitempty"` ClusterAutoscaler ClusterAutoscaler `yaml:"clusterAutoscaler"` SpotFleet `yaml:"spotFleet,omitempty"` - Subnets []*Subnet `yaml:"subnets,omitempty"` -} - -func (c Worker) TopologyPrivate() bool { - if len(c.Subnets) > 0 { - return !c.Subnets[0].TopLevel - } - return false -} - -func (c Worker) SubnetLogicalNamePrefix() string { - if c.TopologyPrivate() == true { - return "Worker" - } - return "" + Subnets []Subnet `yaml:"subnets,omitempty"` } type ClusterAutoscaler struct { diff --git a/nodepool/config/config.go b/nodepool/config/config.go index 5efe5447c..99e7687f6 100644 --- a/nodepool/config/config.go +++ b/nodepool/config/config.go @@ -146,20 +146,15 @@ func ClusterFromBytes(data []byte, main *cfg.Config) (*ProvidedConfig, error) { // For backward-compatibility if len(c.Subnets) == 0 { - c.Subnets = []*model.Subnet{ - { - AvailabilityZone: c.AvailabilityZone, - InstanceCIDR: c.InstanceCIDR, - }, + c.Subnets = []model.Subnet{ + model.NewPublicSubnet(c.AvailabilityZone, c.InstanceCIDR), } } for i, s := range c.Subnets { if s.CustomName == "" { - s.CustomName = fmt.Sprintf("Subnet%d", i) + c.Subnets[i].CustomName = fmt.Sprintf("Subnet%d", i) } - // Mark top-level subnets appropriately - s.TopLevel = true } c.EtcdInstances = main.EtcdInstances @@ -189,7 +184,7 @@ func (c 
ProvidedConfig) Config() (*ComputedConfig, error) { } // Populate top-level subnets to model - if len(c.Subnets) > 0 && c.WorkerSettings.TopologyPrivate() == false { + if len(c.Subnets) > 0 && len(c.WorkerSettings.Subnets) == 0 { config.WorkerSettings.Subnets = c.Subnets } diff --git a/nodepool/config/templates/stack-template.json b/nodepool/config/templates/stack-template.json index 4f280f0ee..6741f84c5 100644 --- a/nodepool/config/templates/stack-template.json +++ b/nodepool/config/templates/stack-template.json @@ -98,9 +98,7 @@ {"GroupId":{{$sgRef}}} {{end}} ], - "SubnetId": { - "Ref": "{{$.Worker.SubnetLogicalNamePrefix}}{{$workerSubnet.LogicalName}}" - }, + "SubnetId": {{$workerSubnet.Ref}}, "UserData": {{template "UserData" $}} } {{end}} @@ -152,11 +150,9 @@ ], {{end}} "VPCZoneIdentifier": [ - {{range $index, $workerSubnet := .Worker.Subnets}} + {{range $index, $subnet := .Worker.Subnets}} {{if gt $index 0}},{{end}} - { - "Ref": "{{$.Worker.SubnetLogicalNamePrefix}}{{$workerSubnet.LogicalName}}" - } + {{$subnet.Ref}} {{end}} ] }, @@ -356,31 +352,17 @@ "Type": "AWS::IAM::Role" } - {{if $.ElasticFileSystemID}} - {{range $index, $subnet := .Worker.Subnets}} - , - "{{$.Worker.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}MountTarget": { - "Properties" : { - "FileSystemId": "{{$.ElasticFileSystemID}}", - "SubnetId": { "Ref": "{{$.Worker.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}" }, - "SecurityGroups": [ { "Ref": "SecurityGroupMountTarget" } ] - }, - "Type" : "AWS::EFS::MountTarget" - } - {{end}} - {{end}} - - {{range $index, $subnet := .Worker.Subnets}} + {{range $index, $subnet := .Subnets}} , - "{{$.Worker.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}": { + "{{$subnet.LogicalName}}": { "Properties": { "AvailabilityZone": "{{$subnet.AvailabilityZone}}", "CidrBlock": "{{$subnet.InstanceCIDR}}", - "MapPublicIpOnLaunch": {{not $.Worker.TopologyPrivate}}, + "MapPublicIpOnLaunch": {{$subnet.MapPublicIPs}}, "Tags": [ { "Key": "Name", - "Value": 
"{{$.ClusterName}}-{{$.NodePoolName}}-{{$.Worker.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}" + "Value": "{{$.ClusterName}}-{{$.NodePoolName}}-{{$subnet.LogicalName}}" }, { "Key": "KubernetesCluster", @@ -391,24 +373,33 @@ }, "Type": "AWS::EC2::Subnet" }, - "{{$.Worker.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}RouteTableAssociation": { + "{{$subnet.LogicalName}}RouteTableAssociation": { "Properties": { "RouteTableId": {{if not $subnet.RouteTableID}} - {{if $.WorkerTopologyPrivate}} + {{if $subnet.Private}} {"Fn::ImportValue" : {"Fn::Sub" : "{{$.ClusterName}}-PrivateRouteTable{{$subnet.AvailabilityZoneLogicalName}}"}}, {{else}} - {"Fn::ImportValue" : {"Fn::Sub" : "{{$.ClusterName}}-PublicRouteTable"}}, + {"Fn::ImportValue" : {"Fn::Sub" : "{{$.ClusterName}}-PublicRouteTable{{$subnet.AvailabilityZoneLogicalName}}"}}, {{end}} {{else}} "{{$subnet.RouteTableID}}", {{end}} - "SubnetId": { - "Ref": "{{$.Worker.SubnetLogicalNamePrefix}}{{$subnet.LogicalName}}" - } + "SubnetId": {{$subnet.Ref}} }, "Type": "AWS::EC2::SubnetRouteTableAssociation" } + {{if $.ElasticFileSystemID}} + , + "{{$subnet.LogicalName}}MountTarget": { + "Properties" : { + "FileSystemId": "{{$.ElasticFileSystemID}}", + "SubnetId": {{$subnet.Ref}}, + "SecurityGroups": [ { "Ref": "SecurityGroupMountTarget" } ] + }, + "Type" : "AWS::EFS::MountTarget" + } + {{end}} {{end}} } } diff --git a/test/integration/maincluster_test.go b/test/integration/maincluster_test.go index b07d71bce..a532d01ad 100644 --- a/test/integration/maincluster_test.go +++ b/test/integration/maincluster_test.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/coreos/kube-aws/cluster" "github.com/coreos/kube-aws/config" + "github.com/coreos/kube-aws/model" "github.com/coreos/kube-aws/test/helper" "os" "reflect" @@ -16,7 +17,14 @@ type ConfigTester func(c *config.Cluster, t *testing.T) // Integration testing with real AWS services including S3, KMS, CloudFormation func TestMainClusterConfig(t *testing.T) { hasDefaultEtcdSettings := 
func(c *config.Cluster, t *testing.T) { + subnet1 := model.NewPublicSubnet("us-west-1c", "10.0.0.0/24") + subnet1.CustomName = "Subnet0" expected := config.EtcdSettings{ + Etcd: model.Etcd{ + Subnets: []model.Subnet{ + subnet1, + }, + }, EtcdCount: 1, EtcdInstanceType: "t2.medium", EtcdRootVolumeSize: 30, @@ -187,6 +195,403 @@ experimental: hasDefaultExperimentalFeatures, }, }, + { + context: "WithNetworkTopologyExplicitSubnets", + configYaml: kubeAwsSettings.mainClusterYaml + ` +vpcId: vpc-1a2b3c4d +routeTableId: rtb-1a2b3c4d +subnets: +- name: private1 + availabilityZone: us-west-1a + instanceCIDR: "10.0.1.0/24" + private: true +- name: private2 + availabilityZone: us-west-1b + instanceCIDR: "10.0.2.0/24" + private: true +- name: public1 + availabilityZone: us-west-1a + instanceCIDR: "10.0.3.0/24" +- name: public2 + availabilityZone: us-west-1b + instanceCIDR: "10.0.4.0/24" +controller: + subnets: + - name: private1 + - name: private2 + loadBalancer: + subnets: + - name: public1 + - name: public2 + private: false +etcd: + subnets: + - name: private1 + - name: private2 +worker: + subnets: + - name: public1 + - name: public2 +`, + assertConfig: []ConfigTester{ + hasDefaultExperimentalFeatures, + func(c *config.Cluster, t *testing.T) { + private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24") + private1.CustomName = "private1" + + private2 := model.NewPrivateSubnet("us-west-1b", "10.0.2.0/24") + private2.CustomName = "private2" + + public1 := model.NewPublicSubnet("us-west-1a", "10.0.3.0/24") + public1.CustomName = "public1" + + public2 := model.NewPublicSubnet("us-west-1b", "10.0.4.0/24") + public2.CustomName = "public2" + + subnets := []model.Subnet{ + private1, + private2, + public1, + public2, + } + if !reflect.DeepEqual(c.AllSubnets(), subnets) { + t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets()) + } + + publicSubnets := []model.Subnet{ + public1, + public2, + } + if !reflect.DeepEqual(c.Worker.Subnets, 
publicSubnets) { + t.Errorf("Worker subnets didn't match: expected=%v actual=%v", publicSubnets, c.Worker.Subnets) + } + + privateSubnets := []model.Subnet{ + private1, + private2, + } + if !reflect.DeepEqual(c.Controller.Subnets, privateSubnets) { + t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets) + } + if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) { + t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.LoadBalancer.Subnets) + } + if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) { + t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets) + } + }, + }, + }, + { + context: "WithNetworkTopologyImplicitSubnets", + configYaml: kubeAwsSettings.mainClusterYaml + ` +vpcId: vpc-1a2b3c4d +routeTableId: rtb-1a2b3c4d +subnets: +- name: private1 + availabilityZone: us-west-1a + instanceCIDR: "10.0.1.0/24" + private: true +- name: private2 + availabilityZone: us-west-1b + instanceCIDR: "10.0.2.0/24" + private: true +- name: public1 + availabilityZone: us-west-1a + instanceCIDR: "10.0.3.0/24" +- name: public2 + availabilityZone: us-west-1b + instanceCIDR: "10.0.4.0/24" +`, + assertConfig: []ConfigTester{ + hasDefaultExperimentalFeatures, + func(c *config.Cluster, t *testing.T) { + private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24") + private1.CustomName = "private1" + + private2 := model.NewPrivateSubnet("us-west-1b", "10.0.2.0/24") + private2.CustomName = "private2" + + public1 := model.NewPublicSubnet("us-west-1a", "10.0.3.0/24") + public1.CustomName = "public1" + + public2 := model.NewPublicSubnet("us-west-1b", "10.0.4.0/24") + public2.CustomName = "public2" + + subnets := []model.Subnet{ + private1, + private2, + public1, + public2, + } + if !reflect.DeepEqual(c.AllSubnets(), subnets) { + t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets()) + } + + 
publicSubnets := []model.Subnet{ + public1, + public2, + } + if !reflect.DeepEqual(c.Worker.Subnets, publicSubnets) { + t.Errorf("Worker subnets didn't match: expected=%v actual=%v", publicSubnets, c.Worker.Subnets) + } + + if !reflect.DeepEqual(c.Controller.Subnets, publicSubnets) { + t.Errorf("Controller subnets didn't match: expected=%v actual=%v", publicSubnets, c.Controller.Subnets) + } + if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) { + t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", publicSubnets, c.Controller.LoadBalancer.Subnets) + } + if !reflect.DeepEqual(c.Etcd.Subnets, publicSubnets) { + t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", publicSubnets, c.Etcd.Subnets) + } + }, + }, + }, + { + context: "WithNetworkTopologyControllerPrivateLB", + configYaml: kubeAwsSettings.mainClusterYaml + ` +vpcId: vpc-1a2b3c4d +routeTableId: rtb-1a2b3c4d +subnets: +- name: private1 + availabilityZone: us-west-1a + instanceCIDR: "10.0.1.0/24" + private: true +- name: private2 + availabilityZone: us-west-1b + instanceCIDR: "10.0.2.0/24" + private: true +- name: public1 + availabilityZone: us-west-1a + instanceCIDR: "10.0.3.0/24" +- name: public2 + availabilityZone: us-west-1b + instanceCIDR: "10.0.4.0/24" +controller: + subnets: + - name: private1 + - name: private2 + loadBalancer: + private: true +etcd: + subnets: + - name: private1 + - name: private2 +worker: + subnets: + - name: public1 + - name: public2 +`, + assertConfig: []ConfigTester{ + hasDefaultExperimentalFeatures, + func(c *config.Cluster, t *testing.T) { + private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24") + private1.CustomName = "private1" + + private2 := model.NewPrivateSubnet("us-west-1b", "10.0.2.0/24") + private2.CustomName = "private2" + + public1 := model.NewPublicSubnet("us-west-1a", "10.0.3.0/24") + public1.CustomName = "public1" + + public2 := model.NewPublicSubnet("us-west-1b", "10.0.4.0/24") + public2.CustomName = 
"public2" + + subnets := []model.Subnet{ + private1, + private2, + public1, + public2, + } + if !reflect.DeepEqual(c.AllSubnets(), subnets) { + t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets()) + } + + publicSubnets := []model.Subnet{ + public1, + public2, + } + if !reflect.DeepEqual(c.Worker.Subnets, publicSubnets) { + t.Errorf("Worker subnets didn't match: expected=%v actual=%v", publicSubnets, c.Worker.Subnets) + } + + privateSubnets := []model.Subnet{ + private1, + private2, + } + if !reflect.DeepEqual(c.Controller.Subnets, privateSubnets) { + t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets) + } + if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, privateSubnets) { + t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.LoadBalancer.Subnets) + } + if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) { + t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets) + } + }, + }, + }, + { + context: "WithNetworkTopologyControllerPublicLB", + configYaml: kubeAwsSettings.mainClusterYaml + ` +vpcId: vpc-1a2b3c4d +routeTableId: rtb-1a2b3c4d +subnets: +- name: private1 + availabilityZone: us-west-1a + instanceCIDR: "10.0.1.0/24" + private: true +- name: private2 + availabilityZone: us-west-1b + instanceCIDR: "10.0.2.0/24" + private: true +- name: public1 + availabilityZone: us-west-1a + instanceCIDR: "10.0.3.0/24" +- name: public2 + availabilityZone: us-west-1b + instanceCIDR: "10.0.4.0/24" +controller: + loadBalancer: + private: false +etcd: + subnets: + - name: private1 + - name: private2 +worker: + subnets: + - name: public1 + - name: public2 +`, + assertConfig: []ConfigTester{ + hasDefaultExperimentalFeatures, + func(c *config.Cluster, t *testing.T) { + private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24") + private1.CustomName = "private1" + + private2 := 
model.NewPrivateSubnet("us-west-1b", "10.0.2.0/24") + private2.CustomName = "private2" + + public1 := model.NewPublicSubnet("us-west-1a", "10.0.3.0/24") + public1.CustomName = "public1" + + public2 := model.NewPublicSubnet("us-west-1b", "10.0.4.0/24") + public2.CustomName = "public2" + + subnets := []model.Subnet{ + private1, + private2, + public1, + public2, + } + publicSubnets := []model.Subnet{ + public1, + public2, + } + privateSubnets := []model.Subnet{ + private1, + private2, + } + + if !reflect.DeepEqual(c.AllSubnets(), subnets) { + t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets()) + } + if !reflect.DeepEqual(c.Worker.Subnets, publicSubnets) { + t.Errorf("Worker subnets didn't match: expected=%v actual=%v", publicSubnets, c.Worker.Subnets) + } + if !reflect.DeepEqual(c.Controller.Subnets, publicSubnets) { + t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets) + } + if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) { + t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.LoadBalancer.Subnets) + } + if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) { + t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets) + } + }, + }, + }, + { + context: "WithNetworkTopologyExistingSubnets", + configYaml: kubeAwsSettings.mainClusterYaml + ` +vpcId: vpc-1a2b3c4d +routeTableId: rtb-1a2b3c4d +subnets: +- name: private1 + availabilityZone: us-west-1a + id: subnet-1 + private: true +- name: private2 + availabilityZone: us-west-1b + idFromStackOutput: mycluster-private-subnet-1 + private: true +- name: public1 + availabilityZone: us-west-1a + id: subnet-2 +- name: public2 + availabilityZone: us-west-1b + idFromStackOutput: mycluster-public-subnet-1 +controller: + loadBalancer: + private: false +etcd: + subnets: + - name: private1 + - name: private2 +worker: + subnets: + - 
name: public1 + - name: public2 +`, + assertConfig: []ConfigTester{ + hasDefaultExperimentalFeatures, + func(c *config.Cluster, t *testing.T) { + private1 := model.NewExistingPrivateSubnet("us-west-1a", "subnet-1") + private1.CustomName = "private1" + + private2 := model.NewImportedPrivateSubnet("us-west-1b", "mycluster-private-subnet-1") + private2.CustomName = "private2" + + public1 := model.NewExistingPublicSubnet("us-west-1a", "subnet-2") + public1.CustomName = "public1" + + public2 := model.NewImportedPublicSubnet("us-west-1b", "mycluster-public-subnet-1") + public2.CustomName = "public2" + + subnets := []model.Subnet{ + private1, + private2, + public1, + public2, + } + publicSubnets := []model.Subnet{ + public1, + public2, + } + privateSubnets := []model.Subnet{ + private1, + private2, + } + + if !reflect.DeepEqual(c.AllSubnets(), subnets) { + t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets()) + } + if !reflect.DeepEqual(c.Worker.Subnets, publicSubnets) { + t.Errorf("Worker subnets didn't match: expected=%v actual=%v", publicSubnets, c.Worker.Subnets) + } + if !reflect.DeepEqual(c.Controller.Subnets, publicSubnets) { + t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets) + } + if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) { + t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.LoadBalancer.Subnets) + } + if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) { + t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets) + } + }, + }, + }, { context: "WithVpcIdSpecified", configYaml: minimalValidConfigYaml + ` @@ -313,7 +718,15 @@ etcdDataVolumeIOPS: 104 assertConfig: []ConfigTester{ hasDefaultExperimentalFeatures, func(c *config.Cluster, t *testing.T) { + subnet1 := model.NewPublicSubnet("us-west-1c", "10.0.0.0/24") + subnet1.CustomName = "Subnet0" + 
subnets := []model.Subnet{ + subnet1, + } expected := config.EtcdSettings{ + Etcd: model.Etcd{ + Subnets: subnets, + }, EtcdCount: 2, EtcdInstanceType: "t2.medium", EtcdRootVolumeSize: 101, From 242783d86977e6e53ae40062256e59a0880c2be3 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Mon, 30 Jan 2017 16:59:32 +0900 Subject: [PATCH 2/6] feat: Re-add support for the deployment to an existing subnet with a preconfigured NAT gateway See https://github.com/coreos/kube-aws/pull/284#issuecomment-275954527 for more context --- config/config.go | 141 ++++++++++++++++----------- config/templates/stack-template.json | 10 +- model/etcd.go | 37 ++++++- model/nat_gateway.go | 53 ++++++++-- model/subnet.go | 22 +++++ test/integration/maincluster_test.go | 132 +++++++++++++++++++++++++ 6 files changed, 324 insertions(+), 71 deletions(-) diff --git a/config/config.go b/config/config.go index 901e871f5..4419abd5d 100644 --- a/config/config.go +++ b/config/config.go @@ -539,8 +539,18 @@ func (c Cluster) Config() (*Config, error) { subnetIndex := etcdIndex % len(config.Etcd.Subnets) subnet := config.Etcd.Subnets[subnetIndex] - instance := model.EtcdInstance{ - Subnet: subnet, + var instance model.EtcdInstance + + if subnet.Private { + ngw, err := c.FindNATGatewayForPrivateSubnet(subnet) + + if err != nil { + return nil, fmt.Errorf("failed getting the NAT gateway for the subnet %s in %v: %v", subnet.LogicalName(), c.NATGateways(), err) + } + + instance = model.NewPrivateEtcdInstance(subnet, *ngw) + } else { + instance = model.NewPublicEtcdInstance(subnet) } config.EtcdInstances[etcdIndex] = instance @@ -564,62 +574,6 @@ func (c Cluster) Config() (*Config, error) { return &config, nil } -func (c *Cluster) FindSubnetMatching(condition model.Subnet) model.Subnet { - for _, s := range c.Subnets { - if s.CustomName == condition.CustomName { - return s - } - } - out := "" - for _, s := range c.Subnets { - out = fmt.Sprintf("%s%+v ", out, s) - } - panic(fmt.Errorf("No subnet matching %v 
found in %s", condition, out)) -} - -func (c *Cluster) PrivateSubnets() []model.Subnet { - result := []model.Subnet{} - for _, s := range c.Subnets { - if s.Private { - result = append(result, s) - } - } - return result -} - -func (c *Cluster) PublicSubnets() []model.Subnet { - result := []model.Subnet{} - for _, s := range c.Subnets { - if !s.Private { - result = append(result, s) - } - } - return result -} - -func (c *Cluster) NATGateways() []model.NATGateway { - ngws := []model.NATGateway{} - for _, privateSubnet := range c.PrivateSubnets() { - var publicSubnet model.Subnet - config := privateSubnet.NATGateway - if !config.HasIdentifier() { - found := false - for _, s := range c.PublicSubnets() { - if s.AvailabilityZone == privateSubnet.AvailabilityZone { - publicSubnet = s - found = true - } - } - if !found { - panic(fmt.Sprintf("No public subnet found for a NAT gateway associated to private subnet %s", privateSubnet.LogicalName())) - } - } - ngw := model.NewNATGateway(config, privateSubnet, publicSubnet) - ngws = append(ngws, ngw) - } - return ngws -} - // releaseVersionIsGreaterThan will return true if the supplied version is greater then // or equal to the current CoreOS release indicated by the given release // channel. 
@@ -943,6 +897,12 @@ func (c DeploymentSettings) Valid() (*DeploymentValidationResult, error) { return nil, err } + for i, ngw := range c.NATGateways() { + if err := ngw.Validate(); err != nil { + return nil, fmt.Errorf("NGW %d is not valid: %v", i, err) + } + } + return &DeploymentValidationResult{vpcNet: vpcNet}, nil } @@ -951,6 +911,71 @@ func (s DeploymentSettings) AllSubnets() []model.Subnet { return subnets } +func (c DeploymentSettings) FindSubnetMatching(condition model.Subnet) model.Subnet { + for _, s := range c.Subnets { + if s.CustomName == condition.CustomName { + return s + } + } + out := "" + for _, s := range c.Subnets { + out = fmt.Sprintf("%s%+v ", out, s) + } + panic(fmt.Errorf("No subnet matching %v found in %s", condition, out)) +} + +func (c DeploymentSettings) PrivateSubnets() []model.Subnet { + result := []model.Subnet{} + for _, s := range c.Subnets { + if s.Private { + result = append(result, s) + } + } + return result +} + +func (c DeploymentSettings) PublicSubnets() []model.Subnet { + result := []model.Subnet{} + for _, s := range c.Subnets { + if !s.Private { + result = append(result, s) + } + } + return result +} + +func (c DeploymentSettings) FindNATGatewayForPrivateSubnet(s model.Subnet) (*model.NATGateway, error) { + for _, ngw := range c.NATGateways() { + if ngw.IsConnectedToPrivateSubnet(s) { + return &ngw, nil + } + } + return nil, fmt.Errorf("No NATGateway found for the subnet %v", s) +} + +func (c DeploymentSettings) NATGateways() []model.NATGateway { + ngws := []model.NATGateway{} + for _, privateSubnet := range c.PrivateSubnets() { + var publicSubnet model.Subnet + ngwConfig := privateSubnet.NATGateway + if !ngwConfig.Preconfigured { + found := false + for _, s := range c.PublicSubnets() { + if s.AvailabilityZone == privateSubnet.AvailabilityZone { + publicSubnet = s + found = true + } + } + if !found { + panic(fmt.Sprintf("No appropriate public subnet found for a non-preconfigured NAT gateway associated to private subnet 
%s", privateSubnet.LogicalName())) + } + } + ngw := model.NewNATGateway(ngwConfig, privateSubnet, publicSubnet) + ngws = append(ngws, ngw) + } + return ngws +} + func (c WorkerSettings) Valid() error { if c.WorkerRootVolumeType == "io1" { if c.WorkerRootVolumeIOPS < 100 || c.WorkerRootVolumeIOPS > 2000 { diff --git a/config/templates/stack-template.json b/config/templates/stack-template.json index ee8f7e362..562f344bd 100644 --- a/config/templates/stack-template.json +++ b/config/templates/stack-template.json @@ -509,7 +509,7 @@ {{range $etcdIndex, $etcdInstance := .EtcdInstances}} "InstanceEtcd{{$etcdIndex}}eni": { "Properties": { - "SubnetId": {{$etcdInstance.Subnet.Ref}}, + "SubnetId": {{$etcdInstance.SubnetRef}}, "GroupSet": [ { "Ref": "SecurityGroupEtcd" @@ -571,8 +571,8 @@ "Tenancy": "{{$.EtcdTenancy}}", "UserData": { "Fn::FindInMap" : [ "EtcdInstanceParams", "UserData", "cloudconfig"] } }, - {{if $etcdInstance.Subnet.Private}} - "DependsOn": ["{{$etcdInstance.Subnet.NATGatewayRouteName}}"], + {{if $etcdInstance.DependencyRef}} + "DependsOn": [{{$etcdInstance.DependencyRef}}], {{end}} "Type": "AWS::EC2::Instance" }, @@ -1254,8 +1254,9 @@ }, "Type": "AWS::EC2::NatGateway" } - , {{end}} + {{if $ngw.ManageRoute}} + , "{{$ngw.NATGatewayRouteName}}": { "Properties": { "DestinationCidrBlock": "0.0.0.0/0", @@ -1265,6 +1266,7 @@ "Type": "AWS::EC2::Route" } {{end}} + {{end}} {{if not .InternetGatewayID}} , diff --git a/model/etcd.go b/model/etcd.go index 3f7654faf..ac5aa9396 100644 --- a/model/etcd.go +++ b/model/etcd.go @@ -1,9 +1,42 @@ package model +import "fmt" + type Etcd struct { Subnets []Subnet `yaml:"subnets,omitempty"` } -type EtcdInstance struct { - Subnet Subnet +type EtcdInstance interface { + SubnetRef() string + DependencyRef() string +} + +type etcdInstanceImpl struct { + subnet Subnet + natGateway NATGateway +} + +func NewPrivateEtcdInstance(s Subnet, ngw NATGateway) EtcdInstance { + return etcdInstanceImpl{ + subnet: s, + natGateway: ngw, + } +} + 
+func NewPublicEtcdInstance(s Subnet) EtcdInstance { + return etcdInstanceImpl{ + subnet: s, + } +} + +func (i etcdInstanceImpl) SubnetRef() string { + return i.subnet.Ref() +} + +func (i etcdInstanceImpl) DependencyRef() string { + // We have to wait until the route to the NAT gateway if it doesn't exist yet(hence ManageRoute=true) or the etcd node fails due to inability to connect internet + if i.subnet.Private && i.natGateway.ManageRoute() { + return fmt.Sprintf(`"%s"`, i.natGateway.NATGatewayRouteName()) + } + return "" } diff --git a/model/nat_gateway.go b/model/nat_gateway.go index 215701b76..8a35ec867 100644 --- a/model/nat_gateway.go +++ b/model/nat_gateway.go @@ -1,22 +1,28 @@ package model -import "fmt" +import ( + "fmt" +) type NATGatewayConfig struct { Identifier `yaml:",inline"` + Preconfigured bool `yaml:"preconfigured,omitempty"` EIPAllocationID string `yaml:"eipAllocationId,omitempty"` } type NATGateway interface { + EIPAllocationIDRef() string + EIPLogicalName() string + IsConnectedToPrivateSubnet(Subnet) bool LogicalName() string - ManageNATGateway() bool ManageEIP() bool - EIPLogicalName() string - EIPAllocationIDRef() string + ManageNATGateway() bool + ManageRoute() bool + NATGatewayRouteName() string Ref() string - PublicSubnetRef() string PrivateSubnetRouteTableRef() string - NATGatewayRouteName() string + PublicSubnetRef() string + Validate() error } type natGatewayImpl struct { @@ -38,13 +44,17 @@ func (g natGatewayImpl) LogicalName() string { } func (g natGatewayImpl) ManageNATGateway() bool { - return !g.HasIdentifier() + return !g.HasIdentifier() && !g.Preconfigured } func (g natGatewayImpl) ManageEIP() bool { return g.EIPAllocationID == "" } +func (g natGatewayImpl) ManageRoute() bool { + return !g.Preconfigured +} + func (g natGatewayImpl) EIPLogicalName() string { return fmt.Sprintf("%sEIP", g.LogicalName()) } @@ -56,6 +66,10 @@ func (g natGatewayImpl) EIPAllocationIDRef() string { return g.EIPAllocationID } +func (g natGatewayImpl) 
IsConnectedToPrivateSubnet(s Subnet) bool {
+	return g.privateSubnet.LogicalName() == s.LogicalName()
+}
+
 func (g natGatewayImpl) Ref() string {
 	return g.Identifier.Ref(g.LogicalName())
 }
@@ -71,3 +85,28 @@ func (g natGatewayImpl) PrivateSubnetRouteTableRef() string {
 func (g natGatewayImpl) NATGatewayRouteName() string {
 	return g.privateSubnet.NATGatewayRouteName()
 }
+
+func (g natGatewayImpl) Validate() error {
+	if g.Preconfigured {
+		if !g.privateSubnet.HasIdentifier() {
+			return fmt.Errorf("an NGW with preconfigured=true must be associated to an existing private subnet: %+v", g)
+		}
+
+		if g.publicSubnet.Provided() {
+			return fmt.Errorf("an NGW with preconfigured=true must not be associated to an existing public subnet: %+v", g)
+		}
+
+		if !g.privateSubnet.RouteTable.HasIdentifier() {
+			return fmt.Errorf("an NGW with preconfigured=true must have an existing route table provided via routeTable.id or routeTable.idFromStackOutput: %+v", g)
+		}
+
+		if g.HasIdentifier() {
+			return fmt.Errorf("an NGW with preconfigured=true must not have id or idFromStackOutput: %+v", g)
+		}
+
+		if g.EIPAllocationID != "" {
+			return fmt.Errorf("an NGW with preconfigured=true must not have an eipAllocationId: %+v", g)
+		}
+	}
+	return nil
+}
diff --git a/model/subnet.go b/model/subnet.go
index 7e9e7066a..26cd178be 100644
--- a/model/subnet.go
+++ b/model/subnet.go
@@ -41,6 +41,24 @@ func NewExistingPrivateSubnet(az string, id string) Subnet {
 	}
 }
 
+func NewExistingPrivateSubnetWithPreconfiguredNATGateway(az string, id string, rtb string) Subnet {
+	return Subnet{
+		Identifier: Identifier{
+			ID: id,
+		},
+		AvailabilityZone: az,
+		Private:          true,
+		RouteTable: RouteTable{
+			Identifier: Identifier{
+				ID: rtb,
+			},
+		},
+		NATGateway: NATGatewayConfig{
+			Preconfigured: true,
+		},
+	}
+}
+
 func NewImportedPrivateSubnet(az string, name string) Subnet {
 	return Subnet{
 		Identifier: Identifier{
@@ -71,6 +89,10 @@ func NewImportedPublicSubnet(az string, name string) Subnet {
 	}
 }
 
+func (s 
*Subnet) Provided() bool { + return s.AvailabilityZone != "" +} + func (s *Subnet) Public() bool { return !s.Private } diff --git a/test/integration/maincluster_test.go b/test/integration/maincluster_test.go index a532d01ad..64aee9c34 100644 --- a/test/integration/maincluster_test.go +++ b/test/integration/maincluster_test.go @@ -279,6 +279,12 @@ worker: if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) { t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets) } + + for i, s := range c.PrivateSubnets() { + if s.NATGateway.Preconfigured { + t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i) + } + } }, }, }, @@ -345,6 +351,12 @@ subnets: if !reflect.DeepEqual(c.Etcd.Subnets, publicSubnets) { t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", publicSubnets, c.Etcd.Subnets) } + + for i, s := range c.PrivateSubnets() { + if s.NATGateway.Preconfigured { + t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i) + } + } }, }, }, @@ -429,6 +441,12 @@ worker: if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) { t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets) } + + for i, s := range c.PrivateSubnets() { + if s.NATGateway.Preconfigured { + t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i) + } + } }, }, }, @@ -509,6 +527,114 @@ worker: if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) { t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets) } + + for i, s := range c.PrivateSubnets() { + if s.NATGateway.Preconfigured { + t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i) + } + } + }, + }, + }, + // See https://github.com/coreos/kube-aws/pull/284#issuecomment-275955785 + { + context: 
"WithNetworkTopologyExistingPrivateSubnetsWithNonAWSNATGateway", + configYaml: kubeAwsSettings.mainClusterYaml + ` +vpcId: vpc-1a2b3c4d +# routeTableId must be omitted +# See https://github.com/coreos/kube-aws/pull/284#issuecomment-275962332 +# routeTableId: rtb-1a2b3c4d +subnets: +- name: private1 + availabilityZone: us-west-1a + id: subnet-1 + private: true + natGateway: + preconfigured: true + # this, in combination with "natGateway.preconfigured=true", implies that the route table already has a route to an existing NAT gateway + routeTable: + id: routetable-withpreconfigurednat1a +- name: private2 + availabilityZone: us-west-1b + id: subnet-2 + private: true + natGateway: + preconfigured: true + # this, in combination with "natGateway.preconfigured=true", implies that the route table already has a route to an existing NAT gateway + routeTable: + id: routetable-withpreconfigurednat1b +- name: public1 + availabilityZone: us-west-1a + id: subnet-3 +- name: public2 + availabilityZone: us-west-1b + id: subnet-4 +controller: + subnets: + - name: private1 + - name: private2 + loadBalancer: + private: false +etcd: + subnets: + - name: private1 + - name: private2 +worker: + subnets: + - name: private1 + - name: private2 +`, + assertConfig: []ConfigTester{ + hasDefaultExperimentalFeatures, + func(c *config.Cluster, t *testing.T) { + private1 := model.NewExistingPrivateSubnetWithPreconfiguredNATGateway("us-west-1a", "subnet-1", "routetable-withpreconfigurednat1a") + private1.CustomName = "private1" + + private2 := model.NewExistingPrivateSubnetWithPreconfiguredNATGateway("us-west-1b", "subnet-2", "routetable-withpreconfigurednat1b") + private2.CustomName = "private2" + + public1 := model.NewExistingPublicSubnet("us-west-1a", "subnet-3") + public1.CustomName = "public1" + + public2 := model.NewExistingPublicSubnet("us-west-1b", "subnet-4") + public2.CustomName = "public2" + + subnets := []model.Subnet{ + private1, + private2, + public1, + public2, + } + publicSubnets := 
[]model.Subnet{ + public1, + public2, + } + privateSubnets := []model.Subnet{ + private1, + private2, + } + + if !reflect.DeepEqual(c.AllSubnets(), subnets) { + t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets()) + } + if !reflect.DeepEqual(c.Worker.Subnets, privateSubnets) { + t.Errorf("Worker subnets didn't match: expected=%v actual=%v", publicSubnets, c.Worker.Subnets) + } + if !reflect.DeepEqual(c.Controller.Subnets, privateSubnets) { + t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets) + } + if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) { + t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.LoadBalancer.Subnets) + } + if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) { + t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets) + } + + for i, s := range c.PrivateSubnets() { + if !s.NATGateway.Preconfigured { + t.Errorf("NAT gateway for the private subnet #%d is externally managed and shouldn't created by kube-aws", i) + } + } }, }, }, @@ -589,6 +715,12 @@ worker: if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) { t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets) } + + for i, s := range c.PrivateSubnets() { + if s.NATGateway.Preconfigured { + t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i) + } + } }, }, }, From 80885cbe58f6d25e03bf4e514749a84decfe31d6 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Tue, 31 Jan 2017 12:08:21 +0900 Subject: [PATCH 3/6] Re-add the support for existing use-cases of combining vpcId and mapPublicIPs, routeTableId to let kube-aws create all the nodes and the api load-balancer inside either private or public subnets --- config/config.go | 66 ++++++- config/templates/stack-template.json | 18 +- 
model/internet_gateway.go | 10 ++ model/subnet.go | 40 ++++- test/integration/maincluster_test.go | 246 ++++++++++++++++++++++++++- 5 files changed, 345 insertions(+), 35 deletions(-) create mode 100644 model/internet_gateway.go diff --git a/config/config.go b/config/config.go index 4419abd5d..a60fa2d23 100644 --- a/config/config.go +++ b/config/config.go @@ -181,10 +181,41 @@ func (c *Cluster) SetDefaults() { } } + privateTopologyImplied := c.RouteTableID != "" && !c.MapPublicIPs + publicTopologyImplied := c.RouteTableID != "" && c.MapPublicIPs + for i, s := range c.Subnets { if s.CustomName == "" { c.Subnets[i].CustomName = fmt.Sprintf("Subnet%d", i) } + + // DEPRECATED AND REMOVED IN THE FUTURE + // See https://github.com/coreos/kube-aws/pull/284#issuecomment-275998862 + // + // This implies a deployment to an existing VPC with a route table with a preconfigured Internet Gateway + // and all the subnets created by kube-aws are public + if publicTopologyImplied { + c.Subnets[i].InternetGateway.Preconfigured = true + c.Subnets[i].RouteTable.ID = c.RouteTableID + if s.Private { + panic(fmt.Sprintf("mapPublicIPs(=%v) and subnets[%d].private(=%v) conflicts: %+v", c.MapPublicIPs, i, s.Private, s)) + } + c.Subnets[i].Private = false + } + + // DEPRECATED AND REMOVED IN THE FUTURE + // See https://github.com/coreos/kube-aws/pull/284#issuecomment-275998862 + // + // This implies a deployment to an existing VPC with a route table with a preconfigured NAT Gateway + // and all the subnets created by kube-aws are private + if privateTopologyImplied { + c.Subnets[i].NATGateway.Preconfigured = true + c.Subnets[i].RouteTable.ID = c.RouteTableID + if s.Private { + panic(fmt.Sprintf("mapPublicIPs(=%v) and subnets[%d].private(=%v) conflicts. You don't need to set true to both of them. If you want to make all the subnets private, make mapPublicIPs false. 
If you want to make only part of subnets private, make subnets[].private true accordingly: %+v", c.MapPublicIPs, i, s.Private, s)) + } + c.Subnets[i].Private = true + } } for i, s := range c.Worker.Subnets { @@ -208,23 +239,36 @@ func (c *Cluster) SetDefaults() { } if len(c.Worker.Subnets) == 0 { - c.Worker.Subnets = c.PublicSubnets() + if privateTopologyImplied { + c.Worker.Subnets = c.PrivateSubnets() + } else { + c.Worker.Subnets = c.PublicSubnets() + } } if len(c.Controller.Subnets) == 0 { - c.Controller.Subnets = c.PublicSubnets() + if privateTopologyImplied { + c.Controller.Subnets = c.PrivateSubnets() + } else { + c.Controller.Subnets = c.PublicSubnets() + } } if len(c.Controller.LoadBalancer.Subnets) == 0 { - if c.Controller.LoadBalancer.Private == true { + if c.Controller.LoadBalancer.Private || privateTopologyImplied { c.Controller.LoadBalancer.Subnets = c.PrivateSubnets() + c.Controller.LoadBalancer.Private = true } else { c.Controller.LoadBalancer.Subnets = c.PublicSubnets() } } if len(c.Etcd.Subnets) == 0 { - c.Etcd.Subnets = c.PublicSubnets() + if privateTopologyImplied { + c.Etcd.Subnets = c.PrivateSubnets() + } else { + c.Etcd.Subnets = c.PublicSubnets() + } } } @@ -863,6 +907,9 @@ func (c DeploymentSettings) Valid() (*DeploymentValidationResult, error) { var instanceCIDRs = make([]*net.IPNet, 0) + allPrivate := true + allPublic := true + for i, subnet := range c.Subnets { if subnet.ID != "" || subnet.IDFromStackOutput != "" { continue @@ -882,6 +929,17 @@ func (c DeploymentSettings) Valid() (*DeploymentValidationResult, error) { i, ) } + + if subnet.RouteTableID() != "" && c.RouteTableID != "" { + return nil, fmt.Errorf("either subnets[].routeTable.id(%s) or routeTableId(%s) but not both can be specified", subnet.RouteTableID(), c.RouteTableID) + } + + allPrivate = allPrivate && subnet.Private + allPublic = allPublic && subnet.Public() + } + + if c.RouteTableID != "" && !allPublic && !allPrivate { + return nil, fmt.Errorf("network topology 
including both private and public subnets specified while the single route table(%s) is also specified. You must differentiate the route table at least between private and public subnets. Use subets[].routeTable.id instead of routeTableId for that.", c.RouteTableID) } for i, a := range instanceCIDRs { diff --git a/config/templates/stack-template.json b/config/templates/stack-template.json index 562f344bd..eb9c600fc 100644 --- a/config/templates/stack-template.json +++ b/config/templates/stack-template.json @@ -1206,24 +1206,8 @@ }, "Type": "AWS::EC2::RouteTable" } - {{if $subnet.Public}} + {{if $subnet.ManageRouteToInternet}} , - "{{$subnet.RouteTableName}}": { - "Properties": { - "Tags": [ - { - "Key": "Name", - "Value": "{{$.ClusterName}}-{{$subnet.RouteTableName}}" - }, - { - "Key": "KubernetesCluster", - "Value": "{{$.ClusterName}}" - } - ], - "VpcId": {{$.VPCRef}} - }, - "Type": "AWS::EC2::RouteTable" - }, "{{$subnet.RouteTableName}}ToInternet": { "Properties": { "DestinationCidrBlock": "0.0.0.0/0", diff --git a/model/internet_gateway.go b/model/internet_gateway.go new file mode 100644 index 000000000..64d06e359 --- /dev/null +++ b/model/internet_gateway.go @@ -0,0 +1,10 @@ +package model + +type InternetGateway struct { + Identifier `yaml:",inline"` + Preconfigured bool `yaml:"preconfigured,omitempty"` +} + +func (g InternetGateway) ManageInternetGateway() bool { + return !g.HasIdentifier() +} diff --git a/model/subnet.go b/model/subnet.go index 26cd178be..ddd119576 100644 --- a/model/subnet.go +++ b/model/subnet.go @@ -59,6 +59,38 @@ func NewExistingPrivateSubnetWithPreconfiguredNATGateway(az string, id string, r } } +func NewPublicSubnetWithPreconfiguredInternetGateway(az string, cidr string, rtb string) Subnet { + return Subnet{ + AvailabilityZone: az, + InstanceCIDR: cidr, + Private: false, + RouteTable: RouteTable{ + Identifier: Identifier{ + ID: rtb, + }, + }, + InternetGateway: InternetGateway{ + Preconfigured: true, + }, + } +} + +func 
NewPrivateSubnetWithPreconfiguredNATGateway(az string, cidr string, rtb string) Subnet { + return Subnet{ + AvailabilityZone: az, + InstanceCIDR: cidr, + Private: true, + RouteTable: RouteTable{ + Identifier: Identifier{ + ID: rtb, + }, + }, + NATGateway: NATGatewayConfig{ + Preconfigured: true, + }, + } +} + func NewImportedPrivateSubnet(az string, name string) Subnet { return Subnet{ Identifier: Identifier{ @@ -130,8 +162,8 @@ func (s *Subnet) ManageRouteTable() bool { return !s.RouteTable.HasIdentifier() } -func (s *Subnet) ManageInternetGateway() bool { - return !s.InternetGateway.HasIdentifier() +func (s *Subnet) ManageRouteToInternet() bool { + return s.Public() && !s.InternetGateway.Preconfigured } func (s *Subnet) NATGatewayRouteName() string { @@ -153,10 +185,6 @@ func (s *Subnet) RouteTableRef() string { return s.RouteTable.Ref(logicalName) } -type InternetGateway struct { - Identifier `yaml:",inline"` -} - type RouteTable struct { Identifier `yaml:",inline"` } diff --git a/test/integration/maincluster_test.go b/test/integration/maincluster_test.go index 64aee9c34..c0e250c67 100644 --- a/test/integration/maincluster_test.go +++ b/test/integration/maincluster_test.go @@ -85,6 +85,14 @@ func TestMainClusterConfig(t *testing.T) { } } + everyPublicSubnetHasRouteToIGW := func(c *config.Cluster, t *testing.T) { + for i, s := range c.PublicSubnets() { + if !s.ManageRouteToInternet() { + t.Errorf("Public subnet %d should have a route to the IGW but it doesn't: %+v", i, s) + } + } + } + kubeAwsSettings := newKubeAwsSettingsFromEnv(t) minimalValidConfigYaml := kubeAwsSettings.mainClusterYaml + ` @@ -196,10 +204,172 @@ experimental: }, }, { - context: "WithNetworkTopologyExplicitSubnets", + context: "WithNetworkTopologyAllPreconfiguredPrivateDeprecated", configYaml: kubeAwsSettings.mainClusterYaml + ` vpcId: vpc-1a2b3c4d +# This, in combination with mapPublicIPs=false, implies that the route table contains a route to a preconfigured NAT gateway +# See 
https://github.com/coreos/kube-aws/pull/284#issuecomment-276008202 routeTableId: rtb-1a2b3c4d +# This means that all the subnets created by kube-aws should be private +mapPublicIPs: false +# This can't be false because kube-aws won't create public subbnets which are required by an external lb when mapPublicIPs=false +controllerLoadBalancerPrivate: true +subnets: +- availabilityZone: us-west-1a + instanceCIDR: "10.0.1.0/24" + # implies + # private: true + # natGateway: + # preconfigured: true + # routeTable + # id: rtb-1a2b3c4d +- availabilityZone: us-west-1b + instanceCIDR: "10.0.2.0/24" + # implies + # private: true + # natGateway: + # preconfigured: true + # routeTable + # id: rtb-1a2b3c4d +`, + assertConfig: []ConfigTester{ + hasDefaultExperimentalFeatures, + func(c *config.Cluster, t *testing.T) { + private1 := model.NewPrivateSubnetWithPreconfiguredNATGateway("us-west-1a", "10.0.1.0/24", "rtb-1a2b3c4d") + private1.CustomName = "Subnet0" + + private2 := model.NewPrivateSubnetWithPreconfiguredNATGateway("us-west-1b", "10.0.2.0/24", "rtb-1a2b3c4d") + private2.CustomName = "Subnet1" + + subnets := []model.Subnet{ + private1, + private2, + } + if !reflect.DeepEqual(c.AllSubnets(), subnets) { + t.Errorf("Managed subnets didn't match: expected=%+v actual=%+v", subnets, c.AllSubnets()) + } + + privateSubnets := []model.Subnet{ + private1, + private2, + } + if !reflect.DeepEqual(c.Worker.Subnets, privateSubnets) { + t.Errorf("Worker subnets didn't match: expected=%+v actual=%+v", subnets, c.Worker.Subnets) + } + if !reflect.DeepEqual(c.Controller.Subnets, privateSubnets) { + t.Errorf("Controller subnets didn't match: expected=%+v actual=%+v", privateSubnets, c.Controller.Subnets) + } + if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, privateSubnets) { + t.Errorf("Controller loadbalancer subnets didn't match: expected=%+v actual=%+v", privateSubnets, c.Controller.LoadBalancer.Subnets) + } + if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) { + t.Errorf("Etcd 
subnets didn't match: expected=%+v actual=%+v", privateSubnets, c.Etcd.Subnets) + } + + for i, s := range c.PrivateSubnets() { + if !s.NATGateway.Preconfigured { + t.Errorf("NAT gateway for the private subnet #%d is externally managed and shouldn't created by kube-aws", i) + } + + if s.ManageRouteToInternet() { + t.Errorf("Route to IGW shouldn't be created for a private subnet: %+v", s) + } + } + + if len(c.PublicSubnets()) != 0 { + t.Errorf("Number of public subnets should be zero but it wasn't: %d", len(c.PublicSubnets())) + } + }, + }, + }, + { + context: "WithNetworkTopologyAllPreconfiguredPublicDeprecated", + configYaml: kubeAwsSettings.mainClusterYaml + ` +vpcId: vpc-1a2b3c4d +# This, in combination with mapPublicIPs=true, implies that the route table contains a route to a preconfigured internet gateway +# See https://github.com/coreos/kube-aws/pull/284#issuecomment-276008202 +routeTableId: rtb-1a2b3c4d +# This means that all the subnets created by kube-aws should be public +mapPublicIPs: true +# This can't be true because kube-aws won't create private subnets which are required by an internal lb when mapPublicIPs=true +controllerLoadBalancerPrivate: false +# internetGatewayId should be omitted as we assume that the route table specified by routeTableId already contain a route to one +#internetGatewayId: +subnets: +- availabilityZone: us-west-1a + instanceCIDR: "10.0.1.0/24" + # #implies + # private: false + # internetGateway: + # preconfigured: true + # routeTable + # id: rtb-1a2b3c4d +- availabilityZone: us-west-1b + instanceCIDR: "10.0.2.0/24" + # #implies + # private: false + # internetGateway: + # preconfigured: true + # routeTable + # id: rtb-1a2b3c4d +`, + assertConfig: []ConfigTester{ + hasDefaultExperimentalFeatures, + func(c *config.Cluster, t *testing.T) { + private1 := model.NewPublicSubnetWithPreconfiguredInternetGateway("us-west-1a", "10.0.1.0/24", "rtb-1a2b3c4d") + private1.CustomName = "Subnet0" + + private2 := 
model.NewPublicSubnetWithPreconfiguredInternetGateway("us-west-1b", "10.0.2.0/24", "rtb-1a2b3c4d") + private2.CustomName = "Subnet1" + + subnets := []model.Subnet{ + private1, + private2, + } + if !reflect.DeepEqual(c.AllSubnets(), subnets) { + t.Errorf("Managed subnets didn't match: expected=%+v actual=%+v", subnets, c.AllSubnets()) + } + + publicSubnets := []model.Subnet{ + private1, + private2, + } + if !reflect.DeepEqual(c.Worker.Subnets, publicSubnets) { + t.Errorf("Worker subnets didn't match: expected=%+v actual=%+v", subnets, c.Worker.Subnets) + } + if !reflect.DeepEqual(c.Controller.Subnets, publicSubnets) { + t.Errorf("Controller subnets didn't match: expected=%+v actual=%+v", publicSubnets, c.Controller.Subnets) + } + if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) { + t.Errorf("Controller loadbalancer subnets didn't match: expected=%+v actual=%+v", publicSubnets, c.Controller.LoadBalancer.Subnets) + } + if !reflect.DeepEqual(c.Etcd.Subnets, publicSubnets) { + t.Errorf("Etcd subnets didn't match: expected=%+v actual=%+v", publicSubnets, c.Etcd.Subnets) + } + + for i, s := range c.PublicSubnets() { + if s.RouteTableID() != "rtb-1a2b3c4d" { + t.Errorf("Subnet %d should be associated to a route table with an IGW preconfigured but it wasn't", i) + } + + if s.ManageRouteToInternet() { + t.Errorf("Route to IGW shouldn't be created for a public subnet with a preconfigured IGW: %+v", s) + } + } + + if len(c.PrivateSubnets()) != 0 { + t.Errorf("Number of private subnets should be zero but it wasn't: %d", len(c.PrivateSubnets())) + } + }, + }, + }, + { + context: "WithNetworkTopologyExplicitSubnets", + configYaml: kubeAwsSettings.mainClusterYaml + ` +vpcId: vpc-1a2b3c4d +# routeTableId must be omitted +# See https://github.com/coreos/kube-aws/pull/284#issuecomment-275962332 +# routeTableId: rtb-1a2b3c4d subnets: - name: private1 availabilityZone: us-west-1a @@ -235,6 +405,7 @@ worker: `, assertConfig: []ConfigTester{ 
hasDefaultExperimentalFeatures, + everyPublicSubnetHasRouteToIGW, func(c *config.Cluster, t *testing.T) { private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24") private1.CustomName = "private1" @@ -274,7 +445,7 @@ worker: t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets) } if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) { - t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.LoadBalancer.Subnets) + t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", publicSubnets, c.Controller.LoadBalancer.Subnets) } if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) { t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets) @@ -284,6 +455,10 @@ worker: if s.NATGateway.Preconfigured { t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i) } + + if s.ManageRouteToInternet() { + t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s) + } } }, }, @@ -292,7 +467,9 @@ worker: context: "WithNetworkTopologyImplicitSubnets", configYaml: kubeAwsSettings.mainClusterYaml + ` vpcId: vpc-1a2b3c4d -routeTableId: rtb-1a2b3c4d +# routeTableId must be omitted +# See https://github.com/coreos/kube-aws/pull/284#issuecomment-275962332 +# routeTableId: rtb-1a2b3c4d subnets: - name: private1 availabilityZone: us-west-1a @@ -311,6 +488,7 @@ subnets: `, assertConfig: []ConfigTester{ hasDefaultExperimentalFeatures, + everyPublicSubnetHasRouteToIGW, func(c *config.Cluster, t *testing.T) { private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24") private1.CustomName = "private1" @@ -356,6 +534,10 @@ subnets: if s.NATGateway.Preconfigured { t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i) } + + if s.ManageRouteToInternet() { + t.Errorf("Route to IGW shouldn't be 
created for a private subnet: %v", s) + } } }, }, @@ -364,7 +546,9 @@ subnets: context: "WithNetworkTopologyControllerPrivateLB", configYaml: kubeAwsSettings.mainClusterYaml + ` vpcId: vpc-1a2b3c4d -routeTableId: rtb-1a2b3c4d +# routeTableId must be omitted +# See https://github.com/coreos/kube-aws/pull/284#issuecomment-275962332 +# routeTableId: rtb-1a2b3c4d subnets: - name: private1 availabilityZone: us-west-1a @@ -397,6 +581,7 @@ worker: `, assertConfig: []ConfigTester{ hasDefaultExperimentalFeatures, + everyPublicSubnetHasRouteToIGW, func(c *config.Cluster, t *testing.T) { private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24") private1.CustomName = "private1" @@ -446,6 +631,10 @@ worker: if s.NATGateway.Preconfigured { t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i) } + + if s.ManageRouteToInternet() { + t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s) + } } }, }, @@ -454,7 +643,9 @@ worker: context: "WithNetworkTopologyControllerPublicLB", configYaml: kubeAwsSettings.mainClusterYaml + ` vpcId: vpc-1a2b3c4d -routeTableId: rtb-1a2b3c4d +# routeTableId must be omitted +# See https://github.com/coreos/kube-aws/pull/284#issuecomment-275962332 +# routeTableId: rtb-1a2b3c4d subnets: - name: private1 availabilityZone: us-west-1a @@ -484,6 +675,7 @@ worker: `, assertConfig: []ConfigTester{ hasDefaultExperimentalFeatures, + everyPublicSubnetHasRouteToIGW, func(c *config.Cluster, t *testing.T) { private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24") private1.CustomName = "private1" @@ -532,6 +724,10 @@ worker: if s.NATGateway.Preconfigured { t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i) } + + if s.ManageRouteToInternet() { + t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s) + } } }, }, @@ -634,6 +830,10 @@ worker: if !s.NATGateway.Preconfigured { t.Errorf("NAT gateway for the 
private subnet #%d is externally managed and shouldn't created by kube-aws", i) } + + if s.ManageRouteToInternet() { + t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s) + } } }, }, @@ -642,7 +842,6 @@ worker: context: "WithNetworkTopologyExistingSubnets", configYaml: kubeAwsSettings.mainClusterYaml + ` vpcId: vpc-1a2b3c4d -routeTableId: rtb-1a2b3c4d subnets: - name: private1 availabilityZone: us-west-1a @@ -720,6 +919,10 @@ worker: if s.NATGateway.Preconfigured { t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i) } + + if s.ManageRouteToInternet() { + t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s) + } } }, }, @@ -741,8 +944,35 @@ vpcId: vpc-1a2b3c4d routeTableId: rtb-1a2b3c4d `, assertConfig: []ConfigTester{ - hasDefaultEtcdSettings, hasDefaultExperimentalFeatures, + func(c *config.Cluster, t *testing.T) { + subnet1 := model.NewPublicSubnetWithPreconfiguredInternetGateway("us-west-1c", "10.0.0.0/24", "rtb-1a2b3c4d") + subnet1.CustomName = "Subnet0" + subnets := []model.Subnet{ + subnet1, + } + expected := config.EtcdSettings{ + Etcd: model.Etcd{ + Subnets: subnets, + }, + EtcdCount: 1, + EtcdInstanceType: "t2.medium", + EtcdRootVolumeSize: 30, + EtcdRootVolumeType: "gp2", + EtcdDataVolumeSize: 30, + EtcdDataVolumeType: "gp2", + EtcdDataVolumeEphemeral: false, + EtcdTenancy: "default", + } + actual := c.EtcdSettings + if !reflect.DeepEqual(expected, actual) { + t.Errorf( + "EtcdSettings didn't match: expected=%v actual=%v", + expected, + actual, + ) + } + }, }, }, { @@ -850,7 +1080,7 @@ etcdDataVolumeIOPS: 104 assertConfig: []ConfigTester{ hasDefaultExperimentalFeatures, func(c *config.Cluster, t *testing.T) { - subnet1 := model.NewPublicSubnet("us-west-1c", "10.0.0.0/24") + subnet1 := model.NewPublicSubnetWithPreconfiguredInternetGateway("us-west-1c", "10.0.0.0/24", "rtb-1a2b3c4d") subnet1.CustomName = "Subnet0" subnets := []model.Subnet{ subnet1, From 
546865d2e1663924aa866896948e66fd2ba757c8 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Tue, 31 Jan 2017 18:11:59 +0900 Subject: [PATCH 4/6] Drop subnets[].natGateway.preconfigured and subnets[].internetGateway.preconfigured completely and induce these from other settings like before --- config/config.go | 16 ++-- config/templates/stack-template.json | 32 ++++--- e2e/run | 2 + model/etcd.go | 23 +++-- model/identifier.go | 14 +++ model/internet_gateway.go | 3 +- model/nat_gateway.go | 33 +++---- model/subnet.go | 90 ++++++++++++------- test/integration/maincluster_test.go | 128 ++------------------------- 9 files changed, 139 insertions(+), 202 deletions(-) diff --git a/config/config.go b/config/config.go index a60fa2d23..102b434e1 100644 --- a/config/config.go +++ b/config/config.go @@ -195,7 +195,6 @@ func (c *Cluster) SetDefaults() { // This implies a deployment to an existing VPC with a route table with a preconfigured Internet Gateway // and all the subnets created by kube-aws are public if publicTopologyImplied { - c.Subnets[i].InternetGateway.Preconfigured = true c.Subnets[i].RouteTable.ID = c.RouteTableID if s.Private { panic(fmt.Sprintf("mapPublicIPs(=%v) and subnets[%d].private(=%v) conflicts: %+v", c.MapPublicIPs, i, s.Private, s)) @@ -209,7 +208,6 @@ func (c *Cluster) SetDefaults() { // This implies a deployment to an existing VPC with a route table with a preconfigured NAT Gateway // and all the subnets created by kube-aws are private if privateTopologyImplied { - c.Subnets[i].NATGateway.Preconfigured = true c.Subnets[i].RouteTable.ID = c.RouteTableID if s.Private { panic(fmt.Sprintf("mapPublicIPs(=%v) and subnets[%d].private(=%v) conflicts. You don't need to set true to both of them. If you want to make all the subnets private, make mapPublicIPs false. 
If you want to make only part of subnets private, make subnets[].private true accordingly: %+v", c.MapPublicIPs, i, s.Private, s)) @@ -585,16 +583,16 @@ func (c Cluster) Config() (*Config, error) { var instance model.EtcdInstance - if subnet.Private { + if subnet.ManageNATGateway() { ngw, err := c.FindNATGatewayForPrivateSubnet(subnet) if err != nil { - return nil, fmt.Errorf("failed getting the NAT gateway for the subnet %s in %v: %v", subnet.LogicalName(), c.NATGateways(), err) + return nil, fmt.Errorf("failed getting a NAT gateway for the subnet %s in %v: %v", subnet.LogicalName(), c.NATGateways(), err) } - instance = model.NewPrivateEtcdInstance(subnet, *ngw) + instance = model.NewEtcdInstanceDependsOnNewlyCreatedNGW(subnet, *ngw) } else { - instance = model.NewPublicEtcdInstance(subnet) + instance = model.NewEtcdInstance(subnet) } config.EtcdInstances[etcdIndex] = instance @@ -1016,7 +1014,7 @@ func (c DeploymentSettings) NATGateways() []model.NATGateway { for _, privateSubnet := range c.PrivateSubnets() { var publicSubnet model.Subnet ngwConfig := privateSubnet.NATGateway - if !ngwConfig.Preconfigured { + if privateSubnet.ManageNATGateway() { found := false for _, s := range c.PublicSubnets() { if s.AvailabilityZone == privateSubnet.AvailabilityZone { @@ -1027,9 +1025,9 @@ func (c DeploymentSettings) NATGateways() []model.NATGateway { if !found { panic(fmt.Sprintf("No appropriate public subnet found for a non-preconfigured NAT gateway associated to private subnet %s", privateSubnet.LogicalName())) } + ngw := model.NewNATGateway(ngwConfig, privateSubnet, publicSubnet) + ngws = append(ngws, ngw) } - ngw := model.NewNATGateway(ngwConfig, privateSubnet, publicSubnet) - ngws = append(ngws, ngw) } return ngws } diff --git a/config/templates/stack-template.json b/config/templates/stack-template.json index eb9c600fc..6f4acdd38 100644 --- a/config/templates/stack-template.json +++ b/config/templates/stack-template.json @@ -571,7 +571,7 @@ "Tenancy": 
"{{$.EtcdTenancy}}", "UserData": { "Fn::FindInMap" : [ "EtcdInstanceParams", "UserData", "cloudconfig"] } }, - {{if $etcdInstance.DependencyRef}} + {{if $etcdInstance.DependencyExists}} "DependsOn": [{{$etcdInstance.DependencyRef}}], {{end}} "Type": "AWS::EC2::Instance" @@ -1148,7 +1148,7 @@ {{end}} {{range $index, $subnet := .Subnets}} - {{if not $subnet.HasIdentifier }} + {{if $subnet.ManageSubnet }} , "{{$subnet.LogicalName}}": { "Properties": { @@ -1169,18 +1169,6 @@ }, "Type": "AWS::EC2::Subnet" } - {{end}} - {{if $.ElasticFileSystemID}} - , - "{{$subnet.LogicalName}}MountTarget": { - "Properties" : { - "FileSystemId": "{{$.ElasticFileSystemID}}", - "SubnetId": {{$subnet.Ref}}, - "SecurityGroups": [ { "Ref": "SecurityGroupMountTarget" } ] - }, - "Type" : "AWS::EFS::MountTarget" - } - {{end}} , "{{$subnet.LogicalName}}RouteTableAssociation": { "Properties": { @@ -1189,6 +1177,7 @@ }, "Type": "AWS::EC2::SubnetRouteTableAssociation" } + {{if $subnet.ManageRouteTable}} , "{{$subnet.RouteTableName}}": { "Properties": { @@ -1206,6 +1195,19 @@ }, "Type": "AWS::EC2::RouteTable" } + {{end}} + {{end}} + {{if $.ElasticFileSystemID}} + , + "{{$subnet.LogicalName}}MountTarget": { + "Properties" : { + "FileSystemId": "{{$.ElasticFileSystemID}}", + "SubnetId": {{$subnet.Ref}}, + "SecurityGroups": [ { "Ref": "SecurityGroupMountTarget" } ] + }, + "Type" : "AWS::EFS::MountTarget" + } + {{end}} {{if $subnet.ManageRouteToInternet}} , "{{$subnet.RouteTableName}}ToInternet": { @@ -1311,12 +1313,14 @@ }, {{end}} {{range $index, $subnet := .Subnets}} + {{if $subnet.ManageRouteTable}} "{{$subnet.RouteTableName}}" : { "Description" : "The route table assigned to the subnet {{$subnet.LogicalName}}", "Value" : {{$subnet.RouteTableRef}}, "Export" : { "Name" : {"Fn::Sub": "${AWS::StackName}-{{$subnet.RouteTableName}}" }} }, {{end}} + {{end}} "WorkerSecurityGroup" : { "Description" : "The security group assigned to worker nodes", "Value" : { "Ref" : "SecurityGroupWorker" }, diff --git 
a/e2e/run b/e2e/run index 17aee00ca..02238b2fb 100755 --- a/e2e/run +++ b/e2e/run @@ -107,6 +107,8 @@ configure() { ${KUBE_AWS_CMD} render + ${KUBE_AWS_CMD} up --export --s3-uri ${KUBE_AWS_S3_URI} --pretty-print + ${KUBE_AWS_CMD} validate --s3-uri ${KUBE_AWS_S3_URI} echo Generated configuration files in ${WORK_DIR}: diff --git a/model/etcd.go b/model/etcd.go index ac5aa9396..cb5d2eec6 100644 --- a/model/etcd.go +++ b/model/etcd.go @@ -8,7 +8,8 @@ type Etcd struct { type EtcdInstance interface { SubnetRef() string - DependencyRef() string + DependencyExists() bool + DependencyRef() (string, error) } type etcdInstanceImpl struct { @@ -16,14 +17,14 @@ type etcdInstanceImpl struct { natGateway NATGateway } -func NewPrivateEtcdInstance(s Subnet, ngw NATGateway) EtcdInstance { +func NewEtcdInstanceDependsOnNewlyCreatedNGW(s Subnet, ngw NATGateway) EtcdInstance { return etcdInstanceImpl{ subnet: s, natGateway: ngw, } } -func NewPublicEtcdInstance(s Subnet) EtcdInstance { +func NewEtcdInstance(s Subnet) EtcdInstance { return etcdInstanceImpl{ subnet: s, } @@ -33,10 +34,18 @@ func (i etcdInstanceImpl) SubnetRef() string { return i.subnet.Ref() } -func (i etcdInstanceImpl) DependencyRef() string { +func (i etcdInstanceImpl) DependencyExists() bool { + return i.subnet.Private && i.natGateway != nil && i.natGateway.ManageRoute() +} + +func (i etcdInstanceImpl) DependencyRef() (string, error) { // We have to wait until the route to the NAT gateway if it doesn't exist yet(hence ManageRoute=true) or the etcd node fails due to inability to connect internet - if i.subnet.Private && i.natGateway.ManageRoute() { - return fmt.Sprintf(`"%s"`, i.natGateway.NATGatewayRouteName()) + if i.DependencyExists() { + name, err := i.natGateway.NATGatewayRouteName() + if err != nil { + return "", err + } + return fmt.Sprintf(`"%s"`, name), nil } - return "" + return "", nil } diff --git a/model/identifier.go b/model/identifier.go index e345eacaa..3cba1d3ce 100644 --- a/model/identifier.go +++ 
b/model/identifier.go @@ -22,3 +22,17 @@ func (i Identifier) Ref(logicalName string) string { return fmt.Sprintf(`{ "Ref" : %q }`, logicalName) } } + +func (i Identifier) IdOrRef(refProvider func() (string, error)) (string, error) { + if i.IDFromStackOutput != "" { + return fmt.Sprintf(`{ "ImportValue" : %q }`, i.IDFromStackOutput), nil + } else if i.ID != "" { + return fmt.Sprintf(`"%s"`, i.ID), nil + } else { + logicalName, err := refProvider() + if err != nil { + return "", fmt.Errorf("failed to get id or ref: %v", err) + } + return fmt.Sprintf(`{ "Ref" : %q }`, logicalName), nil + } +} diff --git a/model/internet_gateway.go b/model/internet_gateway.go index 64d06e359..a9156f7e6 100644 --- a/model/internet_gateway.go +++ b/model/internet_gateway.go @@ -1,8 +1,7 @@ package model type InternetGateway struct { - Identifier `yaml:",inline"` - Preconfigured bool `yaml:"preconfigured,omitempty"` + Identifier `yaml:",inline"` } func (g InternetGateway) ManageInternetGateway() bool { diff --git a/model/nat_gateway.go b/model/nat_gateway.go index 8a35ec867..a2ff9b65c 100644 --- a/model/nat_gateway.go +++ b/model/nat_gateway.go @@ -6,7 +6,6 @@ import ( type NATGatewayConfig struct { Identifier `yaml:",inline"` - Preconfigured bool `yaml:"preconfigured,omitempty"` EIPAllocationID string `yaml:"eipAllocationId,omitempty"` } @@ -18,9 +17,9 @@ type NATGateway interface { ManageEIP() bool ManageNATGateway() bool ManageRoute() bool - NATGatewayRouteName() string + NATGatewayRouteName() (string, error) Ref() string - PrivateSubnetRouteTableRef() string + PrivateSubnetRouteTableRef() (string, error) PublicSubnetRef() string Validate() error } @@ -44,7 +43,7 @@ func (g natGatewayImpl) LogicalName() string { } func (g natGatewayImpl) ManageNATGateway() bool { - return !g.HasIdentifier() && !g.Preconfigured + return g.privateSubnet.ManageNATGateway() } func (g natGatewayImpl) ManageEIP() bool { @@ -52,7 +51,7 @@ func (g natGatewayImpl) ManageEIP() bool { } func (g natGatewayImpl) 
ManageRoute() bool { - return !g.Preconfigured + return g.privateSubnet.ManageRouteToNATGateway() } func (g natGatewayImpl) EIPLogicalName() string { @@ -78,34 +77,38 @@ func (g natGatewayImpl) PublicSubnetRef() string { return g.publicSubnet.Ref() } -func (g natGatewayImpl) PrivateSubnetRouteTableRef() string { - return g.privateSubnet.RouteTableRef() +func (g natGatewayImpl) PrivateSubnetRouteTableRef() (string, error) { + ref, err := g.privateSubnet.RouteTableRef() + if err != nil { + return "", err + } + return ref, nil } -func (g natGatewayImpl) NATGatewayRouteName() string { - return g.privateSubnet.NATGatewayRouteName() +func (g natGatewayImpl) NATGatewayRouteName() (string, error) { + return fmt.Sprintf("%sRouteToNatGateway", g.privateSubnet.ReferenceName()), nil } func (g natGatewayImpl) Validate() error { - if g.Preconfigured { + if !g.ManageNATGateway() { if !g.privateSubnet.HasIdentifier() { - return fmt.Errorf("an NGW with preconfigured=true must be associated to an existing private subnet: %+v", g) + return fmt.Errorf("a preconfigured NGW must be associated to an existing private subnet: %+v", g) } if g.publicSubnet.Provided() { - return fmt.Errorf("an NGW with preconfigured=true must not be associated to an existing public subnet: %+v", g) + return fmt.Errorf("a preconfigured NGW must not be associated to an existing public subnet: %+v", g) } if !g.privateSubnet.RouteTable.HasIdentifier() { - return fmt.Errorf("an NGW with preconfigured=true must have an existing route table provided via routeTable.id or routeTable.idFromStackOutput: %+v", g) + return fmt.Errorf("a preconfigured NGW must have an existing route table provided via routeTable.id or routeTable.idFromStackOutput: %+v", g) } if g.HasIdentifier() { - return fmt.Errorf("an NGW with preconcfigured=true must not have id or idFromStackOutput: %+v", g) + return fmt.Errorf("a preconfigured NGW must not have id or idFromStackOutput: %+v", g) } if g.EIPAllocationID != "" { - return fmt.Errorf("an 
NGW with preconcfigured=true must not have an eipAllocactionID: %+v", g) + return fmt.Errorf("a preconfigured NGW must not have an eipAllocactionID: %+v", g) } } return nil diff --git a/model/subnet.go b/model/subnet.go index ddd119576..b571b6da4 100644 --- a/model/subnet.go +++ b/model/subnet.go @@ -1,6 +1,7 @@ package model import ( + "fmt" "strings" ) @@ -41,24 +42,6 @@ func NewExistingPrivateSubnet(az string, id string) Subnet { } } -func NewExistingPrivateSubnetWithPreconfiguredNATGateway(az string, id string, rtb string) Subnet { - return Subnet{ - Identifier: Identifier{ - ID: id, - }, - AvailabilityZone: az, - Private: true, - RouteTable: RouteTable{ - Identifier: Identifier{ - ID: rtb, - }, - }, - NATGateway: NATGatewayConfig{ - Preconfigured: true, - }, - } -} - func NewPublicSubnetWithPreconfiguredInternetGateway(az string, cidr string, rtb string) Subnet { return Subnet{ AvailabilityZone: az, @@ -69,9 +52,7 @@ func NewPublicSubnetWithPreconfiguredInternetGateway(az string, cidr string, rtb ID: rtb, }, }, - InternetGateway: InternetGateway{ - Preconfigured: true, - }, + InternetGateway: InternetGateway{}, } } @@ -85,9 +66,7 @@ func NewPrivateSubnetWithPreconfiguredNATGateway(az string, cidr string, rtb str ID: rtb, }, }, - NATGateway: NATGatewayConfig{ - Preconfigured: true, - }, + NATGateway: NATGatewayConfig{}, } } @@ -147,6 +126,15 @@ func (s *Subnet) ResourcePrefix() string { return t } +func (s *Subnet) ReferenceName() string { + if s.ManageSubnet() { + return s.LogicalName() + } else if s.ID != "" { + return s.ID + } + return s.IDFromStackOutput +} + func (s *Subnet) LogicalName() string { if s.CustomName != "" { return s.CustomName @@ -158,16 +146,47 @@ func (s *Subnet) RouteTableID() string { return s.RouteTable.ID } -func (s *Subnet) ManageRouteTable() bool { - return !s.RouteTable.HasIdentifier() +// ManageNATGateway returns true if a NAT gateway for this subnet must be created or updated by kube-aws +// kube-aws creates or updates a NAT 
gateway if: +// * the subnet is private and +// * the subnet is going to be managed by kube-aws(an existing subnet is NOT specified) and +// * the route table for the subnet is going to be managed by kube-aws(an existing subnet is NOT specified) and +// * an existing NAT gateway ID is not specified to be reused +func (s *Subnet) ManageNATGateway() bool { + return s.Private && s.ManageRouteTable() && !s.NATGateway.HasIdentifier() +} + +// ManageRouteToNATGateway returns true if a route to a NAT gateway for this subnet must be created or updated by kube-aws +// kube-aws creates or updates a NAT gateway if: +// * the NGW is going to be managed or +// * an existing NAT gateway ID is specified +func (s *Subnet) ManageRouteToNATGateway() bool { + return s.ManageNATGateway() || s.NATGateway.HasIdentifier() } +// ManageRouteTable returns true if a route table for this subnet must be created or updated by kube-aws +// kube-aws creates a route table if and only if the subnet is also going to be managed and an existing route table for it isn't specified +func (s *Subnet) ManageRouteTable() bool { + return s.ManageSubnet() && !s.RouteTable.HasIdentifier() +} + +// ManageRouteToInternet returns true if a route from this subnet to to an IGW must be created or updated by kube-aws +// kube-aws creates a route to an IGW for an subnet if and only if: +// * the subnet is public and +// * the subnet is going to be managed by kube-aws and +// * the route table is going to be managed by kube-aws +// In other words, kube-aws won't create or update a route to an IGW if: +// * the subnet is private or +// * an existing subnet is used or +// * an existing route table is used func (s *Subnet) ManageRouteToInternet() bool { - return s.Public() && !s.InternetGateway.Preconfigured + return s.Public() && s.ManageSubnet() && s.ManageRouteTable() } -func (s *Subnet) NATGatewayRouteName() string { - return s.RouteTableName() + "RouteToNatGateway" +// ManageSubnet returns true if this subnet must be 
managed(created or updated) by kube-aws +// kube-aws creates a subnet if subnet.id and subnet.idFromStackOutput are not specified +func (s *Subnet) ManageSubnet() bool { + return !s.HasIdentifier() } // Ref returns ID or ref to newly created resource @@ -176,13 +195,16 @@ func (s *Subnet) Ref() string { } // RouteTableName represents the name of the route table to which this subnet is associated. -func (s *Subnet) RouteTableName() string { - return s.ResourcePrefix() + "RouteTable" + s.AvailabilityZoneLogicalName() +func (s *Subnet) RouteTableName() (string, error) { + // There should be no need to call this func if the route table isn't going to be created/updated by kube-aws + if !s.ManageRouteTable() { + return "", fmt.Errorf("[bug] assertion failed: RouteTableName() must be called if and only if ManageRouteTable() returns true") + } + return s.ResourcePrefix() + "RouteTable" + s.AvailabilityZoneLogicalName(), nil } -func (s *Subnet) RouteTableRef() string { - logicalName := s.RouteTableName() - return s.RouteTable.Ref(logicalName) +func (s *Subnet) RouteTableRef() (string, error) { + return s.RouteTable.IdOrRef(s.RouteTableName) } type RouteTable struct { diff --git a/test/integration/maincluster_test.go b/test/integration/maincluster_test.go index c0e250c67..39a84aafc 100644 --- a/test/integration/maincluster_test.go +++ b/test/integration/maincluster_test.go @@ -219,16 +219,12 @@ subnets: instanceCIDR: "10.0.1.0/24" # implies # private: true - # natGateway: - # preconfigured: true # routeTable # id: rtb-1a2b3c4d - availabilityZone: us-west-1b instanceCIDR: "10.0.2.0/24" # implies # private: true - # natGateway: - # preconfigured: true # routeTable # id: rtb-1a2b3c4d `, @@ -267,7 +263,7 @@ subnets: } for i, s := range c.PrivateSubnets() { - if !s.NATGateway.Preconfigured { + if s.ManageNATGateway() { t.Errorf("NAT gateway for the private subnet #%d is externally managed and shouldn't created by kube-aws", i) } @@ -300,16 +296,12 @@ subnets: instanceCIDR: 
"10.0.1.0/24" # #implies # private: false - # internetGateway: - # preconfigured: true # routeTable # id: rtb-1a2b3c4d - availabilityZone: us-west-1b instanceCIDR: "10.0.2.0/24" # #implies # private: false - # internetGateway: - # preconfigured: true # routeTable # id: rtb-1a2b3c4d `, @@ -452,7 +444,7 @@ worker: } for i, s := range c.PrivateSubnets() { - if s.NATGateway.Preconfigured { + if !s.ManageNATGateway() { t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i) } @@ -531,7 +523,7 @@ subnets: } for i, s := range c.PrivateSubnets() { - if s.NATGateway.Preconfigured { + if !s.ManageNATGateway() { t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i) } @@ -628,7 +620,7 @@ worker: } for i, s := range c.PrivateSubnets() { - if s.NATGateway.Preconfigured { + if !s.ManageNATGateway() { t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i) } @@ -721,7 +713,7 @@ worker: } for i, s := range c.PrivateSubnets() { - if s.NATGateway.Preconfigured { + if !s.ManageNATGateway() { t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i) } @@ -732,112 +724,6 @@ worker: }, }, }, - // See https://github.com/coreos/kube-aws/pull/284#issuecomment-275955785 - { - context: "WithNetworkTopologyExistingPrivateSubnetsWithNonAWSNATGateway", - configYaml: kubeAwsSettings.mainClusterYaml + ` -vpcId: vpc-1a2b3c4d -# routeTableId must be omitted -# See https://github.com/coreos/kube-aws/pull/284#issuecomment-275962332 -# routeTableId: rtb-1a2b3c4d -subnets: -- name: private1 - availabilityZone: us-west-1a - id: subnet-1 - private: true - natGateway: - preconfigured: true - # this, in combination with "natGateway.preconfigured=true", implies that the route table already has a route to an existing NAT gateway - routeTable: - id: routetable-withpreconfigurednat1a -- name: 
private2 - availabilityZone: us-west-1b - id: subnet-2 - private: true - natGateway: - preconfigured: true - # this, in combination with "natGateway.preconfigured=true", implies that the route table already has a route to an existing NAT gateway - routeTable: - id: routetable-withpreconfigurednat1b -- name: public1 - availabilityZone: us-west-1a - id: subnet-3 -- name: public2 - availabilityZone: us-west-1b - id: subnet-4 -controller: - subnets: - - name: private1 - - name: private2 - loadBalancer: - private: false -etcd: - subnets: - - name: private1 - - name: private2 -worker: - subnets: - - name: private1 - - name: private2 -`, - assertConfig: []ConfigTester{ - hasDefaultExperimentalFeatures, - func(c *config.Cluster, t *testing.T) { - private1 := model.NewExistingPrivateSubnetWithPreconfiguredNATGateway("us-west-1a", "subnet-1", "routetable-withpreconfigurednat1a") - private1.CustomName = "private1" - - private2 := model.NewExistingPrivateSubnetWithPreconfiguredNATGateway("us-west-1b", "subnet-2", "routetable-withpreconfigurednat1b") - private2.CustomName = "private2" - - public1 := model.NewExistingPublicSubnet("us-west-1a", "subnet-3") - public1.CustomName = "public1" - - public2 := model.NewExistingPublicSubnet("us-west-1b", "subnet-4") - public2.CustomName = "public2" - - subnets := []model.Subnet{ - private1, - private2, - public1, - public2, - } - publicSubnets := []model.Subnet{ - public1, - public2, - } - privateSubnets := []model.Subnet{ - private1, - private2, - } - - if !reflect.DeepEqual(c.AllSubnets(), subnets) { - t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets()) - } - if !reflect.DeepEqual(c.Worker.Subnets, privateSubnets) { - t.Errorf("Worker subnets didn't match: expected=%v actual=%v", publicSubnets, c.Worker.Subnets) - } - if !reflect.DeepEqual(c.Controller.Subnets, privateSubnets) { - t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets) - } - if 
!reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) { - t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.LoadBalancer.Subnets) - } - if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) { - t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets) - } - - for i, s := range c.PrivateSubnets() { - if !s.NATGateway.Preconfigured { - t.Errorf("NAT gateway for the private subnet #%d is externally managed and shouldn't created by kube-aws", i) - } - - if s.ManageRouteToInternet() { - t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s) - } - } - }, - }, - }, { context: "WithNetworkTopologyExistingSubnets", configYaml: kubeAwsSettings.mainClusterYaml + ` @@ -916,8 +802,8 @@ worker: } for i, s := range c.PrivateSubnets() { - if s.NATGateway.Preconfigured { - t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i) + if s.ManageNATGateway() { + t.Errorf("NAT gateway for the existing private subnet #%d should not be created by kube-aws", i) } if s.ManageRouteToInternet() { From b24bc94bcd5790f2d48af5d045de809443730794 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Wed, 1 Feb 2017 14:44:14 +0900 Subject: [PATCH 5/6] Generalize subnet names so that we can have mutiple private or public subnets per AZ --- config/config.go | 13 +-- config/config_test.go | 12 +-- config/templates/stack-template.json | 29 ++++-- e2e/run | 6 ++ model/etcd.go | 7 +- model/identifier.go | 14 +-- model/nat_gateway.go | 93 +++++++++++++------ model/subnet.go | 82 ++++++++-------- nodepool/config/config.go | 28 ++++-- nodepool/config/templates/cluster.yaml | 8 +- nodepool/config/templates/stack-template.json | 50 ---------- test/integration/maincluster_test.go | 54 +++++------ 12 files changed, 201 insertions(+), 195 deletions(-) diff --git a/config/config.go b/config/config.go index 102b434e1..db2fda51c 100644 --- 
a/config/config.go +++ b/config/config.go @@ -185,8 +185,8 @@ func (c *Cluster) SetDefaults() { publicTopologyImplied := c.RouteTableID != "" && c.MapPublicIPs for i, s := range c.Subnets { - if s.CustomName == "" { - c.Subnets[i].CustomName = fmt.Sprintf("Subnet%d", i) + if s.Name == "" { + c.Subnets[i].Name = fmt.Sprintf("Subnet%d", i) } // DEPRECATED AND REMOVED IN THE FUTURE @@ -969,7 +969,7 @@ func (s DeploymentSettings) AllSubnets() []model.Subnet { func (c DeploymentSettings) FindSubnetMatching(condition model.Subnet) model.Subnet { for _, s := range c.Subnets { - if s.CustomName == condition.CustomName { + if s.Name == condition.Name { return s } } @@ -1015,14 +1015,15 @@ func (c DeploymentSettings) NATGateways() []model.NATGateway { var publicSubnet model.Subnet ngwConfig := privateSubnet.NATGateway if privateSubnet.ManageNATGateway() { - found := false + publicSubnetFound := false for _, s := range c.PublicSubnets() { if s.AvailabilityZone == privateSubnet.AvailabilityZone { publicSubnet = s - found = true + publicSubnetFound = true + break } } - if !found { + if !publicSubnetFound { panic(fmt.Sprintf("No appropriate public subnet found for a non-preconfigured NAT gateway associated to private subnet %s", privateSubnet.LogicalName())) } ngw := model.NewNATGateway(ngwConfig, privateSubnet, publicSubnet) diff --git a/config/config_test.go b/config/config_test.go index 5d6982adb..2aee4569a 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -332,12 +332,12 @@ subnets: { InstanceCIDR: "10.4.3.0/24", AvailabilityZone: "ap-northeast-1a", - CustomName: "Subnet0", + Name: "Subnet0", }, { InstanceCIDR: "10.4.4.0/24", AvailabilityZone: "ap-northeast-1c", - CustomName: "Subnet1", + Name: "Subnet1", }, }, }, @@ -353,7 +353,7 @@ instanceCIDR: 10.4.3.0/24 { AvailabilityZone: "ap-northeast-1a", InstanceCIDR: "10.4.3.0/24", - CustomName: "Subnet0", + Name: "Subnet0", }, }, }, @@ -370,7 +370,7 @@ subnets: [] { AvailabilityZone: "ap-northeast-1a", 
InstanceCIDR: "10.4.3.0/24", - CustomName: "Subnet0", + Name: "Subnet0", }, }, }, @@ -384,7 +384,7 @@ subnets: [] { AvailabilityZone: "ap-northeast-1a", InstanceCIDR: "10.0.0.0/24", - CustomName: "Subnet0", + Name: "Subnet0", }, }, }, @@ -397,7 +397,7 @@ availabilityZone: "ap-northeast-1a" { AvailabilityZone: "ap-northeast-1a", InstanceCIDR: "10.0.0.0/24", - CustomName: "Subnet0", + Name: "Subnet0", }, }, }, diff --git a/config/templates/stack-template.json b/config/templates/stack-template.json index 6f4acdd38..ec39120d4 100644 --- a/config/templates/stack-template.json +++ b/config/templates/stack-template.json @@ -1148,7 +1148,7 @@ {{end}} {{range $index, $subnet := .Subnets}} - {{if $subnet.ManageSubnet }} + {{if $subnet.ManageSubnet}} , "{{$subnet.LogicalName}}": { "Properties": { @@ -1179,12 +1179,12 @@ } {{if $subnet.ManageRouteTable}} , - "{{$subnet.RouteTableName}}": { + "{{$subnet.RouteTableLogicalName}}": { "Properties": { "Tags": [ { "Key": "Name", - "Value": "{{$.ClusterName}}-{{$subnet.RouteTableName}}" + "Value": "{{$.ClusterName}}-{{$subnet.RouteTableLogicalName}}" }, { "Key": "KubernetesCluster", @@ -1196,7 +1196,6 @@ "Type": "AWS::EC2::RouteTable" } {{end}} - {{end}} {{if $.ElasticFileSystemID}} , "{{$subnet.LogicalName}}MountTarget": { @@ -1210,7 +1209,7 @@ {{end}} {{if $subnet.ManageRouteToInternet}} , - "{{$subnet.RouteTableName}}ToInternet": { + "{{$subnet.InternetGatewayRouteLogicalName}}": { "Properties": { "DestinationCidrBlock": "0.0.0.0/0", "GatewayId": {{$.InternetGatewayRef}}, @@ -1220,6 +1219,7 @@ } {{end}} {{end}} + {{end}} {{range $i, $ngw := .NATGateways}} {{if $ngw.ManageEIP}} @@ -1241,18 +1241,20 @@ "Type": "AWS::EC2::NatGateway" } {{end}} - {{if $ngw.ManageRoute}} + {{range $_, $s := $ngw.PrivateSubnets}} + {{if $s.ManageRouteToNATGateway}} , - "{{$ngw.NATGatewayRouteName}}": { + "{{$s.NATGatewayRouteLogicalName}}": { "Properties": { "DestinationCidrBlock": "0.0.0.0/0", "NatGatewayId": {{$ngw.Ref}}, - "RouteTableId": 
{{$ngw.PrivateSubnetRouteTableRef}} + "RouteTableId": {{$s.RouteTableRef}} }, "Type": "AWS::EC2::Route" } {{end}} {{end}} + {{end}} {{if not .InternetGatewayID}} , @@ -1314,10 +1316,17 @@ {{end}} {{range $index, $subnet := .Subnets}} {{if $subnet.ManageRouteTable}} - "{{$subnet.RouteTableName}}" : { + "{{$subnet.RouteTableLogicalName}}" : { "Description" : "The route table assigned to the subnet {{$subnet.LogicalName}}", "Value" : {{$subnet.RouteTableRef}}, - "Export" : { "Name" : {"Fn::Sub": "${AWS::StackName}-{{$subnet.RouteTableName}}" }} + "Export" : { "Name" : {"Fn::Sub": "${AWS::StackName}-{{$subnet.RouteTableLogicalName}}" }} + }, + {{end}} + {{if $subnet.ManageSubnet}} + "{{$subnet.LogicalName}}" : { + "Description" : "The subnet id of {{$subnet.LogicalName}}", + "Value" : {{$subnet.Ref}}, + "Export" : { "Name" : {"Fn::Sub": "${AWS::StackName}-{{$subnet.LogicalName}}" }} }, {{end}} {{end}} diff --git a/e2e/run b/e2e/run index 02238b2fb..322123c82 100755 --- a/e2e/run +++ b/e2e/run @@ -488,6 +488,12 @@ nodepools_destroy() { KUBE_AWS_NODE_POOL_INDEX=2 nodepool_destroy } +nodepools_rerun() { + nodepools_destroy + build + nodepools +} + all_destroy() { nodepools_destroy main_destroy diff --git a/model/etcd.go b/model/etcd.go index cb5d2eec6..16fca71cc 100644 --- a/model/etcd.go +++ b/model/etcd.go @@ -35,16 +35,13 @@ func (i etcdInstanceImpl) SubnetRef() string { } func (i etcdInstanceImpl) DependencyExists() bool { - return i.subnet.Private && i.natGateway != nil && i.natGateway.ManageRoute() + return i.subnet.Private && i.subnet.ManageRouteToNATGateway() } func (i etcdInstanceImpl) DependencyRef() (string, error) { // We have to wait until the route to the NAT gateway if it doesn't exist yet(hence ManageRoute=true) or the etcd node fails due to inability to connect internet if i.DependencyExists() { - name, err := i.natGateway.NATGatewayRouteName() - if err != nil { - return "", err - } + name := i.subnet.NATGatewayRouteLogicalName() return 
fmt.Sprintf(`"%s"`, name), nil } return "", nil } diff --git a/model/identifier.go b/model/identifier.go index 3cba1d3ce..c9d995db9 100644 --- a/model/identifier.go +++ b/model/identifier.go @@ -13,23 +13,25 @@ func (i Identifier) HasIdentifier() bool { return i.ID != "" || i.IDFromStackOutput != "" } -func (i Identifier) Ref(logicalName string) string { +func (i Identifier) Ref(logicalNameProvider func() string) string { if i.IDFromStackOutput != "" { - return fmt.Sprintf(`{ "ImportValue" : %q }`, i.IDFromStackOutput) + return fmt.Sprintf(`{ "Fn::ImportValue" : %q }`, i.IDFromStackOutput) } else if i.ID != "" { return fmt.Sprintf(`"%s"`, i.ID) } else { - return fmt.Sprintf(`{ "Ref" : %q }`, logicalName) + return fmt.Sprintf(`{ "Ref" : %q }`, logicalNameProvider()) } } -func (i Identifier) IdOrRef(refProvider func() (string, error)) (string, error) { +// RefOrError should be used instead of Ref where possible so that kube-aws can print a more useful error message with +// the line number for the stack-template.json when there's an error. +func (i Identifier) RefOrError(logicalNameProvider func() (string, error)) (string, error) { if i.IDFromStackOutput != "" { - return fmt.Sprintf(`{ "ImportValue" : %q }`, i.IDFromStackOutput), nil + return fmt.Sprintf(`{ "Fn::ImportValue" : %q }`, i.IDFromStackOutput), nil } else if i.ID != "" { return fmt.Sprintf(`"%s"`, i.ID), nil } else { - logicalName, err := refProvider() + logicalName, err := logicalNameProvider() if err != nil { return "", fmt.Errorf("failed to get id or ref: %v", err) } diff --git a/model/nat_gateway.go b/model/nat_gateway.go index a2ff9b65c..8c3bfcc75 100644 --- a/model/nat_gateway.go +++ b/model/nat_gateway.go @@ -9,6 +9,14 @@ type NATGatewayConfig struct { EIPAllocationID string `yaml:"eipAllocationId,omitempty"` } +func (c NATGatewayConfig) Validate() error { + if c.HasIdentifier() && c.EIPAllocationID != "" { + return fmt.Errorf("eipAllocationId can't be specified for an existing nat gateway. 
It is a user's responsibility to configure the nat gateway if one tried to reuse an existing one: %+v", c) + } + return nil +} + +// kube-aws manages at most one NAT gateway per subnet type NATGateway interface { EIPAllocationIDRef() string EIPLogicalName() string @@ -17,33 +25,48 @@ type NATGateway interface { ManageEIP() bool ManageNATGateway() bool ManageRoute() bool - NATGatewayRouteName() (string, error) Ref() string - PrivateSubnetRouteTableRef() (string, error) PublicSubnetRef() string + PrivateSubnets() []Subnet Validate() error } type natGatewayImpl struct { NATGatewayConfig - privateSubnet Subnet - publicSubnet Subnet + privateSubnets []Subnet + publicSubnet Subnet } func NewNATGateway(c NATGatewayConfig, private Subnet, public Subnet) NATGateway { return natGatewayImpl{ NATGatewayConfig: c, - privateSubnet: private, + privateSubnets: []Subnet{private}, publicSubnet: public, } } func (g natGatewayImpl) LogicalName() string { - return fmt.Sprintf("NatGateway%s", g.privateSubnet.AvailabilityZoneLogicalName()) + name := "" + for _, s := range g.privateSubnets { + name = name + s.LogicalName() + } + return fmt.Sprintf("NatGateway%s", name) } func (g natGatewayImpl) ManageNATGateway() bool { - return g.privateSubnet.ManageNATGateway() + allTrue := true + allFalse := true + for _, s := range g.privateSubnets { + allTrue = allTrue && s.ManageNATGateway() + allFalse = allFalse && !s.ManageNATGateway() + } + if allTrue { + return true + } else if allFalse { + return false + } + + panic(fmt.Sprintf("[bug] assertion failed: private subnets associated to this nat gateway(%+v) conflicts in their settings. 
kube-aws is confused and can't decide whether it should manage the nat gateway or not", g)) } func (g natGatewayImpl) ManageEIP() bool { @@ -51,7 +74,19 @@ func (g natGatewayImpl) ManageEIP() bool { } func (g natGatewayImpl) ManageRoute() bool { - return g.privateSubnet.ManageRouteToNATGateway() + allTrue := true + allFalse := true + for _, s := range g.privateSubnets { + allTrue = allTrue && s.ManageRouteToNATGateway() + allFalse = allFalse && !s.ManageRouteToNATGateway() + } + if allTrue { + return true + } else if allFalse { + return false + } + + panic(fmt.Sprintf("[bug] assertion failed: private subnets associated to this nat gateway(%+v) conflicts in their settings. kube-aws is confused and can't decide whether it should manage the route to nat gateway or not", g)) } func (g natGatewayImpl) EIPLogicalName() string { @@ -66,41 +101,39 @@ func (g natGatewayImpl) EIPAllocationIDRef() string { } func (g natGatewayImpl) IsConnectedToPrivateSubnet(s Subnet) bool { - return g.privateSubnet.LogicalName() == s.LogicalName() + for _, ps := range g.privateSubnets { + if ps.LogicalName() == s.LogicalName() { + return true + } + } + return false } func (g natGatewayImpl) Ref() string { - return g.Identifier.Ref(g.LogicalName()) + return g.Identifier.Ref(g.LogicalName) } func (g natGatewayImpl) PublicSubnetRef() string { return g.publicSubnet.Ref() } -func (g natGatewayImpl) PrivateSubnetRouteTableRef() (string, error) { - ref, err := g.privateSubnet.RouteTableRef() - if err != nil { - return "", err - } - return ref, nil -} - -func (g natGatewayImpl) NATGatewayRouteName() (string, error) { - return fmt.Sprintf("%sRouteToNatGateway", g.privateSubnet.ReferenceName()), nil +func (g natGatewayImpl) PrivateSubnets() []Subnet { + return g.privateSubnets } func (g natGatewayImpl) Validate() error { + if err := g.NATGatewayConfig.Validate(); err != nil { + return fmt.Errorf("failed to validate nat gateway: %v", err) + } if !g.ManageNATGateway() { - if 
!g.privateSubnet.HasIdentifier() { - return fmt.Errorf("a preconfigured NGW must be associated to an existing private subnet: %+v", g) - } - - if g.publicSubnet.Provided() { - return fmt.Errorf("a preconfigured NGW must not be associated to an existing public subnet: %+v", g) - } - - if !g.privateSubnet.RouteTable.HasIdentifier() { - return fmt.Errorf("a preconfigured NGW must have an existing route table provided via routeTable.id or routeTable.idFromStackOutput: %+v", g) + for i, s := range g.privateSubnets { + if !s.HasIdentifier() { + return fmt.Errorf("a preconfigured NGW must be associated to an existing private subnet #%d: %+v", i, g) + } + + if !s.RouteTable.HasIdentifier() { + return fmt.Errorf("a preconfigured NGW must have an existing route table provided via routeTable.id or routeTable.idFromStackOutput: %+v", g) + } } if g.HasIdentifier() { diff --git a/model/subnet.go b/model/subnet.go index b571b6da4..1871897fd 100644 --- a/model/subnet.go +++ b/model/subnet.go @@ -7,13 +7,13 @@ import ( type Subnet struct { Identifier `yaml:",inline"` - CustomName string `yaml:"name,omitempty"` AvailabilityZone string `yaml:"availabilityZone,omitempty"` + Name string `yaml:"name,omitempty"` InstanceCIDR string `yaml:"instanceCIDR,omitempty"` - RouteTable RouteTable `yaml:"routeTable,omitempty"` - NATGateway NATGatewayConfig `yaml:"natGateway,omitempty"` InternetGateway InternetGateway `yaml:"internetGateway,omitempty"` - Private bool + NATGateway NATGatewayConfig `yaml:"natGateway,omitempty"` + Private bool `yaml:"private,omitempty"` + RouteTable RouteTable `yaml:"routeTable,omitempty"` } func NewPublicSubnet(az string, cidr string) Subnet { @@ -100,46 +100,19 @@ func NewImportedPublicSubnet(az string, name string) Subnet { } } -func (s *Subnet) Provided() bool { - return s.AvailabilityZone != "" -} - func (s *Subnet) Public() bool { return !s.Private } -func (s *Subnet) AvailabilityZoneLogicalName() string { - return 
strings.Replace(strings.Title(s.AvailabilityZone), "-", "", -1) -} - func (s *Subnet) MapPublicIPs() bool { return !s.Private } -func (s *Subnet) ResourcePrefix() string { - var t string - if s.Private { - t = "Private" - } else { - t = "Public" - } - return t -} - -func (s *Subnet) ReferenceName() string { - if s.ManageSubnet() { - return s.LogicalName() - } else if s.ID != "" { - return s.ID - } - return s.IDFromStackOutput -} - func (s *Subnet) LogicalName() string { - if s.CustomName != "" { - return s.CustomName + if s.Name != "" { + return strings.Replace(strings.Title(s.Name), "-", "", -1) } - return s.ResourcePrefix() + "Subnet" + s.AvailabilityZoneLogicalName() + panic(fmt.Sprintf("Name must be set for a subnet: %+v", *s)) } func (s *Subnet) RouteTableID() string { @@ -153,7 +126,7 @@ func (s *Subnet) RouteTableID() string { // * the route table for the subnet is going to be managed by kube-aws(an existing subnet is NOT specified) and // * an existing NAT gateway ID is not specified to be reused func (s *Subnet) ManageNATGateway() bool { - return s.Private && s.ManageRouteTable() && !s.NATGateway.HasIdentifier() + return s.managePrivateRouteTable() && !s.NATGateway.HasIdentifier() } // ManageRouteToNATGateway returns true if a route to a NAT gateway for this subnet must be created or updated by kube-aws @@ -161,7 +134,11 @@ func (s *Subnet) ManageNATGateway() bool { // * the NGW is going to be managed or // * an existing NAT gateway ID is specified func (s *Subnet) ManageRouteToNATGateway() bool { - return s.ManageNATGateway() || s.NATGateway.HasIdentifier() + return s.managePrivateRouteTable() +} + +func (s *Subnet) managePrivateRouteTable() bool { + return s.Private && s.ManageRouteTable() } // ManageRouteTable returns true if a route table for this subnet must be created or updated by kube-aws @@ -170,7 +147,7 @@ func (s *Subnet) ManageRouteTable() bool { return s.ManageSubnet() && !s.RouteTable.HasIdentifier() } -// ManageRouteToInternet returns true 
if a route from this subnet to to an IGW must be created or updated by kube-aws +// ManageRouteToInternet returns true if a route from this subnet to an IGW must be created or updated by kube-aws // kube-aws creates a route to an IGW for an subnet if and only if: // * the subnet is public and // * the subnet is going to be managed by kube-aws and @@ -191,22 +168,41 @@ func (s *Subnet) ManageSubnet() bool { // Ref returns ID or ref to newly created resource func (s *Subnet) Ref() string { - return s.Identifier.Ref(s.LogicalName()) + return s.Identifier.Ref(s.LogicalName) } -// RouteTableName represents the name of the route table to which this subnet is associated. -func (s *Subnet) RouteTableName() (string, error) { +// RouteTableLogicalName represents the name of the route table to which this subnet is associated. +func (s *Subnet) RouteTableLogicalName() (string, error) { // There should be no need to call this func if the route table isn't going to be created/updated by kube-aws if !s.ManageRouteTable() { - return "", fmt.Errorf("[bug] assertion failed: RouteTableName() must be called if and only if ManageRouteTable() returns true") + return "", fmt.Errorf("[bug] assertion failed: RouteTableLogicalName() must be called if and only if ManageRouteTable() returns true") } - return s.ResourcePrefix() + "RouteTable" + s.AvailabilityZoneLogicalName(), nil + return s.subnetSpecificResourceLogicalName("RouteTable"), nil +} + +func (s *Subnet) InternetGatewayRouteLogicalName() string { + return s.subnetSpecificResourceLogicalName("RouteToInternet") +} + +func (s *Subnet) NATGatewayRouteLogicalName() string { + return s.subnetSpecificResourceLogicalName("RouteToNatGateway") +} + +func (s *Subnet) subnetSpecificResourceLogicalName(resourceName string) string { + return fmt.Sprintf("%s%s", s.LogicalName(), resourceName) } func (s *Subnet) RouteTableRef() (string, error) { - return s.RouteTable.IdOrRef(s.RouteTableName) + return 
s.RouteTable.RefOrError(s.RouteTableLogicalName) } +// kube-aws manages at most one route table per subnet +// If ID or IDFromStackOutput is non-zero, kube-aws doesn't manage the route table but its users' responsibility to +// provide properly configured one to be reused by kube-aws. +// More concretely: +// * If an user is going to reuse an existing route table for a private subnet, it must have a route to a NAT gateway +// * A NAT gateway can be either a classical one with a NAT EC2 instance or an AWS-managed one +// * IF an user is going to reuse an existing route table for a public subnet, it must have a route to an Internet gateway type RouteTable struct { Identifier `yaml:",inline"` } diff --git a/nodepool/config/config.go b/nodepool/config/config.go index 99e7687f6..185c528dd 100644 --- a/nodepool/config/config.go +++ b/nodepool/config/config.go @@ -144,16 +144,28 @@ func ClusterFromBytes(data []byte, main *cfg.Config) (*ProvidedConfig, error) { return nil, fmt.Errorf("invalid cluster: %v", err) } - // For backward-compatibility - if len(c.Subnets) == 0 { - c.Subnets = []model.Subnet{ - model.NewPublicSubnet(c.AvailabilityZone, c.InstanceCIDR), - } + // Fetch subnets defined in the main cluster by name + for i, s := range c.Worker.Subnets { + linkedSubnet := main.FindSubnetMatching(s) + c.Worker.Subnets[i] = linkedSubnet + } + + // Default to subnets defined in the main cluster + // CAUTION: cluster-autoscaler Won't work if there're 2 or more subnets spanning over different AZs + if len(c.Worker.Subnets) == 0 { + c.Worker.Subnets = main.Worker.Subnets } - for i, s := range c.Subnets { - if s.CustomName == "" { - c.Subnets[i].CustomName = fmt.Sprintf("Subnet%d", i) + // Import all the managed subnets from the main cluster i.e. 
don't create subnets inside the node pool cfn stack + for i, s := range c.Worker.Subnets { + if !s.HasIdentifier() { + stackOutputName := fmt.Sprintf("%s-%s", main.ClusterName, s.LogicalName()) + az := s.AvailabilityZone + if s.Private { + c.Worker.Subnets[i] = model.NewImportedPrivateSubnet(az, stackOutputName) + } else { + c.Worker.Subnets[i] = model.NewImportedPublicSubnet(az, stackOutputName) + } } } diff --git a/nodepool/config/templates/cluster.yaml b/nodepool/config/templates/cluster.yaml index 59de4289d..0737ecf0f 100644 --- a/nodepool/config/templates/cluster.yaml +++ b/nodepool/config/templates/cluster.yaml @@ -167,11 +167,11 @@ vpcCIDR: "{{.VPCCIDR}}" # instanceCIDR: "10.0.0.0/24" # Kubernetes subnets with their CIDRs and availability zones. Differentiating availability zone for 2 or more subnets result in high-availability (failures of a single availability zone won't result in immediate downtimes) +# However please beware that cluster-autoscaler doesn't support a node pool backed by an ASG assigned to multi AZs. +# If you're planning to use cluster-autoscaler, just use a single subnet for each node pool and create multiple node pools for high-availability. 
# subnets: -# - availabilityZone: us-west-1a -# instanceCIDR: "10.0.0.0/24" -# - availabilityZone: us-west-1b -# instanceCIDR: "10.0.1.0/24" +# # Fetch subnets managed in the main cluster by name to be reused in this node pool +# - name: PrivateSubnet1a # Required by kubelet to locate the cluster-internal dns hosted on controller nodes in the base cluster dnsServiceIP: "{{.DNSServiceIP}}" diff --git a/nodepool/config/templates/stack-template.json b/nodepool/config/templates/stack-template.json index 6741f84c5..64a89233d 100644 --- a/nodepool/config/templates/stack-template.json +++ b/nodepool/config/templates/stack-template.json @@ -351,55 +351,5 @@ }, "Type": "AWS::IAM::Role" } - - {{range $index, $subnet := .Subnets}} - , - "{{$subnet.LogicalName}}": { - "Properties": { - "AvailabilityZone": "{{$subnet.AvailabilityZone}}", - "CidrBlock": "{{$subnet.InstanceCIDR}}", - "MapPublicIpOnLaunch": {{$subnet.MapPublicIPs}}, - "Tags": [ - { - "Key": "Name", - "Value": "{{$.ClusterName}}-{{$.NodePoolName}}-{{$subnet.LogicalName}}" - }, - { - "Key": "KubernetesCluster", - "Value": "{{$.ClusterName}}" - } - ], - "VpcId": {{$.VPCRef}} - }, - "Type": "AWS::EC2::Subnet" - }, - "{{$subnet.LogicalName}}RouteTableAssociation": { - "Properties": { - "RouteTableId": - {{if not $subnet.RouteTableID}} - {{if $subnet.Private}} - {"Fn::ImportValue" : {"Fn::Sub" : "{{$.ClusterName}}-PrivateRouteTable{{$subnet.AvailabilityZoneLogicalName}}"}}, - {{else}} - {"Fn::ImportValue" : {"Fn::Sub" : "{{$.ClusterName}}-PublicRouteTable{{$subnet.AvailabilityZoneLogicalName}}"}}, - {{end}} - {{else}} - "{{$subnet.RouteTableID}}", - {{end}} - "SubnetId": {{$subnet.Ref}} - }, - "Type": "AWS::EC2::SubnetRouteTableAssociation" - } - {{if $.ElasticFileSystemID}} - , - "{{$subnet.LogicalName}}MountTarget": { - "Properties" : { - "FileSystemId": "{{$.ElasticFileSystemID}}", - "SubnetId": {{$subnet.Ref}}, - "SecurityGroups": [ { "Ref": "SecurityGroupMountTarget" } ] - }, - "Type" : "AWS::EFS::MountTarget" - } 
- {{end}} - {{end}} } } diff --git a/test/integration/maincluster_test.go b/test/integration/maincluster_test.go index 39a84aafc..053f46dc8 100644 --- a/test/integration/maincluster_test.go +++ b/test/integration/maincluster_test.go @@ -18,7 +18,7 @@ type ConfigTester func(c *config.Cluster, t *testing.T) func TestMainClusterConfig(t *testing.T) { hasDefaultEtcdSettings := func(c *config.Cluster, t *testing.T) { subnet1 := model.NewPublicSubnet("us-west-1c", "10.0.0.0/24") - subnet1.CustomName = "Subnet0" + subnet1.Name = "Subnet0" expected := config.EtcdSettings{ Etcd: model.Etcd{ Subnets: []model.Subnet{ @@ -232,10 +232,10 @@ subnets: hasDefaultExperimentalFeatures, func(c *config.Cluster, t *testing.T) { private1 := model.NewPrivateSubnetWithPreconfiguredNATGateway("us-west-1a", "10.0.1.0/24", "rtb-1a2b3c4d") - private1.CustomName = "Subnet0" + private1.Name = "Subnet0" private2 := model.NewPrivateSubnetWithPreconfiguredNATGateway("us-west-1b", "10.0.2.0/24", "rtb-1a2b3c4d") - private2.CustomName = "Subnet1" + private2.Name = "Subnet1" subnets := []model.Subnet{ private1, @@ -309,10 +309,10 @@ subnets: hasDefaultExperimentalFeatures, func(c *config.Cluster, t *testing.T) { private1 := model.NewPublicSubnetWithPreconfiguredInternetGateway("us-west-1a", "10.0.1.0/24", "rtb-1a2b3c4d") - private1.CustomName = "Subnet0" + private1.Name = "Subnet0" private2 := model.NewPublicSubnetWithPreconfiguredInternetGateway("us-west-1b", "10.0.2.0/24", "rtb-1a2b3c4d") - private2.CustomName = "Subnet1" + private2.Name = "Subnet1" subnets := []model.Subnet{ private1, @@ -400,16 +400,16 @@ worker: everyPublicSubnetHasRouteToIGW, func(c *config.Cluster, t *testing.T) { private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24") - private1.CustomName = "private1" + private1.Name = "private1" private2 := model.NewPrivateSubnet("us-west-1b", "10.0.2.0/24") - private2.CustomName = "private2" + private2.Name = "private2" public1 := model.NewPublicSubnet("us-west-1a", "10.0.3.0/24") - 
public1.CustomName = "public1" + public1.Name = "public1" public2 := model.NewPublicSubnet("us-west-1b", "10.0.4.0/24") - public2.CustomName = "public2" + public2.Name = "public2" subnets := []model.Subnet{ private1, @@ -483,16 +483,16 @@ subnets: everyPublicSubnetHasRouteToIGW, func(c *config.Cluster, t *testing.T) { private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24") - private1.CustomName = "private1" + private1.Name = "private1" private2 := model.NewPrivateSubnet("us-west-1b", "10.0.2.0/24") - private2.CustomName = "private2" + private2.Name = "private2" public1 := model.NewPublicSubnet("us-west-1a", "10.0.3.0/24") - public1.CustomName = "public1" + public1.Name = "public1" public2 := model.NewPublicSubnet("us-west-1b", "10.0.4.0/24") - public2.CustomName = "public2" + public2.Name = "public2" subnets := []model.Subnet{ private1, @@ -576,16 +576,16 @@ worker: everyPublicSubnetHasRouteToIGW, func(c *config.Cluster, t *testing.T) { private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24") - private1.CustomName = "private1" + private1.Name = "private1" private2 := model.NewPrivateSubnet("us-west-1b", "10.0.2.0/24") - private2.CustomName = "private2" + private2.Name = "private2" public1 := model.NewPublicSubnet("us-west-1a", "10.0.3.0/24") - public1.CustomName = "public1" + public1.Name = "public1" public2 := model.NewPublicSubnet("us-west-1b", "10.0.4.0/24") - public2.CustomName = "public2" + public2.Name = "public2" subnets := []model.Subnet{ private1, @@ -670,16 +670,16 @@ worker: everyPublicSubnetHasRouteToIGW, func(c *config.Cluster, t *testing.T) { private1 := model.NewPrivateSubnet("us-west-1a", "10.0.1.0/24") - private1.CustomName = "private1" + private1.Name = "private1" private2 := model.NewPrivateSubnet("us-west-1b", "10.0.2.0/24") - private2.CustomName = "private2" + private2.Name = "private2" public1 := model.NewPublicSubnet("us-west-1a", "10.0.3.0/24") - public1.CustomName = "public1" + public1.Name = "public1" public2 := 
model.NewPublicSubnet("us-west-1b", "10.0.4.0/24") - public2.CustomName = "public2" + public2.Name = "public2" subnets := []model.Subnet{ private1, @@ -759,16 +759,16 @@ worker: hasDefaultExperimentalFeatures, func(c *config.Cluster, t *testing.T) { private1 := model.NewExistingPrivateSubnet("us-west-1a", "subnet-1") - private1.CustomName = "private1" + private1.Name = "private1" private2 := model.NewImportedPrivateSubnet("us-west-1b", "mycluster-private-subnet-1") - private2.CustomName = "private2" + private2.Name = "private2" public1 := model.NewExistingPublicSubnet("us-west-1a", "subnet-2") - public1.CustomName = "public1" + public1.Name = "public1" public2 := model.NewImportedPublicSubnet("us-west-1b", "mycluster-public-subnet-1") - public2.CustomName = "public2" + public2.Name = "public2" subnets := []model.Subnet{ private1, @@ -833,7 +833,7 @@ routeTableId: rtb-1a2b3c4d hasDefaultExperimentalFeatures, func(c *config.Cluster, t *testing.T) { subnet1 := model.NewPublicSubnetWithPreconfiguredInternetGateway("us-west-1c", "10.0.0.0/24", "rtb-1a2b3c4d") - subnet1.CustomName = "Subnet0" + subnet1.Name = "Subnet0" subnets := []model.Subnet{ subnet1, } @@ -967,7 +967,7 @@ etcdDataVolumeIOPS: 104 hasDefaultExperimentalFeatures, func(c *config.Cluster, t *testing.T) { subnet1 := model.NewPublicSubnetWithPreconfiguredInternetGateway("us-west-1c", "10.0.0.0/24", "rtb-1a2b3c4d") - subnet1.CustomName = "Subnet0" + subnet1.Name = "Subnet0" subnets := []model.Subnet{ subnet1, } From 1b75cbebdcb110acfc68cb2d780457aeded6281f Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Wed, 1 Feb 2017 15:43:59 +0900 Subject: [PATCH 6/6] Update cluster.yaml to cover all the changes and additions from #284 --- config/templates/cluster.yaml | 218 ++++++++++++++++++++++++++++++---- 1 file changed, 193 insertions(+), 25 deletions(-) diff --git a/config/templates/cluster.yaml b/config/templates/cluster.yaml index 65946055b..08bb9d6c1 100644 --- a/config/templates/cluster.yaml +++ 
b/config/templates/cluster.yaml @@ -59,11 +59,27 @@ kmsKeyArn: "{{.KMSKeyARN}}" # minSize: 1 # maxSize: 3 # rollingUpdateMinInstancesInService: 2 +# # If omitted, public subnets are created by kube-aws and used for controller nodes # subnets: -# - availabilityZone: us-west-1a -# instanceCIDR: "10.0.4.0/24" -# - availabilityZone: us-west-1b -# instanceCIDR: "10.0.5.0/24" +# # References subnets defined under the top-level `subnets` key by their names +# - name: ManagedPublicSubnet1 +# - name: ManagedPublicSubnet2 +# loadBalancer: +# # kube-aws creates the ELB for the k8s API endpoint as `Scheme=internet-facing` by default and +# # only public subnets defined under the top-level `subnets` key are used for the ELB +# # private: false +# # +# # If you'd like specific public subnets to be used by the internet-facing ELB: +# # subnets: +# # - name: ManagedPublicSubnet0 +# +# # When explicitly set to true, the ELB for the k8s API endpoint becomes `Scheme=internal` rather than the default `Scheme=internet-facing` one and +# # only private subnets defined under the top-level `subnets` key are used for the ELB +# # private: true +# # +# # If you'd like specific private subnets to be used by the internal ELB: +# # subnets: +# # - name: ManagedPrivateSubnet0 # Set to true to put the ELB on the public subnet although having controller nodes on private subnets #controllerLoadBalancerPrivate: false @@ -93,11 +109,10 @@ kmsKeyArn: "{{.KMSKeyARN}}" # minSize: 1 # maxSize: 3 # rollingUpdateMinInstancesInService: 2 +# # If omitted, public subnets are created by kube-aws and used for worker nodes # subnets: -# - availabilityZone: us-west-1a -# instanceCIDR: "10.0.6.0/24" -# - availabilityZone: us-west-1b -# instanceCIDR: "10.0.7.0/24" +# # References subnets defined under the top-level `subnets` key by their names +# - name: ManagedPublicSubnet1 # Setting worker topology private ensures NAT Gateways are created for use in node pools.
#workerTopologyPrivate: false @@ -140,11 +155,11 @@ kmsKeyArn: "{{.KMSKeyARN}}" # etcdCount: 1 #etcd: +# # If omitted, public subnets are created by kube-aws and used for etcd nodes # subnets: -# - availabilityZone: us-west-1a -# instanceCIDR: "10.0.2.0/24" -# - availabilityZone: us-west-1b -# instanceCIDR: "10.0.3.0/24" +# # References subnets defined under the top-level `subnets` key by their names +# - name: ManagedPrivateSubnet1 +# - name: ManagedPrivateSubnet2 # Instance type for etcd node # etcdInstanceType: t2.medium @@ -184,8 +199,21 @@ kmsKeyArn: "{{.KMSKeyARN}}" # ID of existing Internet Gateway to associate subnet with. Leave blank to create a new Internet Gateway # internetGatewayId: -# ID of existing route table in existing VPC to attach subnet to. Leave blank to use the VPC's main route table. -# routeTableId: +# Advanced: ID of existing route table in existing VPC to attach subnet to. +# Leave blank to use the VPC's main route table. +# This should be specified if and only if vpcId is specified. +# +# IMPORTANT NOTICE: +# +# If routeTableId is specified, it's your responsibility to add an appropriate route to +# an internet gateway(IGW) or a NAT gateway(NGW) to the route table. +# +# More concretely, +# * If you like to make all the subnets private, pre-configure an NGW yourself and add a route to the NGW beforehand +# * If you like to make all the subnets public, pre-configure an IGW yourself and add a route to the IGW beforehand +# * If you like to mix private and public subnets, omit routeTableId but specify subnets[].routeTable.id per subnet +# +# routeTableId: rtb-xxxxxxxx # CIDR for Kubernetes VPC. If vpcId is specified, must match the CIDR of existing vpc. # vpcCIDR: "10.0.0.0/16" @@ -193,20 +221,160 @@ kmsKeyArn: "{{.KMSKeyARN}}" # CIDR for Kubernetes subnet when placing nodes in a single availability zone (not highly-available) Leave commented out for multi availability zone setting and use the below `subnets` section instead. 
# instanceCIDR: "10.0.0.0/24" -# Kubernetes subnets with their CIDRs and availability zones. Differentiating availability zone for 2 or more subnets result in high-availability (failures of a single availability zone won't result in immediate downtimes) +# Kubernetes subnets with their CIDRs and availability zones. +# Differentiating availability zone for 2 or more subnets results in high-availability (failures of a single availability zone won't result in immediate downtimes) # subnets: -# - availabilityZone: us-west-1a +# # +# # Managed public subnet managed by kube-aws +# # +# - name: ManagedPublicSubnet1 +# # Set to false if this subnet is public +# # private: false +# availabilityZone: us-west-1a # instanceCIDR: "10.0.0.0/24" -# subnetId: "subnet-xxxxxxxx" #optional -# - availabilityZone: us-west-1b +# +# # +# # Managed private subnet managed by kube-aws +# # +# - name: ManagedPrivateSubnet1 +# # Set to true if this subnet is private +# private: true +# availabilityZone: us-west-1a +# instanceCIDR: "10.0.1.0/24" +# +# # +# # Advanced: Unmanaged/existing public subnet reused but not managed by kube-aws +# # +# # An internet gateway(igw) and a route table that contains the route to the igw must have been properly configured by YOU. +# # kube-aws tries to reuse the subnet specified by id or idFromStackOutput but kube-aws never modifies the subnet +# # +# - name: ExistingPublicSubnet1 +# # Beware that `availabilityZone` can't be omitted; it must be the one in which the subnet exists. +# availabilityZone: us-west-1a +# # ID of existing subnet to be reused. +# # availabilityZone should still be provided but instanceCIDR can be omitted when id is specified.
+# id: "subnet-xxxxxxxx" +# # Exported output's name from another stack +# # Only specify either id or idFromStackOutput but not both +# #idFromStackOutput: myinfra-PublicSubnet1 +# +# # +# # Advanced: Unmanaged/existing private subnet reused but not managed by kube-aws +# # +# # A nat gateway(ngw) and a route table that contains the route to the ngw must have been properly configured by YOU. +# # kube-aws tries to reuse the subnet specified by id or idFromStackOutput but kube-aws never modifies the subnet +# # +# - name: ExistingPrivateSubnet1 +# # Beware that `availabilityZone` can't be omitted; it must be the one in which the subnet exists. +# availabilityZone: us-west-1a +# # Existing subnet. +# id: "subnet-xxxxxxxx" +# # Exported output's name from another stack +# # Only specify either id or idFromStackOutput but not both +# #idFromStackOutput: myinfra-PrivateSubnet1 +# +# # +# # Advanced: Managed private subnet with an existing NAT gateway +# # +# # kube-aws tries to reuse the ngw specified by id or idFromStackOutput +# # by adding a route to the ngw to a route table managed by kube-aws +# # +# # Please be sure that the NGW is properly deployed. kube-aws will never modify the ngw itself. +# # +# - name: ManagedPrivateSubnetWithExistingNGW +# private: true +# availabilityZone: us-west-1a # instanceCIDR: "10.0.1.0/24" -# natGateway: -# # # Pre-allocated NAT Gateway. Used with private subnets. -# # id: "ngw-abcdef12" -# # # Pre-allocated EIP for NAT Gateways. Used with private subnets. -# # eipAllocationId: "eipalloc-abcdef12" -# # Existing subnet. Beware that `availabilityZone` can't be omitted; it must be the one in which the subnet exists.
-# # id: "subnet-xxxxxxxx" #optional +# natGateway: +# id: "ngw-xxxxxxxx" +# # Exported output's name from another stack +# # Only specify either id or idFromStackOutput but not both +# #idFromStackOutput: myinfra-NATGateway1 +# +# # +# # Advanced: Managed private subnet with an existing NAT gateway +# # +# # kube-aws tries to reuse the ngw specified by id or idFromStackOutput +# # by adding a route to the ngw to a route table managed by kube-aws +# # +# # Please be sure that the NGW is properly deployed. kube-aws will never modify the ngw itself. +# # For example, kube-aws won't assign a pre-allocated EIP to the existing ngw for you. +# # +# - name: ManagedPrivateSubnetWithExistingNGW +# private: true +# availabilityZone: us-west-1a +# instanceCIDR: "10.0.1.0/24" +# natGateway: +# # Pre-allocated NAT Gateway. Used with private subnets. +# id: "ngw-xxxxxxxx" +# # Exported output's name from another stack +# # Only specify either id or idFromStackOutput but not both +# #idFromStackOutput: myinfra-NATGateway1 +# +# # +# # Advanced: Managed private subnet with an existing EIP for kube-aws managed NGW +# # +# # kube-aws tries to reuse the EIP specified by eipAllocationId +# # by associating the EIP to a NGW managed by kube-aws. +# # Please be sure that kube-aws won't assign an EIP to an existing NGW, i.e. +# # either natGateway.id or eipAllocationId can be specified but not both. +# # +# - name: ManagedPrivateSubnetWithManagedNGWWithExistingEIP +# private: true +# availabilityZone: us-west-1a +# instanceCIDR: "10.0.1.0/24" +# natGateway: +# # Pre-allocated EIP for NAT Gateways. Used with private subnets. +# eipAllocationId: eipalloc-xxxxxxxx +# +# # +# # Advanced: Managed private subnet with an existing route table +# # +# # kube-aws tries to reuse the route table specified by id or idFromStackOutput +# # by assigning this subnet to the route table.
+# # +# # Please be sure that it's your responsibility to: +# # * Configure an AWS managed NAT or a NAT instance or an another NAT and +# # * Add a route to the NAT to the route table being reused +# # +# # i.e. kube-aws neither modify route table nor create other related resources like +# # ngw, route to nat gateway, eip for ngw, etc. +# # +# - name: ManagedPrivateSubnetWithExistingRouteTable +# private: true +# availabilityZone: us-west-1a +# instanceCIDR: "10.0.1.0/24" +# routeTable: +# # Pre-allocated route table +# id: "rtb-xxxxxxxx" +# # Exported output's name from another stack +# # Only specify either id or idFromStackOutput but not both +# #idFromStackOutput: myinfra-PrivateRouteTable1 +# +# # +# # Advanced: Managed public subnet with an existing route table +# # +# # kube-aws tries to reuse the route table specified by id or idFromStackOutput +# # by assigning this subnet to the route table. +# # +# # Please be sure that it's your responsibility to: +# # * Configure an internet gateway(IGW) and +# # * Attach the IGW to the VPC you're deploying to +# # * Add a route to the IGW to the route table being reused +# # +# # i.e. kube-aws neither modify route table nor create other related resources like +# # igw, route to igw, igw vpc attachment, etc. +# # +# - name: ManagedPublicSubnetWithExistingRouteTable +# availabilityZone: us-west-1a +# instanceCIDR: "10.0.1.0/24" +# routeTable: +# # Pre-allocated route table +# id: "rtb-xxxxxxxx" +# # Exported output's name from another stack +# # Only specify either id or idFromStackOutput but not both +# #idFromStackOutput: myinfra-PublicRouteTable1 + # CIDR for all service IP addresses # serviceCIDR: "10.3.0.0/24"