From c705400955aa9225c8f6b50de50fbee3f0e0811f Mon Sep 17 00:00:00 2001
From: Colin Hom
Date: Tue, 24 May 2016 14:58:19 -0700
Subject: [PATCH 1/3] config: use Subnets for existing vpc validation

---
 multi-node/aws/pkg/config/config.go | 30 +++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/multi-node/aws/pkg/config/config.go b/multi-node/aws/pkg/config/config.go
index 2e445e828e..e0cd330fb6 100644
--- a/multi-node/aws/pkg/config/config.go
+++ b/multi-node/aws/pkg/config/config.go
@@ -570,10 +570,7 @@ func (c *Cluster) ValidateExistingVPC(existingVPCCIDR string, existingSubnetCIDR
             )
         }
     }
-    _, instanceNet, err := net.ParseCIDR(c.InstanceCIDR)
-    if err != nil {
-        return fmt.Errorf("error parsing instances cidr %s : %v", c.InstanceCIDR, err)
-    }
+
     _, vpcNet, err := net.ParseCIDR(c.VPCCIDR)
     if err != nil {
         return fmt.Errorf("error parsing vpc cidr %s: %v", c.VPCCIDR, err)
@@ -588,14 +585,23 @@
         )
     }

-    //Loop through all existing subnets in the VPC and look for conflicting CIDRS
-    for _, existingSubnet := range existingSubnets {
-        if cidrOverlap(instanceNet, existingSubnet) {
-            return fmt.Errorf(
-                "instance cidr (%s) conflicts with existing subnet cidr=%s",
-                instanceNet,
-                existingSubnet,
-            )
+    // Loop through all subnets
+    // Note: legacy instanceCIDR/availabilityZone stuff has already been marshalled into this format
+    for _, subnet := range c.Subnets {
+        _, instanceNet, err := net.ParseCIDR(subnet.InstanceCIDR)
+        if err != nil {
+            return fmt.Errorf("error parsing instance cidr %s: %v", subnet.InstanceCIDR, err)
+        }
+
+        //Loop through all existing subnets in the VPC and look for conflicting CIDRS
+        for _, existingSubnet := range existingSubnets {
+            if cidrOverlap(instanceNet, existingSubnet) {
+                return fmt.Errorf(
+                    "instance cidr (%s) conflicts with existing subnet cidr=%s",
+                    instanceNet,
+                    existingSubnet,
+                )
+            }
         }
     }

From d470f08f2d334dccf04d7cd1b7666a6f86db51a9 Mon Sep 17 00:00:00 2001
From: Colin Hom
Date: Tue, 24 May 2016 19:41:30 -0700
Subject: [PATCH 2/3] config: add omitempty yaml tags for Cluster

---
 multi-node/aws/pkg/config/config.go | 64 ++++++++++++++---------------
 1 file changed, 32 insertions(+), 32 deletions(-)

diff --git a/multi-node/aws/pkg/config/config.go b/multi-node/aws/pkg/config/config.go
index e0cd330fb6..1a166d5c72 100644
--- a/multi-node/aws/pkg/config/config.go
+++ b/multi-node/aws/pkg/config/config.go
@@ -98,41 +98,41 @@ func ClusterFromBytes(data []byte) (*Cluster, error) {
 }

 type Cluster struct {
-    ClusterName string `yaml:"clusterName"`
-    ExternalDNSName string `yaml:"externalDNSName"`
-    KeyName string `yaml:"keyName"`
-    Region string `yaml:"region"`
-    AvailabilityZone string `yaml:"availabilityZone"`
-    ReleaseChannel string `yaml:"releaseChannel"`
-    ControllerInstanceType string `yaml:"controllerInstanceType"`
-    ControllerRootVolumeSize int `yaml:"controllerRootVolumeSize"`
-    WorkerCount int `yaml:"workerCount"`
-    WorkerInstanceType string `yaml:"workerInstanceType"`
-    WorkerRootVolumeSize int `yaml:"workerRootVolumeSize"`
-    WorkerSpotPrice string `yaml:"workerSpotPrice"`
-    VPCID string `yaml:"vpcId"`
-    RouteTableID string `yaml:"routeTableId"`
-    VPCCIDR string `yaml:"vpcCIDR"`
-    InstanceCIDR string `yaml:"instanceCIDR"`
-    ControllerIP string `yaml:"controllerIP"`
-    PodCIDR string `yaml:"podCIDR"`
-    ServiceCIDR string `yaml:"serviceCIDR"`
-    DNSServiceIP string `yaml:"dnsServiceIP"`
-    K8sVer string `yaml:"kubernetesVersion"`
-    HyperkubeImageRepo string `yaml:"hyperkubeImageRepo"`
-    KMSKeyARN string `yaml:"kmsKeyArn"`
-    CreateRecordSet bool `yaml:"createRecordSet"`
-    RecordSetTTL int `yaml:"recordSetTTL"`
-    HostedZone string `yaml:"hostedZone"`
-    HostedZoneID string `yaml:"hostedZoneId"`
-    StackTags map[string]string `yaml:"stackTags"`
-    UseCalico bool `yaml:"useCalico"`
-    Subnets []Subnet `yaml:"subnets"`
+    ClusterName string `yaml:"clusterName,omitempty"`
+    ExternalDNSName string `yaml:"externalDNSName,omitempty"`
+    KeyName string `yaml:"keyName,omitempty"`
+    Region string `yaml:"region,omitempty"`
+    AvailabilityZone string `yaml:"availabilityZone,omitempty"`
+    ReleaseChannel string `yaml:"releaseChannel,omitempty"`
+    ControllerInstanceType string `yaml:"controllerInstanceType,omitempty"`
+    ControllerRootVolumeSize int `yaml:"controllerRootVolumeSize,omitempty"`
+    WorkerCount int `yaml:"workerCount,omitempty"`
+    WorkerInstanceType string `yaml:"workerInstanceType,omitempty"`
+    WorkerRootVolumeSize int `yaml:"workerRootVolumeSize,omitempty"`
+    WorkerSpotPrice string `yaml:"workerSpotPrice,omitempty"`
+    VPCID string `yaml:"vpcId,omitempty"`
+    RouteTableID string `yaml:"routeTableId,omitempty"`
+    VPCCIDR string `yaml:"vpcCIDR,omitempty"`
+    InstanceCIDR string `yaml:"instanceCIDR,omitempty"`
+    ControllerIP string `yaml:"controllerIP,omitempty"`
+    PodCIDR string `yaml:"podCIDR,omitempty"`
+    ServiceCIDR string `yaml:"serviceCIDR,omitempty"`
+    DNSServiceIP string `yaml:"dnsServiceIP,omitempty"`
+    K8sVer string `yaml:"kubernetesVersion,omitempty"`
+    HyperkubeImageRepo string `yaml:"hyperkubeImageRepo,omitempty"`
+    KMSKeyARN string `yaml:"kmsKeyArn,omitempty"`
+    CreateRecordSet bool `yaml:"createRecordSet,omitempty"`
+    RecordSetTTL int `yaml:"recordSetTTL,omitempty"`
+    HostedZone string `yaml:"hostedZone,omitempty"`
+    HostedZoneID string `yaml:"hostedZoneId,omitempty"`
+    StackTags map[string]string `yaml:"stackTags,omitempty"`
+    UseCalico bool `yaml:"useCalico,omitempty"`
+    Subnets []Subnet `yaml:"subnets,omitempty"`
 }

 type Subnet struct {
-    AvailabilityZone string `yaml:"availabilityZone"`
-    InstanceCIDR string `yaml:"instanceCIDR"`
+    AvailabilityZone string `yaml:"availabilityZone,omitempty"`
+    InstanceCIDR string `yaml:"instanceCIDR,omitempty"`
 }

 const (

From 43e7ac37bf713ac27f4191361cdd322a9e420827 Mon Sep 17 00:00:00 2001
From: Colin Hom
Date: Tue, 24 May 2016 18:50:18 -0700
Subject: [PATCH 3/3] cluster: unit tests for multi-subnet / existing vpc

---
 multi-node/aws/pkg/cluster/cluster_test.go | 61 +++++++++++++++++++---
 1 file changed, 53 insertions(+), 8 deletions(-)

diff --git a/multi-node/aws/pkg/cluster/cluster_test.go b/multi-node/aws/pkg/cluster/cluster_test.go
index b2cd77c3b1..3ee77e10ac 100644
--- a/multi-node/aws/pkg/cluster/cluster_test.go
+++ b/multi-node/aws/pkg/cluster/cluster_test.go
@@ -11,16 +11,44 @@ import (
     "github.com/aws/aws-sdk-go/service/ec2"
     "github.com/aws/aws-sdk-go/service/route53"
     "github.com/coreos/coreos-kubernetes/multi-node/aws/pkg/config"
+    yaml "gopkg.in/yaml.v2"
 )

-const minimalConfigYaml = `
+/*
+TODO(colhom): when we fully deprecate instanceCIDR/availabilityZone, this block of
+logic will go away and be replaced by a single constant string
+*/
+func defaultConfigValues(t *testing.T, configYaml string) string {
+    defaultYaml := `
 externalDNSName: test.staging.core-os.net
 keyName: test-key-name
 region: us-west-1
-availabilityZone: us-west-1c
 clusterName: test-cluster-name
 kmsKeyArn: "arn:aws:kms:us-west-1:xxxxxxxxx:key/xxxxxxxxxxxxxxxxxxx"
 `
+    yamlStr := defaultYaml + configYaml
+
+    c := config.Cluster{}
+    if err := yaml.Unmarshal([]byte(yamlStr), &c); err != nil {
+        t.Errorf("failed unmarshalling config yaml: %v :\n%s", err, yamlStr)
+    }
+
+    if len(c.Subnets) > 0 {
+        for i := range c.Subnets {
+            c.Subnets[i].AvailabilityZone = fmt.Sprintf("dummy-az-%d", i)
+        }
+    } else {
+        //Legacy behavior
+        c.AvailabilityZone = "dummy-az-0"
+    }
+
+    out, err := yaml.Marshal(&c)
+    if err != nil {
+        t.Errorf("error marshalling cluster: %v", err)
+    }
+
+    return string(out)
+}

 type VPC struct {
     cidr string
@@ -103,6 +131,14 @@
 vpcCIDR: 192.168.1.0/24
 vpcId: vpc-xxx2
 instanceCIDR: 192.168.1.50/28
 controllerIP: 192.168.1.50
+`, `
+vpcCIDR: 192.168.1.0/24
+vpcId: vpc-xxx2
+controllerIP: 192.168.1.5
+subnets:
+  - instanceCIDR: 192.168.1.0/28
+  - instanceCIDR: 192.168.1.32/28
+  - instanceCIDR: 192.168.1.64/28
 `,
     }
@@ -131,6 +167,14 @@
 instanceCIDR: 192.168.1.100/26 #instance cidr conflicts with existing subnet
 controllerIP: 192.168.1.80
 vpcId: vpc-xxx2
 routeTableId: rtb-xxxxxx
+`, `
+vpcCIDR: 192.168.1.0/24
+controllerIP: 192.168.1.80
+vpcId: vpc-xxx2
+routeTableId: rtb-xxxxxx
+subnets:
+  - instanceCIDR: 192.168.1.100/26 #instance cidr conflicts with existing subnet
+  - instanceCIDR: 192.168.1.0/26
 `,
     }
@@ -156,7 +200,7 @@ routeTableId: rtb-xxxxxx
     }

     validateCluster := func(networkConfig string) error {
-        configBody := minimalConfigYaml + networkConfig
+        configBody := defaultConfigValues(t, networkConfig)
         clusterConfig, err := config.ClusterFromBytes([]byte(configBody))
         if err != nil {
             t.Errorf("could not get valid cluster config: %v", err)
         }
@@ -185,7 +229,7 @@ routeTableId: rtb-xxxxxx

 func TestValidateKeyPair(t *testing.T) {

-    clusterConfig, err := config.ClusterFromBytes([]byte(minimalConfigYaml))
+    clusterConfig, err := config.ClusterFromBytes([]byte(defaultConfigValues(t, "")))
     if err != nil {
         t.Errorf("could not get valid cluster config: %v", err)
     }
@@ -241,6 +285,7 @@ func (r53 dummyR53Service) ListResourceRecordSets(input *route53.ListResourceRec
     }
     return output, nil
 }
+
 func (r53 dummyR53Service) GetHostedZone(input *route53.GetHostedZoneInput) (*route53.GetHostedZoneOutput, error) {
     for _, zone := range r53.HostedZones {
         if zone.Id == aws.StringValue(input.Id) {
@@ -252,9 +297,9 @@ func (r53 dummyR53Service) GetHostedZone(input *route53.GetHostedZoneInput) (*ro
             }, nil
         }
     }
-
     return nil, fmt.Errorf("dummy route53 service: no hosted zone with id '%s'", aws.StringValue(input.Id))
 }
+
 func TestValidateDNSConfig(t *testing.T) {
     r53 := dummyR53Service{
         HostedZones: []Zone{
@@ -321,7 +366,7 @@ hostedZone: unicorns.core-os.net #non-existant hostedZone DNS name
     }

     for _, validConfig := range validDNSConfigs {
-        configBody := minimalConfigYaml + validConfig
+        configBody := defaultConfigValues(t, validConfig)
         clusterConfig, err := config.ClusterFromBytes([]byte(configBody))
         if err != nil {
             t.Errorf("could not get valid cluster config: %v", err)
@@ -335,7 +380,7 @@ hostedZone: unicorns.core-os.net #non-existant hostedZone DNS name
     }

     for _, invalidConfig := range invalidDNSConfigs {
-        configBody := minimalConfigYaml + invalidConfig
+        configBody := defaultConfigValues(t, invalidConfig)
         clusterConfig, err := config.ClusterFromBytes([]byte(configBody))
         if err != nil {
             t.Errorf("could not get valid cluster config: %v", err)
@@ -427,7 +472,7 @@ stackTags:
     }

     for _, testCase := range testCases {
-        configBody := minimalConfigYaml + testCase.clusterYaml
+        configBody := defaultConfigValues(t, testCase.clusterYaml)
         clusterConfig, err := config.ClusterFromBytes([]byte(configBody))
         if err != nil {
             t.Errorf("could not get valid cluster config: %v", err)
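Note: the per-subnet validation added in PATCH 1/3 calls a cidrOverlap helper that lies outside the context lines of this series. For reference only (not part of the patches), a minimal sketch of such a check, assuming both arguments are *net.IPNet values as the call sites suggest, could look like:

package config

import "net"

// cidrOverlap reports whether two CIDR blocks share any addresses.
// Because CIDR blocks are prefix-aligned, two networks overlap exactly
// when one of them contains the other's base address.
func cidrOverlap(a, b *net.IPNet) bool {
    return a.Contains(b.IP) || b.Contains(a.IP)
}

The two Contains checks cover partial and full containment in either direction, which is all the overlap cases that exist for prefix-aligned networks.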