diff --git a/hack/verify-flags/known-flags.txt b/hack/verify-flags/known-flags.txt
index 9bcf28b598..96c04c2e50 100644
--- a/hack/verify-flags/known-flags.txt
+++ b/hack/verify-flags/known-flags.txt
@@ -96,3 +96,4 @@ vrrp-password
 watch-namespace
 weak-stable-jobs
 whitelist-override-label
+config-file-path
diff --git a/ingress/controllers/gce/controller/cluster_manager.go b/ingress/controllers/gce/controller/cluster_manager.go
index be3e899f54..860c8e20ad 100644
--- a/ingress/controllers/gce/controller/cluster_manager.go
+++ b/ingress/controllers/gce/controller/cluster_manager.go
@@ -18,7 +18,9 @@ package controller
 
 import (
 	"fmt"
+	"io"
 	"net/http"
+	"os"
 	"time"
 
 	"k8s.io/contrib/ingress/controllers/gce/backends"
@@ -184,13 +186,13 @@ func defaultInstanceGroupName(clusterName string) string {
 	return fmt.Sprintf("%v-%v", instanceGroupPrefix, clusterName)
 }
 
-func getGCEClient() *gce.GCECloud {
+func getGCEClient(config io.Reader) *gce.GCECloud {
 	// Creating the cloud interface involves resolving the metadata server to get
 	// an oauth token. If this fails, the token provider assumes it's not on GCE.
 	// No errors are thrown. So we need to keep retrying till it works because
 	// we know we're on GCE.
 	for {
-		cloudInterface, err := cloudprovider.GetCloudProvider("gce", nil)
+		cloudInterface, err := cloudprovider.GetCloudProvider("gce", config)
 		if err == nil {
 			cloud := cloudInterface.(*gce.GCECloud)
 
@@ -217,15 +219,28 @@ func getGCEClient() *gce.GCECloud {
 // the kubernetes Service that serves the 404 page if no urls match.
 // - defaultHealthCheckPath: is the default path used for L7 health checks, eg: "/healthz"
 func NewClusterManager(
+	configFilePath string,
 	name string,
 	defaultBackendNodePort int64,
 	defaultHealthCheckPath string) (*ClusterManager, error) {
+
+	var config *os.File
+	var err error
+	if configFilePath != "" {
+		glog.Infof("Reading config from path %v", configFilePath)
+		config, err = os.Open(configFilePath)
+		if err != nil {
+			return nil, err
+		}
+		defer config.Close()
+	}
+
 	// TODO: Make this more resilient. Currently we create the cloud client
 	// and pass it through to all the pools. This makes unittesting easier.
 	// However if the cloud client suddenly fails, we should try to re-create it
 	// and continue.
-	cloud := getGCEClient()
+	cloud := getGCEClient(config)
+	glog.Infof("Successfully loaded cloudprovider using config %q", configFilePath)
 
 	// Names are fundamental to the cluster, the uid allocator makes sure names don't collide.
 	cluster := ClusterManager{ClusterNamer: &utils.Namer{name}}
diff --git a/ingress/controllers/gce/main.go b/ingress/controllers/gce/main.go
index 6095990e57..f4199147f8 100644
--- a/ingress/controllers/gce/main.go
+++ b/ingress/controllers/gce/main.go
@@ -110,6 +110,13 @@ var (
 
 	verbose = flags.Bool("verbose", false,
 		`If true, logs are displayed at V(4), otherwise V(2).`)
+
+	configFilePath = flags.String("config-file-path", "",
+		`Path to a file containing the gce config. If left unspecified this
+		controller only works with default zones.`)
+
+	healthzPort = flags.Int("healthz-port", lbApiPort,
+		`Port to run healthz server. Must match the health check port in yaml.`)
 )
 
 func registerHandlers(lbc *controller.LoadBalancerController) {
@@ -127,7 +134,7 @@ func registerHandlers(lbc *controller.LoadBalancerController) {
 		lbc.Stop(true)
 	})
 
-	glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%v", lbApiPort), nil))
+	glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%v", *healthzPort), nil))
 }
 
 func handleSigterm(lbc *controller.LoadBalancerController, deleteAll bool) {
@@ -196,7 +203,7 @@ func main() {
 		if err != nil {
 			glog.Fatalf("%v", err)
 		}
-		clusterManager, err = controller.NewClusterManager(name, defaultBackendNodePort, *healthCheckPath)
+		clusterManager, err = controller.NewClusterManager(*configFilePath, name, defaultBackendNodePort, *healthCheckPath)
 		if err != nil {
 			glog.Fatalf("%v", err)
 		}
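
For context, a minimal caller-side sketch of the new entry points. This is an illustration, not part of the change itself: the cluster name, node port, health-check path, and config file location are placeholder values, and in the real binary they come from flags and the default backend Service.

package main

import (
	"fmt"
	"net/http"

	"github.com/golang/glog"

	"k8s.io/contrib/ingress/controllers/gce/controller"
)

func main() {
	// Placeholder inputs; the real controller reads these from
	// --config-file-path and --healthz-port.
	configFilePath := "/etc/gce.conf" // "" means no config file, default zones only
	healthzPort := 8081

	// NewClusterManager now takes the config file path first, opens it,
	// and hands the resulting io.Reader to the GCE cloudprovider.
	clusterManager, err := controller.NewClusterManager(configFilePath, "my-cluster", 30301, "/healthz")
	if err != nil {
		glog.Fatalf("%v", err)
	}
	_ = clusterManager

	// The healthz/debug server now listens on the configurable port
	// instead of the fixed lbApiPort.
	glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%v", healthzPort), nil))
}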