Commit

Manual cherry-pick: fix nil reference when hub-only controllers are disabled
zyjjay committed Nov 23, 2023
1 parent a5f3904 commit b1c2319
Showing 1 changed file with 140 additions and 0 deletions.
main.go
@@ -286,6 +286,12 @@ func main() {
        hubMgr = getHubManager(mgrOptionsBase, hubMgrHealthAddr, hubCfg, managedCfg)
    }

    log.Info("Adding controllers to managers")
    addControllers(mgrCtx, hubCfg, hubMgr, mgr)

    log.Info("Starting the controller managers")

    var wg sync.WaitGroup
@@ -800,3 +806,137 @@ func getFreeLocalAddr() (string, error) {

    return fmt.Sprintf("127.0.0.1:%d", l.Addr().(*net.TCPAddr).Port), nil
}

// addControllers sets up all controllers with their respective managers
func addControllers(ctx context.Context, hubCfg *rest.Config, hubMgr manager.Manager, managedMgr manager.Manager) {
    // Set up all controllers for the manager on the managed cluster
    var hubClient client.Client

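    // hubMgr is nil when the hub-only controllers are disabled. In that case,
    // build a standalone cache and client, scoped to the cluster namespace on
    // the hub, so the controllers below can still reach the hub cluster.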
    if hubMgr == nil {
        hubCache, err := cache.New(
            hubCfg, cache.Options{Namespaces: []string{tool.Options.ClusterNamespaceOnHub}, Scheme: scheme},
        )
        if err != nil {
            log.Error(err, "Failed to generate a cache to the hub cluster")
            os.Exit(1)
        }

        go func() {
            err := hubCache.Start(ctx)
            if err != nil {
                log.Error(err, "Failed to start the cache to the hub cluster")
                os.Exit(1)
            }
        }()

        hubClient, err = client.New(
            hubCfg, client.Options{Scheme: scheme, Cache: &client.CacheOptions{Reader: hubCache}},
        )

        if err != nil {
            log.Error(err, "Failed to generate a client to the hub cluster")
            os.Exit(1)
        }
    } else {
        hubClient = hubMgr.GetClient()
    }

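    // Build an event recorder that posts status-sync events to the cluster
    // namespace on the hub.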
    var kubeClientHub kubernetes.Interface = kubernetes.NewForConfigOrDie(hubCfg)

    eventBroadcasterHub := record.NewBroadcaster()

    eventBroadcasterHub.StartRecordingToSink(
        &corev1.EventSinkImpl{Interface: kubeClientHub.CoreV1().Events(tool.Options.ClusterNamespaceOnHub)},
    )

    hubRecorder := eventBroadcasterHub.NewRecorder(eventsScheme, v1.EventSource{Component: statussync.ControllerName})

    if err := (&statussync.PolicyReconciler{
        ClusterNamespaceOnHub: tool.Options.ClusterNamespaceOnHub,
        HubClient:             hubClient,
        HubRecorder:           hubRecorder,
        ManagedClient:         managedMgr.GetClient(),
        ManagedRecorder:       managedMgr.GetEventRecorderFor(statussync.ControllerName),
        Scheme:                managedMgr.GetScheme(),
        ConcurrentReconciles:  int(tool.Options.EvaluationConcurrency),
    }).SetupWithManager(managedMgr); err != nil {
        log.Error(err, "Unable to create the controller", "controller", "Policy")
        os.Exit(1)
    }

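    // Create a reconciler and event source driven by the dependency watcher;
    // the template-sync controller consumes these events in Setup below.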
    depReconciler, depEvents := depclient.NewControllerRuntimeSource()

    watcher, err := depclient.New(managedMgr.GetConfig(), depReconciler, nil)
    if err != nil {
        log.Error(err, "Unable to create dependency watcher")
        os.Exit(1)
    }

    instanceName, _ := os.Hostname() // on an error, instanceName will be empty, which is ok

    templateReconciler := &templatesync.PolicyReconciler{
        Client:               managedMgr.GetClient(),
        DynamicWatcher:       watcher,
        Scheme:               managedMgr.GetScheme(),
        Config:               managedMgr.GetConfig(),
        Recorder:             managedMgr.GetEventRecorderFor(templatesync.ControllerName),
        ClusterNamespace:     tool.Options.ClusterNamespace,
        Clientset:            kubernetes.NewForConfigOrDie(managedMgr.GetConfig()),
        InstanceName:         instanceName,
        DisableGkSync:        tool.Options.DisableGkSync,
        ConcurrentReconciles: int(tool.Options.EvaluationConcurrency),
    }

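    // Start the dependency watcher in the background; it must be running
    // before the template-sync controller is wired up below.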
    go func() {
        err := watcher.Start(ctx)
        if err != nil {
            panic(err)
        }
    }()

    // Wait until the dynamic watcher has started.
    <-watcher.Started()

    if err := templateReconciler.Setup(managedMgr, depEvents); err != nil {
        log.Error(err, "Unable to create the controller", "controller", templatesync.ControllerName)
        os.Exit(1)
    }

    // Set up all controllers for the manager on the hub cluster. When spec
    // sync is disabled, hubMgr was never created, so return early rather than
    // dereferencing a nil manager below.
    if tool.Options.DisableSpecSync {
        return
    }

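    // Build an event recorder that posts spec-sync events to the cluster
    // namespace on the managed cluster.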
    var kubeClient kubernetes.Interface = kubernetes.NewForConfigOrDie(managedMgr.GetConfig())

    eventBroadcaster := record.NewBroadcaster()

    eventBroadcaster.StartRecordingToSink(
        &corev1.EventSinkImpl{Interface: kubeClient.CoreV1().Events(tool.Options.ClusterNamespace)},
    )

    managedRecorder := eventBroadcaster.NewRecorder(eventsScheme, v1.EventSource{Component: specsync.ControllerName})

    if err = (&specsync.PolicyReconciler{
        HubClient:            hubClient,
        ManagedClient:        managedMgr.GetClient(),
        ManagedRecorder:      managedRecorder,
        Scheme:               hubMgr.GetScheme(),
        TargetNamespace:      tool.Options.ClusterNamespace,
        ConcurrentReconciles: int(tool.Options.EvaluationConcurrency),
    }).SetupWithManager(hubMgr); err != nil {
        log.Error(err, "Unable to create the controller", "controller", specsync.ControllerName)
        os.Exit(1)
    }

    if err = (&secretsync.SecretReconciler{
        Client:               hubClient,
        ManagedClient:        managedMgr.GetClient(),
        Scheme:               hubMgr.GetScheme(),
        TargetNamespace:      tool.Options.ClusterNamespace,
        ConcurrentReconciles: int(tool.Options.EvaluationConcurrency),
    }).SetupWithManager(hubMgr); err != nil {
        log.Error(err, "Unable to create the controller", "controller", secretsync.ControllerName)
        os.Exit(1)
    }
}
