Merge pull request #12 from marcomarinodev/cluster-status-api
cluster status api + tests
gianlucam76 authored May 20, 2024
2 parents a97c69d + 91b1ba4 commit 393757e
Showing 11 changed files with 543 additions and 68 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -31,3 +31,6 @@ manager_pull_policy.yaml-e
 manager_auth_proxy_patch.yaml-e
 
 version.txt
+
+# vs code
+.vscode
1 change: 0 additions & 1 deletion .golangci.yaml
@@ -97,7 +97,6 @@ linters:
   - containedctx
   - bodyclose
   - dogsled
-  - dupl
   - durationcheck
   - errcheck
   - errname
2 changes: 1 addition & 1 deletion Makefile
@@ -293,7 +293,7 @@ build: generate fmt vet ## Build manager binary.
 
 .PHONY: run
 run: manifests generate fmt vet ## Run a controller from your host.
-	go run ./main.go
+	go run cmd/main.go
 
 .PHONY: docker-build
 docker-build: ## Build docker image with the manager.
18 changes: 18 additions & 0 deletions cmd/main.go
@@ -143,6 +143,7 @@ func main() {
 	setupChecks(mgr)
 
 	go startClusterController(ctx, mgr, setupLog)
+	go startClusterSummaryController(mgr)
 
 	server.InitializeManagerInstance(ctx, mgr.GetClient(), scheme,
 		httpPort, ctrl.Log.WithName("gin"))
@@ -314,3 +315,20 @@ func getSveltosClusterReconciler(mgr manager.Manager) *controller.SveltosClusterReconciler {
 		ConcurrentReconciles: concurrentReconciles,
 	}
 }
+
+func startClusterSummaryController(mgr manager.Manager) {
+	clusterSummaryReconciler := getClusterSummaryReconciler(mgr)
+	err := clusterSummaryReconciler.SetupWithManager(mgr)
+	if err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "ClusterSummary")
+		os.Exit(1)
+	}
+}
+
+func getClusterSummaryReconciler(mgr manager.Manager) *controller.ClusterSummaryReconciler {
+	return &controller.ClusterSummaryReconciler{
+		Client:               mgr.GetClient(),
+		Scheme:               mgr.GetScheme(),
+		ConcurrentReconciles: concurrentReconciles,
+	}
+}
89 changes: 89 additions & 0 deletions internal/controller/clustersummary_controller.go
@@ -0,0 +1,89 @@
+package controller
+
+import (
+	"context"
+
+	"github.com/go-logr/logr"
+	"github.com/pkg/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	logs "github.com/projectsveltos/libsveltos/lib/logsettings"
+	"github.com/projectsveltos/ui-backend/internal/server"
+
+	configv1alpha1 "github.com/projectsveltos/addon-controller/api/v1alpha1"
+)
+
+type ClusterSummaryReconciler struct {
+	client.Client
+	Scheme               *runtime.Scheme
+	ConcurrentReconciles int
+}
+
+func (r *ClusterSummaryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	logger := ctrl.LoggerFrom(ctx)
+	logger.V(logs.LogInfo).Info("Reconciling")
+
+	clusterSummary := &configv1alpha1.ClusterSummary{}
+	if err := r.Get(ctx, req.NamespacedName, clusterSummary); err != nil {
+		if apierrors.IsNotFound(err) {
+			r.removeClusterSummary(req.Namespace, req.Name, logger)
+			return reconcile.Result{}, nil
+		}
+		logger.Error(err, "Failed to fetch ClusterSummary")
+		return reconcile.Result{}, errors.Wrapf(
+			err,
+			"Failed to fetch ClusterSummary %s",
+			req.NamespacedName,
+		)
+	}
+
+	// Handle deleted ClusterSummary
+	if !clusterSummary.DeletionTimestamp.IsZero() {
+		r.removeClusterSummary(clusterSummary.Namespace, clusterSummary.Name, logger)
+	} else {
+		// Handle non-deleted ClusterSummary
+		r.reconcileNormal(clusterSummary, logger)
+	}
+
+	return reconcile.Result{}, nil
+}
+
+func (r *ClusterSummaryReconciler) removeClusterSummary(clusterSummaryNamespace, clusterSummaryName string, logger logr.Logger) {
+	logger.V(logs.LogInfo).Info("Reconciling ClusterSummary delete")
+
+	manager := server.GetManagerInstance()
+
+	manager.RemoveClusterProfileStatus(clusterSummaryNamespace, clusterSummaryName)
+
+	logger.V(logs.LogInfo).Info("Reconcile delete success")
+}
+
+func (r *ClusterSummaryReconciler) reconcileNormal(clusterSummary *configv1alpha1.ClusterSummary, logger logr.Logger) {
+	logger.V(logs.LogInfo).Info("Reconciling new ClusterSummary")
+
+	manager := server.GetManagerInstance()
+
+	manager.AddClusterProfileStatus(clusterSummary)
+
+	logger.V(logs.LogInfo).Info("Reconciling new ClusterSummary success")
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *ClusterSummaryReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	_, err := ctrl.NewControllerManagedBy(mgr).
+		For(&configv1alpha1.ClusterSummary{}).
+		WithOptions(controller.Options{
+			MaxConcurrentReconciles: r.ConcurrentReconciles,
+		}).
+		Build(r)
+	if err != nil {
+		return errors.Wrap(err, "error creating controller")
+	}
+
+	return nil
+}
41 changes: 10 additions & 31 deletions internal/server/addons.go
@@ -273,52 +273,31 @@ func addDeployedResourcesForFeature(profileName string,
 	}
 }
 
-func getHelmReleaseInRange(helmReleases []HelmRelease, limit, skip int) ([]HelmRelease, error) {
-	if len(helmReleases) == 0 {
-		return helmReleases, nil
-	}
-
+func getSliceInRange[T any](items []T, limit, skip int) ([]T, error) {
 	if skip < 0 {
 		return nil, errors.New("skip cannot be negative")
 	}
 	if limit < 0 {
 		return nil, errors.New("limit cannot be negative")
 	}
-	if skip >= len(helmReleases) {
+	if skip >= len(items) {
 		return nil, errors.New("skip cannot be greater than or equal to the length of the slice")
 	}
 
 	// Adjust limit based on slice length and skip
 	adjustedLimit := limit
-	if skip+limit > len(helmReleases) {
-		adjustedLimit = len(helmReleases) - skip
+	if skip+limit > len(items) {
+		adjustedLimit = len(items) - skip
 	}
 
 	// Use slicing to extract the desired sub-slice
-	return helmReleases[skip : skip+adjustedLimit], nil
+	return items[skip : skip+adjustedLimit], nil
 }
 
-func getResourcesInRange(resources []Resource, limit, skip int) ([]Resource, error) {
-	if len(resources) == 0 {
-		return resources, nil
-	}
-
-	if skip < 0 {
-		return nil, errors.New("skip cannot be negative")
-	}
-	if limit < 0 {
-		return nil, errors.New("limit cannot be negative")
-	}
-	if skip >= len(resources) {
-		return nil, errors.New("skip cannot be greater than or equal to the length of the slice")
-	}
-
-	// Adjust limit based on slice length and skip
-	adjustedLimit := limit
-	if skip+limit > len(resources) {
-		adjustedLimit = len(resources) - skip
-	}
+func getHelmReleaseInRange(helmReleases []HelmRelease, limit, skip int) ([]HelmRelease, error) {
+	return getSliceInRange(helmReleases, limit, skip)
+}
 
-	// Use slicing to extract the desired sub-slice
-	return resources[skip : skip+adjustedLimit], nil
+func getResourcesInRange(resources []Resource, limit, skip int) ([]Resource, error) {
+	return getSliceInRange(resources, limit, skip)
 }
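
The generic getSliceInRange now carries the validation and clamping logic for both wrappers. A table-driven test makes the pagination semantics concrete; the sketch below is illustrative rather than part of this commit, and assumes it sits beside addons.go in the internal/server package:

package server

import "testing"

func TestGetSliceInRange(t *testing.T) {
	items := []int{10, 20, 30, 40, 50}

	// skip=1, limit=2 selects the second and third elements.
	got, err := getSliceInRange(items, 2, 1)
	if err != nil || len(got) != 2 || got[0] != 20 || got[1] != 30 {
		t.Fatalf("unexpected page: %v (err: %v)", got, err)
	}

	// A limit that overshoots the end is clamped: skip=3, limit=10 yields {40, 50}.
	got, err = getSliceInRange(items, 10, 3)
	if err != nil || len(got) != 2 || got[0] != 40 {
		t.Fatalf("unexpected page: %v (err: %v)", got, err)
	}

	// Skipping past the end is rejected rather than returning an empty page.
	if _, err = getSliceInRange(items, 2, 5); err == nil {
		t.Fatal("expected an error for out-of-range skip")
	}
}

Note one behavioral change: the old getHelmReleaseInRange and getResourcesInRange returned an empty input slice unchanged, while the generic version hits the skip >= len(items) check first, so an empty input now returns an error even with skip=0.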
44 changes: 44 additions & 0 deletions internal/server/clustersummary_status.go
@@ -0,0 +1,44 @@
+package server
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+)
+
+// getMapInRange extracts a subset of key-value pairs from the given map, skipping the first 'skip' pairs and then taking up to 'limit' pairs.
+func getMapInRange[K comparable, V any](m map[K]V, limit, skip int) (map[K]V, error) {
+	if skip < 0 {
+		return nil, errors.New("skip cannot be negative")
+	}
+	if limit < 0 {
+		return nil, errors.New("limit cannot be negative")
+	}
+	if skip >= len(m) {
+		return nil, errors.New("skip cannot be greater than or equal to the length of the map")
+	}
+
+	// Extract keys and sort them
+	keys := make([]K, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Slice(keys, func(i, j int) bool {
+		return fmt.Sprintf("%v", keys[i]) < fmt.Sprintf("%v", keys[j])
+	})
+
+	// Create a new map for the result
+	result := make(map[K]V)
+
+	// Iterate over the sorted keys and collect the desired key-value pairs
+	for i := skip; i < skip+limit && i < len(keys); i++ {
+		k := keys[i]
+		result[k] = m[k]
+	}
+
+	return result, nil
+}
+
+func getProfileStatusesInRange(profileStatuses map[string][]ClusterFeatureSummary, limit, skip int) (map[string][]ClusterFeatureSummary, error) {
+	return getMapInRange(profileStatuses, limit, skip)
+}
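
Because Go randomizes map iteration order, getMapInRange sorts the keys (by their fmt.Sprintf("%v") rendering) before slicing, so the same limit/skip pair always yields the same page. A hypothetical test in the same package, not part of this commit, illustrates the contract:

package server

import "testing"

func TestGetMapInRange(t *testing.T) {
	m := map[string]int{"a": 1, "b": 2, "c": 3, "d": 4}

	// Keys sort to ["a" "b" "c" "d"], so skip=1, limit=2 selects "b" and "c".
	page, err := getMapInRange(m, 2, 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(page) != 2 || page["b"] != 2 || page["c"] != 3 {
		t.Fatalf("unexpected page: %v", page)
	}

	// As with the slice helper, skipping past the end is an error.
	if _, err := getMapInRange(m, 2, 4); err == nil {
		t.Fatal("expected an error for out-of-range skip")
	}
}

One caveat: for non-string keys the fmt.Sprintf comparison gives lexicographic rather than numeric order (e.g. 10 sorts before 2), which is deterministic but can surprise.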
42 changes: 42 additions & 0 deletions internal/server/http.go
@@ -173,6 +173,46 @@
 		// Return JSON response
 		c.JSON(http.StatusOK, response)
 	}
+
+	getClusterStatus = func(c *gin.Context) {
+		ginLogger.V(logs.LogDebug).Info("get cluster status")
+
+		limit, skip := getLimitAndSkipFromQuery(c)
+		namespace, name, clusterType := getClusterFromQuery(c)
+		ginLogger.V(logs.LogDebug).Info(fmt.Sprintf("cluster %s:%s/%s", clusterType, namespace, name))
+		ginLogger.V(logs.LogDebug).Info(fmt.Sprintf("limit %d skip %d", limit, skip))
+
+		manager := GetManagerInstance()
+		clusterProfileStatuses := manager.GetClusterProfileStatusesByCluster(&namespace, &name, clusterType)
+		profiles := make(map[string][]ClusterFeatureSummary, len(clusterProfileStatuses))
+
+		for _, clusterProfileStatus := range clusterProfileStatuses {
+			if _, ok := profiles[*clusterProfileStatus.Name]; !ok {
+				profiles[*clusterProfileStatus.Name] = make([]ClusterFeatureSummary, 0)
+			}
+
+			for _, summary := range clusterProfileStatus.Summary {
+				// Add the summary only if its status is failing, i.e. anything other than Provisioned (Removed counts as failing too)
+				for _, failingClusterSummaryType := range failingClusterSummaryTypes {
+					if summary.Status == failingClusterSummaryType {
+						profiles[*clusterProfileStatus.Name] = append(profiles[*clusterProfileStatus.Name], summary)
+						break
+					}
+				}
+			}
+		}
+
+		result, err := getProfileStatusesInRange(profiles, limit, skip)
+		if err != nil {
+			ginLogger.V(logs.LogInfo).Info(fmt.Sprintf("bad request %s: %v", c.Request.URL, err))
+			_ = c.AbortWithError(http.StatusBadRequest, err)
+			return
+		}
+
+		c.JSON(http.StatusOK, gin.H{
+			"clusterName": name,
+			"profiles":    result,
+		})
+	}
 )
 
 func (m *instance) start(ctx context.Context, port string, logger logr.Logger) {
@@ -189,6 +229,8 @@ func (m *instance) start(ctx context.Context, port string, logger logr.Logger) {
 	r.GET("/helmcharts", getDeployedHelmCharts)
 	// Return resources deployed in a given managed cluster
 	r.GET("/resources", getDeployedResources)
+	// Return the specified cluster status
+	r.GET("/getClusterStatus", getClusterStatus)
 
 	errCh := make(chan error)
 
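
With the route registered, the status endpoint can be exercised over plain HTTP. A minimal client sketch follows; the port and the query-parameter names (namespace, name, type, limit, skip) are assumptions inferred from the helper names getClusterFromQuery and getLimitAndSkipFromQuery, not something this diff pins down, so check those helpers for the exact contract:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical URL: host, port, and parameter names are assumptions.
	url := "http://localhost:8080/getClusterStatus?namespace=default&name=my-cluster&type=Capi&limit=10&skip=0"

	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}

	// On success the handler replies with {"clusterName": "...", "profiles": {...}},
	// where profiles maps each profile name to its failing feature summaries.
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}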