Handle policy dependencies of Gatekeeper types #105

Merged
115 changes: 77 additions & 38 deletions controllers/templatesync/template_sync.go
@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"regexp"
"sort"
"strings"
"time"

@@ -1135,71 +1136,112 @@ func (r *PolicyReconciler) cleanUpExcessTemplates(
return errorList.Aggregate()
}

// processDependencies iterates through all dependencies of a template and returns an array of any that are not met
const (
DepFailNoAPIMapping = "Could not find an API mapping for the dependency"
DepFailObjNotFound = "Dependency object not found"
DepFailGet = "Failed to get the dependency object"
DepFailCompNotFound = "Failed to find complianceState on the dependency object"
DepFailWrongCompliance = "Compliance mismatch on the dependency object"
)

// processDependencies iterates through all dependencies of a template and returns a map of
// unmet dependencies to the reason that dependency was not satisfied.
func (r *PolicyReconciler) processDependencies(
ctx context.Context,
dClient dynamic.Interface,
discoveryClient discovery.DiscoveryInterface,
templateDeps map[depclient.ObjectIdentifier]string,
tLogger logr.Logger,
) []depclient.ObjectIdentifier {
var dependencyFailures []depclient.ObjectIdentifier
) map[depclient.ObjectIdentifier]string {
dependencyFailures := make(map[depclient.ObjectIdentifier]string)

for dep := range templateDeps {
depGvk := schema.GroupVersionKind{
Group: dep.Group,
Version: dep.Version,
Kind: dep.Kind,
}

rsrc, _, err := utils.GVRFromGVK(discoveryClient, depGvk)
rsrc, namespaced, err := utils.GVRFromGVK(discoveryClient, dep.GroupVersionKind())
if err != nil {
tLogger.Error(err, "Could not find an API mapping for the dependency", "object", dep)

dependencyFailures = append(dependencyFailures, dep)
dependencyFailures[dep] = DepFailNoAPIMapping
tLogger.Error(err, dependencyFailures[dep], "object", dep)

continue
}

// set up namespace for replicated policy dependencies
ns := dep.Namespace
if ns == "" {
ns = r.ClusterNamespace
}
var res dynamic.ResourceInterface

// query object and compare compliance status to desired
res := dClient.Resource(rsrc).Namespace(ns)
if namespaced {
ns := dep.Namespace
if ns == "" && dep.Group == policiesv1.GroupVersion.Group {
// ocm policies should always be in the cluster namespace
ns = r.ClusterNamespace
}

res = dClient.Resource(rsrc).Namespace(ns)
} else {
res = dClient.Resource(rsrc)
}

depObj, err := res.Get(ctx, dep.Name, metav1.GetOptions{})
if err != nil {
tLogger.Info("Failed to get dependency object", "object", dep)
if k8serrors.IsNotFound(err) {
if dep.Group == utils.GvkConstraintTemplate.Group && templateDeps[dep] != "Compliant" {
tLogger.V(1).Info("ConstraintTemplate 'NonCompliant' dependency satisfied", "object", dep)

dependencyFailures = append(dependencyFailures, dep)
} else {
continue
}

dependencyFailures[dep] = DepFailObjNotFound

tLogger.V(1).Info("Dependency not satisfied", "reason", DepFailObjNotFound, "object", dep)

continue
} else if err != nil {
dependencyFailures[dep] = DepFailGet

tLogger.Error(err, DepFailGet, "object", dep)

continue
}

switch dep.Group {
case utils.GvkConstraintTemplate.Group:
if templateDeps[dep] != "Compliant" {
// The ConstraintTemplate was found, but the policy wants it to not be found
dependencyFailures[dep] = DepFailWrongCompliance
}
case utils.GConstraint:
violations, found, err := unstructured.NestedInt64(depObj.Object, "status", "totalViolations")
if err != nil || !found {
// Note that not finding the field is *not* considered "Compliant"
dependencyFailures[dep] = DepFailCompNotFound
} else if (violations == 0) != (templateDeps[dep] == "Compliant") {
dependencyFailures[dep] = DepFailWrongCompliance
}
default:
depCompliance, found, err := unstructured.NestedString(depObj.Object, "status", "compliant")
if err != nil || !found {
tLogger.Info("Failed to get compliance for dependency object", "object", dep)

dependencyFailures = append(dependencyFailures, dep)
dependencyFailures[dep] = DepFailCompNotFound
} else if depCompliance != templateDeps[dep] {
tLogger.Info("Compliance mismatch for dependency object", "object", dep)

dependencyFailures = append(dependencyFailures, dep)
dependencyFailures[dep] = DepFailWrongCompliance
}
}

if reason, failed := dependencyFailures[dep]; failed {
tLogger.V(1).Info("Dependency not satisfied", "reason", reason, "object", dep)
} else {
tLogger.V(1).Info("Dependency satisfied", "object", dep)
}
}

return dependencyFailures
}

// generatePendingMsg formats the list of failed dependencies into a readable error.
// Example: `Dependencies were not satisfied: 1 is still pending (FooPolicy foo)`
func generatePendingMsg(dependencyFailures []depclient.ObjectIdentifier) string {
names := make([]string, len(dependencyFailures))
for i, dep := range dependencyFailures {
names[i] = fmt.Sprintf("%s %s", dep.Kind, dep.Name)
func generatePendingMsg(dependencyFailures map[depclient.ObjectIdentifier]string) string {
names := make([]string, 0, len(dependencyFailures))
for dep := range dependencyFailures {
names = append(names, fmt.Sprintf("%s %s", dep.Kind, dep.Name))
}

sort.Strings(names)

nameStr := strings.Join(names, ", ")

fmtStr := "Dependencies were not satisfied: %d are still pending (%s)"
@@ -1523,13 +1565,10 @@ func finalizerCleanup(
continue
}

// Instantiate a dynamic client
res := dClient.Resource(rsrc)

// Delete clusterwide objects
if !namespaced {
// Delete object, ignoring not found errors
err := res.Delete(ctx, tName, metav1.DeleteOptions{})
err := dClient.Resource(rsrc).Delete(ctx, tName, metav1.DeleteOptions{})
if err != nil && !k8serrors.IsNotFound(err) {
policySystemErrorsCounter.WithLabelValues(pol.Name, tName, "delete-error").Inc()

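Gatekeeper constraints expose no status.compliant field, so the new GConstraint branch in processDependencies derives compliance from status.totalViolations: zero violations counts as Compliant, a mismatch with the compliance the policy declared marks the dependency as unmet, and a missing field is never treated as Compliant. Below is a minimal standalone sketch of that decision, assuming a hypothetical constraintDependencySatisfied helper and an illustrative unstructured constraint object.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// constraintDependencySatisfied is a hypothetical helper mirroring the GConstraint case
// in processDependencies: a missing totalViolations field is never treated as Compliant,
// and the dependency fails when the observed compliance does not match the compliance
// the policy declared.
func constraintDependencySatisfied(obj *unstructured.Unstructured, wanted string) (bool, string) {
	violations, found, err := unstructured.NestedInt64(obj.Object, "status", "totalViolations")
	if err != nil || !found {
		return false, "Failed to find complianceState on the dependency object"
	}

	if (violations == 0) != (wanted == "Compliant") {
		return false, "Compliance mismatch on the dependency object"
	}

	return true, ""
}

func main() {
	constraint := &unstructured.Unstructured{Object: map[string]interface{}{
		"status": map[string]interface{}{"totalViolations": int64(3)},
	}}

	ok, reason := constraintDependencySatisfied(constraint, "Compliant")
	fmt.Println(ok, reason) // false Compliance mismatch on the dependency object
}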
209 changes: 209 additions & 0 deletions test/e2e/case17_gatekeeper_sync_test.go
@@ -495,4 +495,213 @@ var _ = Describe("Test Gatekeeper ConstraintTemplate and constraint sync", Order
"", false, defaultTimeoutSeconds,
)
})

Describe("Test policy ordering with gatekeeper objects", func() {
const (
waitForTemplateName = "case17-gk-dep-on-tmpl"
waitForTemplateYaml = yamlBasePath + waitForTemplateName + ".yaml"
waitForConstraintName = "case17-gk-dep-on-constraint"
waitForConstraintYaml = yamlBasePath + waitForConstraintName + ".yaml"
)

BeforeAll(func(ctx context.Context) {
By("Deleting any ConfigMaps in the test namespace, to prevent any initial violations")
err := clientManaged.CoreV1().ConfigMaps(configMapNamespace).DeleteCollection(
ctx, metav1.DeleteOptions{}, metav1.ListOptions{},
)
Expect(err).ToNot(HaveOccurred())
})

AfterAll(func() {
for _, pName := range []string{waitForTemplateName, waitForConstraintName} {
By("Deleting policy " + pName + " on the hub in ns:" + clusterNamespaceOnHub)
err := clientHubDynamic.Resource(gvrPolicy).Namespace(clusterNamespaceOnHub).Delete(
context.TODO(), pName, metav1.DeleteOptions{},
)
if !k8serrors.IsNotFound(err) {
Expect(err).ToNot(HaveOccurred())
}

By("Cleaning up the events for the policy " + pName)
_, err = kubectlManaged(
"delete",
"events",
"-n",
clusterNamespace,
"--field-selector=involvedObject.name="+pName,
"--ignore-not-found",
)
Expect(err).ToNot(HaveOccurred())
}
})

It("should not progress until the ConstraintTemplate is created", func() {
By("Creating policy " + waitForTemplateName + " on the hub in ns:" + clusterNamespaceOnHub)
_, err := kubectlHub("apply", "-f", waitForTemplateYaml, "-n", clusterNamespaceOnHub)
Expect(err).ShouldNot(HaveOccurred())
Expect(propagatorutils.GetWithTimeout(
clientManagedDynamic,
gvrPolicy,
waitForTemplateName,
clusterNamespace,
true,
defaultTimeoutSeconds,
)).NotTo(BeNil())

By("Checking that the configuration policy is not found, because it should be pending")
Consistently(func() interface{} {
return propagatorutils.GetWithTimeout(
clientManagedDynamic,
gvrConfigurationPolicy,
waitForTemplateName,
clusterNamespace,
false,
defaultTimeoutSeconds)
}, 10, 1).Should(BeNil())

By("Creating the policy with the ConstraintTemplate")
_, err = kubectlHub("apply", "-f", policyYaml, "-n", clusterNamespaceOnHub)
Expect(err).ShouldNot(HaveOccurred())
Expect(propagatorutils.GetWithTimeout(
clientManagedDynamic,
gvrPolicy,
policyName,
clusterNamespace,
true,
defaultTimeoutSeconds,
)).NotTo(BeNil())

By("Checking that the configuration policy can now be found")
Expect(propagatorutils.GetWithTimeout(
clientManagedDynamic,
gvrConfigurationPolicy,
waitForTemplateName,
clusterNamespace,
true,
defaultTimeoutSeconds,
)).NotTo(BeNil())
})

It("should progress initially when the constraint has no violations", func() {
By("Verifying that the policy status of the constraint is compliant")
Eventually(func(g Gomega) {
plc := propagatorutils.GetWithTimeout(
clientManagedDynamic,
gvrPolicy,
policyName,
clusterNamespace,
true,
defaultTimeoutSeconds,
)

compliance, found, err := unstructured.NestedString(plc.Object, "status", "compliant")
g.Expect(err).ToNot(HaveOccurred())
g.Expect(found).To(BeTrue())
g.Expect(compliance).To(Equal("Compliant"))
}, gkAuditFrequency*3, 1).Should(Succeed())

By("Creating policy " + waitForConstraintName + " on the hub in ns:" + clusterNamespaceOnHub)
_, err := kubectlHub("apply", "-f", waitForConstraintYaml, "-n", clusterNamespaceOnHub)
Expect(err).ShouldNot(HaveOccurred())
Expect(propagatorutils.GetWithTimeout(
clientManagedDynamic,
gvrPolicy,
waitForConstraintName,
clusterNamespace,
true,
defaultTimeoutSeconds,
)).NotTo(BeNil())

By("Checking that the configuration policy is found; the policy is not pending")
Expect(propagatorutils.GetWithTimeout(
clientManagedDynamic,
gvrConfigurationPolicy,
waitForConstraintName,
clusterNamespace,
true,
defaultTimeoutSeconds,
)).NotTo(BeNil())
})

It("should become Pending when there are violations on the constraint", func() {
By("Adding a ConfigMap that violates the constraint")
configMap := &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: configMapName,
Namespace: configMapNamespace,
},
}

_, err := clientManaged.CoreV1().ConfigMaps(configMapNamespace).Create(
context.TODO(), configMap, metav1.CreateOptions{},
)
Expect(err).ToNot(HaveOccurred())

By("Waiting for policy status of the constraint to be noncompliant")
Eventually(func(g Gomega) {
plc := propagatorutils.GetWithTimeout(
clientManagedDynamic,
gvrPolicy,
policyName,
clusterNamespace,
true,
defaultTimeoutSeconds,
)

compliance, found, err := unstructured.NestedString(plc.Object, "status", "compliant")
g.Expect(err).ToNot(HaveOccurred())
g.Expect(found).To(BeTrue())
g.Expect(compliance).To(Equal("NonCompliant"))
}, gkAuditFrequency*3, 1).Should(Succeed())

By("Checking that the configuration policy is not found, because it should be pending")
Expect(propagatorutils.GetWithTimeout(
clientManagedDynamic,
gvrConfigurationPolicy,
waitForConstraintName,
clusterNamespace,
false,
defaultTimeoutSeconds,
)).To(BeNil())
})

It("should progress again when the violations are addressed", func() {
By("Deleting the ConfigMap causing the violation")
err := clientManaged.CoreV1().ConfigMaps(configMapNamespace).Delete(
context.TODO(), configMapName, metav1.DeleteOptions{},
)
Expect(err).ToNot(HaveOccurred())

By("Waiting for policy status of the constraint to be compliant")
Eventually(func(g Gomega) {
plc := propagatorutils.GetWithTimeout(
clientManagedDynamic,
gvrPolicy,
policyName,
clusterNamespace,
true,
defaultTimeoutSeconds,
)

compliance, found, err := unstructured.NestedString(plc.Object, "status", "compliant")
g.Expect(err).ToNot(HaveOccurred())
g.Expect(found).To(BeTrue())
g.Expect(compliance).To(Equal("Compliant"))
}, gkAuditFrequency*3, 1).Should(Succeed())

By("Checking that the configuration policy is found; the policy is no longer pending")
Expect(propagatorutils.GetWithTimeout(
clientManagedDynamic,
gvrConfigurationPolicy,
waitForConstraintName,
clusterNamespace,
true,
defaultTimeoutSeconds,
)).NotTo(BeNil())
})
})
})