OCPEDGE-1000: feat: implement thick provisioning #596

Merged
2 changes: 1 addition & 1 deletion Makefile
@@ -297,7 +297,7 @@ kustomize: ## Download kustomize locally if necessary.

ENVTEST = $(shell pwd)/bin/setup-envtest
envtest: ## Download envtest-setup locally if necessary.
-$(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest)
+$(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@bf15e44028f908c790721fc8fe67c7bf2d06a611)

JSONNET = $(shell pwd)/bin/jsonnet
jsonnet: ## Download jsonnet locally if necessary.
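Pinning setup-envtest to a specific commit rather than @latest makes the test tooling reproducible: @latest can silently pull in a newer build that requires a different Go toolchain and break CI independently of the changes under review.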
7 changes: 7 additions & 0 deletions api/v1alpha1/lvmcluster_test.go
@@ -63,6 +63,13 @@ var _ = Describe("webhook acceptance tests", func() {
Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
})

It("minimum viable create for ReadWriteMany configuration", func(ctx SpecContext) {
resource := defaultLVMClusterInUniqueNamespace(ctx)
resource.Spec.Storage.DeviceClasses[0].ThinPoolConfig = nil
Expect(k8sClient.Create(ctx, resource)).To(Succeed())
Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
})

It("duplicate LVMClusters get rejected", func(ctx SpecContext) {
generatedName := generateUniqueNameForTestCase(ctx)
GinkgoT().Setenv(cluster.OperatorNamespaceEnvVar, generatedName)
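For reference, the resource built by this test amounts to something like the following (a sketch assuming the v1alpha1 types shown in this PR; the name and namespace are illustrative):

// A thick-provisioned LVMCluster is simply one whose device class omits ThinPoolConfig.
resource := &lvmv1alpha1.LVMCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test-lvmcluster", Namespace: "openshift-lvm-storage"},
Spec: lvmv1alpha1.LVMClusterSpec{
Storage: lvmv1alpha1.Storage{
DeviceClasses: []lvmv1alpha1.DeviceClass{{
Name: "vg1",
Default: true,
ThinPoolConfig: nil, // nil now means thick provisioning
}},
},
},
}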
5 changes: 2 additions & 3 deletions api/v1alpha1/lvmcluster_types.go
@@ -79,9 +79,8 @@ type DeviceClass struct {
NodeSelector *corev1.NodeSelector `json:"nodeSelector,omitempty"`

// ThinPoolConfig contains configurations for the thin-pool
-// +kubebuilder:validation:Required
-// +required
-ThinPoolConfig *ThinPoolConfig `json:"thinPoolConfig"`
+// +optional
+ThinPoolConfig *ThinPoolConfig `json:"thinPoolConfig,omitempty"`

// Default is a flag to indicate whether the device-class is the default
// +optional
5 changes: 2 additions & 3 deletions api/v1alpha1/lvmvolumegroup_types.go
@@ -34,9 +34,8 @@ type LVMVolumeGroupSpec struct {
NodeSelector *corev1.NodeSelector `json:"nodeSelector,omitempty"`

// ThinPoolConfig contains configurations for the thin-pool
-// +kubebuilder:validation:Required
-// +required
-ThinPoolConfig *ThinPoolConfig `json:"thinPoolConfig"`
+// +optional
+ThinPoolConfig *ThinPoolConfig `json:"thinPoolConfig,omitempty"`

// Default is a flag to indicate whether the device-class is the default
// +optional
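Dropping the +required marker in favor of +optional (with omitempty on the JSON tag) means controller-gen no longer lists thinPoolConfig under required in the generated CRD schemas; the four regenerated manifests below are the mechanical result. An LVMCluster or LVMVolumeGroup whose device class omits thinPoolConfig is now valid API input and is interpreted as thick provisioning.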
2 changes: 0 additions & 2 deletions bundle/manifests/lvm.topolvm.io_lvmclusters.yaml
@@ -211,8 +211,6 @@ spec:
- name
- overprovisionRatio
type: object
-required:
-- thinPoolConfig
type: object
type: array
type: object
2 changes: 0 additions & 2 deletions bundle/manifests/lvm.topolvm.io_lvmvolumegroups.yaml
@@ -177,8 +177,6 @@ spec:
- name
- overprovisionRatio
type: object
-required:
-- thinPoolConfig
type: object
status:
description: LVMVolumeGroupStatus defines the observed state of LVMVolumeGroup
2 changes: 0 additions & 2 deletions config/crd/bases/lvm.topolvm.io_lvmclusters.yaml
@@ -207,8 +207,6 @@ spec:
- name
- overprovisionRatio
type: object
-required:
-- thinPoolConfig
type: object
type: array
type: object
2 changes: 0 additions & 2 deletions config/crd/bases/lvm.topolvm.io_lvmvolumegroups.yaml
@@ -177,8 +177,6 @@ spec:
- name
- overprovisionRatio
type: object
-required:
-- thinPoolConfig
type: object
status:
description: LVMVolumeGroupStatus defines the observed state of LVMVolumeGroup
@@ -104,6 +104,9 @@ func getTopolvmSnapshotClasses(lvmCluster *lvmv1alpha1.LVMCluster) []*snapapi.VolumeSnapshotClass {
var vsc []*snapapi.VolumeSnapshotClass

for _, deviceClass := range lvmCluster.Spec.Storage.DeviceClasses {
+if deviceClass.ThinPoolConfig == nil {
+continue
+}
snapshotClass := &snapapi.VolumeSnapshotClass{
ObjectMeta: metav1.ObjectMeta{
Name: GetVolumeSnapshotClassName(deviceClass.Name),
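Skipping device classes without a ThinPoolConfig is consistent with TopoLVM, where snapshots are only supported on thinly provisioned volumes: a thick-only device class gets no VolumeSnapshotClass.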
11 changes: 8 additions & 3 deletions internal/controllers/lvmcluster/resource/vgmanager.go
@@ -55,6 +55,9 @@ func (v vgManager) EnsureCreated(r Reconciler, ctx context.Context, lvmCluster *
r.GetVGManagerCommand(),
r.GetLogPassthroughOptions().VGManager.AsArgs(),
)
+if err := ctrl.SetControllerReference(lvmCluster, &dsTemplate, r.Scheme()); err != nil {
+return fmt.Errorf("failed to set controller reference on vgManager daemonset %q. %v", dsTemplate.Name, err)
+}

// create desired daemonset or update mutable fields on existing one
ds := &appsv1.DaemonSet{
@@ -66,14 +69,16 @@
// the anonymous mutate function modifies the daemonset object after fetching it.
// if the daemonset does not already exist, it creates it, otherwise, it updates it
result, err := ctrl.CreateOrUpdate(ctx, r, ds, func() error {
-if err := ctrl.SetControllerReference(lvmCluster, ds, r.Scheme()); err != nil {
-return fmt.Errorf("failed to set controller reference on vgManager daemonset %q. %v", dsTemplate.Name, err)
-}
// at creation, deep copy the whole daemonset
if ds.CreationTimestamp.IsZero() {
dsTemplate.DeepCopyInto(ds)
return nil
}

+if err := ctrl.SetControllerReference(lvmCluster, ds, r.Scheme()); err != nil {
+return fmt.Errorf("failed to set controller reference on vgManager daemonset %q. %v", dsTemplate.Name, err)
+}

// if update, update only mutable fields
initMapIfNil(&ds.ObjectMeta.Labels)
initMapIfNil(&ds.Spec.Template.ObjectMeta.Labels)
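The ordering change here is the subtle part: previously ctrl.SetControllerReference was called at the top of the mutate function, and on the create path the subsequent dsTemplate.DeepCopyInto(ds) would overwrite the freshly set owner references with the template's empty ones. Setting the reference on dsTemplate up front covers creation (the deep copy now carries the reference along), while the call moved below the CreationTimestamp check keeps the reference in place when updating an existing DaemonSet.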
13 changes: 7 additions & 6 deletions internal/controllers/lvmcluster/resource/vgmanager_daemonset.go
@@ -329,12 +329,13 @@ func newVGManagerDaemonset(lvmCluster *lvmv1alpha1.LVMCluster, namespace, vgImag
},

Spec: corev1.PodSpec{
+TerminationGracePeriodSeconds: ptr.To(int64(30)),
PriorityClassName: constants.PriorityClassNameUserCritical,
Volumes: volumes,
Containers: containers,
HostPID: true,
Tolerations: tolerations,
ServiceAccountName: constants.VGManagerServiceAccount,
},
},
},
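30 seconds is the Kubernetes default for terminationGracePeriodSeconds; stating it explicitly (via ptr.To from k8s.io/utils/ptr) presumably keeps the in-memory template identical to the server-defaulted object, so the reconcile loop does not see a perpetual diff on this field.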
66 changes: 38 additions & 28 deletions internal/controllers/vgmanager/controller.go
@@ -30,6 +30,7 @@ import (
"github.com/openshift/lvm-operator/internal/controllers/vgmanager/lvm"
"github.com/openshift/lvm-operator/internal/controllers/vgmanager/lvmd"
"github.com/openshift/lvm-operator/internal/controllers/vgmanager/wipefs"
"k8s.io/utils/ptr"

corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
@@ -227,14 +228,17 @@ func (r *Reconciler) reconcile(

logger.V(1).Info("no new available devices discovered, verifying existing setup")

+// If we are provisioning a thin pool, we need to verify that the thin pool and its LVs are in a consistent state
+if volumeGroup.Spec.ThinPoolConfig != nil {
// since the last reconciliation there could have been corruption on the LVs, so we need to verify them again
if err := r.validateLVs(ctx, volumeGroup); err != nil {
err := fmt.Errorf("error while validating logical volumes in existing volume group: %w", err)
r.WarningEvent(ctx, volumeGroup, EventReasonErrorInconsistentLVs, err)
if _, err := r.setVolumeGroupFailedStatus(ctx, volumeGroup, vgs, devices, err); err != nil {
logger.Error(err, "failed to set status to failed")
}
return ctrl.Result{}, err
}
+}

if err := r.applyLVMDConfig(ctx, volumeGroup, vgs, devices); err != nil {
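Here and in the hunks below, every thin-pool-specific step (verifying existing LVs, creating or extending the pool, validating the result) is now gated on volumeGroup.Spec.ThinPoolConfig != nil. With thick provisioning the volume group itself is the provisioning target, so there is no pool LV to create or validate.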
@@ -272,23 +276,24 @@
}

// Create thin pool
+if volumeGroup.Spec.ThinPoolConfig != nil {
if err = r.addThinPoolToVG(ctx, volumeGroup.Name, volumeGroup.Spec.ThinPoolConfig); err != nil {
err := fmt.Errorf("failed to create thin pool %s for volume group %s: %w", volumeGroup.Spec.ThinPoolConfig.Name, volumeGroup.Name, err)
r.WarningEvent(ctx, volumeGroup, EventReasonErrorThinPoolCreateOrExtendFailed, err)
if _, err := r.setVolumeGroupFailedStatus(ctx, volumeGroup, vgs, devices, err); err != nil {
logger.Error(err, "failed to set status to failed")
}
return ctrl.Result{}, err
}

// Validate the LVs created from the Thin-Pool to make sure the adding went as planned.
if err := r.validateLVs(ctx, volumeGroup); err != nil {
err := fmt.Errorf("error while validating logical volumes in existing volume group: %w", err)
r.WarningEvent(ctx, volumeGroup, EventReasonErrorInconsistentLVs, err)
if _, err := r.setVolumeGroupFailedStatus(ctx, volumeGroup, vgs, devices, err); err != nil {
logger.Error(err, "failed to set status to failed")
}
return ctrl.Result{}, err
}
+}

// refresh list of vgs to be used in status
@@ -347,16 +352,21 @@ func (r *Reconciler) applyLVMDConfig(ctx context.Context, volumeGroup *lvmv1alph
}
if !found {
dc := &lvmd.DeviceClass{
Name: volumeGroup.Name,
VolumeGroup: volumeGroup.Name,
Default: volumeGroup.Spec.Default,
-ThinPoolConfig: &lvmd.ThinPoolConfig{},
}

if volumeGroup.Spec.ThinPoolConfig != nil {
dc.Type = lvmd.TypeThin
-dc.ThinPoolConfig.Name = volumeGroup.Spec.ThinPoolConfig.Name
-dc.ThinPoolConfig.OverprovisionRatio = float64(volumeGroup.Spec.ThinPoolConfig.OverprovisionRatio)
+dc.ThinPoolConfig = &lvmd.ThinPoolConfig{
+Name: volumeGroup.Spec.ThinPoolConfig.Name,
+OverprovisionRatio: float64(volumeGroup.Spec.ThinPoolConfig.OverprovisionRatio),
+}
+} else {
+dc.Type = lvmd.TypeThick
+// set SpareGB to 0 to avoid automatic default to 10GiB
+dc.SpareGB = ptr.To(uint64(0))
}

lvmdConfig.DeviceClasses = append(lvmdConfig.DeviceClasses, dc)
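Taken together, the device-class handling above would render an lvmd configuration along these lines (a sketch using TopoLVM's lvmd YAML field names; names and the overprovision ratio are illustrative):

device-classes:
  - name: vg1
    volume-group: vg1
    default: true
    type: thin
    thin-pool:
      name: thin-pool-1
      overprovision-ratio: 10
  - name: vg2
    volume-group: vg2
    type: thick
    spare-gb: 0 # lvmd would otherwise reserve its default 10GiB spare

For a thin device class lvmd carves logical volumes out of the named thin pool; for a thick one it allocates directly from the volume group, which is why the spare reservation is zeroed rather than left to default.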