feat: allow dlv debugging for lvm operator and vgmanager
Signed-off-by: Jakob Möller <[email protected]>
jakobmoellerdev committed Oct 4, 2023
1 parent 5e45008 commit 2c0d019
Showing 10 changed files with 117 additions and 7 deletions.
12 changes: 10 additions & 2 deletions Makefile
@@ -182,20 +182,23 @@ ARCH ?= amd64
all: build

build: generate fmt vet ## Build manager binary.
GOOS=$(OS) GOARCH=$(ARCH) go build -o bin/lvms cmd/main.go
GOOS=$(OS) GOARCH=$(ARCH) go build -gcflags='all=-N -l' -o bin/lvms cmd/main.go

build-prometheus-alert-rules: jsonnet monitoring/mixin.libsonnet monitoring/alerts/alerts.jsonnet monitoring/alerts/*.libsonnet
$(JSONNET) -S monitoring/alerts/alerts.jsonnet > config/prometheus/prometheus_rules.yaml

docker-build: ## Build docker image with the manager.
$(IMAGE_BUILD_CMD) build --platform=${OS}/${ARCH} -t ${IMG} .

docker-build-debug: ## Build docker image with the manager and dlv remote debugging enabled.
$(IMAGE_BUILD_CMD) build -f debug.Dockerfile --platform=${OS}/${ARCH} -t ${IMG} .

docker-push: ## Push docker image with the manager.
$(IMAGE_BUILD_CMD) push ${IMG}

lvms-must-gather:
@echo "Building the lvms-must-gather image"
$(IMAGE_BUILD_CMD) build -f must-gather/Dockerfile -t "${MUST_GATHER_IMG}" must-gather/

##@ Deployment

@@ -210,6 +213,11 @@ deploy: update-mgr-env manifests kustomize ## Deploy controller to the K8s cluster
cd config/webhook && $(KUSTOMIZE) edit set nameprefix ${MANAGER_NAME_PREFIX}
$(KUSTOMIZE) build config/default | kubectl apply -f -

deploy-debug: update-mgr-env manifests kustomize ## Deploy controller with dlv remote debugging enabled to the K8s cluster specified in ~/.kube/config.
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} && $(KUSTOMIZE) edit set nameprefix ${MANAGER_NAME_PREFIX}
cd config/webhook && $(KUSTOMIZE) edit set nameprefix ${MANAGER_NAME_PREFIX}
$(KUSTOMIZE) build config/debug | kubectl apply -f -
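A rough usage sketch for the new targets, assuming a pushable registry and a working kubeconfig; the image reference below is only an example, not a project default:

  # build and push the dlv-enabled image, then deploy the debug overlay
  make docker-build-debug IMG=quay.io/example/lvms-operator:debug
  make docker-push IMG=quay.io/example/lvms-operator:debug
  make deploy-debug IMG=quay.io/example/lvms-operator:debug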

undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/default | kubectl delete -f -

9 changes: 9 additions & 0 deletions cmd/operator/operator.go
@@ -55,13 +55,17 @@ const (
DefaultEnableLeaderElection = false
)

var DefaultVGManagerCommand = []string{"/lvms", "vgmanager"}

type Options struct {
Scheme *runtime.Scheme
SetupLog logr.Logger

diagnosticsAddr string
healthProbeAddr string
enableLeaderElection bool

vgManagerCommand []string
}

// NewCmd creates a new CLI command
@@ -88,6 +92,10 @@ func NewCmd(opts *Options) *cobra.Command {
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.",
)

cmd.Flags().StringSliceVar(
&opts.vgManagerCommand, "vgmanager-cmd", DefaultVGManagerCommand, "The command used to start vgmanager; override it to wrap vgmanager in a debugger such as dlv.",
)

return cmd
}

@@ -165,6 +173,7 @@ func run(cmd *cobra.Command, _ []string, opts *Options) error {
Namespace: operatorNamespace,
TopoLVMLeaderElectionPassthrough: leaderElectionConfig,
EnableSnapshotting: enableSnapshotting,
VGManagerCommand: opts.vgManagerCommand,
}).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create LVMCluster controller: %w", err)
}
6 changes: 6 additions & 0 deletions config/debug/kustomization.yaml
@@ -0,0 +1,6 @@
patchesStrategicMerge:
- manager_debug_patch.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../default
27 changes: 27 additions & 0 deletions config/debug/manager_debug_patch.yaml
@@ -0,0 +1,27 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: operator
namespace: system
spec:
template:
spec:
containers:
- name: manager
command:
- "/usr/sbin/dlv"
- "exec"
- "--listen=:2345"
- "--headless=true"
- "--log=true"
- "--accept-multiclient"
- "--api-version=2"
- "--continue"
- "/usr/sbin/lvms"
- "--"
- "operator"
args:
- "--vgmanager-cmd=/usr/sbin/dlv,exec,--listen=:2345,--headless=true,--log=true,--accept-multiclient,--api-version=2,--continue,/usr/sbin/lvms,--,vgmanager"
ports:
- containerPort: 2345
name: "debug"
2 changes: 1 addition & 1 deletion config/manager/kustomization.yaml
@@ -7,6 +7,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
newName: quay.io/lvms_dev/lvms-operator
newName: quay.io/jmoller/lvms-operator
newTag: latest
namePrefix: lvms-
52 changes: 52 additions & 0 deletions debug.Dockerfile
@@ -0,0 +1,52 @@
# https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
ARG TARGETOS
ARG TARGETARCH
ARG TARGETPLATFORM
FROM golang:1.20 as builder

WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum

# Since we use vendoring, we don't need to re-download dependencies on every build. Instead we can simply
# reuse the vendored directory and verify it is intact; if verification fails, abort here and re-vendor.
COPY vendor vendor/
RUN go mod verify

# Copy the go source
COPY api/ api/
COPY controllers/ controllers/
COPY cmd/ cmd/
COPY pkg/ pkg/

ENV GOARCH=$TARGETARCH
ENV GOOS=$TARGETOS
ENV CGO_ENABLED=0

# Build
RUN go build -gcflags "all=-N -l" -mod=vendor -a -o lvms cmd/main.go

FROM golang:1.20 as dlv
RUN go install -ldflags "-s -w -extldflags '-static'" github.com/go-delve/delve/cmd/dlv@latest

# vgmanager needs 'nsenter' and other basic Linux utilities to function correctly
FROM --platform=$TARGETPLATFORM registry.access.redhat.com/ubi9/ubi-minimal:9.2

# Update the image to get the latest CVE updates
RUN microdnf update -y && \
microdnf install -y util-linux && \
microdnf clean all


WORKDIR /app

COPY --from=builder /workspace/lvms /usr/sbin/lvms
COPY --from=dlv /go/bin/dlv /usr/sbin/dlv

USER 65532:65532

EXPOSE 2345

# '/usr/sbin/dlv' is the entrypoint for all LVMS binaries
ENTRYPOINT ["/usr/sbin/dlv"]
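Because dlv is the image entrypoint, any run-time arguments are interpreted as dlv arguments; the kustomize patch above supplies the in-cluster ones. A local smoke test might look like this (the image tag and the use of podman are assumptions):

  # build the debug image and confirm the bundled dlv binary runs
  podman build -f debug.Dockerfile -t lvms-operator:debug .
  podman run --rm lvms-operator:debug version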
7 changes: 7 additions & 0 deletions internal/controllers/lvmcluster/lvmcluster_controller.go
@@ -64,6 +64,9 @@ type LVMClusterReconciler struct {
Namespace string
ImageName string

// VGManagerCommand is the command that will be used to start vgmanager
VGManagerCommand []string

// TopoLVMLeaderElectionPassthrough uses the given leaderElection when initializing TopoLVM to synchronize
// leader election configuration
TopoLVMLeaderElectionPassthrough configv1.LeaderElection
@@ -85,6 +88,10 @@ func (r *LVMClusterReconciler) SnapshotsEnabled() bool {
return r.EnableSnapshotting
}

func (r *LVMClusterReconciler) GetVGManagerCommand() []string {
return r.VGManagerCommand
}

func (r *LVMClusterReconciler) GetTopoLVMLeaderElectionPassthrough() configv1.LeaderElection {
return r.TopoLVMLeaderElectionPassthrough
}
1 change: 1 addition & 0 deletions internal/controllers/lvmcluster/resource/manager.go
@@ -14,6 +14,7 @@ type Reconciler interface {
GetNamespace() string
GetImageName() string
SnapshotsEnabled() bool
GetVGManagerCommand() []string

// GetTopoLVMLeaderElectionPassthrough uses the given leaderElection when initializing TopoLVM to synchronize
// leader election configuration
2 changes: 1 addition & 1 deletion internal/controllers/lvmcluster/resource/vgmanager.go
@@ -47,7 +47,7 @@ func (v vgManager) EnsureCreated(r Reconciler, ctx context.Context, lvmCluster *
logger := log.FromContext(ctx).WithValues("resourceManager", v.GetName())

// get desired daemonset spec
dsTemplate := newVGManagerDaemonset(lvmCluster, r.GetNamespace(), r.GetImageName())
dsTemplate := newVGManagerDaemonset(lvmCluster, r.GetNamespace(), r.GetImageName(), r.GetVGManagerCommand())

// create desired daemonset or update mutable fields on existing one
ds := &appsv1.DaemonSet{
@@ -137,16 +137,16 @@ var (
)

// newVGManagerDaemonset returns the desired vgmanager daemonset for a given LVMCluster
func newVGManagerDaemonset(lvmCluster *lvmv1alpha1.LVMCluster, namespace string, vgImage string) appsv1.DaemonSet {
func newVGManagerDaemonset(lvmCluster *lvmv1alpha1.LVMCluster, namespace, vgImage string, command []string) appsv1.DaemonSet {
// aggregate nodeSelector and tolerations from all deviceClasses
nodeSelector, tolerations := selector.ExtractNodeSelectorAndTolerations(lvmCluster)
volumes := []corev1.Volume{LVMDConfVol, DevHostDirVol, UDevHostDirVol, SysHostDirVol, MetricsCertsDirVol}
volumeMounts := []corev1.VolumeMount{LVMDConfVolMount, DevHostDirVolMount, UDevHostDirVolMount, SysHostDirVolMount, MetricsCertsDirVolMount}
privileged := true
var zero int64 = 0

command := []string{
"/lvms", "vgmanager",
if len(command) == 0 {
command = []string{"/lvms", "vgmanager"}
}

resourceRequirements := corev1.ResourceRequirements{
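To confirm the override actually reached the node daemon, one option is to inspect the rendered DaemonSet command, assuming it is named vg-manager and lives in the openshift-storage namespace (both assumptions):

  # the command list should show dlv wrapping /usr/sbin/lvms vgmanager
  kubectl -n openshift-storage get ds vg-manager -o jsonpath='{.spec.template.spec.containers[0].command}'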
