From 3fcf84a092773e0bc26499fd7426a9d1fa9eb2e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakob=20M=C3=B6ller?= Date: Tue, 5 Sep 2023 15:50:09 +0200 Subject: [PATCH] chore: refactor vgmanager into different interfaces and introduce tests --- Dockerfile | 2 +- cmd/vgmanager/main.go | 16 +- controllers/constants.go | 1 - controllers/topolvm_node.go | 11 +- go.mod | 2 +- go.sum | 20 ++ pkg/{internal => exec}/exec.go | 5 +- pkg/lsblk/filter.go | 110 +++++++++++ pkg/lsblk/filter_test.go | 131 +++++++++++++ .../block_device.go => lsblk/lsblk.go} | 93 +++++---- pkg/{vgmanager => lvm}/lvm.go | 170 +++++++++++------ pkg/{vgmanager => lvm}/lvm_test.go | 26 +-- pkg/lvmd/lvmd_config.go | 65 +++++++ pkg/vgmanager/devices.go | 64 +++---- pkg/vgmanager/devices_test.go | 60 +++--- pkg/vgmanager/filter.go | 90 --------- pkg/vgmanager/filter_test.go | 132 ------------- pkg/vgmanager/status.go | 2 +- pkg/vgmanager/suite_test.go | 180 ++++++++++++++++++ pkg/vgmanager/vgmanager_controller.go | 132 +++++-------- pkg/vgmanager/vgmanager_controller_test.go | 86 ++++++++- 21 files changed, 886 insertions(+), 512 deletions(-) rename pkg/{internal => exec}/exec.go (94%) create mode 100644 pkg/lsblk/filter.go create mode 100644 pkg/lsblk/filter_test.go rename pkg/{internal/block_device.go => lsblk/lsblk.go} (66%) rename pkg/{vgmanager => lvm}/lvm.go (54%) rename pkg/{vgmanager => lvm}/lvm_test.go (86%) create mode 100644 pkg/lvmd/lvmd_config.go delete mode 100644 pkg/vgmanager/filter.go delete mode 100644 pkg/vgmanager/filter_test.go create mode 100644 pkg/vgmanager/suite_test.go diff --git a/Dockerfile b/Dockerfile index 2169b965c..b0bb30aac 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ ARG TARGETOS ARG TARGETARCH ARG TARGETPLATFORM # Build the manager binary -FROM golang:1.20 as builder +FROM golang:1.21 as builder WORKDIR /workspace # Copy the Go Modules manifests diff --git a/cmd/vgmanager/main.go b/cmd/vgmanager/main.go index 61be8ede4..2d9f1bbc1 100644 --- 
a/cmd/vgmanager/main.go +++ b/cmd/vgmanager/main.go @@ -21,6 +21,10 @@ import ( "os" lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1" + "github.com/openshift/lvm-operator/pkg/exec" + "github.com/openshift/lvm-operator/pkg/lsblk" + "github.com/openshift/lvm-operator/pkg/lvm" + "github.com/openshift/lvm-operator/pkg/lvmd" "github.com/openshift/lvm-operator/pkg/vgmanager" "k8s.io/apimachinery/pkg/runtime" @@ -72,11 +76,15 @@ func main() { os.Exit(1) } + executor := &exec.CommandExecutor{} if err = (&vgmanager.VGReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - NodeName: os.Getenv("NODE_NAME"), - Namespace: os.Getenv("POD_NAMESPACE"), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + NodeName: os.Getenv("NODE_NAME"), + Namespace: os.Getenv("POD_NAMESPACE"), + LVM: lvm.NewHostLVM(executor), + LSBLK: lsblk.NewHostLSBLK(executor, lsblk.DefaultMountinfo, lsblk.DefaultLosetup), + LVMDConfig: lvmd.NewDefaultLMVDFileConfig(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "VGManager") os.Exit(1) diff --git a/controllers/constants.go b/controllers/constants.go index 18b0b19dd..583aca421 100644 --- a/controllers/constants.go +++ b/controllers/constants.go @@ -81,7 +81,6 @@ const ( CSIKubeletRootDir = "/var/lib/kubelet/" NodeContainerName = "topolvm-node" TopolvmNodeContainerHealthzName = "healthz" - LvmdConfigFile = "/etc/topolvm/lvmd.yaml" DefaultCSISocket = "/run/topolvm/csi-topolvm.sock" DefaultLVMdSocket = "/run/lvmd/lvmd.sock" diff --git a/controllers/topolvm_node.go b/controllers/topolvm_node.go index 639427980..67475ff96 100644 --- a/controllers/topolvm_node.go +++ b/controllers/topolvm_node.go @@ -23,6 +23,7 @@ import ( "strings" lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1" + "github.com/openshift/lvm-operator/pkg/lvmd" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -137,7 +138,7 @@ func 
getNodeDaemonSet(lvmCluster *lvmv1alpha1.LVMCluster, namespace string, init {Name: "lvmd-config-dir", VolumeSource: corev1.VolumeSource{ HostPath: &corev1.HostPathVolumeSource{ - Path: filepath.Dir(LvmdConfigFile), + Path: filepath.Dir(lvmd.DefaultLVMDConfigFilePath), Type: &hostPathDirectory}}}, {Name: "lvmd-socket-dir", VolumeSource: corev1.VolumeSource{ @@ -202,11 +203,11 @@ func getNodeInitContainer(initImage string) *corev1.Container { command := []string{ "/usr/bin/bash", "-c", - fmt.Sprintf("until [ -f %s ]; do echo waiting for lvmd config file; sleep 5; done", LvmdConfigFile), + fmt.Sprintf("until [ -f %s ]; do echo waiting for lvmd config file; sleep 5; done", lvmd.DefaultLVMDConfigFilePath), } volumeMounts := []corev1.VolumeMount{ - {Name: "lvmd-config-dir", MountPath: filepath.Dir(LvmdConfigFile)}, + {Name: "lvmd-config-dir", MountPath: filepath.Dir(lvmd.DefaultLVMDConfigFilePath)}, } fileChecker := &corev1.Container{ @@ -228,7 +229,7 @@ func getNodeInitContainer(initImage string) *corev1.Container { func getLvmdContainer() *corev1.Container { command := []string{ "/lvmd", - fmt.Sprintf("--config=%s", LvmdConfigFile), + fmt.Sprintf("--config=%s", lvmd.DefaultLVMDConfigFilePath), "--container=true", } @@ -241,7 +242,7 @@ func getLvmdContainer() *corev1.Container { volumeMounts := []corev1.VolumeMount{ {Name: "lvmd-socket-dir", MountPath: filepath.Dir(DefaultLVMdSocket)}, - {Name: "lvmd-config-dir", MountPath: filepath.Dir(LvmdConfigFile)}, + {Name: "lvmd-config-dir", MountPath: filepath.Dir(lvmd.DefaultLVMDConfigFilePath)}, } privilege := true diff --git a/go.mod b/go.mod index 64c177953..9fa50cc29 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/openshift/lvm-operator -go 1.20 +go 1.21 require ( github.com/aws/aws-sdk-go v1.44.329 diff --git a/go.sum b/go.sum index e6033cfec..c2eaff069 100644 --- a/go.sum +++ b/go.sum @@ -26,7 +26,9 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ 
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/sprig v2.15.0+incompatible h1:0gSxPGWS9PAr7U2NsQ2YQg6juRDINkUyuvbb4b2Xm8w= +github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -35,8 +37,10 @@ github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdko github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aokoli/goutils v1.0.1 h1:7fpzNGoJ3VA8qcrm++XEE1QUe0mIwNeLa02Nwq7RDkg= +github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.44.329 h1:Rqy+wYI8h+iq+FphR59KKTsHR1Lz7YiwRqFzWa7xoYU= github.com/aws/aws-sdk-go v1.44.329/go.mod 
h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= @@ -95,6 +99,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.2 h1:JiO+kJTpmYGjEodY7O1Zk8oZcNz1+f30UtwtXoFUPzE= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= @@ -102,6 +107,7 @@ github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -147,6 +153,7 @@ github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg78 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gobuffalo/flect v0.2.5 
h1:H6vvsv2an0lalEaCDRThvtBfmg44W/QHXBCYUXf/6S4= +github.com/gobuffalo/flect v0.2.5/go.mod h1:1ZyCLIbg0YD7sDkzvFdPoOydPtD8y9JQnrOROolUcM8= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -247,6 +254,7 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -265,7 +273,9 @@ github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ 
-284,6 +294,7 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007 h1:28i1IjGcx8AofiB4N3q5Yls55VEaitzuEPkFJEVgGkA= +github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -296,6 +307,7 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= @@ -336,10 +348,13 @@ github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/pseudomuto/protoc-gen-doc v1.5.0 h1:pHZp0MEiT68jrZV8js8BS7E9ZEnlSLegoQbbtXj5lfo= 
+github.com/pseudomuto/protoc-gen-doc v1.5.0/go.mod h1:exDTOVwqpp30eV/EDPFLZy3Pwr2sn6hBC1WIYH/UbIg= github.com/pseudomuto/protokit v0.2.0 h1:hlnBDcy3YEDXH7kc9gV+NLaN0cDzhDvD1s7Y6FZ8RpM= +github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -403,6 +418,7 @@ go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= @@ -419,6 +435,7 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto 
v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -445,6 +462,7 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -636,6 +654,7 @@ google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -729,6 +748,7 @@ 
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 sigs.k8s.io/controller-runtime v0.15.0 h1:ML+5Adt3qZnMSYxZ7gAverBLNPSMQEibtzAgp0UPojU= sigs.k8s.io/controller-runtime v0.15.0/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= sigs.k8s.io/controller-tools v0.9.0 h1:b/vSEPpA8hiMiyzDfLbZdCn3hoAcy3/868OHhYtHY9w= +sigs.k8s.io/controller-tools v0.9.0/go.mod h1:NUkn8FTV3Sad3wWpSK7dt/145qfuQ8CKJV6j4jHC5rM= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= diff --git a/pkg/internal/exec.go b/pkg/exec/exec.go similarity index 94% rename from pkg/internal/exec.go rename to pkg/exec/exec.go index 44ea9ae3c..6b42826e6 100644 --- a/pkg/internal/exec.go +++ b/pkg/exec/exec.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package internal +package exec import ( "fmt" @@ -24,7 +24,6 @@ import ( var ( nsenterPath = "/usr/bin/nsenter" - losetupPath = "/usr/sbin/losetup" ) // Executor is the interface for running exec commands @@ -42,7 +41,7 @@ func (*CommandExecutor) ExecuteCommandWithOutput(command string, arg ...string) return runCommandWithOutput(cmd) } -// ExecuteCommandWithOutput executes a command with output using nsenter +// ExecuteCommandWithOutputAsHost executes a command with output using nsenter func (*CommandExecutor) ExecuteCommandWithOutputAsHost(command string, arg ...string) (string, error) { args := append([]string{"-m", "-u", "-i", "-n", "-p", "-t", "1", command}, arg...) cmd := exec.Command(nsenterPath, args...) 
diff --git a/pkg/lsblk/filter.go b/pkg/lsblk/filter.go new file mode 100644 index 000000000..8b734b2a7 --- /dev/null +++ b/pkg/lsblk/filter.go @@ -0,0 +1,110 @@ +/* +Copyright © 2023 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lsblk + +import ( + "strings" +) + +const ( + // StateSuspended is a possible value of BlockDevice.State + StateSuspended = "suspended" + + // DeviceTypeLoop is the device type for loop devices in lsblk output + DeviceTypeLoop = "loop" + + // DeviceTypeROM is the device type for ROM devices in lsblk output + DeviceTypeROM = "rom" + + // DeviceTypeLVM is the device type for lvm devices in lsblk output + DeviceTypeLVM = "lvm" + + // filter names: + notReadOnly = "notReadOnly" + notSuspended = "notSuspended" + noBiosBootInPartLabel = "noBiosBootInPartLabel" + noReservedInPartLabel = "noReservedInPartLabel" + noFilesystemSignature = "noFilesystemSignature" + noBindMounts = "noBindMounts" + noChildren = "noChildren" + usableDeviceType = "usableDeviceType" +) + +func (lsblk *HostLSBLK) ResetFilterMap() { + lsblk.filterMap = lsblk.defaultFilterMap() +} + +func (lsblk *HostLSBLK) GetFilterMap() map[string]func(BlockDevice) (bool, error) { + return lsblk.filterMap +} + +func (lsblk *HostLSBLK) SetFilterMap(newFilterMap map[string]func(BlockDevice) (bool, error)) { + lsblk.filterMap = newFilterMap +} + +func (lsblk *HostLSBLK) defaultFilterMap() map[string]func(BlockDevice) (bool, error) { + return map[string]func(BlockDevice) (bool, error){ + 
notReadOnly: func(dev BlockDevice) (bool, error) { + return !dev.ReadOnly, nil + }, + + notSuspended: func(dev BlockDevice) (bool, error) { + matched := dev.State != StateSuspended + return matched, nil + }, + + noBiosBootInPartLabel: func(dev BlockDevice) (bool, error) { + biosBootInPartLabel := strings.Contains(strings.ToLower(dev.PartLabel), strings.ToLower("bios")) || + strings.Contains(strings.ToLower(dev.PartLabel), strings.ToLower("boot")) + return !biosBootInPartLabel, nil + }, + + noReservedInPartLabel: func(dev BlockDevice) (bool, error) { + reservedInPartLabel := strings.Contains(strings.ToLower(dev.PartLabel), "reserved") + return !reservedInPartLabel, nil + }, + + noFilesystemSignature: func(dev BlockDevice) (bool, error) { + matched := dev.FSType == "" + return matched, nil + }, + + noChildren: func(dev BlockDevice) (bool, error) { + hasChildren := dev.HasChildren() + return !hasChildren, nil + }, + + noBindMounts: func(dev BlockDevice) (bool, error) { + hasBindMounts, _, err := lsblk.HasBindMounts(dev) + return !hasBindMounts, err + }, + + usableDeviceType: func(dev BlockDevice) (bool, error) { + switch dev.Type { + case DeviceTypeLoop: + // check loop device isn't being used by kubernetes + return lsblk.IsUsableLoopDev(dev) + case DeviceTypeROM: + return false, nil + case DeviceTypeLVM: + return false, nil + default: + return true, nil + } + }, + } +} diff --git a/pkg/lsblk/filter_test.go b/pkg/lsblk/filter_test.go new file mode 100644 index 000000000..4ca9db670 --- /dev/null +++ b/pkg/lsblk/filter_test.go @@ -0,0 +1,131 @@ +package lsblk + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type filterTestCase struct { + label string + device BlockDevice + expected bool + expectErr bool +} + +func TestNotReadOnly(t *testing.T) { + testcases := []filterTestCase{ + {label: "tc false", device: BlockDevice{ReadOnly: false}, expected: true, expectErr: false}, + {label: "tc true", device: BlockDevice{ReadOnly: true}, expected: false, 
expectErr: false}, + } + for _, tc := range testcases { + result, err := NewHostLSBLK(nil, "", "").GetFilterMap()[notReadOnly](tc.device) + assert.Equal(t, tc.expected, result) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + } +} + +func TestNotSuspended(t *testing.T) { + testcases := []filterTestCase{ + {label: "tc suspended", device: BlockDevice{State: "suspended"}, expected: false, expectErr: false}, + {label: "tc live", device: BlockDevice{State: "live"}, expected: true, expectErr: false}, + {label: "tc running", device: BlockDevice{State: "running"}, expected: true, expectErr: false}, + } + for _, tc := range testcases { + result, err := NewHostLSBLK(nil, "", "").GetFilterMap()[notSuspended](tc.device) + assert.Equal(t, tc.expected, result) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + } +} + +func TestNoFilesystemSignature(t *testing.T) { + testcases := []filterTestCase{ + {label: "tc no fs", device: BlockDevice{FSType: ""}, expected: true, expectErr: false}, + {label: "tc xfs", device: BlockDevice{FSType: "xfs"}, expected: false, expectErr: false}, + {label: "tc swap", device: BlockDevice{FSType: "swap"}, expected: false, expectErr: false}, + } + for _, tc := range testcases { + result, err := NewHostLSBLK(nil, "", "").GetFilterMap()[noFilesystemSignature](tc.device) + assert.Equal(t, tc.expected, result) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + } +} + +func TestNoChildren(t *testing.T) { + testcases := []filterTestCase{ + {label: "tc child", device: BlockDevice{Name: "dev1", Children: []BlockDevice{{Name: "child1"}}}, expected: false, expectErr: false}, + {label: "tc no child", device: BlockDevice{Name: "dev2", Children: []BlockDevice{}}, expected: true, expectErr: false}, + } + for _, tc := range testcases { + result, err := NewHostLSBLK(nil, "", "").GetFilterMap()[noChildren](tc.device) + assert.Equal(t, tc.expected, result) + if 
tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + } +} + +func TestIsUsableDeviceType(t *testing.T) { + testcases := []filterTestCase{ + {label: "tc ROM", device: BlockDevice{Name: "dev1", Type: "rom"}, expected: false, expectErr: false}, + {label: "tc Disk", device: BlockDevice{Name: "dev2", Type: "disk"}, expected: true, expectErr: false}, + } + for _, tc := range testcases { + result, err := NewHostLSBLK(nil, "", "").GetFilterMap()[usableDeviceType](tc.device) + assert.Equal(t, tc.expected, result) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + } +} + +func TestNoBiosBootInPartLabel(t *testing.T) { + testcases := []filterTestCase{ + {label: "tc 1", device: BlockDevice{Name: "dev1", PartLabel: ""}, expected: true, expectErr: false}, + {label: "tc 2", device: BlockDevice{Name: "dev2", PartLabel: "abc"}, expected: true, expectErr: false}, + {label: "tc 3", device: BlockDevice{Name: "dev3", PartLabel: "bios"}, expected: false, expectErr: false}, + {label: "tc 4", device: BlockDevice{Name: "dev4", PartLabel: "BIOS"}, expected: false, expectErr: false}, + {label: "tc 5", device: BlockDevice{Name: "dev5", PartLabel: "boot"}, expected: false, expectErr: false}, + {label: "tc 6", device: BlockDevice{Name: "dev6", PartLabel: "BOOT"}, expected: false, expectErr: false}, + } + for _, tc := range testcases { + result, err := NewHostLSBLK(nil, "", "").GetFilterMap()[noBiosBootInPartLabel](tc.device) + assert.Equal(t, tc.expected, result) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + } +} + +func TestNoReservedInPartLabel(t *testing.T) { + testcases := []filterTestCase{ + {label: "tc 1", device: BlockDevice{Name: "dev1", PartLabel: ""}, expected: true}, + {label: "tc 2", device: BlockDevice{Name: "dev2", PartLabel: "abc"}, expected: true}, + {label: "tc 3", device: BlockDevice{Name: "dev3", PartLabel: "reserved"}, expected: false}, + {label: "tc 4", device: 
BlockDevice{Name: "dev4", PartLabel: "RESERVED"}, expected: false}, + {label: "tc 5", device: BlockDevice{Name: "dev5", PartLabel: "Reserved"}, expected: false}, + } + for _, tc := range testcases { + result, err := NewHostLSBLK(nil, "", "").GetFilterMap()[noReservedInPartLabel](tc.device) + assert.NoError(t, err) + assert.Equal(t, tc.expected, result) + } +} diff --git a/pkg/internal/block_device.go b/pkg/lsblk/lsblk.go similarity index 66% rename from pkg/internal/block_device.go rename to pkg/lsblk/lsblk.go index 73c514121..967899632 100644 --- a/pkg/internal/block_device.go +++ b/pkg/lsblk/lsblk.go @@ -1,20 +1,4 @@ -/* -Copyright © 2023 Red Hat, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internal +package lsblk import ( "encoding/json" @@ -22,25 +6,16 @@ import ( "os" "path/filepath" "strings" + + "github.com/openshift/lvm-operator/pkg/exec" ) var ( - mountFile = "/proc/1/mountinfo" + DefaultMountinfo = "/proc/1/mountinfo" + DefaultLosetup = "/usr/sbin/losetup" ) const ( - // StateSuspended is a possible value of BlockDevice.State - StateSuspended = "suspended" - - // DeviceTypeLoop is the device type for loop devices in lsblk output - DeviceTypeLoop = "loop" - - // DeviceTypeROM is the device type for ROM devices in lsblk output - DeviceTypeROM = "rom" - - // DeviceTypeLVM is the device type for lvm devices in lsblk output - DeviceTypeLVM = "lvm" - // mount string to find if a path is part of kubernetes pluginString = "plugins/kubernetes.io" ) @@ -66,14 +41,44 @@ type BlockDevice struct { DevicePath string } +type LSBLK interface { + ListBlockDevices() ([]BlockDevice, error) + IsUsableLoopDev(b BlockDevice) (bool, error) + GetFilterMap() map[string]func(BlockDevice) (bool, error) + SetFilterMap(newFilterMap map[string]func(BlockDevice) (bool, error)) + ResetFilterMap() +} + +type HostLSBLK struct { + exec.Executor + mountInfo string + losetup string + filterMap map[string]func(BlockDevice) (bool, error) +} + +func NewHostLSBLK(executor exec.Executor, mountInfo, losetup string) *HostLSBLK { + lsblk := &HostLSBLK{ + Executor: executor, + mountInfo: mountInfo, + losetup: losetup, + } + lsblk.ResetFilterMap() + return lsblk +} + +// HasChildren checks if the disk has partitions +func (b BlockDevice) HasChildren() bool { + return len(b.Children) > 0 +} + // ListBlockDevices lists the block devices using the lsblk command -func ListBlockDevices(exec Executor) ([]BlockDevice, error) { +func (lsblk *HostLSBLK) ListBlockDevices() ([]BlockDevice, error) { // var output bytes.Buffer var blockDeviceMap map[string][]BlockDevice columns := "NAME,ROTA,TYPE,SIZE,MODEL,VENDOR,RO,STATE,KNAME,SERIAL,PARTLABEL,FSTYPE" args := []string{"--json", 
"--paths", "-o", columns} - output, err := exec.ExecuteCommandWithOutput("lsblk", args...) + output, err := lsblk.ExecuteCommandWithOutput("lsblk", args...) if err != nil { return []BlockDevice{}, err } @@ -89,46 +94,40 @@ func ListBlockDevices(exec Executor) ([]BlockDevice, error) { // IsUsableLoopDev returns true if the loop device isn't in use by Kubernetes // by matching the back file path against a standard string used to mount devices // from host into pods -func (b BlockDevice) IsUsableLoopDev(exec Executor) (bool, error) { +func (lsblk *HostLSBLK) IsUsableLoopDev(b BlockDevice) (bool, error) { // holds back-file string of the loop device var loopDeviceMap map[string][]struct { BackFile string `json:"back-file"` } - usable := true args := []string{b.Name, "-O", "BACK-FILE", "--json"} - output, err := exec.ExecuteCommandWithOutput(losetupPath, args...) + output, err := lsblk.ExecuteCommandWithOutput(lsblk.losetup, args...) if err != nil { - return usable, err + return true, err } err = json.Unmarshal([]byte(output), &loopDeviceMap) if err != nil { - return usable, err + return false, err } for _, backFile := range loopDeviceMap["loopdevices"] { if strings.Contains(backFile.BackFile, pluginString) { // this loop device is being used by kubernetes and can't be // added to volume group - usable = false + return false, nil } } - return usable, nil -} - -// HasChildren checks if the disk has partitions -func (b BlockDevice) HasChildren() bool { - return len(b.Children) > 0 + return true, nil } // HasBindMounts checks for bind mounts and returns mount point for a device by parsing `proc/1/mountinfo`. // HostPID should be set to true inside the POD spec to get details of host's mount points inside `proc/1/mountinfo`. 
-func (b BlockDevice) HasBindMounts() (bool, string, error) { - data, err := os.ReadFile(mountFile) +func (lsblk *HostLSBLK) HasBindMounts(b BlockDevice) (bool, string, error) { + data, err := os.ReadFile(lsblk.mountInfo) if err != nil { - return false, "", fmt.Errorf("failed to read file %s: %v", mountFile, err) + return false, "", fmt.Errorf("failed to read file %s: %v", lsblk.mountInfo, err) } mountString := string(data) diff --git a/pkg/vgmanager/lvm.go b/pkg/lvm/lvm.go similarity index 54% rename from pkg/vgmanager/lvm.go rename to pkg/lvm/lvm.go index 12a2426ab..cb972d6d9 100644 --- a/pkg/vgmanager/lvm.go +++ b/pkg/lvm/lvm.go @@ -14,13 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package vgmanager +package lvm import ( "encoding/json" "fmt" + "strings" - "github.com/openshift/lvm-operator/pkg/internal" + "github.com/openshift/lvm-operator/pkg/exec" ) type lvmError string @@ -32,6 +33,8 @@ var ( ) const ( + DefaultChunkSize = "128" + lvmCmd = "/usr/sbin/lvm" vgCreateCmd = "/usr/sbin/vgcreate" vgExtendCmd = "/usr/sbin/vgextend" @@ -43,8 +46,8 @@ const ( lvChangeCmd = "/usr/sbin/lvchange" ) -// vgsOutput represents the output of the `vgs --reportformat json` command -type vgsOutput struct { +// VGReport represents the output of the `vgs --reportformat json` command +type VGReport struct { Report []struct { Vg []struct { Name string `json:"vg_name"` @@ -53,8 +56,8 @@ type vgsOutput struct { } `json:"report"` } -// pvsOutput represents the output of the `pvs --reportformat json` command -type pvsOutput struct { +// PVReport represents the output of the `pvs --reportformat json` command +type PVReport struct { Report []struct { Pv []struct { Name string `json:"pv_name"` @@ -63,8 +66,8 @@ type pvsOutput struct { } `json:"report"` } -// lvsOutput represents the output of the `lvs --reportformat json` command -type lvsOutput struct { +// LVReport represents the output of the `lvs --reportformat json` 
command +type LVReport struct { Report []struct { Lv []struct { Name string `json:"lv_name"` @@ -77,6 +80,31 @@ type lvsOutput struct { } `json:"report"` } +type LVM interface { + CreateVG(vg VolumeGroup) error + ExtendVG(vg VolumeGroup, pvs []string) (VolumeGroup, error) + DeleteVG(vg VolumeGroup) error + GetVG(name string) (VolumeGroup, error) + + ListPVs(vgName string) ([]string, error) + ListVGs() ([]VolumeGroup, error) + ListLVsByName(vgName string) ([]string, error) + ListLVs(vgName string) (*LVReport, error) + + LVExists(lvName, vgName string) (bool, error) + CreateLV(lvName, vgName string, sizePercent int) error + ExtendLV(lvName, vgName string, sizePercent int) error + DeleteLV(lvName, vgName string) error +} + +type HostLVM struct { + exec.Executor +} + +func NewHostLVM(executor exec.Executor) LVM { + return &HostLVM{executor} +} + // VolumeGroup represents a volume group of linux lvm. type VolumeGroup struct { // Name is the name of the volume group @@ -90,19 +118,19 @@ type VolumeGroup struct { } // Create creates a new volume group -func (vg VolumeGroup) Create(exec internal.Executor, pvs []string) error { +func (hlvm *HostLVM) CreateVG(vg VolumeGroup) error { if vg.Name == "" { return fmt.Errorf("failed to create volume group. Volume group name is empty") } - if len(pvs) == 0 { + if len(vg.PVs) == 0 { return fmt.Errorf("failed to create volume group. Physical volume list is empty") } args := []string{vg.Name} - args = append(args, pvs...) + args = append(args, vg.PVs...) - _, err := exec.ExecuteCommandWithOutputAsHost(vgCreateCmd, args...) + _, err := hlvm.ExecuteCommandWithOutputAsHost(vgCreateCmd, args...) if err != nil { return fmt.Errorf("failed to create volume group %q. 
%v", vg.Name, err) } @@ -110,39 +138,41 @@ func (vg VolumeGroup) Create(exec internal.Executor, pvs []string) error { return nil } -// Extend extends the volume group only if new physical volumes are available -func (vg VolumeGroup) Extend(exec internal.Executor, pvs []string) error { +// ExtendVG Extend extends the volume group only if new physical volumes are available +func (hlvm *HostLVM) ExtendVG(vg VolumeGroup, pvs []string) (VolumeGroup, error) { if vg.Name == "" { - return fmt.Errorf("failed to extend volume group. Volume group name is empty") + return VolumeGroup{}, fmt.Errorf("failed to extend volume group. Volume group name is empty") } if len(pvs) == 0 { - return fmt.Errorf("failed to extend volume group. Physical volume list is empty") + return VolumeGroup{}, fmt.Errorf("failed to extend volume group. Physical volume list is empty") } args := []string{vg.Name} args = append(args, pvs...) - _, err := exec.ExecuteCommandWithOutputAsHost(vgExtendCmd, args...) + _, err := hlvm.ExecuteCommandWithOutputAsHost(vgExtendCmd, args...) if err != nil { - return fmt.Errorf("failed to extend volume group %q. %v", vg.Name, err) + return VolumeGroup{}, fmt.Errorf("failed to extend volume group %q. %v", vg.Name, err) } - return nil + vg.PVs = append(vg.PVs, pvs...) + + return vg, nil } // Delete deletes a volume group and the physical volumes associated with it -func (vg VolumeGroup) Delete(exec internal.Executor) error { +func (hlvm *HostLVM) DeleteVG(vg VolumeGroup) error { // Remove Volume Group vgArgs := []string{vg.Name} - _, err := exec.ExecuteCommandWithOutputAsHost(vgRemoveCmd, vgArgs...) + _, err := hlvm.ExecuteCommandWithOutputAsHost(vgRemoveCmd, vgArgs...) if err != nil { return fmt.Errorf("failed to remove volume group %q. %v", vg.Name, err) } // Remove physical volumes pvArgs := vg.PVs - _, err = exec.ExecuteCommandWithOutputAsHost(pvRemoveCmd, pvArgs...) + _, err = hlvm.ExecuteCommandWithOutputAsHost(pvRemoveCmd, pvArgs...) 
if err != nil { return fmt.Errorf("failed to remove physical volumes for the volume group %q. %v", vg.Name, err) } @@ -150,18 +180,18 @@ func (vg VolumeGroup) Delete(exec internal.Executor) error { } // GetVolumeGroup returns a volume group along with the associated physical volumes -func GetVolumeGroup(exec internal.Executor, name string) (*VolumeGroup, error) { - res := new(vgsOutput) +func (hlvm *HostLVM) GetVG(name string) (VolumeGroup, error) { + res := new(VGReport) args := []string{ "vgs", "--units", "g", "--reportformat", "json", } - if err := execute(exec, res, args...); err != nil { - return nil, fmt.Errorf("failed to list volume groups. %v", err) + if err := hlvm.execute(res, args...); err != nil { + return VolumeGroup{}, fmt.Errorf("failed to list volume groups. %v", err) } vgFound := false - volumeGroup := &VolumeGroup{} + volumeGroup := VolumeGroup{} for _, report := range res.Report { for _, vg := range report.Vg { if vg.Name == name { @@ -174,13 +204,13 @@ func GetVolumeGroup(exec internal.Executor, name string) (*VolumeGroup, error) { } if !vgFound { - return nil, ErrVolumeGroupNotFound + return VolumeGroup{}, ErrVolumeGroupNotFound } // Get Physical Volumes associated with the Volume Group - pvs, err := ListPhysicalVolumes(exec, name) + pvs, err := hlvm.ListPVs(name) if err != nil { - return nil, fmt.Errorf("failed to list physical volumes for volume group %q. %v", name, err) + return VolumeGroup{}, fmt.Errorf("failed to list physical volumes for volume group %q. 
%v", name, err) } volumeGroup.PVs = pvs @@ -188,12 +218,12 @@ func GetVolumeGroup(exec internal.Executor, name string) (*VolumeGroup, error) { } // ListPhysicalVolumes returns list of physical volumes used to create the given volume group -func ListPhysicalVolumes(exec internal.Executor, vgName string) ([]string, error) { - res := new(pvsOutput) +func (hlvm *HostLVM) ListPVs(vgName string) ([]string, error) { + res := new(PVReport) args := []string{ "pvs", "-S", fmt.Sprintf("vgname=%s", vgName), "--reportformat", "json", } - if err := execute(exec, res, args...); err != nil { + if err := hlvm.execute(res, args...); err != nil { return []string{}, err } @@ -206,18 +236,19 @@ func ListPhysicalVolumes(exec internal.Executor, vgName string) ([]string, error return pvs, nil } +func VGSCommandArgs() []string { + return []string{"vgs", "--reportformat", "json"} +} + // ListVolumeGroups lists all volume groups and the physical volumes associated with them. -func ListVolumeGroups(exec internal.Executor) ([]VolumeGroup, error) { - res := new(vgsOutput) - args := []string{ - "vgs", "--reportformat", "json", - } +func (hlvm *HostLVM) ListVGs() ([]VolumeGroup, error) { + res := new(VGReport) - if err := execute(exec, res, args...); err != nil { + if err := hlvm.execute(res, VGSCommandArgs()...); err != nil { return nil, fmt.Errorf("failed to list volume groups. %v", err) } - vgList := []VolumeGroup{} + var vgList []VolumeGroup for _, report := range res.Report { for _, vg := range report.Vg { vgList = append(vgList, VolumeGroup{Name: vg.Name, PVs: []string{}}) @@ -226,7 +257,7 @@ func ListVolumeGroups(exec internal.Executor) ([]VolumeGroup, error) { // Get Physical Volumes associated with the Volume Group for i, vg := range vgList { - pvs, err := ListPhysicalVolumes(exec, vg.Name) + pvs, err := hlvm.ListPVs(vg.Name) if err != nil { return nil, fmt.Errorf("failed to list physical volumes for volume group %q. 
%v", vg.Name, err) } @@ -237,8 +268,8 @@ func ListVolumeGroups(exec internal.Executor) ([]VolumeGroup, error) { } // ListLogicalVolumes returns list of logical volumes for a volume group -func ListLogicalVolumes(exec internal.Executor, vgName string) ([]string, error) { - res, err := GetLVSOutput(exec, vgName) +func (hlvm *HostLVM) ListLVsByName(vgName string) ([]string, error) { + res, err := hlvm.ListLVs(vgName) if err != nil { return []string{}, err } @@ -252,13 +283,13 @@ func ListLogicalVolumes(exec internal.Executor, vgName string) ([]string, error) return lvs, nil } -// GetLVSOutput returns the output for `lvs` command in json format -func GetLVSOutput(exec internal.Executor, vgName string) (*lvsOutput, error) { - res := new(lvsOutput) +// LVReport returns the output for `lvs` command in json format +func (hlvm *HostLVM) ListLVs(vgName string) (*LVReport, error) { + res := new(LVReport) args := []string{ "lvs", "-S", fmt.Sprintf("vgname=%s", vgName), "--units", "g", "--reportformat", "json", } - if err := execute(exec, res, args...); err != nil { + if err := hlvm.execute(res, args...); err != nil { return nil, err } @@ -266,8 +297,8 @@ func GetLVSOutput(exec internal.Executor, vgName string) (*lvsOutput, error) { } // LVExists checks if a logical volume exists in a volume group -func LVExists(exec internal.Executor, lvName, vgName string) (bool, error) { - lvs, err := ListLogicalVolumes(exec, vgName) +func (hlvm *HostLVM) LVExists(lvName, vgName string) (bool, error) { + lvs, err := hlvm.ListLVsByName(vgName) if err != nil { return false, err } @@ -282,26 +313,53 @@ func LVExists(exec internal.Executor, lvName, vgName string) (bool, error) { } // DeleteLV deactivates the logical volume and deletes it -func DeleteLV(exec internal.Executor, lvName, vgName string) error { +func (hlvm *HostLVM) DeleteLV(lvName, vgName string) error { lv := fmt.Sprintf("%s/%s", vgName, lvName) // deactivate logical volume - _, err := 
exec.ExecuteCommandWithOutputAsHost(lvChangeCmd, "-an", lv) + _, err := hlvm.ExecuteCommandWithOutputAsHost(lvChangeCmd, "-an", lv) if err != nil { - return fmt.Errorf("failed to deactivate thin pool %q in volume group %q. %v", lvName, vgName, err) + return fmt.Errorf("failed to deactivate thin pool %q in volume group %q. %w", lvName, vgName, err) } // delete logical volume - _, err = exec.ExecuteCommandWithOutputAsHost(lvRemoveCmd, lv) + _, err = hlvm.ExecuteCommandWithOutputAsHost(lvRemoveCmd, lv) if err != nil { - return fmt.Errorf("failed to delete logical volume %q in volume group %q. %v", lvName, vgName, err) + return fmt.Errorf("failed to delete logical volume %q in volume group %q. %w", lvName, vgName, err) + } + + return nil +} + +// CreateLV creates the logical volume +func (hlvm *HostLVM) CreateLV(lvName, vgName string, sizePercent int) error { + + args := []string{"-l", fmt.Sprintf("%d%%FREE", sizePercent), + "-c", DefaultChunkSize, "-Z", "y", "-T", fmt.Sprintf("%s/%s", vgName, lvName)} + + if _, err := hlvm.ExecuteCommandWithOutputAsHost(lvCreateCmd, args...); err != nil { + return fmt.Errorf("failed to create logical volume %q in the volume group %q using command '%s': %w", + lvName, vgName, fmt.Sprintf("%s %s", lvCreateCmd, strings.Join(args, " ")), err) + } + + return nil +} + +// ExtendLV extends the logical volume +func (hlvm *HostLVM) ExtendLV(lvName, vgName string, sizePercent int) error { + + args := []string{"-l", fmt.Sprintf("%d%%Vg", sizePercent), fmt.Sprintf("%s/%s", vgName, lvName)} + + if _, err := hlvm.ExecuteCommandWithOutputAsHost(lvExtendCmd, args...); err != nil { + return fmt.Errorf("failed to extend logical volume %q in the volume group %q using command '%s': %w", + lvName, vgName, fmt.Sprintf("%s %s", lvExtendCmd, strings.Join(args, " ")), err) } return nil } -func execute(exec internal.Executor, v interface{}, args ...string) error { - output, err := exec.ExecuteCommandWithOutputAsHost(lvmCmd, args...) 
+func (hlvm *HostLVM) execute(v interface{}, args ...string) error { + output, err := hlvm.ExecuteCommandWithOutputAsHost(lvmCmd, args...) if err != nil { return fmt.Errorf("failed to execute command. %v", err) } diff --git a/pkg/vgmanager/lvm_test.go b/pkg/lvm/lvm_test.go similarity index 86% rename from pkg/vgmanager/lvm_test.go rename to pkg/lvm/lvm_test.go index 49ea8c63f..0583ab05b 100644 --- a/pkg/vgmanager/lvm_test.go +++ b/pkg/lvm/lvm_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package vgmanager +package lvm import ( "fmt" @@ -91,7 +91,7 @@ func TestGetVolumeGroup(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - vg, err := GetVolumeGroup(executor, tt.vgName) + vg, err := NewHostLVM(executor).GetVG(tt.vgName) if tt.wantErr { assert.Error(t, err) } else { @@ -127,7 +127,7 @@ func TestListVolumeGroup(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - vgs, err := ListVolumeGroups(executor) + vgs, err := NewHostLVM(executor).ListVGs() if tt.wantErr { assert.Error(t, err) } else { @@ -147,13 +147,12 @@ func TestListVolumeGroup(t *testing.T) { func TestCreateVolumeGroup(t *testing.T) { tests := []struct { name string - volumeGroup *VolumeGroup - pvs []string + volumeGroup VolumeGroup wantErr bool }{ - {"No Volume Group Name", &VolumeGroup{}, []string{}, true}, - {"No Physical Volumes", &VolumeGroup{Name: "vg1"}, []string{}, true}, - {"Volume Group created successfully", &VolumeGroup{Name: "vg1"}, []string{"/dev/sdb"}, false}, + {"No Volume Group Name", VolumeGroup{}, true}, + {"No Physical Volumes", VolumeGroup{Name: "vg1"}, true}, + {"Volume Group created successfully", VolumeGroup{Name: "vg1", PVs: []string{"/dev/sdb"}}, false}, } executor := &mockExec.MockExecutor{ @@ -164,7 +163,7 @@ func TestCreateVolumeGroup(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := 
tt.volumeGroup.Create(executor, tt.pvs) + err := NewHostLVM(executor).CreateVG(tt.volumeGroup) if tt.wantErr { assert.Error(t, err) } else { @@ -177,12 +176,12 @@ func TestCreateVolumeGroup(t *testing.T) { func TestExtendVolumeGroup(t *testing.T) { tests := []struct { name string - volumeGroup *VolumeGroup + volumeGroup VolumeGroup PVs []string wantErr bool }{ - {"No PVs are available", &VolumeGroup{Name: "vg1"}, []string{}, true}, - {"New PVs are available", &VolumeGroup{Name: "vg1"}, []string{"/dev/sdb", "/dev/sdc"}, false}, + {"No PVs are available", VolumeGroup{Name: "vg1"}, []string{}, true}, + {"New PVs are available", VolumeGroup{Name: "vg1"}, []string{"/dev/sdb", "/dev/sdc"}, false}, } executor := &mockExec.MockExecutor{ @@ -193,12 +192,13 @@ func TestExtendVolumeGroup(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := tt.volumeGroup.Extend(executor, tt.PVs) + newVG, err := NewHostLVM(executor).ExtendVG(tt.volumeGroup, tt.PVs) if tt.wantErr { assert.Error(t, err) } else { assert.NoError(t, err) } + assert.Equal(t, newVG.PVs, tt.PVs) }) } } diff --git a/pkg/lvmd/lvmd_config.go b/pkg/lvmd/lvmd_config.go new file mode 100644 index 000000000..0c4180bd5 --- /dev/null +++ b/pkg/lvmd/lvmd_config.go @@ -0,0 +1,65 @@ +package lvmd + +import ( + "fmt" + "os" + "path/filepath" + + lvmdCMD "github.com/topolvm/topolvm/pkg/lvmd/cmd" + "sigs.k8s.io/yaml" +) + +var DefaultLVMDConfigFilePath = filepath.Join("/etc", "topolvm", "lvmd.yaml") + +func NewDefaultLMVDFileConfig() LVMDFileConfig { + return NewLVMDFileConfig(DefaultLVMDConfigFilePath) +} + +func NewLVMDFileConfig(path string) LVMDFileConfig { + return LVMDFileConfig{path: path} +} + +type LVMDConfig interface { + Load() (*lvmdCMD.Config, error) + Save(config *lvmdCMD.Config) error + Delete() error +} + +type LVMDFileConfig struct { + path string +} + +func (c LVMDFileConfig) Load() (*lvmdCMD.Config, error) { + cfgBytes, err := os.ReadFile(c.path) + if os.IsNotExist(err) { + // If 
the file does not exist, return nil for both + return nil, nil + } else if err != nil { + return nil, fmt.Errorf("failed to load config file %s: %w", c.path, err) + } else { + lvmdconfig := &lvmdCMD.Config{} + if err = yaml.Unmarshal(cfgBytes, lvmdconfig); err != nil { + return nil, fmt.Errorf("failed to unmarshal config file %s: %w", c.path, err) + } + return lvmdconfig, nil + } +} + +func (c LVMDFileConfig) Save(config *lvmdCMD.Config) error { + out, err := yaml.Marshal(config) + if err == nil { + err = os.WriteFile(c.path, out, 0600) + } + if err != nil { + return fmt.Errorf("failed to save config file %s: %w", c.path, err) + } + return nil +} + +func (c LVMDFileConfig) Delete() error { + err := os.Remove(c.path) + if err != nil { + return fmt.Errorf("failed to delete config file %s: %w", c.path, err) + } + return err +} diff --git a/pkg/vgmanager/devices.go b/pkg/vgmanager/devices.go index 5de7d2d7b..bd9c9bd88 100644 --- a/pkg/vgmanager/devices.go +++ b/pkg/vgmanager/devices.go @@ -21,15 +21,15 @@ import ( "errors" "fmt" "path/filepath" - "strings" lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1" - "github.com/openshift/lvm-operator/pkg/internal" + "github.com/openshift/lvm-operator/pkg/lsblk" + "github.com/openshift/lvm-operator/pkg/lvm" "sigs.k8s.io/controller-runtime/pkg/log" ) // addDevicesToVG creates or extends a volume group using the provided devices. 
-func (r *VGReconciler) addDevicesToVG(ctx context.Context, vgs []VolumeGroup, vgName string, devices []internal.BlockDevice) error { +func (r *VGReconciler) addDevicesToVG(ctx context.Context, vgs []lvm.VolumeGroup, vgName string, devices []lsblk.BlockDevice) error { logger := log.FromContext(ctx) if len(devices) < 1 { @@ -37,24 +37,14 @@ func (r *VGReconciler) addDevicesToVG(ctx context.Context, vgs []VolumeGroup, vg } // check if volume group is already present - vgFound := false + var existingVolumeGroup *lvm.VolumeGroup for _, vg := range vgs { if vg.Name == vgName { - vgFound = true + vgCopy := vg; existingVolumeGroup = &vgCopy } } - // TODO: Check if we can use functions from lvm.go here - var cmd string - if vgFound { - logger.Info("extending an existing volume group", "VGName", vgName) - cmd = "/usr/sbin/vgextend" - } else { - logger.Info("creating a new volume group", "VGName", vgName) - cmd = "/usr/sbin/vgcreate" - } - - args := []string{vgName} + var args []string for _, device := range devices { if device.DevicePath != "" { args = append(args, device.DevicePath) @@ -63,16 +53,22 @@ func (r *VGReconciler) addDevicesToVG(ctx context.Context, vgs []VolumeGroup, vg } } - _, err := r.executor.ExecuteCommandWithOutputAsHost(cmd, args...) 
- if err != nil { - return fmt.Errorf("failed to create or extend volume group %q using command '%s': %v", vgName, fmt.Sprintf("%s %s", cmd, strings.Join(args, " ")), err) + if existingVolumeGroup != nil { + logger.Info("extending an existing volume group", "VGName", vgName) + if _, err := r.LVM.ExtendVG(*existingVolumeGroup, args); err != nil { + return fmt.Errorf("failed to extend volume group %s: %w", vgName, err) + } + } else { + logger.Info("creating a new volume group", "VGName", vgName) + if err := r.LVM.CreateVG(lvm.VolumeGroup{Name: vgName, PVs: args}); err != nil { + return fmt.Errorf("failed to create volume group %s: %w", vgName, err) + } } - return nil } // getAvailableDevicesForVG determines the available devices that can be used to create a volume group. -func (r *VGReconciler) getAvailableDevicesForVG(ctx context.Context, blockDevices []internal.BlockDevice, vgs []VolumeGroup, volumeGroup *lvmv1alpha1.LVMVolumeGroup) ([]internal.BlockDevice, error) { +func (r *VGReconciler) getAvailableDevicesForVG(ctx context.Context, blockDevices []lsblk.BlockDevice, vgs []lvm.VolumeGroup, volumeGroup *lvmv1alpha1.LVMVolumeGroup) ([]lsblk.BlockDevice, error) { // filter devices based on DeviceSelector.Paths if specified availableDevices, err := r.filterMatchingDevices(ctx, blockDevices, vgs, volumeGroup) if err != nil { @@ -84,10 +80,10 @@ func (r *VGReconciler) getAvailableDevicesForVG(ctx context.Context, blockDevice // filterAvailableDevices returns: // availableDevices: the list of blockdevices considered available -func (r *VGReconciler) filterAvailableDevices(ctx context.Context, blockDevices []internal.BlockDevice) []internal.BlockDevice { +func (r *VGReconciler) filterAvailableDevices(ctx context.Context, blockDevices []lsblk.BlockDevice) []lsblk.BlockDevice { logger := log.FromContext(ctx) - var availableDevices []internal.BlockDevice + var availableDevices []lsblk.BlockDevice // using a label so `continue DeviceLoop` can be used to skip devices 
DeviceLoop: for _, blockDevice := range blockDevices { @@ -98,9 +94,9 @@ DeviceLoop: } logger = logger.WithValues("Device.Name", blockDevice.Name) - for name, filter := range FilterMap { + for name, filter := range r.LSBLK.GetFilterMap() { logger := logger.WithValues("filter.Name", name) - valid, err := filter(blockDevice, r.executor) + valid, err := filter(blockDevice) if err != nil { logger.Error(err, "filter error") continue DeviceLoop @@ -115,10 +111,10 @@ DeviceLoop: } // filterMatchingDevices filters devices based on DeviceSelector.Paths if specified. -func (r *VGReconciler) filterMatchingDevices(ctx context.Context, blockDevices []internal.BlockDevice, vgs []VolumeGroup, volumeGroup *lvmv1alpha1.LVMVolumeGroup) ([]internal.BlockDevice, error) { +func (r *VGReconciler) filterMatchingDevices(ctx context.Context, blockDevices []lsblk.BlockDevice, vgs []lvm.VolumeGroup, volumeGroup *lvmv1alpha1.LVMVolumeGroup) ([]lsblk.BlockDevice, error) { logger := log.FromContext(ctx) - var filteredBlockDevices []internal.BlockDevice + var filteredBlockDevices []lsblk.BlockDevice devicesAlreadyInVG := false if volumeGroup.Spec.DeviceSelector != nil { @@ -186,7 +182,7 @@ func (r *VGReconciler) filterMatchingDevices(ctx context.Context, blockDevices [ return blockDevices, nil } -func isDeviceAlreadyPartOfVG(vgs []VolumeGroup, diskName string, volumeGroup *lvmv1alpha1.LVMVolumeGroup) bool { +func isDeviceAlreadyPartOfVG(vgs []lvm.VolumeGroup, diskName string, volumeGroup *lvmv1alpha1.LVMVolumeGroup) bool { for _, vg := range vgs { if vg.Name == volumeGroup.Name { for _, pv := range vg.PVs { @@ -200,7 +196,7 @@ func isDeviceAlreadyPartOfVG(vgs []VolumeGroup, diskName string, volumeGroup *lv return false } -func hasExactDisk(blockDevices []internal.BlockDevice, deviceName string) (internal.BlockDevice, bool) { +func hasExactDisk(blockDevices []lsblk.BlockDevice, deviceName string) (lsblk.BlockDevice, bool) { for _, blockDevice := range blockDevices { if blockDevice.KName == 
deviceName { return blockDevice, true @@ -211,7 +207,7 @@ func hasExactDisk(blockDevices []internal.BlockDevice, deviceName string) (inter } } } - return internal.BlockDevice{}, false + return lsblk.BlockDevice{}, false } func checkDuplicateDeviceSelectorPaths(selector *lvmv1alpha1.DeviceSelector) error { @@ -255,22 +251,22 @@ func checkDuplicateDeviceSelectorPaths(selector *lvmv1alpha1.DeviceSelector) err // // An error will be returned if the device is invalid // No error and an empty BlockDevice object will be returned if this device should be skipped (ex: duplicate device) -func getValidDevice(devicePath string, blockDevices []internal.BlockDevice, vgs []VolumeGroup, volumeGroup *lvmv1alpha1.LVMVolumeGroup) (internal.BlockDevice, error) { +func getValidDevice(devicePath string, blockDevices []lsblk.BlockDevice, vgs []lvm.VolumeGroup, volumeGroup *lvmv1alpha1.LVMVolumeGroup) (lsblk.BlockDevice, error) { // Make sure the symlink exists diskName, err := filepath.EvalSymlinks(devicePath) if err != nil { - return internal.BlockDevice{}, fmt.Errorf("unable to find symlink for disk path %s: %v", devicePath, err) + return lsblk.BlockDevice{}, fmt.Errorf("unable to find symlink for disk path %s: %v", devicePath, err) } // Make sure this isn't a duplicate in the VG if isDeviceAlreadyPartOfVG(vgs, diskName, volumeGroup) { - return internal.BlockDevice{}, nil // No error, we just don't want a duplicate + return lsblk.BlockDevice{}, nil // No error, we just don't want a duplicate } // Make sure the block device exists blockDevice, ok := hasExactDisk(blockDevices, diskName) if !ok { - return internal.BlockDevice{}, fmt.Errorf("can not find device name %s in the available block devices", devicePath) + return lsblk.BlockDevice{}, fmt.Errorf("can not find device name %s in the available block devices", devicePath) } blockDevice.DevicePath = devicePath diff --git a/pkg/vgmanager/devices_test.go b/pkg/vgmanager/devices_test.go index 21db2f788..37f03387c 100644 --- 
a/pkg/vgmanager/devices_test.go +++ b/pkg/vgmanager/devices_test.go @@ -9,7 +9,8 @@ import ( "github.com/go-logr/logr/testr" "github.com/openshift/lvm-operator/api/v1alpha1" - "github.com/openshift/lvm-operator/pkg/internal" + "github.com/openshift/lvm-operator/pkg/lsblk" + "github.com/openshift/lvm-operator/pkg/lvm" "github.com/stretchr/testify/assert" "sigs.k8s.io/controller-runtime/pkg/log" @@ -32,15 +33,18 @@ func TestAvailableDevicesForVG(t *testing.T) { } r := &VGReconciler{} - + r.LSBLK = &lsblk.HostLSBLK{} + r.LSBLK.ResetFilterMap() + defaultFilters := r.LSBLK.GetFilterMap() // remove noBindMounts filter as it reads `proc/1/mountinfo` file. - delete(FilterMap, "noBindMounts") + delete(defaultFilters, "noBindMounts") + r.LSBLK.SetFilterMap(defaultFilters) testCases := []struct { description string volumeGroup v1alpha1.LVMVolumeGroup - existingBlockDevices []internal.BlockDevice - existingVGs []VolumeGroup + existingBlockDevices []lsblk.BlockDevice + existingVGs []lvm.VolumeGroup numOfAvailableDevices int expectError bool }{ @@ -51,7 +55,7 @@ func TestAvailableDevicesForVG(t *testing.T) { Name: "vg1", }, }, - existingBlockDevices: []internal.BlockDevice{ + existingBlockDevices: []lsblk.BlockDevice{ { Name: "/dev/nvme1n1", Type: "disk", @@ -70,7 +74,7 @@ func TestAvailableDevicesForVG(t *testing.T) { Name: "vg1", }, }, - existingBlockDevices: []internal.BlockDevice{ + existingBlockDevices: []lsblk.BlockDevice{ { Name: "/dev/nvme1n1", Type: "disk", @@ -89,7 +93,7 @@ func TestAvailableDevicesForVG(t *testing.T) { Name: "vg1", }, }, - existingBlockDevices: []internal.BlockDevice{ + existingBlockDevices: []lsblk.BlockDevice{ { Name: "/dev/nvme1n1", Type: "disk", @@ -108,7 +112,7 @@ func TestAvailableDevicesForVG(t *testing.T) { Name: "vg1", }, }, - existingBlockDevices: []internal.BlockDevice{ + existingBlockDevices: []lsblk.BlockDevice{ { Name: "/dev/nvme1n1", Type: "disk", @@ -128,7 +132,7 @@ func TestAvailableDevicesForVG(t *testing.T) { Name: "vg1", }, }, - 
existingBlockDevices: []internal.BlockDevice{ + existingBlockDevices: []lsblk.BlockDevice{ { Name: "/dev/nvme1n1", Type: "disk", @@ -148,7 +152,7 @@ func TestAvailableDevicesForVG(t *testing.T) { Name: "vg1", }, }, - existingBlockDevices: []internal.BlockDevice{ + existingBlockDevices: []lsblk.BlockDevice{ { Name: "/dev/nvme1n1", Type: "disk", @@ -168,7 +172,7 @@ func TestAvailableDevicesForVG(t *testing.T) { Name: "vg1", }, }, - existingBlockDevices: []internal.BlockDevice{ + existingBlockDevices: []lsblk.BlockDevice{ { Name: "/dev/nvme1n1", Type: "disk", @@ -176,7 +180,7 @@ func TestAvailableDevicesForVG(t *testing.T) { ReadOnly: false, State: "live", KName: "/dev/nvme1n1", - Children: []internal.BlockDevice{ + Children: []lsblk.BlockDevice{ { Name: "/dev/nvme1n1p1", ReadOnly: true, @@ -193,7 +197,7 @@ func TestAvailableDevicesForVG(t *testing.T) { Name: "vg1", }, }, - existingBlockDevices: []internal.BlockDevice{ + existingBlockDevices: []lsblk.BlockDevice{ { Name: "/dev/nvme1n1", Type: "disk", @@ -201,7 +205,7 @@ func TestAvailableDevicesForVG(t *testing.T) { ReadOnly: false, State: "live", KName: "/dev/nvme1n1", - Children: []internal.BlockDevice{ + Children: []lsblk.BlockDevice{ { Name: "/dev/nvme1n1p1", Type: "disk", @@ -237,7 +241,7 @@ func TestAvailableDevicesForVG(t *testing.T) { }, }, }, - existingBlockDevices: []internal.BlockDevice{ + existingBlockDevices: []lsblk.BlockDevice{ { Name: "nvme1n1p1", KName: calculateDevicePath(t, "nvme1n1p1"), @@ -263,7 +267,7 @@ func TestAvailableDevicesForVG(t *testing.T) { }, }, }, - existingBlockDevices: []internal.BlockDevice{}, + existingBlockDevices: []lsblk.BlockDevice{}, numOfAvailableDevices: 0, expectError: true, }, @@ -281,7 +285,7 @@ func TestAvailableDevicesForVG(t *testing.T) { }, }, }, - existingBlockDevices: []internal.BlockDevice{ + existingBlockDevices: []lsblk.BlockDevice{ { Name: "nvme1n1p1", KName: calculateDevicePath(t, "nvme1n1p1"), @@ -310,7 +314,7 @@ func TestAvailableDevicesForVG(t *testing.T) { 
}, }, }, - existingVGs: []VolumeGroup{ + existingVGs: []lvm.VolumeGroup{ { Name: "vg1", PVs: []string{ @@ -319,7 +323,7 @@ func TestAvailableDevicesForVG(t *testing.T) { }, }, }, - existingBlockDevices: []internal.BlockDevice{ + existingBlockDevices: []lsblk.BlockDevice{ { Name: "nvme1n1p1", KName: calculateDevicePath(t, "nvme1n1p1"), @@ -354,12 +358,12 @@ func TestAvailableDevicesForVG(t *testing.T) { }, }, }, - existingVGs: []VolumeGroup{ + existingVGs: []lvm.VolumeGroup{ { Name: "vg1", }, }, - existingBlockDevices: []internal.BlockDevice{ + existingBlockDevices: []lsblk.BlockDevice{ { Name: "nvme1n1p1", KName: calculateDevicePath(t, "nvme1n1p1"), @@ -386,12 +390,12 @@ func TestAvailableDevicesForVG(t *testing.T) { }, }, }, - existingVGs: []VolumeGroup{ + existingVGs: []lvm.VolumeGroup{ { Name: "vg1", }, }, - existingBlockDevices: []internal.BlockDevice{ + existingBlockDevices: []lsblk.BlockDevice{ { Name: "nvme1n1p1", KName: calculateDevicePath(t, "nvme1n1p1"), @@ -399,7 +403,7 @@ func TestAvailableDevicesForVG(t *testing.T) { Size: "279.4G", ReadOnly: false, State: "live", - Children: []internal.BlockDevice{ + Children: []lsblk.BlockDevice{ { Name: "nvme1n1p2", KName: calculateDevicePath(t, "nvme1n1p2"), @@ -431,7 +435,7 @@ func TestAvailableDevicesForVG(t *testing.T) { }, }, }, - existingBlockDevices: []internal.BlockDevice{ + existingBlockDevices: []lsblk.BlockDevice{ { Name: "nvme1n1p1", KName: calculateDevicePath(t, "nvme1n1p1"), @@ -467,7 +471,7 @@ func TestAvailableDevicesForVG(t *testing.T) { }, }, }, - existingBlockDevices: []internal.BlockDevice{ + existingBlockDevices: []lsblk.BlockDevice{ { Name: "nvme1n1p1", KName: calculateDevicePath(t, "nvme1n1p1"), @@ -494,7 +498,7 @@ func TestAvailableDevicesForVG(t *testing.T) { }, }, }, - existingBlockDevices: []internal.BlockDevice{ + existingBlockDevices: []lsblk.BlockDevice{ { Name: "nvme1n1p1", KName: calculateDevicePath(t, "nvme1n1p1"), diff --git a/pkg/vgmanager/filter.go b/pkg/vgmanager/filter.go 
deleted file mode 100644 index c0991f79a..000000000 --- a/pkg/vgmanager/filter.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright © 2023 Red Hat, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vgmanager - -import ( - "strings" - - "github.com/openshift/lvm-operator/pkg/internal" -) - -const ( - // filter names: - notReadOnly = "notReadOnly" - notSuspended = "notSuspended" - noBiosBootInPartLabel = "noBiosBootInPartLabel" - noReservedInPartLabel = "noReservedInPartLabel" - noFilesystemSignature = "noFilesystemSignature" - noBindMounts = "noBindMounts" - noChildren = "noChildren" - usableDeviceType = "usableDeviceType" -) - -// maps of function identifier (for logs) to filter function. 
-// These are passed the localv1alpha1.DeviceInclusionSpec to make testing easier, -// but they aren't expected to use it -// they verify that the device itself is good to use -var FilterMap = map[string]func(internal.BlockDevice, internal.Executor) (bool, error){ - notReadOnly: func(dev internal.BlockDevice, _ internal.Executor) (bool, error) { - return !dev.ReadOnly, nil - }, - - notSuspended: func(dev internal.BlockDevice, _ internal.Executor) (bool, error) { - matched := dev.State != internal.StateSuspended - return matched, nil - }, - - noBiosBootInPartLabel: func(dev internal.BlockDevice, _ internal.Executor) (bool, error) { - biosBootInPartLabel := strings.Contains(strings.ToLower(dev.PartLabel), strings.ToLower("bios")) || - strings.Contains(strings.ToLower(dev.PartLabel), strings.ToLower("boot")) - return !biosBootInPartLabel, nil - }, - - noReservedInPartLabel: func(dev internal.BlockDevice, _ internal.Executor) (bool, error) { - reservedInPartLabel := strings.Contains(strings.ToLower(dev.PartLabel), "reserved") - return !reservedInPartLabel, nil - }, - - noFilesystemSignature: func(dev internal.BlockDevice, _ internal.Executor) (bool, error) { - matched := dev.FSType == "" - return matched, nil - }, - - noBindMounts: func(dev internal.BlockDevice, _ internal.Executor) (bool, error) { - hasBindMounts, _, err := dev.HasBindMounts() - return !hasBindMounts, err - }, - - noChildren: func(dev internal.BlockDevice, _ internal.Executor) (bool, error) { - hasChildren := dev.HasChildren() - return !hasChildren, nil - }, - - usableDeviceType: func(dev internal.BlockDevice, executor internal.Executor) (bool, error) { - switch dev.Type { - case internal.DeviceTypeLoop: - // check loop device isn't being used by kubernetes - return dev.IsUsableLoopDev(executor) - case internal.DeviceTypeROM: - return false, nil - case internal.DeviceTypeLVM: - return false, nil - default: - return true, nil - } - }, -} diff --git a/pkg/vgmanager/filter_test.go 
b/pkg/vgmanager/filter_test.go deleted file mode 100644 index f6af872d2..000000000 --- a/pkg/vgmanager/filter_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package vgmanager - -import ( - "testing" - - "github.com/openshift/lvm-operator/pkg/internal" - "github.com/stretchr/testify/assert" -) - -type filterTestCase struct { - label string - device internal.BlockDevice - expected bool - expectErr bool -} - -func TestNotReadOnly(t *testing.T) { - testcases := []filterTestCase{ - {label: "tc false", device: internal.BlockDevice{ReadOnly: false}, expected: true, expectErr: false}, - {label: "tc true", device: internal.BlockDevice{ReadOnly: true}, expected: false, expectErr: false}, - } - for _, tc := range testcases { - result, err := FilterMap[notReadOnly](tc.device, nil) - assert.Equal(t, tc.expected, result) - if tc.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - } -} - -func TestNotSuspended(t *testing.T) { - testcases := []filterTestCase{ - {label: "tc suspended", device: internal.BlockDevice{State: "suspended"}, expected: false, expectErr: false}, - {label: "tc live", device: internal.BlockDevice{State: "live"}, expected: true, expectErr: false}, - {label: "tc running", device: internal.BlockDevice{State: "running"}, expected: true, expectErr: false}, - } - for _, tc := range testcases { - result, err := FilterMap[notSuspended](tc.device, nil) - assert.Equal(t, tc.expected, result) - if tc.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - } -} - -func TestNoFilesystemSignature(t *testing.T) { - testcases := []filterTestCase{ - {label: "tc no fs", device: internal.BlockDevice{FSType: ""}, expected: true, expectErr: false}, - {label: "tc xfs", device: internal.BlockDevice{FSType: "xfs"}, expected: false, expectErr: false}, - {label: "tc swap", device: internal.BlockDevice{FSType: "swap"}, expected: false, expectErr: false}, - } - for _, tc := range testcases { - result, err := 
FilterMap[noFilesystemSignature](tc.device, nil) - assert.Equal(t, tc.expected, result) - if tc.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - } -} - -func TestNoChildren(t *testing.T) { - testcases := []filterTestCase{ - {label: "tc child", device: internal.BlockDevice{Name: "dev1", Children: []internal.BlockDevice{{Name: "child1"}}}, expected: false, expectErr: false}, - {label: "tc no child", device: internal.BlockDevice{Name: "dev2", Children: []internal.BlockDevice{}}, expected: true, expectErr: false}, - } - for _, tc := range testcases { - result, err := FilterMap[noChildren](tc.device, nil) - assert.Equal(t, tc.expected, result) - if tc.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - } -} - -func TestIsUsableDeviceType(t *testing.T) { - testcases := []filterTestCase{ - {label: "tc ROM", device: internal.BlockDevice{Name: "dev1", Type: "rom"}, expected: false, expectErr: false}, - {label: "tc Disk", device: internal.BlockDevice{Name: "dev2", Type: "disk"}, expected: true, expectErr: false}, - } - for _, tc := range testcases { - result, err := FilterMap[usableDeviceType](tc.device, nil) - assert.Equal(t, tc.expected, result) - if tc.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - } -} - -func TestNoBiosBootInPartLabel(t *testing.T) { - testcases := []filterTestCase{ - {label: "tc 1", device: internal.BlockDevice{Name: "dev1", PartLabel: ""}, expected: true, expectErr: false}, - {label: "tc 2", device: internal.BlockDevice{Name: "dev2", PartLabel: "abc"}, expected: true, expectErr: false}, - {label: "tc 3", device: internal.BlockDevice{Name: "dev3", PartLabel: "bios"}, expected: false, expectErr: false}, - {label: "tc 4", device: internal.BlockDevice{Name: "dev4", PartLabel: "BIOS"}, expected: false, expectErr: false}, - {label: "tc 5", device: internal.BlockDevice{Name: "dev5", PartLabel: "boot"}, expected: false, expectErr: false}, - {label: "tc 6", device: 
internal.BlockDevice{Name: "dev6", PartLabel: "BOOT"}, expected: false, expectErr: false}, - } - for _, tc := range testcases { - result, err := FilterMap[noBiosBootInPartLabel](tc.device, nil) - assert.Equal(t, tc.expected, result) - if tc.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - } -} - -func TestNoReservedInPartLabel(t *testing.T) { - testcases := []filterTestCase{ - {label: "tc 1", device: internal.BlockDevice{Name: "dev1", PartLabel: ""}, expected: true}, - {label: "tc 2", device: internal.BlockDevice{Name: "dev2", PartLabel: "abc"}, expected: true}, - {label: "tc 3", device: internal.BlockDevice{Name: "dev3", PartLabel: "reserved"}, expected: false}, - {label: "tc 4", device: internal.BlockDevice{Name: "dev4", PartLabel: "RESERVED"}, expected: false}, - {label: "tc 5", device: internal.BlockDevice{Name: "dev5", PartLabel: "Reserved"}, expected: false}, - } - for _, tc := range testcases { - result, err := FilterMap[noReservedInPartLabel](tc.device, nil) - assert.NoError(t, err) - assert.Equal(t, tc.expected, result) - } -} diff --git a/pkg/vgmanager/status.go b/pkg/vgmanager/status.go index 375787f21..0b61235c1 100644 --- a/pkg/vgmanager/status.go +++ b/pkg/vgmanager/status.go @@ -154,7 +154,7 @@ func (r *VGReconciler) removeVolumeGroupStatus(ctx context.Context, vgName strin } func (r *VGReconciler) setDevices(status *lvmv1alpha1.VGStatus) (bool, error) { - vgs, err := ListVolumeGroups(r.executor) + vgs, err := r.LVM.ListVGs() if err != nil { return false, fmt.Errorf("failed to list volume groups. %v", err) } diff --git a/pkg/vgmanager/suite_test.go b/pkg/vgmanager/suite_test.go new file mode 100644 index 000000000..b936d709a --- /dev/null +++ b/pkg/vgmanager/suite_test.go @@ -0,0 +1,180 @@ +/* +Copyright © 2023 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vgmanager + +import ( + "context" + "log" + "os/user" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + configv1 "github.com/openshift/api/config/v1" + secv1 "github.com/openshift/api/security/v1" + "github.com/openshift/lvm-operator/pkg/exec" + "github.com/openshift/lvm-operator/pkg/lsblk" + "github.com/openshift/lvm-operator/pkg/lvm" + "github.com/openshift/lvm-operator/pkg/lvmd" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + snapapi "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1" + topolvmv1 "github.com/topolvm/topolvm/api/v1" + //+kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +var ( + cfg *rest.Config + k8sClient client.Client + testEnv *envtest.Environment + ctx context.Context + cancel context.CancelFunc + testNodeSelector corev1.NodeSelector + testLVMDFile string + testLSBLK lsblk.LSBLK + testLVM lvm.LVM +) + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Controller Suite") +} + +const ( + testNamespaceName = "openshift-storage" + testNodeName = "test-node" + testHostname = "test-host.vgmanager.test.io" + timeout = time.Second * 10 + interval = time.Millisecond * 250 +) + +var _ = BeforeSuite(func() { + logger := zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)) + logf.SetLogger(logger) + + ctx, cancel = context.WithCancel(context.Background()) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases"), + filepath.Join("..", "..", "test", "e2e", "testdata")}, + ErrorIfCRDPathMissing: true, + CRDInstallOptions: envtest.CRDInstallOptions{ + CleanUpAfterUse: true, + }, + } + + cfg, err := testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = lvmv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = topolvmv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = snapapi.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = secv1.Install(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = configv1.Install(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + }) + Expect(err).ToNot(HaveOccurred()) + + // CreateVG the primary namespace to be used by some tests + testNamespace := &corev1.Namespace{} + testNamespace.SetName(testNamespaceName) 
+ Expect(k8sClient.Create(ctx, testNamespace)).Should(Succeed()) + + testNode := &corev1.Node{} + testNode.SetName(testNodeName) + hostnameKey := "kubernetes.io/hostname" + testNode.SetLabels(map[string]string{ + hostnameKey: testHostname, + }) + testNodeSelector = corev1.NodeSelector{NodeSelectorTerms: []corev1.NodeSelectorTerm{{ + MatchExpressions: []corev1.NodeSelectorRequirement{{ + Key: hostnameKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{testHostname}, + }}, + }}} + Expect(k8sClient.Create(ctx, testNode)).Should(Succeed()) + + testLVMDFile = filepath.Join(GinkgoT().TempDir(), "lvmd.yaml") + + executor := &exec.CommandExecutor{} + testLSBLK = lsblk.NewHostLSBLK(executor, lsblk.DefaultMountinfo, lsblk.DefaultLosetup) + testLVM = lvm.NewHostLVM(executor) + err = (&VGReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + LVM: testLVM, + LSBLK: testLSBLK, + Namespace: testNamespaceName, + NodeName: testNodeName, + LVMDConfig: lvmd.NewLVMDFileConfig(testLVMDFile), + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + go func() { + defer GinkgoRecover() + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") + }() + +}) + +var _ = AfterSuite(func() { + cancel() + By("tearing down the test environment") + Expect(testEnv.Stop()).To(Succeed()) +}) + +func isRoot() bool { + currentUser, err := user.Current() + if err != nil { + log.Fatalf("[isRoot] Unable to get current user: %s", err) + } + return currentUser.Username == "root" || currentUser.Uid == "0" +} diff --git a/pkg/vgmanager/vgmanager_controller.go b/pkg/vgmanager/vgmanager_controller.go index d3c1e5ced..6c6bcf303 100644 --- a/pkg/vgmanager/vgmanager_controller.go +++ b/pkg/vgmanager/vgmanager_controller.go @@ -19,7 +19,6 @@ package vgmanager import ( "context" "fmt" - "os" "strconv" "strings" "time" @@ -27,7 +26,9 @@ import ( "github.com/google/go-cmp/cmp" lvmv1alpha1 
"github.com/openshift/lvm-operator/api/v1alpha1" "github.com/openshift/lvm-operator/controllers" - "github.com/openshift/lvm-operator/pkg/internal" + "github.com/openshift/lvm-operator/pkg/lsblk" + "github.com/openshift/lvm-operator/pkg/lvm" + lvmd2 "github.com/openshift/lvm-operator/pkg/lvmd" "github.com/topolvm/topolvm/lvmd" lvmdCMD "github.com/topolvm/topolvm/pkg/lvmd/cmd" @@ -38,12 +39,9 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/yaml" ) const ( - ControllerName = "vg-manager" - DefaultChunkSize = "128" reconcileInterval = 15 * time.Second metadataWarningPercentage = 95 ) @@ -61,10 +59,12 @@ func (r *VGReconciler) SetupWithManager(mgr ctrl.Manager) error { type VGReconciler struct { client.Client - Scheme *runtime.Scheme - executor internal.Executor + Scheme *runtime.Scheme + lvm.LVM + lsblk.LSBLK NodeName string Namespace string + lvmd2.LVMDConfig } func (r *VGReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -87,8 +87,6 @@ func (r *VGReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Re logger.Info("node labels do not match the selector", "VGName", volumeGroup.Name) return ctrl.Result{}, nil } - - r.executor = &internal.CommandExecutor{} return r.reconcile(ctx, volumeGroup) } @@ -100,7 +98,7 @@ func (r *VGReconciler) reconcile(ctx context.Context, volumeGroup *lvmv1alpha1.L } // Read the lvmd config file - lvmdConfig, err := loadLVMDConfig() + lvmdConfig, err := r.LVMDConfig.Load() if err != nil { err = fmt.Errorf("failed to read the lvmd config file: %w", err) if err := r.setVolumeGroupFailedStatus(ctx, volumeGroup.Name, err); err != nil { @@ -117,12 +115,12 @@ func (r *VGReconciler) reconcile(ctx context.Context, volumeGroup *lvmv1alpha1.L } existingLvmdConfig := *lvmdConfig - vgs, err := ListVolumeGroups(r.executor) + vgs, err := r.LVM.ListVGs() if err != nil { return ctrl.Result{}, 
fmt.Errorf("failed to list volume groups. %v", err) } - blockDevices, err := internal.ListBlockDevices(r.executor) + blockDevices, err := r.LSBLK.ListBlockDevices() if err != nil { return ctrl.Result{}, fmt.Errorf("failed to list block devices: %v", err) } @@ -173,7 +171,7 @@ func (r *VGReconciler) reconcile(ctx context.Context, volumeGroup *lvmv1alpha1.L return reconcileAgain, nil } - // Create/extend VG + // CreateVG/extend VG if err = r.addDevicesToVG(ctx, vgs, volumeGroup.Name, availableDevices); err != nil { err = fmt.Errorf("failed to create/extend volume group %s: %w", volumeGroup.Name, err) if err := r.setVolumeGroupFailedStatus(ctx, volumeGroup.Name, err); err != nil { @@ -182,7 +180,7 @@ func (r *VGReconciler) reconcile(ctx context.Context, volumeGroup *lvmv1alpha1.L return ctrl.Result{}, err } - // Create thin pool + // CreateVG thin pool if err = r.addThinPoolToVG(ctx, volumeGroup.Name, volumeGroup.Spec.ThinPoolConfig); err != nil { err := fmt.Errorf("failed to create thin pool %s for volume group %s: %w", volumeGroup.Spec.ThinPoolConfig.Name, volumeGroup.Name, err) if err := r.setVolumeGroupFailedStatus(ctx, volumeGroup.Name, err); err != nil { @@ -226,7 +224,7 @@ func (r *VGReconciler) reconcile(ctx context.Context, volumeGroup *lvmv1alpha1.L // Apply and save lvmd config if !cmp.Equal(existingLvmdConfig, lvmdConfig) { - if err := saveLVMDConfig(lvmdConfig); err != nil { + if err := r.LVMDConfig.Save(lvmdConfig); err != nil { err := fmt.Errorf("failed to update lvmd config file to update volume group %s: %w", volumeGroup.GetName(), err) if err := r.setVolumeGroupFailedStatus(ctx, volumeGroup.Name, err); err != nil { logger.Error(err, "failed to set status to failed", "VGName", volumeGroup.GetName()) @@ -247,7 +245,7 @@ func (r *VGReconciler) processDelete(ctx context.Context, volumeGroup *lvmv1alph logger := log.FromContext(ctx) // Read the lvmd config file - lvmdConfig, err := loadLVMDConfig() + lvmdConfig, err := r.LVMDConfig.Load() if err != nil { 
// Failed to read lvmdconfig file. Reconcile again return fmt.Errorf("failed to read the lvmd config file: %w", err) @@ -277,24 +275,24 @@ func (r *VGReconciler) processDelete(ctx context.Context, volumeGroup *lvmv1alph } // Check if volume group exists - vg, err := GetVolumeGroup(r.executor, volumeGroup.Name) + vg, err := r.LVM.GetVG(volumeGroup.Name) if err != nil { - if err != ErrVolumeGroupNotFound { + if err != lvm.ErrVolumeGroupNotFound { return fmt.Errorf("failed to get volume group %s, %w", volumeGroup.GetName(), err) } return nil } - // Delete thin pool + // DeleteVG thin pool if volumeGroup.Spec.ThinPoolConfig != nil { thinPoolName := volumeGroup.Spec.ThinPoolConfig.Name - lvExists, err := LVExists(r.executor, thinPoolName, volumeGroup.Name) + lvExists, err := r.LVM.LVExists(thinPoolName, volumeGroup.Name) if err != nil { return fmt.Errorf("failed to check existence of thin pool %q in volume group %q. %v", thinPoolName, volumeGroup.Name, err) } if lvExists { - if err := DeleteLV(r.executor, thinPoolName, volumeGroup.Name); err != nil { + if err := r.LVM.DeleteLV(thinPoolName, volumeGroup.Name); err != nil { err := fmt.Errorf("failed to delete thin pool %s in volume group %s: %w", thinPoolName, volumeGroup.Name, err) if err := r.setVolumeGroupFailedStatus(ctx, volumeGroup.Name, err); err != nil { logger.Error(err, "failed to set status to failed", "VGName", volumeGroup.GetName()) @@ -307,7 +305,7 @@ func (r *VGReconciler) processDelete(ctx context.Context, volumeGroup *lvmv1alph } } - if err = vg.Delete(r.executor); err != nil { + if err = r.LVM.DeleteVG(vg); err != nil { err := fmt.Errorf("failed to delete volume group %s: %w", volumeGroup.Name, err) if err := r.setVolumeGroupFailedStatus(ctx, volumeGroup.Name, err); err != nil { logger.Error(err, "failed to set status to failed", "VGName", volumeGroup.GetName()) @@ -320,11 +318,11 @@ func (r *VGReconciler) processDelete(ctx context.Context, volumeGroup *lvmv1alph logger.Info("updating lvmd config") if 
len(lvmdConfig.DeviceClasses) > 0 { - if err = saveLVMDConfig(lvmdConfig); err != nil { + if err = r.LVMDConfig.Save(lvmdConfig); err != nil { return fmt.Errorf("failed to update lvmd.conf file for volume group %s: %w", volumeGroup.GetName(), err) } } else { - if err = deleteLVMDConfig(); err != nil { + if err = r.LVMDConfig.Delete(); err != nil { return fmt.Errorf("failed to delete lvmd.conf file for volume group %s: %w", volumeGroup.GetName(), err) } } @@ -346,7 +344,7 @@ func (r *VGReconciler) validateLVs(ctx context.Context, volumeGroup *lvmv1alpha1 return nil } - resp, err := GetLVSOutput(r.executor, volumeGroup.Name) + resp, err := r.LVM.ListLVs(volumeGroup.Name) if err != nil { return fmt.Errorf("could not get logical volumes found inside volume group, volume group content is degraded or corrupt: %w", err) } @@ -400,9 +398,9 @@ func (r *VGReconciler) validateLVs(ctx context.Context, volumeGroup *lvmv1alpha1 } func (r *VGReconciler) addThinPoolToVG(ctx context.Context, vgName string, config *lvmv1alpha1.ThinPoolConfig) error { - logger := log.FromContext(ctx) + logger := log.FromContext(ctx).WithValues("VGName", vgName, "ThinPool", config.Name) - resp, err := GetLVSOutput(r.executor, vgName) + resp, err := r.LVM.ListLVs(vgName) if err != nil { return fmt.Errorf("failed to list logical volumes in the volume group %q. 
%v", vgName, err) } @@ -411,7 +409,7 @@ func (r *VGReconciler) addThinPoolToVG(ctx context.Context, vgName string, confi for _, lv := range report.Lv { if lv.Name == config.Name { if strings.Contains(lv.LvAttr, "t") { - logger.Info("lvm thinpool already exists", "VGName", vgName, "ThinPool", config.Name) + logger.Info("lvm thinpool already exists") if err := r.extendThinPool(ctx, vgName, lv.LvSize, config); err != nil { return fmt.Errorf("failed to extend the lvm thinpool %s in volume group %s: %w", config.Name, vgName, err) } @@ -423,21 +421,21 @@ func (r *VGReconciler) addThinPoolToVG(ctx context.Context, vgName string, confi } } - args := []string{"-l", fmt.Sprintf("%d%%FREE", config.SizePercent), "-c", DefaultChunkSize, "-Z", "y", "-T", fmt.Sprintf("%s/%s", vgName, config.Name)} - - if _, err = r.executor.ExecuteCommandWithOutputAsHost(lvCreateCmd, args...); err != nil { - return fmt.Errorf("failed to create thin pool %q in the volume group %q using command '%s': %v", config.Name, vgName, fmt.Sprintf("%s %s", lvCreateCmd, strings.Join(args, " ")), err) + logger.Info("creating lvm thinpool") + if err := r.LVM.CreateLV(config.Name, vgName, config.SizePercent); err != nil { + return fmt.Errorf("failed to create thinpool: %w", err) } + logger.Info("successfully created thinpool") return nil } func (r *VGReconciler) extendThinPool(ctx context.Context, vgName string, lvSize string, config *lvmv1alpha1.ThinPoolConfig) error { - logger := log.FromContext(ctx) + logger := log.FromContext(ctx).WithValues("VGName", vgName, "ThinPool", config.Name) - vg, err := GetVolumeGroup(r.executor, vgName) + vg, err := r.LVM.GetVG(vgName) if err != nil { - if err != ErrVolumeGroupNotFound { + if err != lvm.ErrVolumeGroupNotFound { return fmt.Errorf("failed to get volume group. 
%q, %v", vgName, err) } return nil @@ -458,72 +456,28 @@ func (r *VGReconciler) extendThinPool(ctx context.Context, vgName string, lvSize return nil } - logger.Info("extending lvm thinpool ", "VGName", vgName, "ThinPool", config.Name) - - args := []string{"-l", fmt.Sprintf("%d%%Vg", config.SizePercent), fmt.Sprintf("%s/%s", vgName, config.Name)} - - if _, err = r.executor.ExecuteCommandWithOutputAsHost(lvExtendCmd, args...); err != nil { - return fmt.Errorf("failed to extend thin pool %q in the volume group %q using command '%s': %v", config.Name, vgName, fmt.Sprintf("%s %s", lvExtendCmd, strings.Join(args, " ")), err) + logger.Info("extending lvm thinpool") + if err := r.LVM.ExtendLV(config.Name, vgName, config.SizePercent); err != nil { + return fmt.Errorf("failed to extend thinpool: %w", err) } - - logger.Info("successfully extended the thin pool in the volume group ", "thinpool", config.Name, "vgName", vgName) + logger.Info("successfully extended thinpool") return nil } -func NodeSelectorMatchesNodeLabels(node *corev1.Node, nodeSelector *corev1.NodeSelector) (bool, error) { - if nodeSelector == nil { - return true, nil - } - if node == nil { - return false, fmt.Errorf("node cannot be nil") - } - - matches, err := corev1helper.MatchNodeSelectorTerms(node, nodeSelector) - return matches, err -} - func (r *VGReconciler) matchesThisNode(ctx context.Context, selector *corev1.NodeSelector) (bool, error) { node := &corev1.Node{} err := r.Client.Get(ctx, types.NamespacedName{Name: r.NodeName}, node) if err != nil { return false, err } - return NodeSelectorMatchesNodeLabels(node, selector) -} - -func loadLVMDConfig() (*lvmdCMD.Config, error) { - - cfgBytes, err := os.ReadFile(controllers.LvmdConfigFile) - if os.IsNotExist(err) { - // If the file does not exist, return nil for both - return nil, nil - } else if err != nil { - return nil, fmt.Errorf("failed to load config file %s: %w", controllers.LvmdConfigFile, err) - } else { - lvmdconfig := &lvmdCMD.Config{} - if err 
= yaml.Unmarshal(cfgBytes, lvmdconfig); err != nil { - return nil, fmt.Errorf("failed to unmarshal config file %s: %w", controllers.LvmdConfigFile, err) - } - return lvmdconfig, nil - } -} - -func saveLVMDConfig(lvmdConfig *lvmdCMD.Config) error { - out, err := yaml.Marshal(lvmdConfig) - if err == nil { - err = os.WriteFile(controllers.LvmdConfigFile, out, 0600) + if selector == nil { + return true, nil } - if err != nil { - return fmt.Errorf("failed to save config file %s: %w", controllers.LvmdConfigFile, err) + if node == nil { + return false, fmt.Errorf("node cannot be nil") } - return nil -} -func deleteLVMDConfig() error { - err := os.Remove(controllers.LvmdConfigFile) - if err != nil { - return fmt.Errorf("failed to delete config file %s: %w", controllers.LvmdConfigFile, err) - } - return err + matches, err := corev1helper.MatchNodeSelectorTerms(node, selector) + return matches, err } diff --git a/pkg/vgmanager/vgmanager_controller_test.go b/pkg/vgmanager/vgmanager_controller_test.go index 468414e62..eff444ece 100644 --- a/pkg/vgmanager/vgmanager_controller_test.go +++ b/pkg/vgmanager/vgmanager_controller_test.go @@ -3,15 +3,24 @@ package vgmanager import ( "context" "fmt" + "os/exec" + "path/filepath" + "slices" + "strings" "testing" "github.com/go-logr/logr/testr" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1" - "github.com/openshift/lvm-operator/pkg/internal" + lvmexec "github.com/openshift/lvm-operator/pkg/exec" mockExec "github.com/openshift/lvm-operator/pkg/internal/test" + "github.com/openshift/lvm-operator/pkg/lsblk" + "github.com/openshift/lvm-operator/pkg/lvm" "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/strings/slices" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -92,7 +101,7 @@ var mockLvsOutputRAID = ` func TestVGReconciler_validateLVs(t *testing.T) { type fields struct { - executor internal.Executor + executor lvmexec.Executor } type args struct { volumeGroup *lvmv1alpha1.LVMVolumeGroup @@ -108,7 +117,7 @@ func TestVGReconciler_validateLVs(t *testing.T) { "json", } - mockExecutorForLVSOutput := func(output string) internal.Executor { + mockExecutorForLVSOutput := func(output string) lvmexec.Executor { return &mockExec.MockExecutor{ MockExecuteCommandWithOutputAsHost: func(command string, args ...string) (string, error) { if !slices.Equal(args, lvsCommandForVG1) { @@ -219,10 +228,73 @@ func TestVGReconciler_validateLVs(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &VGReconciler{ - executor: tt.fields.executor, - } + r := &VGReconciler{LVM: lvm.NewHostLVM(tt.fields.executor)} tt.wantErr(t, r.validateLVs(log.IntoContext(context.Background(), testr.New(t)), tt.args.volumeGroup), fmt.Sprintf("validateLVs(%v)", tt.args.volumeGroup)) }) } } + +var _ = Describe("vgmanager controller", func() { + // SetDefaultEventuallyTimeout(timeout) + // SetDefaultEventuallyPollingInterval(interval) + Context("verifying standard behavior with node selector", func() { + It("should be reconciled successfully with a looped block device", func(ctx context.Context) { + if !isRoot() { + Skip("running as non-root, cannot run 
native vgmanager tests," + + "as they use losetup and dd to spin up disks for testing") + } + By("setting up the disk") + disk := filepath.Join(GinkgoT().TempDir(), "disk0") + Expect(exec.CommandContext(ctx, "dd", + "if=/dev/zero", + fmt.Sprintf("of=%s", disk), + // 64K * 16384 => 1 GB + "bs=64K", "count=16384", + ).Run()).To(Succeed(), "block device should be created successfully in temporary directory") + + rawDevice, err := exec.CommandContext(ctx, "losetup", "--find", "--show", disk).Output() + Expect(err).ToNot(HaveOccurred(), "losetup should succeed") + Expect(rawDevice).ToNot(BeEmpty(), "assigned losetup device should not be empty") + DeferCleanup(func(ctx context.Context) { + Expect(exec.CommandContext(ctx, lsblk.DefaultLosetup, "--detach", disk).Run()).To(Succeed()) + }) + device := strings.TrimSpace(string(rawDevice)) + + By("setting up the LVMVolumeGroup with the temporary device") + vg := &lvmv1alpha1.LVMVolumeGroup{} + vg.SetName("vg1") + vg.SetNamespace(testNamespaceName) + vg.Spec.NodeSelector = testNodeSelector.DeepCopy() + vg.Spec.DeviceSelector = &lvmv1alpha1.DeviceSelector{Paths: []string{device}} + vg.Spec.ThinPoolConfig = &lvmv1alpha1.ThinPoolConfig{ + Name: "thin-pool", + SizePercent: 90, + OverprovisionRatio: 10, + } + + Expect(k8sClient.Create(ctx, vg)).To(Succeed()) + DeferCleanup(func(ctx context.Context) { + By("deleting the LVMVolumeGroup after successful verification") + Expect(k8sClient.Delete(ctx, vg)).To(Succeed()) + Eventually(func(ctx context.Context) error { + return k8sClient.Get(ctx, client.ObjectKeyFromObject(vg), vg) + }).WithContext(ctx).Should(Satisfy(errors.IsNotFound), "no finalizers should have blocked"+ + "the LVMVolumeGroup deletion and it should not exist on the cluster anymore") + }) + + By("verifying the Node Status contains the Volume Group in Ready State") + nodeStatus := &lvmv1alpha1.LVMVolumeGroupNodeStatus{} + nodeStatus.SetName(testNodeName) + nodeStatus.SetNamespace(testNamespaceName) + Eventually(func(ctx 
context.Context) error { + return k8sClient.Get(ctx, client.ObjectKeyFromObject(nodeStatus), nodeStatus) + }).WithContext(ctx).Should(Succeed()) + Expect(nodeStatus.Spec.LVMVGStatus).ToNot(BeEmpty(), "volume group needs to be present") + Expect(nodeStatus.Spec.LVMVGStatus).To(ContainElement(lvmv1alpha1.VGStatus{ + Name: vg.GetName(), + Status: lvmv1alpha1.VGStatusReady, + Devices: vg.Spec.DeviceSelector.Paths, + }), "volume group needs to be ready and contain all the devices from the selector") + }) + }) +})