diff --git a/cmd/incus/completion.go b/cmd/incus/completion.go
index f73e0af8e6c..2c9b1ade38f 100644
--- a/cmd/incus/completion.go
+++ b/cmd/incus/completion.go
@@ -47,7 +47,7 @@ func (g *cmdGlobal) cmpImages(toComplete string) ([]string, cobra.ShellCompDirec
 
 func (g *cmdGlobal) cmpInstanceAllKeys() ([]string, cobra.ShellCompDirective) {
 	keys := []string{}
-	for k, _ := range instance.InstanceConfigKeysAny {
+	for k := range instance.InstanceConfigKeysAny {
 		keys = append(keys, k)
 	}
 
diff --git a/cmd/incusd/patches.go b/cmd/incusd/patches.go
index dd33619951f..0af96ecfcc0 100644
--- a/cmd/incusd/patches.go
+++ b/cmd/incusd/patches.go
@@ -83,6 +83,7 @@ var patches = []patch{
 	{name: "storage_zfs_unset_invalid_block_settings", stage: patchPostDaemonStorage, run: patchStorageZfsUnsetInvalidBlockSettings},
 	{name: "storage_zfs_unset_invalid_block_settings_v2", stage: patchPostDaemonStorage, run: patchStorageZfsUnsetInvalidBlockSettingsV2},
 	{name: "runtime_directory", stage: patchPostDaemonStorage, run: patchRuntimeDirectory},
+	{name: "lvm_node_force_reuse", stage: patchPostDaemonStorage, run: patchLvmForceReuseKey},
 }
 
 type patch struct {
@@ -1244,4 +1245,68 @@ func patchRuntimeDirectory(name string, d *Daemon) error {
 	return nil
 }
 
+// The lvm.vg.force_reuse config key is node-specific and needs to be linked to nodes.
+func patchLvmForceReuseKey(name string, d *Daemon) error {
+	revert := revert.New()
+	defer revert.Fail()
+
+	// Set up a transaction.
+	tx, err := d.db.Cluster.Begin()
+	if err != nil {
+		return fmt.Errorf("Failed to begin transaction: %w", err)
+	}
+
+	revert.Add(func() { _ = tx.Rollback() })
+
+	// Fetch the IDs of all existing nodes.
+	nodeIDs, err := query.SelectIntegers(context.TODO(), tx, "SELECT id FROM nodes")
+	if err != nil {
+		return fmt.Errorf("Failed to get IDs of current nodes: %w", err)
+	}
+
+	// Fetch the IDs of all existing LVM pools.
+	poolIDs, err := query.SelectIntegers(context.TODO(), tx, "SELECT id FROM storage_pools WHERE driver='lvm'")
+	if err != nil {
+		return fmt.Errorf("Failed to get IDs of current LVM pools: %w", err)
+	}
+
+	for _, poolID := range poolIDs {
+		// Fetch the config for this LVM pool and check if it has the lvm.vg.force_reuse key.
+		config, err := query.SelectConfig(context.TODO(), tx, "storage_pools_config", "storage_pool_id=? AND node_id IS NULL", poolID)
+		if err != nil {
+			return fmt.Errorf("Failed to fetch LVM pool config: %w", err)
+		}
+
+		value, ok := config["lvm.vg.force_reuse"]
+		if !ok {
+			continue
+		}
+
+		// Delete the current key.
+		_, err = tx.Exec("DELETE FROM storage_pools_config WHERE key='lvm.vg.force_reuse' AND storage_pool_id=? AND node_id IS NULL", poolID)
+		if err != nil {
+			return fmt.Errorf("Failed to delete old config: %w", err)
+		}
+
+		// Add the config entry for each node.
+		for _, nodeID := range nodeIDs {
+			_, err := tx.Exec(`
+INSERT INTO storage_pools_config(storage_pool_id, node_id, key, value)
+  VALUES(?, ?, 'lvm.vg.force_reuse', ?)
+`, poolID, nodeID, value)
+			if err != nil {
+				return fmt.Errorf("Failed to create new config: %w", err)
+			}
+		}
+	}
+
+	err = tx.Commit()
+	if err != nil {
+		return fmt.Errorf("Failed to commit transaction: %w", err)
+	}
+
+	revert.Success()
+	return nil
+}
+
 // Patches end here
diff --git a/doc/reference/storage_lvm.md b/doc/reference/storage_lvm.md
index b2c61ad4f87..69b2dc0146e 100644
--- a/doc/reference/storage_lvm.md
+++ b/doc/reference/storage_lvm.md
@@ -41,36 +41,36 @@ The following configuration options are available for storage pools that use the
 (storage-lvm-pool-config)=
 ### Storage pool configuration
 
-Key | Type | Default | Description
-:-- | :--- | :------ | :----------
-`lvm.thinpool_name` | string | `IncusThinPool` | Thin pool where volumes are created
-`lvm.thinpool_metadata_size` | string | `0` (auto) | The size of the thin pool metadata volume (the default is to let LVM calculate an appropriate size)
-`lvm.use_thinpool` | bool | `true` | Whether the storage pool uses a thin pool for logical volumes
-`lvm.vg.force_reuse` | bool | `false` | Force using an existing non-empty volume group
-`lvm.vg_name` | string | name of the pool | Name of the volume group to create
-`rsync.bwlimit` | string | `0` (no limit) | The upper limit to be placed on the socket I/O when `rsync` must be used to transfer storage entities
-`rsync.compression` | bool | `true` | Whether to use compression while migrating storage pools
-`size` | string | auto (20% of free disk space, >= 5 GiB and <= 30 GiB) | Size of the storage pool when creating loop-based pools (in bytes, suffixes supported, can be increased to grow storage pool)
-`source` | string | - | Path to an existing block device, loop file or LVM volume group
-`source.wipe` | bool | `false` | Wipe the block device specified in `source` prior to creating the storage pool
+Key | Type | Default | Description
+:-- | :--- | :------ | :----------
+`lvm.thinpool_name` | string | `IncusThinPool` | Thin pool where volumes are created
+`lvm.thinpool_metadata_size` | string | `0` (auto) | The size of the thin pool metadata volume (the default is to let LVM calculate an appropriate size)
+`lvm.use_thinpool` | bool | `true` | Whether the storage pool uses a thin pool for logical volumes
+`lvm.vg.force_reuse` | bool | `false` | Force using an existing non-empty volume group
+`lvm.vg_name` | string | name of the pool | Name of the volume group to create
+`rsync.bwlimit` | string | `0` (no limit) | The upper limit to be placed on the socket I/O when `rsync` must be used to transfer storage entities
+`rsync.compression` | bool | `true` | Whether to use compression while migrating storage pools
+`size` | string | auto (20% of free disk space, >= 5 GiB and <= 30 GiB) | Size of the storage pool when creating loop-based pools (in bytes, suffixes supported, can be increased to grow storage pool)
+`source` | string | - | Path to an existing block device, loop file or LVM volume group
+`source.wipe` | bool | `false` | Wipe the block device specified in `source` prior to creating the storage pool
 
 {{volume_configuration}}
 
 (storage-lvm-vol-config)=
 ### Storage volume configuration
 
-Key | Type | Condition | Default | Description
-:-- | :--- | :------ | :------ | :----------
-`block.filesystem` | string | block-based volume with content type `filesystem` | same as `volume.block.filesystem` | {{block_filesystem}}
-`block.mount_options` | string | block-based volume with content type `filesystem` | same as `volume.block.mount_options` | Mount options for block-backed file system volumes
-`lvm.stripes` | string | | same as `volume.lvm.stripes` | Number of stripes to use for new volumes (or thin pool volume)
-`lvm.stripes.size` | string | | same as `volume.lvm.stripes.size` | Size of stripes to use (at least 4096 bytes and multiple of 512 bytes)
-`security.shifted` | bool | custom volume | same as `volume.security.shifted` or `false` | {{enable_ID_shifting}}
-`security.unmapped` | bool | custom volume | same as `volume.security.unmapped` or `false` | Disable ID mapping for the volume
-`size` | string | | same as `volume.size` | Size/quota of the storage volume
-`snapshots.expiry` | string | custom volume | same as `volume.snapshots.expiry` | {{snapshot_expiry_format}}
-`snapshots.pattern` | string | custom volume | same as `volume.snapshots.pattern` or `snap%d` | {{snapshot_pattern_format}} [^*]
-`snapshots.schedule` | string | custom volume | same as `volume.snapshots.schedule` | {{snapshot_schedule_format}}
+Key | Type | Condition | Default | Description
+:-- | :--- | :------ | :------ | :----------
+`block.filesystem` | string | block-based volume with content type `filesystem` | same as `volume.block.filesystem` | {{block_filesystem}}
+`block.mount_options` | string | block-based volume with content type `filesystem` | same as `volume.block.mount_options` | Mount options for block-backed file system volumes
+`lvm.stripes` | string | | same as `volume.lvm.stripes` | Number of stripes to use for new volumes (or thin pool volume)
+`lvm.stripes.size` | string | | same as `volume.lvm.stripes.size` | Size of stripes to use (at least 4096 bytes and multiple of 512 bytes)
+`security.shifted` | bool | custom volume | same as `volume.security.shifted` or `false` | {{enable_ID_shifting}}
+`security.unmapped` | bool | custom volume | same as `volume.security.unmapped` or `false` | Disable ID mapping for the volume
+`size` | string | | same as `volume.size` | Size/quota of the storage volume
+`snapshots.expiry` | string | custom volume | same as `volume.snapshots.expiry` | {{snapshot_expiry_format}}
+`snapshots.pattern` | string | custom volume | same as `volume.snapshots.pattern` or `snap%d` | {{snapshot_pattern_format}} [^*]
+`snapshots.schedule` | string | custom volume | same as `volume.snapshots.schedule` | {{snapshot_schedule_format}}
 
 [^*]: {{snapshot_pattern_detail}}
 
@@ -78,6 +78,6 @@ Key | Type | Condition | Default
 
 To enable storage buckets for local storage pool drivers and allow applications to access the buckets via the S3 protocol, you must configure the {config:option}`server-core:core.storage_buckets_address` server setting.
 
-Key | Type | Condition | Default | Description
-:-- | :--- | :-------- | :------ | :----------
-`size` | string | appropriate driver | same as `volume.size` | Size/quota of the storage bucket
+Key | Type | Condition | Default | Description
+:-- | :--- | :-------- | :------ | :----------
+`size` | string | appropriate driver | same as `volume.size` | Size/quota of the storage bucket
diff --git a/internal/server/db/storage_pools.go b/internal/server/db/storage_pools.go
index 1223cf857b1..33ac28d4b8f 100644
--- a/internal/server/db/storage_pools.go
+++ b/internal/server/db/storage_pools.go
@@ -885,6 +885,7 @@ var NodeSpecificStorageConfig = []string{
 	"zfs.pool_name",
 	"lvm.thinpool_name",
 	"lvm.vg_name",
+	"lvm.vg.force_reuse",
 }
 
 // IsRemoteStorage return whether a given pool is backed by remote storage.
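
Reviewer note: a minimal sketch of the row-level effect the `lvm_node_force_reuse` patch has on the `storage_pools_config` table, assuming a hypothetical two-node cluster (node IDs 1 and 2) and a single LVM pool (pool ID 7) that had the key set cluster-wide; the IDs and the `true` value are illustrative, not taken from the change itself:

```sql
-- Before the patch: one cluster-wide row (node_id IS NULL).
--   storage_pool_id | node_id | key                | value
--   7               | NULL    | lvm.vg.force_reuse | true

-- The patch removes the cluster-wide row...
DELETE FROM storage_pools_config
 WHERE key='lvm.vg.force_reuse' AND storage_pool_id=7 AND node_id IS NULL;

-- ...and re-creates it once per cluster member, making the key node-specific.
INSERT INTO storage_pools_config(storage_pool_id, node_id, key, value)
  VALUES(7, 1, 'lvm.vg.force_reuse', 'true'),
        (7, 2, 'lvm.vg.force_reuse', 'true');
```

This mirrors the DELETE plus per-node INSERT that `patchLvmForceReuseKey` performs inside a single transaction after discovering the node and pool IDs from the `nodes` and `storage_pools` tables; the companion change adds the key to `NodeSpecificStorageConfig` so the database layer treats it as node-specific from then on.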