diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index d6aef7ab24c5..e1fa1f864be2 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -2464,3 +2464,9 @@ Adds the following internal metrics:
 
 * Total completed requests
 * Number of ongoing requests
+
+## `projects_limits_disk_pool`
+
+Adds per-pool project disk limits through a new `limits.disk.pool.POOL_NAME`
+configuration option in the project limits. When `limits.disk.pool.POOL_NAME` is
+set to `0` for a project, the pool is excluded from `lxc storage list` in that project.
diff --git a/doc/metadata.txt b/doc/metadata.txt
index 0ab9c296d8fa..7ca257daebf9 100644
--- a/doc/metadata.txt
+++ b/doc/metadata.txt
@@ -1,5 +1,18 @@
 // Code generated by lxd-metadata; DO NOT EDIT.
 
+
+```{config:option} limits.disk.pool.POOL_NAME -limits,
+:shortdesc: "Maximum disk space used by the project on this pool"
+:type: "string"
+This value is the maximum value of the aggregate disk
+space used by all instance volumes, custom volumes, and images of the
+project on this specific storage pool.
+
+When set to 0, the pool is excluded from storage pool list for
+the project.
+```
+
+
 ```{config:option} scheduler.instance cluster-cluster
 :defaultdesc: "`all`"
diff --git a/lxc/project.go b/lxc/project.go
index ecc68a2b8ed6..5b10f9239c8b 100644
--- a/lxc/project.go
+++ b/lxc/project.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"slices"
 	"sort"
 	"strings"
 
@@ -959,9 +960,11 @@ func (c *cmdProjectInfo) run(cmd *cobra.Command, args []string) error {
 	byteLimits := []string{"disk", "memory"}
 	data := [][]string{}
 	for k, v := range projectState.Resources {
+		shortKey := strings.SplitN(k, ".", 2)[0]
+
 		limit := i18n.G("UNLIMITED")
 		if v.Limit >= 0 {
-			if shared.ValueInSlice(k, byteLimits) {
+			if slices.Contains(byteLimits, shortKey) {
 				limit = units.GetByteSizeStringIEC(v.Limit, 2)
 			} else {
 				limit = fmt.Sprintf("%d", v.Limit)
@@ -969,13 +972,19 @@ func (c *cmdProjectInfo) run(cmd *cobra.Command, args []string) error {
 		}
 
 		usage := ""
-		if shared.ValueInSlice(k, byteLimits) {
+		if slices.Contains(byteLimits, shortKey) {
 			usage = units.GetByteSizeStringIEC(v.Usage, 2)
 		} else {
 			usage = fmt.Sprintf("%d", v.Usage)
 		}
 
-		data = append(data, []string{strings.ToUpper(k), limit, usage})
+		columnName := strings.ToUpper(k)
+		fields := strings.SplitN(columnName, ".", 2)
+		if len(fields) == 2 {
+			columnName = fmt.Sprintf("%s (%s)", fields[0], fields[1])
+		}
+
+		data = append(data, []string{columnName, limit, usage})
 	}
 
 	sort.Sort(cli.SortColumnsNaturally(data))
diff --git a/lxd/api_project.go b/lxd/api_project.go
index cb7787189583..50327ca3e2d4 100644
--- a/lxd/api_project.go
+++ b/lxd/api_project.go
@@ -1380,6 +1380,37 @@ func projectValidateConfig(s *state.State, config map[string]string) error {
 		"restricted.snapshots": isEitherAllowOrBlock,
 	}
 
+	// Add the storage pool keys.
+	err := s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error {
+		var err error
+
+		// Load all the pools.
+		pools, err := tx.GetStoragePoolNames(ctx)
+		if err != nil {
+			return err
+		}
+
+		// Add the storage-pool specific config keys.
+		for _, poolName := range pools {
+			// lxdmeta:generate(entity=project, group=limits, key=limits.disk.pool.POOL_NAME)
+			// This value is the maximum value of the aggregate disk
+			// space used by all instance volumes, custom volumes, and images of the
+			// project on this specific storage pool.
+			//
+			// When set to 0, the pool is excluded from storage pool list for
+			// the project.
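To make the `lxc project info` change in this chunk concrete: the CLI now derives its column names from the per-pool resource keys returned by the server. Below is a minimal, self-contained Go sketch of that renaming, assuming resource keys of the form `disk.<pool>`; the `columnName` helper is an illustrative name and not part of the patch.

```go
package main

import (
	"fmt"
	"strings"
)

// columnName mirrors the renaming added to `lxc project info`: a per-pool
// resource key such as "disk.pool1" is shown as "DISK (POOL1)", while plain
// keys like "memory" stay as-is.
func columnName(key string) string {
	name := strings.ToUpper(key)

	fields := strings.SplitN(name, ".", 2)
	if len(fields) == 2 {
		name = fmt.Sprintf("%s (%s)", fields[0], fields[1])
	}

	return name
}

func main() {
	for _, k := range []string{"disk", "memory", "disk.pool1"} {
		fmt.Printf("%s -> %s\n", k, columnName(k))
	}
}
```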
+ // --- + // type: string + // shortdesc: Maximum disk space used by the project on this pool + projectConfigKeys[fmt.Sprintf("limits.disk.pool.%s", poolName)] = validate.Optional(validate.IsSize) + } + + return nil + }) + if err != nil { + return fmt.Errorf("Failed loading storage pool names: %w", err) + } + for k, v := range config { key := k diff --git a/lxd/metadata/configuration.json b/lxd/metadata/configuration.json index 08e7027199e9..eb752469f703 100644 --- a/lxd/metadata/configuration.json +++ b/lxd/metadata/configuration.json @@ -1,5 +1,18 @@ { "configs": { + "": { + "limits,": { + "keys": [ + { + "limits.disk.pool.POOL_NAME": { + "longdesc": "This value is the maximum value of the aggregate disk\nspace used by all instance volumes, custom volumes, and images of the\nproject on this specific storage pool.\n\nWhen set to 0, the pool is excluded from storage pool list for\nthe project.", + "shortdesc": "Maximum disk space used by the project on this pool", + "type": "string" + } + } + ] + } + }, "cluster": { "cluster": { "keys": [ diff --git a/lxd/project/limits/permissions.go b/lxd/project/limits/permissions.go index 95f85b5bb649..0eac04af6ad8 100644 --- a/lxd/project/limits/permissions.go +++ b/lxd/project/limits/permissions.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "slices" "strconv" "strings" @@ -23,6 +24,36 @@ import ( "github.com/canonical/lxd/shared/validate" ) +// projectLimitDiskPool is the prefix used for pool-specific disk limits. +var projectLimitDiskPool = "limits.disk.pool." + +// HiddenStoragePools returns a list of storage pools that should be hidden from users of the project. +func HiddenStoragePools(ctx context.Context, tx *db.ClusterTx, projectName string) ([]string, error) { + dbProject, err := cluster.GetProject(ctx, tx.Tx(), projectName) + if err != nil { + return nil, fmt.Errorf("Failed getting project: %w", err) + } + + project, err := dbProject.ToAPI(ctx, tx.Tx()) + if err != nil { + return nil, err + } + + hiddenPools := []string{} + for k, v := range project.Config { + if !strings.HasPrefix(k, projectLimitDiskPool) || v != "0" { + continue + } + + fields := strings.SplitN(k, projectLimitDiskPool, 2) + if len(fields) == 2 { + hiddenPools = append(hiddenPools, fields[1]) + } + } + + return hiddenPools, nil +} + // AllowInstanceCreation returns an error if any project-specific limit or // restriction is violated when creating a new instance. func AllowInstanceCreation(globalConfig *clusterConfig.Config, tx *db.ClusterTx, projectName string, req api.InstancesPost) error { @@ -234,7 +265,7 @@ func checkRestrictionsOnVolatileConfig(project api.Project, instanceType instanc // AllowVolumeCreation returns an error if any project-specific limit or // restriction is violated when creating a new custom volume in a project. -func AllowVolumeCreation(globalConfig *clusterConfig.Config, tx *db.ClusterTx, projectName string, req api.StorageVolumesPost) error { +func AllowVolumeCreation(globalConfig *clusterConfig.Config, tx *db.ClusterTx, projectName string, poolName string, req api.StorageVolumesPost) error { var globalConfigDump map[string]any if globalConfig != nil { globalConfigDump = globalConfig.Dump() @@ -256,8 +287,9 @@ func AllowVolumeCreation(globalConfig *clusterConfig.Config, tx *db.ClusterTx, p // Add the volume being created. 
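The pool-hiding logic in `HiddenStoragePools` reduces to scanning the project config for `limits.disk.pool.` keys whose value is exactly `0`. Here is a standalone sketch of that filtering, using a plain map where the real code loads the project config from the database; `hiddenPools` and `poolLimitPrefix` are illustrative names, and `strings.TrimPrefix` stands in for the `strings.SplitN` call used in the patch.

```go
package main

import (
	"fmt"
	"strings"
)

// poolLimitPrefix mirrors the projectLimitDiskPool prefix in the patch.
const poolLimitPrefix = "limits.disk.pool."

// hiddenPools returns the pool names whose per-pool disk limit is exactly "0",
// which is the condition HiddenStoragePools uses to hide a pool.
func hiddenPools(config map[string]string) []string {
	hidden := []string{}
	for k, v := range config {
		if !strings.HasPrefix(k, poolLimitPrefix) || v != "0" {
			continue
		}

		hidden = append(hidden, strings.TrimPrefix(k, poolLimitPrefix))
	}

	return hidden
}

func main() {
	cfg := map[string]string{
		"limits.disk":             "50MiB",
		"limits.disk.pool.limit1": "0",
		"limits.disk.pool.limit2": "10MiB",
	}

	fmt.Println(hiddenPools(cfg)) // [limit1]
}
```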
info.Volumes = append(info.Volumes, db.StorageVolumeArgs{ - Name: req.Name, - Config: req.Config, + Name: req.Name, + Config: req.Config, + PoolName: poolName, }) err = checkRestrictionsAndAggregateLimits(globalConfig, tx, info) @@ -329,8 +361,9 @@ func checkRestrictionsAndAggregateLimits(globalConfig *clusterConfig.Config, tx // across all project instances. aggregateKeys := []string{} isRestricted := false + for key, value := range info.Project.Config { - if shared.ValueInSlice(key, allAggregateLimits) { + if slices.Contains(allAggregateLimits, key) || strings.HasPrefix(key, projectLimitDiskPool) { aggregateKeys = append(aggregateKeys, key) continue } @@ -388,7 +421,14 @@ func getAggregateLimits(info *projectInfo, aggregateKeys []string) (map[string]a max := int64(-1) limit := info.Project.Config[key] if limit != "" { - parser := aggregateLimitConfigValueParsers[key] + keyName := key + + // Handle pool-specific limits. + if strings.HasPrefix(key, projectLimitDiskPool) { + keyName = "limits.disk" + } + + parser := aggregateLimitConfigValueParsers[keyName] max, err = parser(info.Project.Config[key]) if err != nil { return nil, err @@ -417,7 +457,14 @@ func checkAggregateLimits(info *projectInfo, aggregateKeys []string) error { } for _, key := range aggregateKeys { - parser := aggregateLimitConfigValueParsers[key] + keyName := key + + // Handle pool-specific limits. + if strings.HasPrefix(key, projectLimitDiskPool) { + keyName = "limits.disk" + } + + parser := aggregateLimitConfigValueParsers[keyName] max, err := parser(info.Project.Config[key]) if err != nil { return err @@ -427,6 +474,7 @@ func checkAggregateLimits(info *projectInfo, aggregateKeys []string) error { return fmt.Errorf("Reached maximum aggregate value %q for %q in project %q", info.Project.Config[key], key, info.Project.Name) } } + return nil } @@ -1125,7 +1173,14 @@ func validateAggregateLimit(totals map[string]int64, key, value string) error { return nil } - parser := aggregateLimitConfigValueParsers[key] + keyName := key + + // Handle pool-specific limits. + if strings.HasPrefix(key, projectLimitDiskPool) { + keyName = "limits.disk" + } + + parser := aggregateLimitConfigValueParsers[keyName] limit, err := parser(value) if err != nil { return fmt.Errorf("Invalid value %q for limit %q: %w", value, key, err) @@ -1133,7 +1188,14 @@ func validateAggregateLimit(totals map[string]int64, key, value string) error { total := totals[key] if limit < total { - printer := aggregateLimitConfigValuePrinters[key] + keyName := key + + // Handle pool-specific limits. 
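Several of the call sites above normalise a pool-specific key back to `limits.disk` before looking up the shared parser or printer. The following sketch shows that lookup pattern; the `parsers` table and `parserFor` helper are stand-ins for the real `aggregateLimitConfigValueParsers`, and the stub parser only accepts raw byte counts, whereas the real one understands human-readable sizes such as `10MiB`.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

const poolLimitPrefix = "limits.disk.pool."

// parsers is a stub for the real parser table: it only accepts raw byte
// counts, while the real "limits.disk" parser also handles sizes like "10MiB".
var parsers = map[string]func(string) (int64, error){
	"limits.disk": func(value string) (int64, error) {
		return strconv.ParseInt(value, 10, 64)
	},
}

// parserFor maps a pool-specific key onto the generic "limits.disk" entry,
// mirroring the keyName handling added across permissions.go.
func parserFor(key string) func(string) (int64, error) {
	if strings.HasPrefix(key, poolLimitPrefix) {
		key = "limits.disk"
	}

	return parsers[key]
}

func main() {
	parse := parserFor("limits.disk.pool.fast")

	limit, err := parse("1048576")
	fmt.Println(limit, err) // 1048576 <nil>
}
```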
+ if strings.HasPrefix(key, projectLimitDiskPool) { + keyName = "limits.disk" + } + + printer := aggregateLimitConfigValuePrinters[keyName] return fmt.Errorf("%q is too low: current total is %q", key, printer(total)) } @@ -1287,8 +1349,18 @@ func getTotalsAcrossProjectEntities(info *projectInfo, keys []string, skipUnset for _, key := range keys { totals[key] = 0 - if key == "limits.disk" { + if key == "limits.disk" || strings.HasPrefix(key, projectLimitDiskPool) { + poolName := "" + fields := strings.SplitN(key, projectLimitDiskPool, 2) + if len(fields) == 2 { + poolName = fields[1] + } + for _, volume := range info.Volumes { + if poolName != "" && volume.PoolName != poolName { + continue + } + value, ok := volume.Config["size"] if !ok { if skipUnset { @@ -1329,14 +1401,31 @@ func getInstanceLimits(instance api.Instance, keys []string, skipUnset bool, sto for _, key := range keys { var limit int64 - parser := aggregateLimitConfigValueParsers[key] + keyName := key + + // Handle pool-specific limits. + if strings.HasPrefix(key, projectLimitDiskPool) { + keyName = "limits.disk" + } + + parser := aggregateLimitConfigValueParsers[keyName] + + if key == "limits.disk" || strings.HasPrefix(key, projectLimitDiskPool) { + poolName := "" + fields := strings.SplitN(key, projectLimitDiskPool, 2) + if len(fields) == 2 { + poolName = fields[1] + } - if key == "limits.disk" { _, device, err := instancetype.GetRootDiskDevice(instance.Devices) if err != nil { return nil, fmt.Errorf("Failed getting root disk device for instance %q in project %q: %w", instance.Name, instance.Project, err) } + if poolName != "" && device["pool"] != poolName { + continue + } + value, ok := device["size"] if !ok || value == "" { if skipUnset { diff --git a/lxd/project/limits/state.go b/lxd/project/limits/state.go index 3962a95f298c..019e4f4505b1 100644 --- a/lxd/project/limits/state.go +++ b/lxd/project/limits/state.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "strconv" + "strings" "github.com/canonical/lxd/lxd/db" "github.com/canonical/lxd/lxd/instance/instancetype" @@ -29,6 +30,16 @@ func GetCurrentAllocations(globalConfig map[string]any, ctx context.Context, tx return nil, err } + // Get per-pool limits. + poolLimits := []string{} + for k := range info.Project.Config { + if strings.HasPrefix(k, projectLimitDiskPool) { + poolLimits = append(poolLimits, k) + } + } + + allAggregateLimits := append(allAggregateLimits, poolLimits...) + // Get the instance aggregated values. raw, err := getAggregateLimits(info, allAggregateLimits) if err != nil { @@ -41,6 +52,13 @@ func GetCurrentAllocations(globalConfig map[string]any, ctx context.Context, tx result["networks"] = raw["limits.networks"] result["processes"] = raw["limits.processes"] + // Add the pool-specific disk limits. + for k, v := range raw { + if strings.HasPrefix(k, projectLimitDiskPool) && v.Limit > 0 { + result[fmt.Sprintf("disk.%s", strings.SplitN(k, ".", 4)[3])] = v + } + } + // Get the instance count values. 
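Aggregation for a pool-specific key only counts volumes on that pool, while plain `limits.disk` still counts everything. Here is a self-contained sketch of that per-pool summation; the `volume` type and `totalDiskUsage` helper are illustrative, and sizes are plain byte counts where the real code parses size strings from volume config and root disk devices.

```go
package main

import "fmt"

// volume is a trimmed-down stand-in for db.StorageVolumeArgs.
type volume struct {
	PoolName string
	Size     int64
}

// totalDiskUsage sums volume sizes, optionally restricted to a single pool,
// mirroring the pool filter added to getTotalsAcrossProjectEntities: an empty
// poolName corresponds to the generic "limits.disk" key.
func totalDiskUsage(volumes []volume, poolName string) int64 {
	var total int64
	for _, v := range volumes {
		if poolName != "" && v.PoolName != poolName {
			continue
		}

		total += v.Size
	}

	return total
}

func main() {
	vols := []volume{
		{PoolName: "limit1", Size: 10 << 20},
		{PoolName: "limit2", Size: 40 << 20},
	}

	fmt.Println(totalDiskUsage(vols, ""))       // all pools: 52428800
	fmt.Println(totalDiskUsage(vols, "limit1")) // limit1 only: 10485760
}
```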
count, limit, err := getTotalInstanceCountLimit(info) if err != nil { diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go index 794d705b4fc4..6fabe4934d92 100644 --- a/lxd/storage/backend_lxd.go +++ b/lxd/storage/backend_lxd.go @@ -7572,7 +7572,7 @@ func (b *lxdBackend) CreateCustomVolumeFromISO(projectName string, volName strin } err := b.state.DB.Cluster.Transaction(b.state.ShutdownCtx, func(ctx context.Context, tx *db.ClusterTx) error { - return limits.AllowVolumeCreation(b.state.GlobalConfig, tx, projectName, req) + return limits.AllowVolumeCreation(b.state.GlobalConfig, tx, projectName, b.name, req) }) if err != nil { return fmt.Errorf("Failed checking volume creation allowed: %w", err) @@ -7677,7 +7677,7 @@ func (b *lxdBackend) CreateCustomVolumeFromBackup(srcBackup backup.Info, srcData } err = b.state.DB.Cluster.Transaction(b.state.ShutdownCtx, func(ctx context.Context, tx *db.ClusterTx) error { - return limits.AllowVolumeCreation(b.state.GlobalConfig, tx, srcBackup.Project, req) + return limits.AllowVolumeCreation(b.state.GlobalConfig, tx, srcBackup.Project, b.name, req) }) if err != nil { return fmt.Errorf("Failed checking volume creation allowed: %w", err) diff --git a/lxd/storage_pools.go b/lxd/storage_pools.go index e139cf544179..60c4cf14160a 100644 --- a/lxd/storage_pools.go +++ b/lxd/storage_pools.go @@ -6,6 +6,7 @@ import ( "fmt" "net/http" "net/url" + "slices" "strings" "sync" @@ -19,6 +20,7 @@ import ( dbCluster "github.com/canonical/lxd/lxd/db/cluster" "github.com/canonical/lxd/lxd/lifecycle" "github.com/canonical/lxd/lxd/project" + "github.com/canonical/lxd/lxd/project/limits" "github.com/canonical/lxd/lxd/request" "github.com/canonical/lxd/lxd/response" "github.com/canonical/lxd/lxd/state" @@ -148,13 +150,24 @@ func storagePoolsGet(d *Daemon, r *http.Request) response.Response { recursion := util.IsRecursionRequest(r) var poolNames []string + var hiddenPoolNames []string err := s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error { var err error + // Load the pool names. poolNames, err = tx.GetStoragePoolNames(ctx) + if err != nil { + return err + } - return err + // Load the project limits. + hiddenPoolNames, err = limits.HiddenStoragePools(ctx, tx, request.ProjectParam(r)) + if err != nil { + return err + } + + return nil }) if err != nil && !response.IsNotFoundError(err) { return response.SmartError(err) @@ -168,6 +181,11 @@ func storagePoolsGet(d *Daemon, r *http.Request) response.Response { resultString := []string{} resultMap := []api.StoragePool{} for _, poolName := range poolNames { + // Hide storage pools with a 0 project limit. + if slices.Contains(hiddenPoolNames, poolName) { + continue + } + if !recursion { resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s", version.APIVersion, poolName)) } else { @@ -616,6 +634,27 @@ func storagePoolGet(d *Daemon, r *http.Request) response.Response { memberSpecific = true } + var hiddenPoolNames []string + err = s.DB.Cluster.Transaction(r.Context(), func(ctx context.Context, tx *db.ClusterTx) error { + var err error + + // Load the project limits. + hiddenPoolNames, err = limits.HiddenStoragePools(ctx, tx, request.ProjectParam(r)) + if err != nil { + return err + } + + return nil + }) + if err != nil { + return response.SmartError(err) + } + + // Hide storage pools with a 0 project limit. + if slices.Contains(hiddenPoolNames, poolName) { + return response.NotFound(nil) + } + // Get the existing storage pool. 
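On the API side, the pool list is filtered against the hidden names, and a direct GET of a hidden pool returns 404. A sketch of the list filtering, using the same `slices.Contains` check as the patch; `visiblePools` is an illustrative helper, not part of the change.

```go
package main

import (
	"fmt"
	"slices"
)

// visiblePools drops the pools hidden by the project's limits, mirroring the
// filtering added to storagePoolsGet.
func visiblePools(all []string, hidden []string) []string {
	visible := make([]string, 0, len(all))
	for _, name := range all {
		if slices.Contains(hidden, name) {
			continue
		}

		visible = append(visible, name)
	}

	return visible
}

func main() {
	all := []string{"default", "limit1", "limit2"}
	hidden := []string{"limit1", "limit2"}

	fmt.Println(visiblePools(all, hidden)) // [default]
}
```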
pool, err := storagePools.LoadByName(s, poolName) if err != nil { diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go index 53b844351f4f..da061c307617 100644 --- a/lxd/storage_volumes.go +++ b/lxd/storage_volumes.go @@ -1048,7 +1048,7 @@ func storagePoolVolumesPost(d *Daemon, r *http.Request) response.Response { return err } - err = limits.AllowVolumeCreation(s.GlobalConfig, tx, projectName, req) + err = limits.AllowVolumeCreation(s.GlobalConfig, tx, projectName, poolName, req) if err != nil { return err } diff --git a/shared/version/api.go b/shared/version/api.go index 58506177068b..4c847c5fce92 100644 --- a/shared/version/api.go +++ b/shared/version/api.go @@ -415,6 +415,7 @@ var APIExtensions = []string{ "devlxd_images_vm", "disk_io_bus_virtio_blk", "metrics_api_requests", + "projects_limits_disk_pool", } // APIExtensionsCount returns the number of available API extensions. diff --git a/test/suites/projects.sh b/test/suites/projects.sh index 738cbe275f3d..8a06ecd0b96c 100644 --- a/test/suites/projects.sh +++ b/test/suites/projects.sh @@ -561,6 +561,43 @@ test_projects_limits() { deps/import-busybox --project p1 --alias testimage + # Test per-pool limits. + lxc storage create limit1 dir + lxc storage create limit2 dir + + lxc project set p1 limits.disk=50MiB + lxc project set p1 limits.disk.pool.limit1=0 + lxc project set p1 limits.disk.pool.limit2=0 + + ! lxc storage list | grep -q limit1 || false + ! lxc storage list | grep -q limit2 || false + + lxc storage volume create "${pool}" foo size=10MiB + ! lxc storage volume create "${pool}" bar size=50MiB || false + lxc storage volume delete "${pool}" foo + + ! lxc storage volume create limit1 foo size=10GiB || false + ! lxc storage volume create limit2 foo size=10GiB || false + + lxc project set p1 limits.disk.pool.limit1=10MiB + lxc project set p1 limits.disk.pool.limit2=10MiB + lxc storage volume create limit1 foo size=10MiB + ! lxc storage volume create limit1 bar size=10MiB || false + lxc storage volume create limit2 foo size=10MiB + ! lxc storage volume create limit2 bar size=10MiB || false + + ! lxc storage volume create "${pool}" foo size=40MiB || false + lxc storage volume delete limit1 foo + lxc storage volume delete limit2 foo + lxc storage volume create "${pool}" foo size=40MiB + + lxc storage volume delete "${pool}" foo + lxc project unset p1 limits.disk.pool.limit1 + lxc project unset p1 limits.disk.pool.limit2 + lxc project unset p1 limits.disk + lxc storage delete limit1 + lxc storage delete limit2 + # Create a couple of containers in the project. lxc init testimage c1 lxc init testimage c2
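Clients that want to use per-pool limits can gate on the new API extension before setting `limits.disk.pool.POOL_NAME` keys. A hedged sketch using the Go client, assuming the standard `lxd.ConnectLXDUnix` and `HasExtension` helpers from `github.com/canonical/lxd/client`.

```go
package main

import (
	"fmt"
	"log"

	lxd "github.com/canonical/lxd/client"
)

func main() {
	// Connect over the local UNIX socket; an empty path selects the default.
	c, err := lxd.ConnectLXDUnix("", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Only servers advertising the extension accept limits.disk.pool.POOL_NAME.
	if !c.HasExtension("projects_limits_disk_pool") {
		fmt.Println("server does not support per-pool project disk limits")
		return
	}

	fmt.Println("per-pool project disk limits are available")
}
```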