Compactor: Remove BlockDeletionMarksMigrationEnabled #122

Status: Merged (17 commits, Aug 19, 2021)
Changes shown are from 3 commits.
docs/blocks-storage/compactor.md (7 changes: 0 additions & 7 deletions)

@@ -147,13 +147,6 @@ compactor:
 # CLI flag: -compactor.tenant-cleanup-delay
 [tenant_cleanup_delay: <duration> | default = 6h]
 
-# When enabled, at compactor startup the bucket will be scanned and all found
-# deletion marks inside the block location will be copied to the markers
-# global location too. This option can (and should) be safely disabled as soon
-# as the compactor has successfully run at least once.
-# CLI flag: -compactor.block-deletion-marks-migration-enabled
-[block_deletion_marks_migration_enabled: <boolean> | default = true]
-
 # Comma separated list of tenants that can be compacted. If specified, only
 # these tenants will be compacted by compactor, otherwise all tenants can be
 # compacted. Subject to sharding.
docs/configuration/config-file-reference.md (7 changes: 0 additions & 7 deletions)

@@ -5046,13 +5046,6 @@ The `compactor_config` configures the compactor for the blocks storage.
 # CLI flag: -compactor.tenant-cleanup-delay
 [tenant_cleanup_delay: <duration> | default = 6h]
 
-# When enabled, at compactor startup the bucket will be scanned and all found
-# deletion marks inside the block location will be copied to the markers global
-# location too. This option can (and should) be safely disabled as soon as the
-# compactor has successfully run at least once.
-# CLI flag: -compactor.block-deletion-marks-migration-enabled
-[block_deletion_marks_migration_enabled: <boolean> | default = true]
-
 # Comma separated list of tenants that can be compacted. If specified, only
 # these tenants will be compacted by compactor, otherwise all tenants can be
 # compacted. Subject to sharding.
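For context, the option removed in the two documentation hunks above controlled a one-off startup migration: scan the bucket and copy each per-block deletion mark into the tenant's global markers location as well. The sketch below only illustrates that idea; the in-memory bucket and the object-key layout are assumptions, not the actual Cortex bucketindex implementation.

```go
// Illustrative sketch only: the key layout ("<tenant>/<block>/deletion-mark.json",
// "<tenant>/markers/...") and the in-memory bucket are assumptions, not Cortex's
// real storage schema or client.
package main

import (
	"fmt"
	"path"
)

// memBucket stands in for an object-store client.
type memBucket map[string][]byte

func (b memBucket) exists(key string) bool { _, ok := b[key]; return ok }
func (b memBucket) copy(src, dst string)   { b[dst] = b[src] }

// migrateDeletionMarks copies every per-block deletion mark it finds into the
// tenant-global markers location, leaving the original object in place.
func migrateDeletionMarks(bkt memBucket, tenant string, blockIDs []string) {
	for _, id := range blockIDs {
		perBlock := path.Join(tenant, id, "deletion-mark.json")
		if !bkt.exists(perBlock) {
			continue
		}
		global := path.Join(tenant, "markers", id+"-deletion-mark.json")
		bkt.copy(perBlock, global)
	}
}

func main() {
	bkt := memBucket{"tenant-1/01ABC/deletion-mark.json": []byte("{}")}
	migrateDeletionMarks(bkt, "tenant-1", []string{"01ABC", "01DEF"})
	fmt.Println(bkt.exists("tenant-1/markers/01ABC-deletion-mark.json")) // true
}
```

Once every tenant has gone through this copy at least once, the global markers location matches the per-block marks, which is why the old help text said the flag could be safely disabled after one successful compactor run.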
pkg/compactor/blocks_cleaner.go (11 changes: 5 additions & 6 deletions)

@@ -30,11 +30,10 @@ import (
 )
 
 type BlocksCleanerConfig struct {
-    DeletionDelay                      time.Duration
-    CleanupInterval                    time.Duration
-    CleanupConcurrency                 int
-    BlockDeletionMarksMigrationEnabled bool // TODO Discuss whether we should remove it in Cortex 1.8.0 and document that upgrading to 1.7.0 before 1.8.0 is required.
-    TenantCleanupDelay                 time.Duration // Delay before removing tenant deletion mark and "debug".
+    DeletionDelay      time.Duration
+    CleanupInterval    time.Duration
+    CleanupConcurrency int
+    TenantCleanupDelay time.Duration // Delay before removing tenant deletion mark and "debug".
 }
 
 type BlocksCleaner struct {

@@ -306,7 +305,7 @@ func (c *BlocksCleaner) cleanUser(ctx context.Context, userID string, firstRun b
     }()
 
     // Migrate block deletion marks to the global markers location. This operation is a best-effort.
-    if firstRun && c.cfg.BlockDeletionMarksMigrationEnabled {
+    if firstRun {
         if err := bucketindex.MigrateBlockDeletionMarksToGlobalLocation(ctx, c.bucketClient, userID, c.cfgProvider); err != nil {
             level.Warn(userLogger).Log("msg", "failed to migrate block deletion marks to the global markers location", "err", err)
         } else {
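The cleanUser hunk above keeps the migration as a best-effort step that now runs unconditionally on the first cleanup cycle: a failure is only logged and cleanup continues. A minimal, self-contained sketch of that pattern follows; the generic migrate callback and standard-library logging are stand-ins for the real bucketindex call and go-kit logger.

```go
// Sketch of the "best-effort on first run" pattern; migrateFunc and the logging
// are stand-ins, not the Cortex BlocksCleaner implementation.
package main

import (
	"context"
	"errors"
	"log"
)

type migrateFunc func(ctx context.Context) error

func cleanUser(ctx context.Context, userID string, firstRun bool, migrate migrateFunc) error {
	if firstRun {
		// Best-effort: a failed migration must not abort the cleanup cycle.
		if err := migrate(ctx); err != nil {
			log.Printf("msg=%q user=%s err=%v",
				"failed to migrate block deletion marks to the global markers location", userID, err)
		}
	}
	// ... the regular per-tenant cleanup (deleting marked blocks, etc.) would follow here ...
	return nil
}

func main() {
	failing := func(ctx context.Context) error { return errors.New("object storage unavailable") }
	// The warning is logged, but the cleanup itself still succeeds.
	if err := cleanUser(context.Background(), "tenant-1", true, failing); err != nil {
		log.Fatal(err)
	}
}
```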
pkg/compactor/blocks_cleaner_test.go (9 changes: 4 additions & 5 deletions)

@@ -110,11 +110,10 @@ func testBlocksCleanerWithOptions(t *testing.T, options testBlocksCleanerOptions
     }
 
     cfg := BlocksCleanerConfig{
-        DeletionDelay:                      deletionDelay,
-        CleanupInterval:                    time.Minute,
-        CleanupConcurrency:                 options.concurrency,
-        BlockDeletionMarksMigrationEnabled: options.markersMigrationEnabled,
-        TenantCleanupDelay:                 options.tenantDeletionDelay,
+        DeletionDelay:      deletionDelay,
+        CleanupInterval:    time.Minute,
+        CleanupConcurrency: options.concurrency,
+        TenantCleanupDelay: options.tenantDeletionDelay,
     }
 
     reg := prometheus.NewPedanticRegistry()
pkg/compactor/compactor.go (13 changes: 4 additions & 9 deletions)

@@ -106,9 +106,6 @@ type Config struct {
     DeletionDelay      time.Duration `yaml:"deletion_delay"`
     TenantCleanupDelay time.Duration `yaml:"tenant_cleanup_delay"`
 
-    // Whether the migration of block deletion marks to the global markers location is enabled.
-    BlockDeletionMarksMigrationEnabled bool `yaml:"block_deletion_marks_migration_enabled"`
-
     EnabledTenants  flagext.StringSliceCSV `yaml:"enabled_tenants"`
     DisabledTenants flagext.StringSliceCSV `yaml:"disabled_tenants"`

@@ -150,7 +147,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
         "If not 0, blocks will be marked for deletion and compactor component will permanently delete blocks marked for deletion from the bucket. "+
         "If 0, blocks will be deleted straight away. Note that deleting blocks immediately can cause query failures.")
     f.DurationVar(&cfg.TenantCleanupDelay, "compactor.tenant-cleanup-delay", 6*time.Hour, "For tenants marked for deletion, this is time between deleting of last block, and doing final cleanup (marker files, debug files) of the tenant.")
-    f.BoolVar(&cfg.BlockDeletionMarksMigrationEnabled, "compactor.block-deletion-marks-migration-enabled", true, "When enabled, at compactor startup the bucket will be scanned and all found deletion marks inside the block location will be copied to the markers global location too. This option can (and should) be safely disabled as soon as the compactor has successfully run at least once.")
 
     f.Var(&cfg.EnabledTenants, "compactor.enabled-tenants", "Comma separated list of tenants that can be compacted. If specified, only these tenants will be compacted by compactor, otherwise all tenants can be compacted. Subject to sharding.")
     f.Var(&cfg.DisabledTenants, "compactor.disabled-tenants", "Comma separated list of tenants that cannot be compacted by this compactor. If specified, and compactor would normally pick given tenant for compaction (via -compactor.enabled-tenants or sharding), it will be ignored instead.")
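The -compactor.enabled-tenants and -compactor.disabled-tenants help text above describes a simple allow/deny filter: an empty allow list means every tenant may be compacted (still subject to sharding), and the deny list always wins. A hedged sketch of that semantics follows; isTenantCompactable is a hypothetical helper, not the Cortex function.

```go
// Minimal sketch of the allow/deny semantics from the flag help text above.
package main

import "fmt"

func isTenantCompactable(tenantID string, enabled, disabled []string) bool {
	for _, t := range disabled {
		if t == tenantID {
			return false // explicitly disabled tenants are never compacted
		}
	}
	if len(enabled) == 0 {
		return true // no allow list: any tenant may be compacted (subject to sharding)
	}
	for _, t := range enabled {
		if t == tenantID {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isTenantCompactable("team-a", nil, []string{"team-b"}))                           // true
	fmt.Println(isTenantCompactable("team-b", []string{"team-a", "team-b"}, []string{"team-b"})) // false: deny wins
}
```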
@@ -360,11 +356,10 @@ func (c *Compactor) starting(ctx context.Context) error {
 
     // Create the blocks cleaner (service).
     c.blocksCleaner = NewBlocksCleaner(BlocksCleanerConfig{
-        DeletionDelay:                      c.compactorCfg.DeletionDelay,
-        CleanupInterval:                    util.DurationWithJitter(c.compactorCfg.CleanupInterval, 0.1),
-        CleanupConcurrency:                 c.compactorCfg.CleanupConcurrency,
-        BlockDeletionMarksMigrationEnabled: c.compactorCfg.BlockDeletionMarksMigrationEnabled,
-        TenantCleanupDelay:                 c.compactorCfg.TenantCleanupDelay,
+        DeletionDelay:      c.compactorCfg.DeletionDelay,
+        CleanupInterval:    util.DurationWithJitter(c.compactorCfg.CleanupInterval, 0.1),
+        CleanupConcurrency: c.compactorCfg.CleanupConcurrency,
+        TenantCleanupDelay: c.compactorCfg.TenantCleanupDelay,
     }, c.bucketClient, c.usersScanner, c.cfgProvider, c.parentLogger, c.registerer)
 
     // Initialize the compactors ring if sharding is enabled.
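In the starting() hunk above, the cleanup interval is passed through util.DurationWithJitter(..., 0.1). The sketch below shows the general idea of such a helper, namely spreading the interval by up to roughly 10% in either direction so that many compactor replicas do not all start cleanup at the same instant; this is an assumption about the behaviour, not the actual cortex util implementation.

```go
// Hedged sketch of a jitter helper; the real util.DurationWithJitter may differ.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// durationWithJitter returns d shifted by a uniformly random amount in
// [-variance*d, +variance*d), e.g. 15m with variance 0.1 lands in 13m30s..16m30s.
func durationWithJitter(d time.Duration, variance float64) time.Duration {
	maxJitter := int64(float64(d) * variance)
	if maxJitter <= 0 {
		return d
	}
	jitter := rand.Int63n(2*maxJitter) - maxJitter
	return d + time.Duration(jitter)
}

func main() {
	fmt.Println(durationWithJitter(15*time.Minute, 0.1))
}
```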
pkg/compactor/compactor_test.go (3 changes: 0 additions & 3 deletions)

@@ -1093,9 +1093,6 @@ func prepareConfig() Config {
     compactorCfg.retryMinBackoff = 0
     compactorCfg.retryMaxBackoff = 0
 
-    // The migration is tested in a dedicated test.
-    compactorCfg.BlockDeletionMarksMigrationEnabled = false
-
     // Do not wait for ring stability by default, in order to speed up tests.
     compactorCfg.ShardingRing.WaitStabilityMinDuration = 0
     compactorCfg.ShardingRing.WaitStabilityMaxDuration = 0
tools/doc-generator/parser.go (2 changes: 0 additions & 2 deletions)

@@ -254,8 +254,6 @@ func getFieldType(t reflect.Type) (string, error) {
         return "url", nil
     case "time.Duration":
         return "duration", nil
-    case "cortex.moduleName":
-        return "string", nil
     case "flagext.StringSliceCSV":
         return "string", nil
     case "flagext.CIDRSliceCSV":
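The parser.go change drops the cortex.moduleName case from getFieldType, which maps Go field types to the type labels used in the generated configuration reference. Below is a simplified, self-contained sketch of that switch-on-type-name idea; it covers only a few of the real cases, and the error text is illustrative.

```go
// Simplified sketch of a getFieldType-style mapping from Go types to the type
// labels used in generated config docs; not the full Cortex doc-generator.
package main

import (
	"fmt"
	"reflect"
	"time"
)

func getFieldType(t reflect.Type) (string, error) {
	switch t.String() {
	case "time.Duration":
		return "duration", nil
	case "string":
		return "string", nil
	case "bool":
		return "boolean", nil
	default:
		return "", fmt.Errorf("unsupported type %s", t.String())
	}
}

func main() {
	fmt.Println(getFieldType(reflect.TypeOf(time.Second))) // duration <nil>
	fmt.Println(getFieldType(reflect.TypeOf(true)))        // boolean <nil>
}
```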