Demote many log lines from info to debug #452

Merged · 1 commit · Jul 18, 2024
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -23,6 +23,7 @@ river migrate-up --database-url "$DATABASE_URL"
### Changed

- Tags are now limited to 255 characters in length, and should match the regex `\A[\w][\w\-]+[\w]\z` (importantly, they can't contain commas). [PR #351](https://github.com/riverqueue/river/pull/351).
+ - Many info logging statements have been demoted to debug level. [PR #452](https://github.com/riverqueue/river/pull/452).

## [0.9.0] - 2024-07-04

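For anyone who relied on these info lines, the demoted output can be restored by handing the client a debug-level `slog.Logger`. A minimal sketch, assuming River's `Config.Logger` field and the `riverpgxv5` driver (both part of River's public API around the time of this PR):

```go
package main

import (
	"context"
	"log/slog"
	"os"

	"github.com/jackc/pgx/v5/pgxpool"
	"github.com/riverqueue/river"
	"github.com/riverqueue/river/riverdriver/riverpgxv5"
)

func main() {
	ctx := context.Background()

	// A debug-level handler surfaces the lines this PR demotes from info.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelDebug,
	}))

	dbPool, err := pgxpool.New(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		panic(err)
	}
	defer dbPool.Close()

	// Config.Logger overrides River's default logger for all of its services.
	client, err := river.NewClient(riverpgxv5.New(dbPool), &river.Config{
		Logger: logger,
	})
	if err != nil {
		panic(err)
	}
	_ = client // insert-only client; add Queues/Workers to also work jobs
}
```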
2 changes: 1 addition & 1 deletion client.go
@@ -936,7 +936,7 @@ func (c *Client[TTx]) logStatsLoop(ctx context.Context, shouldStart bool, starte

func (c *Client[TTx]) handleLeadershipChangeLoop(ctx context.Context, shouldStart bool, started, stopped func()) error {
handleLeadershipChange := func(ctx context.Context, notification *leadership.Notification) {
- c.baseService.Logger.InfoContext(ctx, c.baseService.Name+": Election change received",
+ c.baseService.Logger.DebugContext(ctx, c.baseService.Name+": Election change received",
slog.String("client_id", c.config.ID), slog.Bool("is_leader", notification.IsLeader))

switch {
16 changes: 8 additions & 8 deletions internal/leadership/elector.go
@@ -126,9 +126,9 @@ func (e *Elector) Start(ctx context.Context) error {

var sub *notifier.Subscription
if e.notifier == nil {
- e.Logger.InfoContext(ctx, e.Name+": No notifier configured; starting in poll mode", "client_id", e.config.ClientID)
+ e.Logger.DebugContext(ctx, e.Name+": No notifier configured; starting in poll mode", "client_id", e.config.ClientID)
} else {
- e.Logger.InfoContext(ctx, e.Name+": Listening for leadership changes", "client_id", e.config.ClientID, "topic", notifier.NotificationTopicLeadership)
+ e.Logger.DebugContext(ctx, e.Name+": Listening for leadership changes", "client_id", e.config.ClientID, "topic", notifier.NotificationTopicLeadership)
var err error
sub, err = e.notifier.Listen(ctx, notifier.NotificationTopicLeadership, func(topic notifier.NotificationTopic, payload string) {
e.handleLeadershipNotification(ctx, topic, payload)
@@ -163,7 +163,7 @@ func (e *Elector) Start(ctx context.Context) error {
return
}

- e.Logger.InfoContext(ctx, e.Name+": Gained leadership", "client_id", e.config.ClientID)
+ e.Logger.DebugContext(ctx, e.Name+": Gained leadership", "client_id", e.config.ClientID)
e.testSignals.GainedLeadership.Signal(struct{}{})

err := e.keepLeadershipLoop(ctx)
@@ -188,7 +188,7 @@ func (e *Elector) attemptGainLeadershipLoop(ctx context.Context) error {
var numErrors int

for {
- e.Logger.InfoContext(ctx, e.Name+": Attempting to gain leadership", "client_id", e.config.ClientID)
+ e.Logger.DebugContext(ctx, e.Name+": Attempting to gain leadership", "client_id", e.config.ClientID)

elected, err := attemptElectOrReelect(ctx, e.exec, false, &riverdriver.LeaderElectParams{
LeaderID: e.config.ClientID,
@@ -246,7 +246,7 @@ func (e *Elector) handleLeadershipNotification(ctx context.Context, topic notifi
return
}

- e.Logger.InfoContext(ctx, e.Name+": Received notification from notifier", "action", notification.Action, "client_id", e.config.ClientID)
+ e.Logger.DebugContext(ctx, e.Name+": Received notification from notifier", "action", notification.Action, "client_id", e.config.ClientID)

if notification.Action != "resigned" {
// We only care about resignations because we use them to preempt the
@@ -324,7 +324,7 @@ func (e *Elector) keepLeadershipLoop(ctx context.Context) error {
}
}

- e.Logger.InfoContext(ctx, e.Name+": Current leader attempting reelect", "client_id", e.config.ClientID)
+ e.Logger.DebugContext(ctx, e.Name+": Current leader attempting reelect", "client_id", e.config.ClientID)

reelected, err := attemptElectOrReelect(ctx, e.exec, true, &riverdriver.LeaderElectParams{
LeaderID: e.config.ClientID,
@@ -364,7 +364,7 @@ func (e *Elector) keepLeadershipLoop(ctx context.Context) error {
// always surrendered in a timely manner so it can be picked up quickly by
// another client, even in the event of a cancellation.
func (e *Elector) attemptResignLoop(ctx context.Context) {
- e.Logger.InfoContext(ctx, e.Name+": Attempting to resign leadership", "client_id", e.config.ClientID)
+ e.Logger.DebugContext(ctx, e.Name+": Attempting to resign leadership", "client_id", e.config.ClientID)

// Make a good faith attempt to resign, even in the presence of errors, but
// don't keep hammering if it doesn't work. In case a resignation failure,
@@ -411,7 +411,7 @@ func (e *Elector) attemptResign(ctx context.Context, attempt int) error {
}

if resigned {
- e.Logger.InfoContext(ctx, e.Name+": Resigned leadership successfully", "client_id", e.config.ClientID)
+ e.Logger.DebugContext(ctx, e.Name+": Resigned leadership successfully", "client_id", e.config.ClientID)
e.testSignals.ResignedLeadership.Signal(struct{}{})
}

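Both the elector above and the producer further down log "starting in poll mode" when no notifier is present. A hedged sketch of how a caller ends up on that path, reusing `dbPool` and `logger` from the earlier sketch and assuming River's `PollOnly` config option (treat the field name as an assumption if you're on an older release):

```go
// PollOnly (assumed field name) skips LISTEN/NOTIFY entirely, so the elector
// and producer fall back to polling and emit the now debug-level
// "starting in poll mode" lines.
client, err := river.NewClient(riverpgxv5.New(dbPool), &river.Config{
	Logger:   logger,
	PollOnly: true,
})
if err != nil {
	panic(err)
}
_ = client
```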
10 changes: 6 additions & 4 deletions internal/maintenance/job_cleaner.go
@@ -124,9 +124,11 @@ func (s *JobCleaner) Start(ctx context.Context) error { //nolint:dupl
continue
}

- s.Logger.InfoContext(ctx, s.Name+logPrefixRanSuccessfully,
-     slog.Int("num_jobs_deleted", res.NumJobsDeleted),
- )
+ if res.NumJobsDeleted > 0 {
+     s.Logger.InfoContext(ctx, s.Name+logPrefixRanSuccessfully,
+         slog.Int("num_jobs_deleted", res.NumJobsDeleted),
+     )
+ }
Review comment from the PR author:
There's some argument to be made that these changes are detrimental to debugging tests, but I generally find that if I'm debugging anything non-trivial I need to add more log statements anyway because the existing ones just aren't enough.

}
}()

@@ -170,7 +172,7 @@ func (s *JobCleaner) runOnce(ctx context.Context) (*jobCleanerRunOnceResult, err
break
}

- s.Logger.InfoContext(ctx, s.Name+": Deleted batch of jobs",
+ s.Logger.DebugContext(ctx, s.Name+": Deleted batch of jobs",
slog.Int("num_jobs_deleted", numDeleted),
)

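On the author's point about debugging tests: one way to keep the demoted lines visible in test output is a debug-level logger that writes through `testing.T`. A minimal sketch using only the standard library (the helper names are illustrative, not River's actual test utilities):

```go
package maintenance_test

import (
	"log/slog"
	"testing"
)

// testWriter adapts *testing.T to io.Writer so slog output lands in the
// test log, which is shown only for failing runs or with -v.
type testWriter struct{ t *testing.T }

func (w testWriter) Write(p []byte) (int, error) {
	w.t.Log(string(p))
	return len(p), nil
}

// testLogger returns a logger at debug level so lines demoted by this PR
// still appear while debugging a test.
func testLogger(t *testing.T) *slog.Logger {
	t.Helper()
	return slog.New(slog.NewTextHandler(testWriter{t: t}, &slog.HandlerOptions{
		Level: slog.LevelDebug,
	}))
}
```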
15 changes: 6 additions & 9 deletions internal/maintenance/job_rescuer.go
@@ -130,10 +130,12 @@ func (s *JobRescuer) Start(ctx context.Context) error {
continue
}

- s.Logger.InfoContext(ctx, s.Name+logPrefixRanSuccessfully,
-     slog.Int64("num_jobs_discarded", res.NumJobsDiscarded),
-     slog.Int64("num_jobs_retry_scheduled", res.NumJobsRetried),
- )
+ if res.NumJobsDiscarded > 0 || res.NumJobsRetried > 0 {
+     s.Logger.InfoContext(ctx, s.Name+logPrefixRanSuccessfully,
+         slog.Int64("num_jobs_discarded", res.NumJobsDiscarded),
+         slog.Int64("num_jobs_retry_scheduled", res.NumJobsRetried),
+     )
+ }
}
}()

@@ -232,11 +234,6 @@ func (s *JobRescuer) runOnce(ctx context.Context) (*rescuerRunOnceResult, error)
break
}

- s.Logger.InfoContext(ctx, s.Name+": Rescued batch of jobs",
-     slog.Int64("num_jobs_discarded", res.NumJobsDiscarded),
-     slog.Int64("num_jobs_retried", res.NumJobsRetried),
- )
Review comment from the PR author:
A couple of these services effectively had doubled-up run log lines. I took one half out and just left the line above.


s.CancellableSleepRandomBetween(ctx, BatchBackoffMin, BatchBackoffMax)
}

13 changes: 6 additions & 7 deletions internal/maintenance/job_scheduler.go
@@ -115,9 +115,12 @@ func (s *JobScheduler) Start(ctx context.Context) error { //nolint:dupl
}
continue
}
- s.Logger.InfoContext(ctx, s.Name+logPrefixRanSuccessfully,
-     slog.Int("num_jobs_scheduled", res.NumCompletedJobsScheduled),
- )
+
+ if res.NumCompletedJobsScheduled > 0 {
+     s.Logger.InfoContext(ctx, s.Name+logPrefixRanSuccessfully,
+         slog.Int("num_jobs_scheduled", res.NumCompletedJobsScheduled),
+     )
+ }
}
}()

@@ -191,10 +194,6 @@ func (s *JobScheduler) runOnce(ctx context.Context) (*schedulerRunOnceResult, er
break
}

- s.Logger.InfoContext(ctx, s.Name+": Scheduled batch of jobs",
-     slog.Int("num_completed_jobs_scheduled", numScheduled),
- )
-
s.CancellableSleepRandomBetween(ctx, BatchBackoffMin, BatchBackoffMax)
}

12 changes: 5 additions & 7 deletions internal/maintenance/queue_cleaner.go
@@ -106,9 +106,11 @@ func (s *QueueCleaner) Start(ctx context.Context) error {
continue
}

- s.Logger.InfoContext(ctx, s.Name+logPrefixRanSuccessfully,
-     slog.Int("num_queues_deleted", len(res.QueuesDeleted)),
- )
+ if len(res.QueuesDeleted) > 0 {
+     s.Logger.InfoContext(ctx, s.Name+logPrefixRanSuccessfully,
+         slog.String("queues_deleted", strings.Join(res.QueuesDeleted, ",")),
+     )
+ }
}
}()

@@ -150,10 +152,6 @@ func (s *QueueCleaner) runOnce(ctx context.Context) (*queueCleanerRunOnceResult,
break
}

- s.Logger.InfoContext(ctx, s.Name+": Deleted batch of queues",
-     slog.String("queues_deleted", strings.Join(queuesDeleted, ",")),
- )
-
s.CancellableSleepRandomBetween(ctx, BatchBackoffMin, BatchBackoffMax)
}

4 changes: 2 additions & 2 deletions internal/maintenance/reindexer.go
@@ -106,7 +106,7 @@ func (s *Reindexer) Start(ctx context.Context) error {

nextRunAt := s.Config.ScheduleFunc(time.Now().UTC())

- s.Logger.InfoContext(ctx, s.Name+": Scheduling first run", slog.Time("next_run_at", nextRunAt))
+ s.Logger.DebugContext(ctx, s.Name+": Scheduling first run", slog.Time("next_run_at", nextRunAt))

timerUntilNextRun := time.NewTimer(time.Until(nextRunAt))

@@ -130,7 +130,7 @@ func (s *Reindexer) Start(ctx context.Context) error {
nextRunAt = s.Config.ScheduleFunc(nextRunAt)

// TODO: maybe we should log differently if some of these fail?
- s.Logger.InfoContext(ctx, s.Name+logPrefixRanSuccessfully,
+ s.Logger.DebugContext(ctx, s.Name+logPrefixRanSuccessfully,
slog.Time("next_run_at", nextRunAt), slog.Int("num_reindexes_initiated", len(s.Config.IndexNames)))

// Reset the timer after the insert loop has finished so it's
8 changes: 4 additions & 4 deletions internal/notifier/notifier.go
@@ -238,7 +238,7 @@ func (n *Notifier) listenerClose(ctx context.Context, skipLock bool) {
return
}

- n.Logger.InfoContext(ctx, n.Name+": Listener closing")
+ n.Logger.DebugContext(ctx, n.Name+": Listener closing")
if err := n.listener.Close(ctx); err != nil {
if !errors.Is(err, context.Canceled) {
n.Logger.ErrorContext(ctx, n.Name+": Error closing listener", "err", err)
@@ -263,7 +263,7 @@ func (n *Notifier) listenerConnect(ctx context.Context, skipLock bool) error {
ctx, cancel := context.WithTimeout(ctx, listenerTimeout)
defer cancel()

- n.Logger.InfoContext(ctx, n.Name+": Listener connecting")
+ n.Logger.DebugContext(ctx, n.Name+": Listener connecting")
if err := n.listener.Connect(ctx); err != nil {
if !errors.Is(err, context.Canceled) {
n.Logger.ErrorContext(ctx, n.Name+": Error connecting listener", "err", err)
@@ -285,7 +285,7 @@ func (n *Notifier) listenerListen(ctx context.Context, topic NotificationTopic)
ctx, cancel := context.WithTimeout(ctx, listenerTimeout)
defer cancel()

- n.Logger.InfoContext(ctx, n.Name+": Listening on topic", "topic", topic)
+ n.Logger.DebugContext(ctx, n.Name+": Listening on topic", "topic", topic)
if err := n.listener.Listen(ctx, string(topic)); err != nil {
return fmt.Errorf("error listening on topic %q: %w", topic, err)
}
@@ -302,7 +302,7 @@ func (n *Notifier) listenerUnlisten(ctx context.Context, topic NotificationTopic
ctx, cancel := context.WithTimeout(ctx, listenerTimeout)
defer cancel()

- n.Logger.InfoContext(ctx, n.Name+": Unlistening on topic", "topic", topic)
+ n.Logger.DebugContext(ctx, n.Name+": Unlistening on topic", "topic", topic)
if err := n.listener.Unlisten(ctx, string(topic)); err != nil {
return fmt.Errorf("error unlistening on topic %q: %w", topic, err)
}
4 changes: 2 additions & 2 deletions job_executor.go
@@ -245,7 +245,7 @@ func (e *jobExecutor) reportResult(ctx context.Context, res *jobExecutorResult)
var snoozeErr *jobSnoozeError

if res.Err != nil && errors.As(res.Err, &snoozeErr) {
- e.Logger.InfoContext(ctx, e.Name+": Job snoozed",
+ e.Logger.DebugContext(ctx, e.Name+": Job snoozed",
slog.Int64("job_id", e.JobRow.ID),
slog.String("job_kind", e.JobRow.Kind),
slog.Duration("duration", snoozeErr.duration),
@@ -300,7 +300,7 @@ func (e *jobExecutor) reportError(ctx context.Context, res *jobExecutorResult) {
switch {
case errors.As(res.Err, &cancelErr):
cancelJob = true
- e.Logger.InfoContext(ctx, e.Name+": Job cancelled explicitly", logAttrs...)
+ e.Logger.DebugContext(ctx, e.Name+": Job cancelled explicitly", logAttrs...)
case res.Err != nil:
e.Logger.ErrorContext(ctx, e.Name+": Job errored", logAttrs...)
case res.PanicVal != nil:
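The two demoted executor lines above correspond to River's public snooze and cancel helpers. A sketch of the worker-side calls that trigger them — `river.JobSnooze` and `river.JobCancel` are River's API; the args and worker types here are made up for illustration:

```go
package main

import (
	"context"
	"errors"
	"time"

	"github.com/riverqueue/river"
)

type PollUpstreamArgs struct {
	Obsolete bool `json:"obsolete"`
}

func (PollUpstreamArgs) Kind() string { return "poll_upstream" }

type PollUpstreamWorker struct {
	river.WorkerDefaults[PollUpstreamArgs]
}

func (w *PollUpstreamWorker) Work(ctx context.Context, job *river.Job[PollUpstreamArgs]) error {
	if job.Args.Obsolete {
		// Surfaces as the executor's "Job cancelled explicitly" line (now debug).
		return river.JobCancel(errors.New("upstream no longer needs this job"))
	}
	// Surfaces as the executor's "Job snoozed" line (now debug) and reschedules.
	return river.JobSnooze(5 * time.Minute)
}
```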
4 changes: 2 additions & 2 deletions producer.go
@@ -257,7 +257,7 @@ func (p *producer) StartWorkContext(fetchCtx, workCtx context.Context) error {
insertSub *notifier.Subscription
)
if p.config.Notifier == nil {
- p.Logger.InfoContext(fetchCtx, p.Name+": No notifier configured; starting in poll mode", "client_id", p.config.ClientID)
+ p.Logger.DebugContext(fetchCtx, p.Name+": No notifier configured; starting in poll mode", "client_id", p.config.ClientID)

go p.pollForSettingChanges(fetchCtx, initiallyPaused)
} else {
@@ -610,7 +610,7 @@ func (p *producer) pollForSettingChanges(ctx context.Context, lastPaused bool) {
Action: action,
Queue: p.config.Queue,
}
- p.Logger.InfoContext(ctx, p.Name+": Queue control state changed from polling",
+ p.Logger.DebugContext(ctx, p.Name+": Queue control state changed from polling",
slog.String("queue", p.config.Queue),
slog.String("action", string(action)),
slog.Bool("paused", shouldBePaused),
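The "Queue control state changed from polling" line fires when a pause or resume flag is picked up by a polling producer. A sketch of the triggering calls, assuming the client's `QueuePause`/`QueueResume` methods (present in River around the time of this PR; the wrapper function is illustrative):

```go
package queuecontrol

import (
	"context"

	"github.com/jackc/pgx/v5"
	"github.com/riverqueue/river"
)

// pauseThenResume toggles queue control. A producer running in poll mode
// (no notifier) notices the change on its next poll and emits the now
// debug-level "Queue control state changed from polling" line.
func pauseThenResume(ctx context.Context, client *river.Client[pgx.Tx]) error {
	if err := client.QueuePause(ctx, "default", nil); err != nil {
		return err
	}
	return client.QueueResume(ctx, "default", nil)
}
```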
4 changes: 2 additions & 2 deletions rivershared/startstoptest/startstoptest_test.go
@@ -34,8 +34,8 @@ func (s *MyService) Start(ctx context.Context) error {
started()
defer stopped()

- s.logger.InfoContext(ctx, "Service started")
- defer s.logger.InfoContext(ctx, "Service stopped")
+ s.logger.DebugContext(ctx, "Service started")
+ defer s.logger.DebugContext(ctx, "Service stopped")

<-ctx.Done()
}()
2 changes: 1 addition & 1 deletion subscription_manager.go
@@ -97,7 +97,7 @@ func (sm *subscriptionManager) logStats(ctx context.Context, svcName string) {
sm.statsMu.Lock()
defer sm.statsMu.Unlock()

- sm.Logger.InfoContext(ctx, svcName+": Job stats (since last stats line)",
+ sm.Logger.DebugContext(ctx, svcName+": Job stats (since last stats line)",
"num_jobs_run", sm.statsNumJobs,
"average_complete_duration", sm.safeDurationAverage(sm.statsAggregate.CompleteDuration, sm.statsNumJobs),
"average_queue_wait_duration", sm.safeDurationAverage(sm.statsAggregate.QueueWaitDuration, sm.statsNumJobs),