diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index f4f29b9d90ee75..0d83cdc39ace78 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -14,7 +14,7 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
 	const struct cpumask *siblmsk;
 	int cpu, sibl;
 
-	for ( ; cpus_per_vec > 0; ) {
+	while (cpus_per_vec > 0) {
 		cpu = cpumask_first(nmsk);
 
 		/* Should not happen, but I'm too lazy to think about it */
@@ -48,7 +48,7 @@ static cpumask_var_t *alloc_node_to_cpumask(void)
 	if (!masks)
 		return NULL;
 
-	for (node = 0; node < nr_node_ids; node++) {
+	for (node = nr_node_ids; node--; ) {
 		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
 			goto out_unwind;
 	}
@@ -64,9 +64,9 @@ static cpumask_var_t *alloc_node_to_cpumask(void)
 
 static void free_node_to_cpumask(cpumask_var_t *masks)
 {
-	int node;
+	int node = nr_node_ids;
 
-	for (node = 0; node < nr_node_ids; node++)
+	while (node--)
 		free_cpumask_var(masks[node]);
 	kfree(masks);
 }
@@ -185,10 +185,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	 * If there aren't any vectors left after applying the pre/post
 	 * vectors don't bother with assigning affinity.
 	 */
-	if (nvecs == affd->pre_vectors + affd->post_vectors)
-		return NULL;
-
-	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+	if (nvecs == affd->pre_vectors + affd->post_vectors ||
+	    !zalloc_cpumask_var(&nmsk, GFP_KERNEL))
 		return NULL;
 
 	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index d6961d3c6f9e26..2645b6deb04b87 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -165,9 +165,8 @@ static void resume_irqs(bool want_early)
 		bool is_early = desc->action &&
 			desc->action->flags & IRQF_EARLY_RESUME;
 
-		if (!is_early && want_early)
-			continue;
-		if (irq_settings_is_nested_thread(desc))
+		if ((!is_early && want_early) ||
+		    irq_settings_is_nested_thread(desc))
 			continue;
 
 		raw_spin_lock_irqsave(&desc->lock, flags);
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 3f8a35104285ab..d827a18ec61210 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -87,10 +87,8 @@ static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
 		unsigned long task = owner & ~MUTEX_FLAGS;
 
 		if (task) {
-			if (likely(task != curr))
-				break;
-
-			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
+			if (likely(task != curr) ||
+			    likely(!(flags & MUTEX_FLAG_PICKUP)))
 				break;
 
 			flags &= ~MUTEX_FLAG_PICKUP;
@@ -141,20 +139,14 @@ static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
 	unsigned long curr = (unsigned long)current;
 	unsigned long zero = 0UL;
 
-	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
-		return true;
-
-	return false;
+	return atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr);
 }
 
 static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
 {
 	unsigned long curr = (unsigned long)current;
 
-	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
-		return true;
-
-	return false;
+	return atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr;
 }
 
 #endif
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 5342f6fc022e57..b0ec125a22e314 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -416,10 +416,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 		goto Platform_early_resume;
 	}
 	error = platform_suspend_prepare_noirq(state);
-	if (error)
-		goto Platform_wake;
-
-	if (suspend_test(TEST_PLATFORM))
+	if (error || suspend_test(TEST_PLATFORM))
 		goto Platform_wake;
 
 	error = disable_nonboot_cpus();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 625bc9897f628b..443a1f235cfd3d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -617,12 +617,8 @@ bool sched_can_stop_tick(struct rq *rq)
 	 * If there are more than one RR tasks, we need the tick to effect the
 	 * actual RR behaviour.
 	 */
-	if (rq->rt.rr_nr_running) {
-		if (rq->rt.rr_nr_running == 1)
-			return true;
-		else
-			return false;
-	}
+	if (rq->rt.rr_nr_running)
+		return rq->rt.rr_nr_running == 1;
 
 	/*
 	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
index 5e54cbcae6735b..a8fd4bd68954a9 100644
--- a/kernel/sched/cpufreq.c
+++ b/kernel/sched/cpufreq.c
@@ -34,10 +34,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
 			void (*func)(struct update_util_data *data, u64 time,
 				     unsigned int flags))
 {
-	if (WARN_ON(!data || !func))
-		return;
-
-	if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
+	if (WARN_ON(!data || !func || per_cpu(cpufreq_update_util_data, cpu)))
 		return;
 
 	data->func = func;
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index daaadf939ccb1e..152c133e8247d9 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -29,20 +29,16 @@
 #include "sched.h"
 
 /* Convert between a 140 based task->prio, and our 102 based cpupri */
-static int convert_prio(int prio)
+static int convert_prio(const int prio)
 {
-	int cpupri;
-
 	if (prio == CPUPRI_INVALID)
-		cpupri = CPUPRI_INVALID;
+		return CPUPRI_INVALID;
 	else if (prio == MAX_PRIO)
-		cpupri = CPUPRI_IDLE;
+		return CPUPRI_IDLE;
 	else if (prio >= MAX_RT_PRIO)
-		cpupri = CPUPRI_NORMAL;
+		return CPUPRI_NORMAL;
 	else
-		cpupri = MAX_RT_PRIO - prio + 1;
-
-	return cpupri;
+		return MAX_RT_PRIO - prio + 1;
 }
 
 /**
@@ -95,10 +91,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		smp_rmb();
 
 		/* Need to do the rmb for every iteration */
-		if (skip)
-			continue;
-
-		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+		if (skip ||
+		    cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
 			continue;
 
 		if (lowest_mask) {
@@ -222,7 +216,7 @@ int cpupri_init(struct cpupri *cp)
 	return 0;
 
 cleanup:
-	for (i--; i >= 0; i--)
+	while (--i >= 0)
 		free_cpumask_var(cp->pri_to_cpu[i].mask);
 	return -ENOMEM;
 }
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 76e0eaf4654e07..e123d0b809708f 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -119,10 +119,9 @@ static int membarrier_private_expedited(int flags)
 		if (!(atomic_read(&current->mm->membarrier_state) &
 		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
 			return -EPERM;
-	} else {
-		if (!(atomic_read(&current->mm->membarrier_state) &
-		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
-			return -EPERM;
+	} else if (!(atomic_read(&current->mm->membarrier_state) &
+		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)) {
+		return -EPERM;
 	}
 
 	if (num_online_cpus() == 1)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2e2955a8cf8fe3..acf1b94669ad74 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -142,10 +142,12 @@ void free_rt_sched_group(struct task_group *tg)
 		destroy_rt_bandwidth(&tg->rt_bandwidth);
 
 	for_each_possible_cpu(i) {
-		if (tg->rt_rq)
-			kfree(tg->rt_rq[i]);
-		if (tg->rt_se)
-			kfree(tg->rt_se[i]);
+		/* kfree(NULL) is a no-op, but the tg->rt_rq and tg->rt_se
+		 * arrays themselves may be NULL if allocation failed. */
+		if (tg->rt_rq)
+			kfree(tg->rt_rq[i]);
+		if (tg->rt_se)
+			kfree(tg->rt_se[i]);
 	}
 
 	kfree(tg->rt_rq);
@@ -1015,10 +1017,7 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
 
 	BUG_ON(&rq->rt != rt_rq);
 
-	if (rt_rq->rt_queued)
-		return;
-
-	if (rt_rq_throttled(rt_rq))
+	if (rt_rq->rt_queued || rt_rq_throttled(rt_rq))
 		return;
 
 	if (rt_rq->rt_nr_running) {
@@ -1211,10 +1210,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  */
 static inline bool move_entity(unsigned int flags)
 {
-	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
-		return false;
-
-	return true;
+	return (flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE;
 }
 
 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
@@ -1393,7 +1389,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 
 	/* For anything but wake ups, just return the task_cpu */
 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
-		goto out;
+		return cpu;
 
 	rq = cpu_rq(cpu);
 
@@ -1437,7 +1433,6 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	}
 	rcu_read_unlock();
 
-out:
 	return cpu;
 }
 
@@ -2518,12 +2513,10 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
 	/*
 	 * Disallowing the root group RT runtime is BAD, it would disallow the
 	 * kernel creating (and or operating) RT threads.
+	 *
+	 * No period doesn't make any sense.
 	 */
-	if (tg == &root_task_group && rt_runtime == 0)
-		return -EINVAL;
-
-	/* No period doesn't make any sense. */
-	if (rt_period == 0)
+	if ((tg == &root_task_group && !rt_runtime) || !rt_period)
 		return -EINVAL;
 
 	mutex_lock(&rt_constraints_mutex);