Code cleanup #596

Closed
wants to merge 10 commits into from
14 changes: 6 additions & 8 deletions kernel/irq/affinity.c
@@ -14,7 +14,7 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
const struct cpumask *siblmsk;
int cpu, sibl;

for ( ; cpus_per_vec > 0; ) {
while (cpus_per_vec > 0) {
cpu = cpumask_first(nmsk);

/* Should not happen, but I'm too lazy to think about it */
@@ -48,7 +48,7 @@ static cpumask_var_t *alloc_node_to_cpumask(void)
if (!masks)
return NULL;

for (node = 0; node < nr_node_ids; node++) {
for (node = nr_node_ids; node--; ) {
if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
goto out_unwind;
}
@@ -64,9 +64,9 @@ static cpumask_var_t *alloc_node_to_cpumask(void)

static void free_node_to_cpumask(cpumask_var_t *masks)
{
int node;
int node = nr_node_ids;

for (node = 0; node < nr_node_ids; node++)
while (node--)
free_cpumask_var(masks[node]);
kfree(masks);
}
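
A note on the decrement idiom, since full coverage is the point of a teardown loop: the post-decrement form while (node--) visits every index from nr_node_ids - 1 down to 0, whereas the pre-decrement form while (--node) stops before index 0 and would leave the first entry untouched. A minimal userspace sketch, with nr_node_ids stood in by a small constant:

#include <stdio.h>

int main(void)
{
        int n = 4;              /* stand-in for nr_node_ids */
        int node;

        node = n;
        while (--node)          /* visits 3, 2, 1: index 0 is skipped */
                printf("pre-decrement would free index %d\n", node);

        node = n;
        while (node--)          /* visits 3, 2, 1, 0: full coverage */
                printf("post-decrement frees index %d\n", node);

        return 0;
}

The same concern applies to the allocation loop above: if it walks the node indices in reverse, any error-unwind path has to free exactly the range that was actually allocated.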
@@ -185,10 +185,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
* If there aren't any vectors left after applying the pre/post
* vectors don't bother with assigning affinity.
*/
if (nvecs == affd->pre_vectors + affd->post_vectors)
return NULL;

if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
if (nvecs == affd->pre_vectors + affd->post_vectors ||
!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
return NULL;

if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
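
Folding the two early returns into a single test relies on || short-circuiting: zalloc_cpumask_var() is only attempted when the pre/post-vector check is false, so the early-exit path still allocates nothing. A small userspace illustration of that evaluation order (the check() helper is invented for the demo):

#include <stdbool.h>
#include <stdio.h>

static bool check(const char *name, bool result)
{
        printf("evaluated %s\n", name);   /* trace which tests actually run */
        return result;
}

int main(void)
{
        /*
         * With ||, the right-hand test is skipped once the left one is true,
         * so merging two early returns keeps the original order of side
         * effects on the early-exit path.
         */
        if (check("pre/post vector count", true) || check("allocation", false))
                printf("early return taken; the allocation was never attempted\n");

        return 0;
}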
5 changes: 2 additions & 3 deletions kernel/irq/pm.c
@@ -165,9 +165,8 @@ static void resume_irqs(bool want_early)
bool is_early = desc->action &&
desc->action->flags & IRQF_EARLY_RESUME;

if (!is_early && want_early)
continue;
if (irq_settings_is_nested_thread(desc))
if ((!is_early && want_early) ||
irq_settings_is_nested_thread(desc))
continue;

raw_spin_lock_irqsave(&desc->lock, flags);
16 changes: 4 additions & 12 deletions kernel/locking/mutex.c
@@ -87,10 +87,8 @@ static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
unsigned long task = owner & ~MUTEX_FLAGS;

if (task) {
if (likely(task != curr))
break;

if (likely(!(flags & MUTEX_FLAG_PICKUP)))
if (likely(task != curr) ||
likely(!(flags & MUTEX_FLAG_PICKUP)))
break;

flags &= ~MUTEX_FLAG_PICKUP;
@@ -141,20 +139,14 @@ static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
unsigned long curr = (unsigned long)current;
unsigned long zero = 0UL;

if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
return true;

return false;
return (bool)atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr);
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
unsigned long curr = (unsigned long)current;

if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
return true;

return false;
return atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr;
}
#endif

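
Both fast-path helpers now return the result of the atomic operation directly: atomic_long_try_cmpxchg_acquire() already yields a boolean success flag (and writes the observed value back into its expected argument on failure), and the cmpxchg variant returns the old value for comparison against curr. A rough userspace analogue using C11 atomics; the names and values here are illustrative, not the kernel implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Toy fast path: the lock is free when owner == 0, and acquiring it means
 * installing a non-zero "task" value.
 */
static atomic_long owner;

static bool trylock_fast(long curr)
{
        long zero = 0L;

        /*
         * Returns true and stores curr if owner was 0; on failure it returns
         * false and copies the current owner back into zero.
         */
        return atomic_compare_exchange_strong_explicit(&owner, &zero, curr,
                        memory_order_acquire, memory_order_relaxed);
}

int main(void)
{
        printf("first trylock:  %d\n", trylock_fast(0x1000)); /* 1: acquired */
        printf("second trylock: %d\n", trylock_fast(0x2000)); /* 0: already owned */
        return 0;
}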
5 changes: 1 addition & 4 deletions kernel/power/suspend.c
@@ -416,10 +416,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
goto Platform_early_resume;
}
error = platform_suspend_prepare_noirq(state);
if (error)
goto Platform_wake;

if (suspend_test(TEST_PLATFORM))
if (error || suspend_test(TEST_PLATFORM))
goto Platform_wake;

error = disable_nonboot_cpus();
8 changes: 2 additions & 6 deletions kernel/sched/core.c
@@ -617,12 +617,8 @@ bool sched_can_stop_tick(struct rq *rq)
* If there are more than one RR tasks, we need the tick to effect the
* actual RR behaviour.
*/
if (rq->rt.rr_nr_running) {
if (rq->rt.rr_nr_running == 1)
return true;
else
return false;
}
if (rq->rt.rr_nr_running)
return rq->rt.rr_nr_running == 1;

/*
* If there's no RR tasks, but FIFO tasks, we can skip the tick, no
5 changes: 1 addition & 4 deletions kernel/sched/cpufreq.c
@@ -34,10 +34,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
void (*func)(struct update_util_data *data, u64 time,
unsigned int flags))
{
if (WARN_ON(!data || !func))
return;

if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
if (WARN_ON(!data || !func || per_cpu(cpufreq_update_util_data, cpu)))
return;

data->func = func;
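
One trade-off worth noting with the merged WARN_ON(): the early return behaves the same, but a single warning now covers all three conditions, so the report no longer says whether the problem was a NULL argument or an already-registered hook. A userspace sketch with a stand-in WARN_ON() macro (the real kernel macro is more involved):

#include <stdio.h>

/*
 * Stand-in for WARN_ON(): prints the failing expression and line, then
 * yields the condition so it can sit inside an if (). Uses a GNU statement
 * expression, as the kernel headers do.
 */
#define WARN_ON(cond) ({                                                \
        int __c = !!(cond);                                             \
        if (__c)                                                        \
                fprintf(stderr, "WARN at line %d: %s\n", __LINE__, #cond); \
        __c;                                                            \
})

int main(void)
{
        void *data = NULL;
        void *func = (void *)0x1;
        void *registered = NULL;

        /*
         * The merged check still returns early on bad input, but the message
         * names the whole expression rather than the specific test that
         * tripped.
         */
        if (WARN_ON(!data || !func || registered))
                return 1;

        return 0;
}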
22 changes: 8 additions & 14 deletions kernel/sched/cpupri.c
@@ -29,20 +29,16 @@
#include "sched.h"

/* Convert between a 140 based task->prio, and our 102 based cpupri */
static int convert_prio(int prio)
static int convert_prio(const int prio)
{
int cpupri;

if (prio == CPUPRI_INVALID)
cpupri = CPUPRI_INVALID;
return CPUPRI_INVALID;
else if (prio == MAX_PRIO)
cpupri = CPUPRI_IDLE;
return CPUPRI_IDLE;
else if (prio >= MAX_RT_PRIO)
cpupri = CPUPRI_NORMAL;
return CPUPRI_NORMAL;
else
cpupri = MAX_RT_PRIO - prio + 1;

return cpupri;
return MAX_RT_PRIO - prio + 1;
}

/**
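
The comment above describes folding the 140-slot task priority space into the 102-slot cpupri space. Assuming the usual values of the constants involved (MAX_RT_PRIO = 100, MAX_PRIO = 140, CPUPRI_INVALID = -1, CPUPRI_IDLE = 0, CPUPRI_NORMAL = 1; treated as illustrative here rather than quoted from the tree), the mapping works out as in this standalone sketch:

#include <stdio.h>

/* Assumed values, for illustration only. */
#define MAX_RT_PRIO     100
#define MAX_PRIO        (MAX_RT_PRIO + 40)   /* 140 */
#define CPUPRI_INVALID  (-1)
#define CPUPRI_IDLE     0
#define CPUPRI_NORMAL   1

static int convert_prio(const int prio)
{
        if (prio == CPUPRI_INVALID)
                return CPUPRI_INVALID;
        else if (prio == MAX_PRIO)
                return CPUPRI_IDLE;
        else if (prio >= MAX_RT_PRIO)
                return CPUPRI_NORMAL;
        else
                return MAX_RT_PRIO - prio + 1;
}

int main(void)
{
        /*
         * prio 0 (highest RT) -> 101, prio 99 (lowest RT) -> 2,
         * prio 100..139 (normal tasks) -> 1, prio 140 (idle) -> 0.
         */
        printf("%d %d %d %d\n", convert_prio(0), convert_prio(99),
               convert_prio(120), convert_prio(MAX_PRIO));
        return 0;
}

RT priorities 0..99 land on cpupri values 101..2, all normal tasks collapse onto CPUPRI_NORMAL, and idle gets its own slot, which is where the 102-entry figure comes from.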
@@ -95,10 +91,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
smp_rmb();

/* Need to do the rmb for every iteration */
if (skip)
continue;

if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
if (skip || cpumask_any_and(&p->cpus_allowed, vec->mask)
>= nr_cpu_ids)
continue;

if (lowest_mask) {
@@ -222,7 +216,7 @@ int cpupri_init(struct cpupri *cp)
return 0;

cleanup:
for (i--; i >= 0; i--)
while (--i >= 0)
free_cpumask_var(cp->pri_to_cpu[i].mask);
return -ENOMEM;
}
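
In the cleanup path of cpupri_init(), i is left at the index whose allocation failed, so while (--i >= 0) releases exactly the entries that were set up, indices i - 1 down to 0, and nothing else. A small userspace sketch of that unwind pattern (the failure at index 3 is invented for the demo):

#include <stdio.h>

int main(void)
{
        int nr = 5;
        int i;

        /* Pretend the allocation at index 3 fails: entries 0..2 were set up. */
        for (i = 0; i < nr; i++)
                if (i == 3)
                        goto cleanup;

        return 0;

cleanup:
        /* i == 3 here, so this releases exactly indices 2, 1 and 0. */
        while (--i >= 0)
                printf("freeing index %d\n", i);

        return 1;
}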
7 changes: 3 additions & 4 deletions kernel/sched/membarrier.c
@@ -119,10 +119,9 @@ static int membarrier_private_expedited(int flags)
if (!(atomic_read(&current->mm->membarrier_state) &
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
return -EPERM;
} else {
if (!(atomic_read(&current->mm->membarrier_state) &
MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
return -EPERM;
} else if (!(atomic_read(&current->mm->membarrier_state) &
MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)) {
return -EPERM;
}

if (num_online_cpus() == 1)
31 changes: 12 additions & 19 deletions kernel/sched/rt.c
@@ -142,10 +142,12 @@ void free_rt_sched_group(struct task_group *tg)
destroy_rt_bandwidth(&tg->rt_bandwidth);

for_each_possible_cpu(i) {
if (tg->rt_rq)
kfree(tg->rt_rq[i]);
if (tg->rt_se)
kfree(tg->rt_se[i]);
/*
 * Don't need to check if tg->rt_rq[i] or tg->rt_se[i] are NULL,
 * since kfree(NULL) simply performs no operation.
 */
kfree(tg->rt_rq[i]);
kfree(tg->rt_se[i]);
}

kfree(tg->rt_rq);
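
kfree(NULL), like free(NULL) in ISO C, is defined to do nothing, which is what the new comment relies on for the per-CPU entries. The guard that matters separately is the one on the array pointer itself: evaluating tg->rt_rq[i] while tg->rt_rq is NULL dereferences a NULL pointer before kfree() is ever reached, and kfree(NULL) offers no protection against that. A userspace sketch of the distinction, using free():

#include <stdlib.h>

int main(void)
{
        int *elem = NULL;
        int **table = NULL;

        /*
         * free(NULL), like kfree(NULL), is defined to do nothing, so a NULL
         * element needs no guard before being freed.
         */
        free(elem);

        /*
         * Indexing a NULL table is a different situation: table[0] would
         * dereference the NULL pointer before free() ever saw it, so the
         * guard on the table itself still has a job to do.
         */
        if (table)
                free(table[0]);

        return 0;
}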
@@ -1015,10 +1017,7 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)

BUG_ON(&rq->rt != rt_rq);

if (rt_rq->rt_queued)
return;

if (rt_rq_throttled(rt_rq))
if (rt_rq->rt_queued || rt_rq_throttled(rt_rq))
return;

if (rt_rq->rt_nr_running) {
@@ -1211,10 +1210,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
*/
static inline bool move_entity(unsigned int flags)
{
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
return false;

return true;
return !((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE);
}

static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
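
move_entity() now returns the negated comparison directly; the only flag combination that suppresses the move is DEQUEUE_SAVE set with DEQUEUE_MOVE clear. A standalone sketch of the four cases, with made-up flag values (the kernel defines its own):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag values, not the kernel's definitions. */
#define DEQUEUE_SAVE    0x02
#define DEQUEUE_MOVE    0x04

static bool move_entity(unsigned int flags)
{
        /* Only the combination "SAVE set, MOVE clear" suppresses the move. */
        return (flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE;
}

int main(void)
{
        printf("%d\n", move_entity(0));                            /* 1 */
        printf("%d\n", move_entity(DEQUEUE_SAVE));                 /* 0 */
        printf("%d\n", move_entity(DEQUEUE_MOVE));                 /* 1 */
        printf("%d\n", move_entity(DEQUEUE_SAVE | DEQUEUE_MOVE));  /* 1 */
        return 0;
}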
@@ -1393,7 +1389,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)

/* For anything but wake ups, just return the task_cpu */
if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
goto out;
return cpu;

rq = cpu_rq(cpu);

@@ -1437,7 +1433,6 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
}
rcu_read_unlock();

out:
return cpu;
}

@@ -2518,12 +2513,10 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
/*
* Disallowing the root group RT runtime is BAD, it would disallow the
* kernel creating (and or operating) RT threads.
*
* No period doesn't make any sense.
*/
if (tg == &root_task_group && rt_runtime == 0)
return -EINVAL;

/* No period doesn't make any sense. */
if (rt_period == 0)
if ((tg == &root_task_group && !rt_runtime) || !rt_period)
return -EINVAL;

mutex_lock(&rt_constraints_mutex);