From 37150e13fcf35a7146ce131b540c144b82801938 Mon Sep 17 00:00:00 2001
From: PierceGriffiths
Date: Wed, 19 Sep 2018 16:39:21 -0500
Subject: [PATCH 01/10] affinity.c: made improvements to loops

---
 kernel/irq/affinity.c | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index f4f29b9d90ee75..4ee3399dc83158 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -14,7 +14,7 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
 	const struct cpumask *siblmsk;
 	int cpu, sibl;
 
-	for ( ; cpus_per_vec > 0; ) {
+	while ( cpus_per_vec > 0 ) {
 		cpu = cpumask_first(nmsk);
 
 		/* Should not happen, but I'm too lazy to think about it */
@@ -48,8 +48,8 @@ static cpumask_var_t *alloc_node_to_cpumask(void)
 	if (!masks)
 		return NULL;
 
-	for (node = 0; node < nr_node_ids; node++) {
-		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
+	for (node = nr_node_ids; node;) {
+		if (!zalloc_cpumask_var(&masks[--node], GFP_KERNEL))
 			goto out_unwind;
 	}
 
@@ -64,10 +64,10 @@ static cpumask_var_t *alloc_node_to_cpumask(void)
 
 static void free_node_to_cpumask(cpumask_var_t *masks)
 {
-	int node;
+	int node = nr_node_ids;
 
-	for (node = 0; node < nr_node_ids; node++)
-		free_cpumask_var(masks[node]);
+	while (node)
+		free_cpumask_var(masks[--node]);
 
 	kfree(masks);
 }
@@ -185,10 +185,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	 * If there aren't any vectors left after applying the pre/post
 	 * vectors don't bother with assigning affinity.
	 */
-	if (nvecs == affd->pre_vectors + affd->post_vectors)
-		return NULL;
-
-	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+	if (nvecs == affd->pre_vectors + affd->post_vectors ||
+	    !zalloc_cpumask_var(&nmsk, GFP_KERNEL))
 		return NULL;
 
 	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
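
[Note on PATCH 01] Reversing the allocation loop in alloc_node_to_cpumask() stays correct only because the out_unwind path walks back over exactly the slots already filled, index 0 included. A minimal standalone sketch of that allocate-N-or-unwind pattern in plain C; the names (alloc_all, bufs) and the use of calloc() in place of zalloc_cpumask_var() are illustrative, not kernel API:

#include <stdlib.h>

/* Allocate n zeroed buffers, or unwind and return NULL on failure. */
static char **alloc_all(size_t n, size_t bufsz)
{
	char **bufs = calloc(n, sizeof(*bufs));
	size_t i;

	if (!bufs)
		return NULL;

	for (i = 0; i < n; i++) {
		bufs[i] = calloc(1, bufsz);
		if (!bufs[i])
			goto out_unwind;
	}
	return bufs;

out_unwind:
	/* i buffers were filled; while (i--) frees indices i-1 .. 0 */
	while (i--)
		free(bufs[i]);
	free(bufs);
	return NULL;
}

Whichever direction the allocation loop runs, both it and the unwind must still reach index 0; that boundary is what goes wrong in PATCH 04 below.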

From 953c68028b05142832fd3b9b14517895a5fe9581 Mon Sep 17 00:00:00 2001
From: PierceGriffiths
Date: Thu, 20 Sep 2018 18:17:39 -0500
Subject: [PATCH 02/10] Improvements to loops and conditional evaluations

---
 kernel/irq/pm.c        |  5 ++---
 kernel/locking/mutex.c | 23 +++++++----------------
 kernel/power/suspend.c |  7 ++-----
 kernel/sched/cpupri.c  | 11 ++++-------
 4 files changed, 15 insertions(+), 31 deletions(-)

diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index d6961d3c6f9e26..5ec92f64c155ca 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -165,9 +165,8 @@ static void resume_irqs(bool want_early)
 		bool is_early = desc->action &&
 			desc->action->flags & IRQF_EARLY_RESUME;
 
-		if (!is_early && want_early)
-			continue;
-		if (irq_settings_is_nested_thread(desc))
+		if ((!is_early && want_early) ||
+		    irq_settings_is_nested_thread(desc)) 
 			continue;
 
 		raw_spin_lock_irqsave(&desc->lock, flags);

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 3f8a35104285ab..72a46a78aa7f53 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -82,16 +82,13 @@ static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
 	unsigned long owner, curr = (unsigned long)current;
 
 	owner = atomic_long_read(&lock->owner);
-	for (;;) { /* must loop, can race against a flag */
+	while (1) { /* must loop, can race against a flag */
 		unsigned long old, flags = __owner_flags(owner);
 		unsigned long task = owner & ~MUTEX_FLAGS;
 
 		if (task) {
-			if (likely(task != curr))
-				break;
-
-			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
-				break;
+			if (likely(task != curr) || likely(!(flags & MUTEX_FLAG_PICKUP)))
+				break
 
 			flags &= ~MUTEX_FLAG_PICKUP;
 		} else {
@@ -141,20 +138,14 @@ static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
 	unsigned long curr = (unsigned long)current;
 	unsigned long zero = 0UL;
 
-	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
-		return true;
-
-	return false;
+	return atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr);
 }
 
 static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
 {
 	unsigned long curr = (unsigned long)current;
 
-	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
-		return true;
-
-	return false;
+	return atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr;
 }
 
 #endif
@@ -631,7 +622,7 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 		goto fail;
 	}
 
-	for (;;) {
+	while (1) {
 		struct task_struct *owner;
 
 		/* Try to acquire the mutex... */
@@ -972,7 +963,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	waiter.task = current;
 
 	set_current_state(state);
-	for (;;) {
+	while (1) {
 		/*
 		 * Once we hold wait_lock, we're serialized against
 		 * mutex_unlock() handing the lock off to us, do a trylock

diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 5342f6fc022e57..c8c5b68d5cb29a 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -111,7 +111,7 @@ static void s2idle_loop(void)
 {
 	pm_pr_dbg("suspend-to-idle\n");
 
-	for (;;) {
+	while (1) {
 		int error;
 
 		dpm_noirq_begin();
@@ -416,10 +416,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 		goto Platform_early_resume;
 	}
 	error = platform_suspend_prepare_noirq(state);
-	if (error)
-		goto Platform_wake;
-
-	if (suspend_test(TEST_PLATFORM))
+	if (error || suspend_test(TEST_PLATFORM))
 		goto Platform_wake;
 
 	error = disable_nonboot_cpus();

diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index daaadf939ccb1e..e9138182686fbb 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -31,18 +31,15 @@
 /* Convert between a 140 based task->prio, and our 102 based cpupri */
 static int convert_prio(int prio)
 {
-	int cpupri;
 
 	if (prio == CPUPRI_INVALID)
-		cpupri = CPUPRI_INVALID;
+		return CPUPRI_INVALID;
 	else if (prio == MAX_PRIO)
-		cpupri = CPUPRI_IDLE;
+		return CPUPRI_IDLE;
 	else if (prio >= MAX_RT_PRIO)
-		cpupri = CPUPRI_NORMAL;
+		return CPUPRI_NORMAL;
 	else
-		cpupri = MAX_RT_PRIO - prio + 1;
-
-	return cpupri;
+		return MAX_RT_PRIO - prio + 1;
 }
 
 /**
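
[Note on PATCH 02] The rewritten fast paths lean on the fact that a compare-and-swap already produces a usable truth value: try_cmpxchg reports whether the swap happened, and a cmpxchg that returns the old value succeeded exactly when that old value equals what the caller expected. (The combined `break` in __mutex_trylock_or_owner() loses its semicolon here; PATCH 09 restores it.) A standalone C11 sketch of both shapes, with ownership passed in as a plain integer; this is an illustration, not the kernel's implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static atomic_uintptr_t owner;	/* 0 means unlocked */

/* Succeeds iff the lock was free; the CAS result is already a bool. */
static bool trylock_fast(uintptr_t self)
{
	uintptr_t zero = 0;

	return atomic_compare_exchange_strong_explicit(&owner, &zero, self,
			memory_order_acquire, memory_order_relaxed);
}

/*
 * Succeeds iff we still owned the lock. C11 has no cmpxchg that returns
 * the old value, so "old == curr" is emulated via the expected argument.
 */
static bool unlock_fast(uintptr_t self)
{
	uintptr_t expected = self;

	return atomic_compare_exchange_strong_explicit(&owner, &expected, 0,
			memory_order_release, memory_order_relaxed);
}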

From e988a4a2a9e4729e02796b3361c5afa2a620a43d Mon Sep 17 00:00:00 2001
From: PierceGriffiths
Date: Thu, 20 Sep 2018 20:32:13 -0500
Subject: [PATCH 03/10] Cleaned up if statements

---
 kernel/sched/core.c       |  4 ++--
 kernel/sched/cpufreq.c    |  5 +----
 kernel/sched/cpupri.c     | 10 +++-------
 kernel/sched/membarrier.c |  4 +---
 kernel/sched/rt.c         | 31 ++++++++++++-------------------
 5 files changed, 19 insertions(+), 35 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 625bc9897f628b..3a2adafc345214 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -70,7 +70,7 @@ struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 
 	lockdep_assert_held(&p->pi_lock);
 
-	for (;;) {
+	while (1) {
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
@@ -93,7 +93,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 {
 	struct rq *rq;
 
-	for (;;) {
+	while (1) {
 		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);

diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
index 5e54cbcae6735b..a8fd4bd68954a9 100644
--- a/kernel/sched/cpufreq.c
+++ b/kernel/sched/cpufreq.c
@@ -34,10 +34,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
 			void (*func)(struct update_util_data *data, u64 time,
 				     unsigned int flags))
 {
-	if (WARN_ON(!data || !func))
-		return;
-
-	if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
+	if (WARN_ON(!data || !func || per_cpu(cpufreq_update_util_data, cpu)))
 		return;
 
 	data->func = func;

diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index e9138182686fbb..369ec8a408da9d 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -29,9 +29,8 @@
 #include "sched.h"
 
 /* Convert between a 140 based task->prio, and our 102 based cpupri */
-static int convert_prio(int prio)
+static int convert_prio(const int prio)
 {
-
 	if (prio == CPUPRI_INVALID)
 		return CPUPRI_INVALID;
 	else if (prio == MAX_PRIO)
@@ -92,10 +91,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		smp_rmb();
 
 		/* Need to do the rmb for every iteration */
-		if (skip)
-			continue;
-
-		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+		if (skip || cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
 			continue;
 
 		if (lowest_mask) {
@@ -219,7 +215,7 @@ int cpupri_init(struct cpupri *cp)
 	return 0;
 
 cleanup:
-	for (i--; i >= 0; i--)
+	while(--i >= 0)
 		free_cpumask_var(cp->pri_to_cpu[i].mask);
 	return -ENOMEM;
 }

diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 76e0eaf4654e07..b8c446a50b5ede 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -119,11 +119,9 @@ static int membarrier_private_expedited(int flags)
 		if (!(atomic_read(&current->mm->membarrier_state) &
 		    MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
 			return -EPERM;
-	} else {
-		if (!(atomic_read(&current->mm->membarrier_state) &
+	} else if (!(atomic_read(&current->mm->membarrier_state) &
 			MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
 			return -EPERM;
-	}
 
 	if (num_online_cpus() == 1)
 		return 0;

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2e2955a8cf8fe3..acf1b94669ad74 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -142,10 +142,12 @@ void free_rt_sched_group(struct task_group *tg)
 		destroy_rt_bandwidth(&tg->rt_bandwidth);
 
 	for_each_possible_cpu(i) {
-		if (tg->rt_rq)
-			kfree(tg->rt_rq[i]);
-		if (tg->rt_se)
-			kfree(tg->rt_se[i]);
+		/* Don't need to check if tg->rt_rq[i]
+		 * or tg->rt_se[i] are NULL, since kfree(NULL)
+		 * simply performs no operation
+		 */
+		kfree(tg->rt_rq[i]);
+		kfree(tg->rt_se[i]);
 	}
 
 	kfree(tg->rt_rq);
@@ -1015,10 +1017,7 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
 
 	BUG_ON(&rq->rt != rt_rq);
 
-	if (rt_rq->rt_queued)
-		return;
-
-	if (rt_rq_throttled(rt_rq))
+	if (rt_rq->rt_queued || rt_rq_throttled(rt_rq))
 		return;
 
 	if (rt_rq->rt_nr_running) {
@@ -1211,10 +1210,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  */
 static inline bool move_entity(unsigned int flags)
 {
-	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
-		return false;
-
-	return true;
+	return !((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE);
 }
 
 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
@@ -1393,7 +1389,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 
 	/* For anything but wake ups, just return the task_cpu */
 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
-		goto out;
+		return cpu;
 
 	rq = cpu_rq(cpu);
 
@@ -1437,7 +1433,6 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	}
 	rcu_read_unlock();
 
-out:
 	return cpu;
 }
@@ -2518,12 +2513,10 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
 	/*
	 * Disallowing the root group RT runtime is BAD, it would disallow the
	 * kernel creating (and or operating) RT threads.
+	 *
+	 * No period doesn't make any sense.
 	 */
-	if (tg == &root_task_group && rt_runtime == 0)
-		return -EINVAL;
-
-	/* No period doesn't make any sense. */
-	if (rt_period == 0)
+	if ((tg == &root_task_group && !rt_runtime) || !rt_period)
 		return -EINVAL;
 
 	mutex_lock(&rt_constraints_mutex);
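
[Note on PATCH 03] kfree(NULL) is indeed a no-op, but the checks deleted from free_rt_sched_group() guarded the arrays (tg->rt_rq, tg->rt_se), not the elements: when alloc_rt_sched_group() fails partway, those arrays can themselves be NULL, and tg->rt_rq[i] then dereferences a NULL pointer before kfree() is ever reached. A minimal userspace sketch of the hazard, with illustrative names and free() standing in for kfree():

#include <stdlib.h>

struct group {
	int **per_cpu;	/* may be NULL if earlier setup failed */
	int ncpu;
};

static void free_group(struct group *g)
{
	int i;

	for (i = 0; i < g->ncpu; i++) {
		/*
		 * free(g->per_cpu[i]) alone would be wrong here: when
		 * g->per_cpu is NULL, loading the element dereferences a
		 * NULL pointer before free() can shrug it off.
		 */
		if (g->per_cpu)
			free(g->per_cpu[i]);
	}
	free(g->per_cpu);	/* free(NULL) is the harmless case */
}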

From 2de6eb61534725516525153f29da4c7900442863 Mon Sep 17 00:00:00 2001
From: Pierce Griffiths
Date: Fri, 21 Sep 2018 08:23:35 -0500
Subject: [PATCH 04/10] Update affinity.c

Cleaned up previous changes to loops for readability

---
 kernel/irq/affinity.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 4ee3399dc83158..fae07134df9a5f 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -48,8 +48,8 @@ static cpumask_var_t *alloc_node_to_cpumask(void)
 	if (!masks)
 		return NULL;
 
-	for (node = nr_node_ids; node;) {
-		if (!zalloc_cpumask_var(&masks[--node], GFP_KERNEL))
+	for (node = nr_node_ids; --node;) {
+		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
 			goto out_unwind;
 	}
 
@@ -66,8 +66,8 @@ static void free_node_to_cpumask(cpumask_var_t *masks)
 {
 	int node = nr_node_ids;
 
-	while (node)
-		free_cpumask_var(masks[--node]);
+	while (--node)
+		free_cpumask_var(masks[node]);
 
 	kfree(masks);
 }
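
[Note on PATCH 04] Moving the decrement into the loop condition changes the iteration space: `for (node = nr_node_ids; node;)` with `masks[--node]` visits nr_node_ids-1 down to 0, while `for (node = nr_node_ids; --node;)` stops as soon as --node yields 0, so index 0 is never allocated, and the matching `while (--node)` never frees it. A small self-checking comparison (illustrative, compiles standalone):

#include <assert.h>

static int count_v1(int n)	/* PATCH 01 form */
{
	int i, visited = 0;

	for (i = n; i;) {
		--i;		/* touches i == 0 on the last pass */
		visited++;
	}
	return visited;
}

static int count_v2(int n)	/* PATCH 04 form */
{
	int i, visited = 0;

	for (i = n; --i;)	/* loop exits once --i reaches 0 */
		visited++;
	return visited;
}

int main(void)
{
	assert(count_v1(4) == 4);
	assert(count_v2(4) == 3);	/* index 0 was skipped */
	return 0;
}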

From 758f290507e6bd7d5b1570dd6e3a5bc3b2028cf7 Mon Sep 17 00:00:00 2001
From: Pierce Griffiths
Date: Fri, 21 Sep 2018 08:45:28 -0500
Subject: [PATCH 05/10] Update mutex.c

Reverted unnecessary loop changes and added bool casts to previously
modified function returns

---
 kernel/locking/mutex.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 72a46a78aa7f53..fae09bc5e31679 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -82,7 +82,7 @@ static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
 	unsigned long owner, curr = (unsigned long)current;
 
 	owner = atomic_long_read(&lock->owner);
-	while (1) { /* must loop, can race against a flag */
+	for (;;) { /* must loop, can race against a flag */
 		unsigned long old, flags = __owner_flags(owner);
 		unsigned long task = owner & ~MUTEX_FLAGS;
 
@@ -138,14 +138,14 @@ static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
 	unsigned long curr = (unsigned long)current;
 	unsigned long zero = 0UL;
 
-	return atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr);
+	return (bool)atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr);
 }
 
 static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
 {
 	unsigned long curr = (unsigned long)current;
 
-	return atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr;
+	return (bool)(atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr);
 }
 
 #endif
@@ -622,7 +622,7 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 		goto fail;
 	}
 
-	while (1) {
+	for (;;) {
 		struct task_struct *owner;
 
 		/* Try to acquire the mutex... */
@@ -963,7 +963,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	waiter.task = current;
 
 	set_current_state(state);
-	while (1) {
+	for (;;) {
 		/*
 		 * Once we hold wait_lock, we're serialized against
 		 * mutex_unlock() handing the lock off to us, do a trylock
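
[Note on PATCH 05] The added (bool) casts are no-ops: in C, conversion to _Bool maps any nonzero value to 1 (C11 6.3.1.2), and the == operator already evaluates to an int that is 0 or 1, which is why PATCH 09 can drop one of the casts again without changing behavior. A compilable demonstration (illustrative):

#include <assert.h>
#include <stdbool.h>

static bool truthy(unsigned long x)
{
	return x;	/* implicit conversion: any nonzero becomes true */
}

int main(void)
{
	unsigned long owner = 0xdeadbeef;

	assert(truthy(owner) == true);		/* normalized to 1, not 0xef */
	assert((owner == 0xdeadbeef) == 1);	/* == already yields 0 or 1 */
	return 0;
}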

From 11096eb922353009807609e8eb42a9dfa6b60df7 Mon Sep 17 00:00:00 2001
From: Pierce Griffiths
Date: Fri, 21 Sep 2018 08:54:38 -0500
Subject: [PATCH 06/10] Update suspend.c

Reverted unnecessary replacement of for(;;) with while(1)

---
 kernel/power/suspend.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index c8c5b68d5cb29a..b0ec125a22e314 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -111,7 +111,7 @@ static void s2idle_loop(void)
 {
 	pm_pr_dbg("suspend-to-idle\n");
 
-	while (1) {
+	for (;;) {
 		int error;
 
 		dpm_noirq_begin();

From 8d80107d707c7e8ed1d7fdad53b330162db0b2c0 Mon Sep 17 00:00:00 2001
From: Pierce Griffiths
Date: Fri, 21 Sep 2018 09:09:23 -0500
Subject: [PATCH 07/10] Update core.c

Reverted unnecessary changes made to loops
Consolidated an if ... else into an equivalent return statement

---
 kernel/sched/core.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3a2adafc345214..443a1f235cfd3d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -70,7 +70,7 @@ struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 
 	lockdep_assert_held(&p->pi_lock);
 
-	while (1) {
+	for (;;) {
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
@@ -93,7 +93,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 {
 	struct rq *rq;
 
-	while (1) {
+	for (;;) {
 		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
@@ -617,12 +617,8 @@ bool sched_can_stop_tick(struct rq *rq)
 	 * If there are more than one RR tasks, we need the tick to effect the
 	 * actual RR behaviour.
	 */
-	if (rq->rt.rr_nr_running) {
-		if (rq->rt.rr_nr_running == 1)
-			return true;
-		else
-			return false;
-	}
+	if (rq->rt.rr_nr_running)
+		return rq->rt.rr_nr_running == 1;
 
 	/*
	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
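
[Note on PATCH 07] Collapsing the nested if/else in sched_can_stop_tick() to `return rq->rt.rr_nr_running == 1;` is safe only because the two branches returned exact complements of that comparison. A minimal before/after equivalence check (illustrative; the trailing `return true` stands in for the FIFO logic that follows in the real function):

#include <assert.h>
#include <stdbool.h>

static bool can_stop_before(unsigned int rr_nr_running)
{
	if (rr_nr_running) {
		if (rr_nr_running == 1)
			return true;
		else
			return false;
	}
	return true;	/* stand-in for the later FIFO checks */
}

static bool can_stop_after(unsigned int rr_nr_running)
{
	if (rr_nr_running)
		return rr_nr_running == 1;
	return true;	/* stand-in for the later FIFO checks */
}

int main(void)
{
	unsigned int n;

	for (n = 0; n < 100; n++)
		assert(can_stop_before(n) == can_stop_after(n));
	return 0;
}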

From aee07b9a1fb36188b81d7284316e90129ff9c2fc Mon Sep 17 00:00:00 2001
From: PierceGriffiths
Date: Fri, 21 Sep 2018 10:06:21 -0500
Subject: [PATCH 08/10] Removed trailing space at end of line

---
 kernel/irq/pm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 5ec92f64c155ca..2645b6deb04b87 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -166,7 +166,7 @@ static void resume_irqs(bool want_early)
 			desc->action->flags & IRQF_EARLY_RESUME;
 
 		if ((!is_early && want_early) ||
-		    irq_settings_is_nested_thread(desc)) 
+		    irq_settings_is_nested_thread(desc))
 			continue;
 
 		raw_spin_lock_irqsave(&desc->lock, flags);

From d10e7f3b5a0a95c2be687f0bf8906f644dd8ce5b Mon Sep 17 00:00:00 2001
From: PierceGriffiths
Date: Fri, 21 Sep 2018 11:10:22 -0500
Subject: [PATCH 09/10] Added missing semicolon

Made adjustments necessary for patch to meet code style requirements

---
 kernel/irq/affinity.c     | 2 +-
 kernel/locking/mutex.c    | 7 ++++---
 kernel/sched/cpupri.c     | 5 +++--
 kernel/sched/membarrier.c | 3 ++-
 4 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index fae07134df9a5f..0d83cdc39ace78 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -14,7 +14,7 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
 	const struct cpumask *siblmsk;
 	int cpu, sibl;
 
-	while ( cpus_per_vec > 0 ) {
+	while (cpus_per_vec > 0) {
 		cpu = cpumask_first(nmsk);
 
 		/* Should not happen, but I'm too lazy to think about it */

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index fae09bc5e31679..d827a18ec61210 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -87,8 +87,9 @@ static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
 		unsigned long task = owner & ~MUTEX_FLAGS;
 
 		if (task) {
-			if (likely(task != curr) || likely(!(flags & MUTEX_FLAG_PICKUP)))
-				break
+			if (likely(task != curr) ||
+			    likely(!(flags & MUTEX_FLAG_PICKUP)))
+				break;
 
 			flags &= ~MUTEX_FLAG_PICKUP;
 		} else {
@@ -145,7 +146,7 @@ static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
 {
 	unsigned long curr = (unsigned long)current;
 
-	return (bool)(atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr);
+	return atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr;
 }
 
 #endif

diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 369ec8a408da9d..152c133e8247d9 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -91,7 +91,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		smp_rmb();
 
 		/* Need to do the rmb for every iteration */
-		if (skip || cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+		if (skip || cpumask_any_and(&p->cpus_allowed, vec->mask)
+				>= nr_cpu_ids)
 			continue;
 
 		if (lowest_mask) {
@@ -215,7 +216,7 @@ int cpupri_init(struct cpupri *cp)
 	return 0;
 
 cleanup:
-	while(--i >= 0)
+	while (--i >= 0)
 		free_cpumask_var(cp->pri_to_cpu[i].mask);
 	return -ENOMEM;
 }

diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index b8c446a50b5ede..fa75c0530b9c5e 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -120,8 +120,9 @@ static int membarrier_private_expedited(int flags)
 		    MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
 			return -EPERM;
 	} else if (!(atomic_read(&current->mm->membarrier_state) &
-			MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
+			MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)) {
 		return -EPERM;
+	}
 
 	if (num_online_cpus() == 1)
 		return 0;

From 9aa1f1a2e3e655aa6c8e9c0673a1b8a3dc2f7cb7 Mon Sep 17 00:00:00 2001
From: PierceGriffiths
Date: Fri, 21 Sep 2018 11:41:43 -0500
Subject: [PATCH 10/10] Style conformance

---
 kernel/sched/membarrier.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index fa75c0530b9c5e..e123d0b809708f 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -120,8 +120,8 @@ static int membarrier_private_expedited(int flags)
 		    MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
 			return -EPERM;
 	} else if (!(atomic_read(&current->mm->membarrier_state) &
-			MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)) {
-		return -EPERM;
+		   MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)) {
+		return -EPERM;
 	}
 
 	if (num_online_cpus() == 1)