change nxsched_islocked_global to nxsched_islocked_tcb #13716

Merged: 2 commits merged on Oct 5, 2024

Changes from all commits
2 changes: 1 addition & 1 deletion include/nuttx/init.h
@@ -89,7 +89,7 @@ extern "C"
* hardware resources may not yet be available to the OS-internal logic.
*/

- EXTERN uint8_t g_nx_initstate; /* See enum nx_initstate_e */
+ EXTERN volatile uint8_t g_nx_initstate; /* See enum nx_initstate_e */

/****************************************************************************
* Public Function Prototypes
7 changes: 7 additions & 0 deletions sched/init/nx_smpstart.c
@@ -35,6 +35,7 @@
#include <nuttx/kmalloc.h>
#include <nuttx/sched.h>
#include <nuttx/sched_note.h>
+ #include <nuttx/init.h>

#include "group/group.h"
#include "sched/sched.h"
@@ -74,6 +75,12 @@ void nx_idle_trampoline(void)
sched_note_start(tcb);
#endif

+ /* wait until cpu0 in idle() */
+
+ while (!OSINIT_IDLELOOP());
+
+ sched_unlock();

/* Enter the IDLE loop */

sinfo("CPU%d: Beginning Idle Loop\n", this_cpu());
10 changes: 2 additions & 8 deletions sched/init/nx_start.c
@@ -195,7 +195,7 @@ struct tasklist_s g_tasklisttable[NUM_TASK_STATES];
* hardware resources may not yet be available to the kernel logic.
*/

- uint8_t g_nx_initstate; /* See enum nx_initstate_e */
+ volatile uint8_t g_nx_initstate; /* See enum nx_initstate_e */

/****************************************************************************
* Private Data
@@ -361,6 +361,7 @@ static void idle_task_initialize(void)

tcb->pid = i;
tcb->task_state = TSTATE_TASK_RUNNING;
+ tcb->lockcount = 1;

/* Set the entry point. This is only for debug purposes. NOTE: that
* the start_t entry point is not saved. That is acceptable, however,
@@ -628,13 +629,6 @@ void nx_start(void)

task_initialize();

- /* Disables context switching because we need take the memory manager
- * semaphore on this CPU so that it will not be available on the other
- * CPUs until we have finished initialization.
- */
-
- sched_lock();

/* Initialize the instrument function */

instrument_initialize();
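Read together with the nx_smpstart.c hunk above, these nx_start.c changes replace the explicit sched_lock() during bring-up with a per-CPU idle lockcount: idle_task_initialize() now creates every idle TCB with lockcount = 1, and a secondary CPU only drops that count after CPU0 has reached its idle loop. Below is a condensed sketch of the secondary-CPU path, assembled from the two hunks purely for illustration (the function name is hypothetical; it is not code from either file):

/* Illustrative sketch: SMP bring-up after this change. */

void smp_bringup_sketch(void)
{
  FAR struct tcb_s *tcb = this_task();   /* this CPU's idle task */

  /* idle_task_initialize() already set tcb->lockcount = 1, so
   * nxsched_islocked_tcb(tcb) is true and preemption is locked.
   */

  DEBUGASSERT(tcb->lockcount == 1);

  /* Spin until CPU0 reports that it is in its idle loop; g_nx_initstate
   * is now volatile, so this polling read is not optimized away.
   */

  while (!OSINIT_IDLELOOP());

  /* Drop the initial lock; tasks queued on g_pendingtasks can now be
   * scheduled on this CPU.
   */

  sched_unlock();
}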
10 changes: 2 additions & 8 deletions sched/sched/sched.h
@@ -297,9 +297,6 @@ extern volatile clock_t g_cpuload_total;
*/

#ifdef CONFIG_SMP
- /* Used to keep track of which CPU(s) hold the IRQ lock. */
-
- extern volatile cpu_set_t g_cpu_lockset;

/* This is the spinlock that enforces critical sections when interrupts are
* disabled.
@@ -406,16 +403,13 @@ static inline_function FAR struct tcb_s *this_task(void)
int nxsched_select_cpu(cpu_set_t affinity);
int nxsched_pause_cpu(FAR struct tcb_s *tcb);
void nxsched_process_delivered(int cpu);

- # define nxsched_islocked_global() (g_cpu_lockset != 0)
- # define nxsched_islocked_tcb(tcb) nxsched_islocked_global()

#else
# define nxsched_select_cpu(a) (0)
# define nxsched_pause_cpu(t) (-38) /* -ENOSYS */
- # define nxsched_islocked_tcb(tcb) ((tcb)->lockcount > 0)
#endif

+ #define nxsched_islocked_tcb(tcb) ((tcb)->lockcount > 0)

/* CPU load measurement support */

#if defined(CONFIG_SCHED_CPULOAD_SYSCLK) || \
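With this change the check is uniform across SMP and non-SMP builds: a CPU counts as scheduler-locked exactly when the task currently running on it has a non-zero lockcount. A minimal usage sketch (illustrative only, mirroring the callers updated below):

/* Illustrative caller: decide whether a context switch may happen now. */

FAR struct tcb_s *rtcb = this_task();   /* task running on this CPU */

if (nxsched_islocked_tcb(rtcb))
  {
    /* Preemption is locked on this CPU: defer the newly ready task by
     * queueing it on g_pendingtasks instead of switching immediately.
     */
  }
else
  {
    /* Preemption is enabled: a higher-priority task may run at once. */
  }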
10 changes: 1 addition & 9 deletions sched/sched/sched_addreadytorun.c
@@ -194,7 +194,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
* situation.
*/

- if (nxsched_islocked_global())
+ if (nxsched_islocked_tcb(this_task()))
{
/* Add the new ready-to-run task to the g_pendingtasks task list for
* now.
@@ -275,14 +275,6 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
btcb->task_state = TSTATE_TASK_RUNNING;

doswitch = true;

- /* Resume scheduling lock */
-
- DEBUGASSERT(g_cpu_lockset == 0);
- if (btcb->lockcount > 0)
- {
- g_cpu_lockset |= (1 << cpu);
- }
}

return doswitch;
54 changes: 1 addition & 53 deletions sched/sched/sched_lock.c
@@ -42,30 +42,6 @@
* Public Data
****************************************************************************/

- /* Pre-emption is disabled via the interface sched_lock(). sched_lock()
- * works by preventing context switches from the currently executing tasks.
- * This prevents other tasks from running (without disabling interrupts) and
- * gives the currently executing task exclusive access to the (single) CPU
- * resources. Thus, sched_lock() and its companion, sched_unlock(), are
- * used to implement some critical sections.
- *
- * In the single CPU case, pre-emption is disabled using a simple lockcount
- * in the TCB. When the scheduling is locked, the lockcount is incremented;
- * when the scheduler is unlocked, the lockcount is decremented. If the
- * lockcount for the task at the head of the g_readytorun list has a
- * lockcount > 0, then pre-emption is disabled.
- *
- * No special protection is required since only the executing task can
- * modify its lockcount.
- */
-
- #ifdef CONFIG_SMP
- /* Used to keep track of which CPU(s) hold the IRQ lock. */
-
- volatile cpu_set_t g_cpu_lockset;
-
- #endif /* CONFIG_SMP */

/****************************************************************************
* Public Functions
****************************************************************************/
@@ -93,7 +69,6 @@ volatile cpu_set_t g_cpu_lockset;
int sched_lock(void)
{
FAR struct tcb_s *rtcb;
- int cpu;

/* If the CPU supports suppression of interprocessor interrupts, then
* simple disabling interrupts will provide sufficient protection for
@@ -118,36 +93,9 @@ int sched_lock(void)
DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);

flags = enter_critical_section();
- cpu = this_cpu();

- /* We must hold the lock on this CPU before we increment the lockcount
- * for the first time. Holding the lock is sufficient to lockout
- * context switching.
- */
-
- if (rtcb->lockcount == 0)
- {
- /* We don't have the scheduler locked. But logic running on a
- * different CPU may have the scheduler locked. It is not
- * possible for some other task on this CPU to have the scheduler
- * locked (or we would not be executing!).
- */
-
- DEBUGASSERT((g_cpu_lockset & (1 << cpu)) == 0);
- g_cpu_lockset |= (1 << cpu);
- }
- else
- {
- /* If this thread already has the scheduler locked, then
- * g_cpu_lockset should indicate that the scheduler is locked
- * and g_cpu_lockset should include the bit setting for this CPU.
- */
-
- DEBUGASSERT((g_cpu_lockset & (1 << cpu)) != 0);
- }

/* A counter is used to support locking. This allows nested lock
- * operations on this thread (on any CPU)
+ * operations on this thread
*/

rtcb->lockcount++;
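With the g_cpu_lockset bookkeeping gone, sched_lock() reduces to incrementing the caller's lockcount inside a critical section, and nesting simply counts up and down. A small illustration of the nesting behaviour implied by the comment above (a sketch, not code from this file):

/* Nested locking: only the final sched_unlock() re-enables preemption. */

sched_lock();       /* rtcb->lockcount: 0 -> 1, scheduler locked     */
sched_lock();       /* rtcb->lockcount: 1 -> 2, still locked         */

DEBUGASSERT(nxsched_islocked_tcb(this_task()));

sched_unlock();     /* rtcb->lockcount: 2 -> 1, still locked         */
sched_unlock();     /* rtcb->lockcount: 1 -> 0, preemption enabled   */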
4 changes: 2 additions & 2 deletions sched/sched/sched_mergepending.c
@@ -199,7 +199,7 @@ bool nxsched_merge_pending(void)
* some CPU other than this one is in a critical section.
*/

- if (!nxsched_islocked_global())
+ if (!nxsched_islocked_tcb(this_task()))
{
/* Find the CPU that is executing the lowest priority task */

@@ -237,7 +237,7 @@ bool nxsched_merge_pending(void)
* Check if that happened.
*/

- if (nxsched_islocked_global())
+ if (nxsched_islocked_tcb(this_task()))
{
/* Yes.. then we may have incorrectly placed some TCBs in the
* g_readytorun list (unlikely, but possible). We will have to
18 changes: 7 additions & 11 deletions sched/sched/sched_process_delivered.c
@@ -84,9 +84,10 @@ void nxsched_process_delivered(int cpu)
g_cpu_irqset |= (1 << cpu);
}

+ tcb = current_task(cpu);

if (g_delivertasks[cpu] == NULL)
{
- tcb = current_task(cpu);
if (tcb->irqcount <= 0)
{
cpu_irqlock_clear();
@@ -95,13 +96,12 @@
return;
}

- if (nxsched_islocked_global())
+ if (nxsched_islocked_tcb(tcb))
{
btcb = g_delivertasks[cpu];
g_delivertasks[cpu] = NULL;
nxsched_add_prioritized(btcb, &g_pendingtasks);
btcb->task_state = TSTATE_TASK_PENDING;
- tcb = current_task(cpu);
if (tcb->irqcount <= 0)
{
cpu_irqlock_clear();
@@ -111,29 +111,25 @@
}

btcb = g_delivertasks[cpu];
- tasklist = &g_assignedtasks[cpu];

- for (next = (FAR struct tcb_s *)tasklist->head;
- (next && btcb->sched_priority <= next->sched_priority);
+ for (next = tcb; btcb->sched_priority <= next->sched_priority;
next = next->flink);

+ DEBUGASSERT(next);

prev = next->blink;
if (prev == NULL)
{
/* Special case: Insert at the head of the list */

+ tasklist = &g_assignedtasks[cpu];
dq_addfirst_nonempty((FAR dq_entry_t *)btcb, tasklist);
btcb->cpu = cpu;
btcb->task_state = TSTATE_TASK_RUNNING;

- DEBUGASSERT(btcb->flink != NULL);
+ DEBUGASSERT(next == btcb->flink);
next->task_state = TSTATE_TASK_ASSIGNED;

- if (btcb->lockcount > 0)
- {
- g_cpu_lockset |= (1 << cpu);
- }
}
else
{
17 changes: 0 additions & 17 deletions sched/sched/sched_removereadytorun.c
@@ -262,23 +262,6 @@ void nxsched_remove_running(FAR struct tcb_s *tcb)
nxttcb = rtrtcb;
}

- /* Will pre-emption be disabled after the switch? If the lockcount is
- * greater than zero, then this task/this CPU holds the scheduler lock.
- */
-
- if (nxttcb->lockcount > 0)
- {
- /* Yes... make sure that scheduling logic knows about this */
-
- g_cpu_lockset |= (1 << cpu);
- }
- else
- {
- /* No.. we may need to perform release our hold on the lock. */
-
- g_cpu_lockset &= ~(1 << cpu);
- }

/* NOTE: If the task runs on another CPU(cpu), adjusting global IRQ
* controls will be done in the pause handler on the new CPU(cpu).
* If the task is scheduled on this CPU(me), do nothing because
2 changes: 1 addition & 1 deletion sched/sched/sched_setpriority.c
@@ -70,7 +70,7 @@ static FAR struct tcb_s *nxsched_nexttcb(FAR struct tcb_s *tcb)
* then use the 'nxttcb' which will probably be the IDLE thread.
*/

- if (!nxsched_islocked_global())
+ if (!nxsched_islocked_tcb(this_task()))
{
/* Search for the highest priority task that can run on tcb->cpu. */

25 changes: 8 additions & 17 deletions sched/sched/sched_unlock.c
@@ -77,12 +77,11 @@ int sched_unlock(void)
irqstate_t flags = enter_critical_section();
int cpu = this_cpu();

+ DEBUGASSERT(rtcb->lockcount > 0);

/* Decrement the preemption lock counter */

- if (rtcb->lockcount > 0)
- {
- rtcb->lockcount--;
- }
+ rtcb->lockcount--;

/* Check if the lock counter has decremented to zero. If so,
* then pre-emption has been re-enabled.
@@ -103,14 +102,6 @@

rtcb->lockcount = 0;

- /* The lockcount has decremented to zero and we need to perform
- * release our hold on the lock.
- */
-
- DEBUGASSERT((g_cpu_lockset & (1 << cpu)) != 0);
-
- g_cpu_lockset &= ~(1 << cpu);

/* Release any ready-to-run tasks that have collected in
* g_pendingtasks.
*
@@ -137,7 +128,7 @@
* BEFORE it clears IRQ lock.
*/

- if (!nxsched_islocked_global() &&
+ if (!nxsched_islocked_tcb(rtcb) &&
list_pendingtasks()->head != NULL)
{
if (nxsched_merge_pending())
@@ -211,6 +202,7 @@ int sched_unlock(void)
#endif
}

+ UNUSED(cpu);
leave_critical_section(flags);
}

@@ -234,12 +226,11 @@

irqstate_t flags = enter_critical_section();

+ DEBUGASSERT(rtcb->lockcount > 0);

/* Decrement the preemption lock counter */

- if (rtcb->lockcount > 0)
- {
- rtcb->lockcount--;
- }
+ rtcb->lockcount--;

/* Check if the lock counter has decremented to zero. If so,
* then pre-emption has been re-enabled.
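The unlock path keeps its existing shape: only when the count actually reaches zero are the tasks parked on g_pendingtasks merged back, and the check now consults the caller's own TCB instead of a global CPU mask. A compressed sketch of that path (illustrative and simplified from the hunks above; the context-switch and SMP details are omitted, and the function name is hypothetical):

/* Compressed view of sched_unlock() once the lock count reaches zero. */

int sched_unlock_sketch(void)
{
  FAR struct tcb_s *rtcb = this_task();
  irqstate_t flags = enter_critical_section();

  DEBUGASSERT(rtcb->lockcount > 0);
  rtcb->lockcount--;

  if (rtcb->lockcount <= 0)
    {
      rtcb->lockcount = 0;

      /* Preemption is re-enabled: release tasks that collected on
       * g_pendingtasks while this thread held the scheduler lock.
       */

      if (!nxsched_islocked_tcb(rtcb) && list_pendingtasks()->head != NULL)
        {
          nxsched_merge_pending();
        }
    }

  leave_critical_section(flags);
  return OK;
}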
15 changes: 0 additions & 15 deletions sched/task/task_exit.c
@@ -137,12 +137,6 @@ int nxtask_exit(void)

rtcb->lockcount++;

- #ifdef CONFIG_SMP
- /* Make sure that the system knows about the locked state */
-
- g_cpu_lockset |= (1 << cpu);
- #endif

rtcb->task_state = TSTATE_TASK_READYTORUN;

/* Move the TCB to the specified blocked task list and delete it. Calling
@@ -177,14 +171,5 @@

rtcb->lockcount--;

- #ifdef CONFIG_SMP
- if (rtcb->lockcount == 0)
- {
- /* Make sure that the system knows about the unlocked state */
-
- g_cpu_lockset &= ~(1 << cpu);
- }
- #endif

return ret;
}