Skip to content

Commit

Permalink
Merge branch 'feature/freertos_10.4.3_sync_critical_sections_vs_suspension' into 'master'
Browse files Browse the repository at this point in the history

FreeRTOS(IDF): Resolve critical section (multi-core) vs scheduler suspension (single-core)

Closes IDF-3755

See merge request espressif/esp-idf!21002
  • Loading branch information
Dazza0 committed Nov 21, 2022
2 parents 858c5e2 + 8b98e4e commit 74ed2aa
Show file tree
Hide file tree
Showing 8 changed files with 431 additions and 425 deletions.
108 changes: 41 additions & 67 deletions components/freertos/FreeRTOS-Kernel/event_groups.c
Original file line number Diff line number Diff line change
Expand Up @@ -80,9 +80,7 @@ typedef struct EventGroupDef_t
uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
#endif

#ifdef ESP_PLATFORM
portMUX_TYPE xEventGroupLock; /* Spinlock required for SMP critical sections */
#endif // ESP_PLATFORM
portMUX_TYPE xEventGroupLock; /* Spinlock required for SMP critical sections */
} EventGroup_t;

/*-----------------------------------------------------------*/
Expand Down Expand Up @@ -225,11 +223,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
}
#endif

#ifdef ESP_PLATFORM /* IDF-3755 */
taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
{
uxOriginalBitValue = pxEventBits->uxEventBits;

Expand Down Expand Up @@ -272,12 +266,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
}
}
}
#ifdef ESP_PLATFORM /* IDF-3755 */
taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
xAlreadyYielded = pdFALSE;
#else
xAlreadyYielded = xTaskResumeAll();
#endif // ESP_PLATFORM
xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );

if( xTicksToWait != ( TickType_t ) 0 )
{
Expand Down Expand Up @@ -361,11 +350,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
}
#endif

#ifdef ESP_PLATFORM /* IDF-3755 */
taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
{
const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;

Expand Down Expand Up @@ -433,12 +418,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
}
}
#ifdef ESP_PLATFORM /* IDF-3755 */
taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
xAlreadyYielded = pdFALSE;
#else
xAlreadyYielded = xTaskResumeAll();
#endif // ESP_PLATFORM
xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );

if( xTicksToWait != ( TickType_t ) 0 )
{
Expand Down Expand Up @@ -581,15 +561,14 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,

pxList = &( pxEventBits->xTasksWaitingForBits );
pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */
#ifdef ESP_PLATFORM /* IDF-3755 */
taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );

prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
#if ( configNUM_CORES > 1 )

/* We are about to traverse a task list which is a kernel data structure.
* Thus we need to call vTaskTakeKernelLock() to take the kernel lock. */
vTaskTakeKernelLock();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
#endif /* configNUM_CORES > 1 */
{
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );

Expand Down Expand Up @@ -661,13 +640,11 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
* bit was set in the control word. */
pxEventBits->uxEventBits &= ~uxBitsToClear;
}
#ifdef ESP_PLATFORM /* IDF-3755 */
/* Release the previously taken kernel lock, then release the event group spinlock. */
#if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
vTaskReleaseKernelLock();
taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
#endif /* configNUM_CORES > 1 */
( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );

return pxEventBits->uxEventBits;
}
Expand All @@ -678,53 +655,50 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
EventGroup_t * pxEventBits = xEventGroup;
const List_t * pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );

prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
#if ( configNUM_CORES > 1 )

/* We are about to traverse a task list which is a kernel data structure.
* Thus we need to call vTaskTakeKernelLock() to take the kernel lock. */
vTaskTakeKernelLock();
#endif /* configNUM_CORES > 1 */
{
traceEVENT_GROUP_DELETE( xEventGroup );

/* IDF-3755 */
taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
#ifdef ESP_PLATFORM

/* We are about to traverse a task list which is a kernel data structure.
* Thus we need to call vTaskTakeKernelLock() to take the kernel lock. */
vTaskTakeKernelLock();
#endif

while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
{
/* Unblock the task, returning 0 as the event list is being deleted
* and cannot therefore have any bits set. */
configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
}
}
#if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
vTaskReleaseKernelLock();
#endif /* configNUM_CORES > 1 */
prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );

#ifdef ESP_PLATFORM
/* Release the previously taken kernel lock. */
vTaskReleaseKernelLock();
#endif
taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );

#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
{
/* The event group can only have been allocated dynamically - free
* it again. */
vPortFree( pxEventBits );
}
#elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
{
/* The event group could have been allocated statically or
* dynamically, so check before attempting to free the memory. */
if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
{
/* The event group can only have been allocated dynamically - free
* it again. */
vPortFree( pxEventBits );
}
#elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
else
{
/* The event group could have been allocated statically or
* dynamically, so check before attempting to free the memory. */
if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
{
vPortFree( pxEventBits );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
mtCOVERAGE_TEST_MARKER();
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}
/*-----------------------------------------------------------*/

Expand Down
33 changes: 31 additions & 2 deletions components/freertos/FreeRTOS-Kernel/include/freertos/task.h
Original file line number Diff line number Diff line change
Expand Up @@ -3407,6 +3407,32 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) PRIVILEGED_FUNCTION;
*----------------------------------------------------------*/
/** @cond !DOC_EXCLUDE_HEADER_SECTION */

/*
 * Various convenience macros for critical sections and scheduler suspension
 * called by other FreeRTOS sources and not meant to be called by the
 * application. The behavior of each macro depends on whether FreeRTOS is
 * currently configured for SMP or single core.
 */
#if ( configNUM_CORES > 1 )

/* SMP: scheduler suspension is not used for these paths, so map directly
 * onto the granular spinlock based critical sections. */
    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x )    taskENTER_CRITICAL( ( x ) )

/* taskEXIT_CRITICAL() cannot cause a yield, so the "already yielded" result
 * is always pdFALSE. The GNU statement expression preserves the value-
 * returning shape of xTaskResumeAll() used by the single-core branch. */
    #define prvEXIT_CRITICAL_OR_RESUME_ALL( x )      ( { taskEXIT_CRITICAL( ( x ) ); pdFALSE; } )

/* ISR variants take/release the spinlock; the saved interrupt status
 * argument is unused on SMP. Bodies are wrapped in do..while( 0 ) so the
 * multi-statement expansion is safe inside an unbraced if/else body. */
    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \
    do {                                                               \
        taskENTER_CRITICAL_ISR( ( pxLock ) );                          \
        ( void ) ( uxInterruptStatus );                                \
    } while( 0 )
    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \
    do {                                                                 \
        taskEXIT_CRITICAL_ISR( ( pxLock ) );                             \
        ( void ) ( uxInterruptStatus );                                  \
    } while( 0 )
#else /* configNUM_CORES > 1 */

/* Single core: fall back to vanilla FreeRTOS scheduler suspension. The
 * spinlock argument is unused but evaluated-and-voided to avoid unused
 * variable warnings at call sites. */
    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x )    ( { vTaskSuspendAll(); ( void ) ( x ); } )
    #define prvEXIT_CRITICAL_OR_RESUME_ALL( x )      xTaskResumeAll()

/* Fix: assign through the macro parameter ( uxInterruptStatus ), not the
 * hard-coded caller-scope name "uxSavedInterruptStatus", which silently
 * required every call site to use that exact identifier. */
    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \
    do {                                                               \
        ( uxInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR();     \
        ( void ) ( pxLock );                                           \
    } while( 0 )
    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \
    do {                                                                 \
        portCLEAR_INTERRUPT_MASK_FROM_ISR( ( uxInterruptStatus ) );      \
        ( void ) ( pxLock );                                             \
    } while( 0 )
#endif /* configNUM_CORES > 1 */

/*
* Return the handle of the task running on a certain CPU. Because of
* the nature of SMP processing, there is no guarantee that this
Expand Down Expand Up @@ -3519,6 +3545,8 @@ void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
TickType_t xTicksToWait,
const BaseType_t xWaitIndefinitely ) PRIVILEGED_FUNCTION;

#if ( configNUM_CORES > 1 )

/*
* THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
* INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
Expand All @@ -3533,8 +3561,9 @@ void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
* of delegating the entire responsibility to one of vTask...EventList()
* functions).
*/
void vTaskTakeKernelLock( void );
void vTaskReleaseKernelLock( void );
void vTaskTakeKernelLock( void );
void vTaskReleaseKernelLock( void );
#endif /* configNUM_CORES > 1 */

/*
* THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
Expand Down
16 changes: 13 additions & 3 deletions components/freertos/FreeRTOS-Kernel/portable/port_systick.c
Original file line number Diff line number Diff line change
Expand Up @@ -180,14 +180,24 @@ BaseType_t xPortSysTickHandler(void)

// Call FreeRTOS Increment tick function
BaseType_t xSwitchRequired;
#if CONFIG_FREERTOS_UNICORE
xSwitchRequired = xTaskIncrementTick();
#else
#if ( configNUM_CORES > 1 )
/*
For SMP, xTaskIncrementTick() will internally enter a critical section. But only core 0 calls xTaskIncrementTick()
while core 1 should call xTaskIncrementTickOtherCores().
*/
if (xPortGetCoreID() == 0) {
xSwitchRequired = xTaskIncrementTick();
} else {
xSwitchRequired = xTaskIncrementTickOtherCores();
}
#else // configNUM_CORES > 1
/*
Vanilla (single core) FreeRTOS expects that xTaskIncrementTick() cannot be interrupted (i.e., no nested interrupts).
Thus we have to disable interrupts before calling it.
*/
UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
xSwitchRequired = xTaskIncrementTick();
portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);
#endif

// Check if yield is required
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -447,6 +447,13 @@ FORCE_INLINE_ATTR BaseType_t xPortGetCoreID(void);

#define portASSERT_IF_IN_ISR() vPortAssertIfInISR()

/**
* @brief Used by FreeRTOS functions to call the correct version of critical section API
*/
#if ( configNUM_CORES > 1 )
#define portCHECK_IF_IN_ISR() xPortInIsrContext()
#endif

// ------------------ Critical Sections --------------------

/**
Expand Down
Loading

0 comments on commit 74ed2aa

Please sign in to comment.