diff --git a/components/freertos/FreeRTOS-Kernel/event_groups.c b/components/freertos/FreeRTOS-Kernel/event_groups.c
index 0c674fc498b..ffdf23db5c2 100644
--- a/components/freertos/FreeRTOS-Kernel/event_groups.c
+++ b/components/freertos/FreeRTOS-Kernel/event_groups.c
@@ -80,9 +80,7 @@ typedef struct EventGroupDef_t
         uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
     #endif
 
-    #ifdef ESP_PLATFORM
-        portMUX_TYPE xEventGroupLock; /* Spinlock required for SMP critical sections */
-    #endif // ESP_PLATFORM
+    portMUX_TYPE xEventGroupLock; /* Spinlock required for SMP critical sections */
 } EventGroup_t;
 /*-----------------------------------------------------------*/
@@ -225,11 +223,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
     }
     #endif
 
-    #ifdef ESP_PLATFORM /* IDF-3755 */
-        taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-    #else
-        vTaskSuspendAll();
-    #endif // ESP_PLATFORM
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
     {
         uxOriginalBitValue = pxEventBits->uxEventBits;
@@ -272,12 +266,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
             }
         }
     }
-    #ifdef ESP_PLATFORM /* IDF-3755 */
-        taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-        xAlreadyYielded = pdFALSE;
-    #else
-        xAlreadyYielded = xTaskResumeAll();
-    #endif // ESP_PLATFORM
+    xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );
 
     if( xTicksToWait != ( TickType_t ) 0 )
     {
@@ -361,11 +350,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
     }
     #endif
 
-    #ifdef ESP_PLATFORM /* IDF-3755 */
-        taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-    #else
-        vTaskSuspendAll();
-    #endif // ESP_PLATFORM
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
     {
         const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
@@ -433,12 +418,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
                 traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
             }
         }
-    #ifdef ESP_PLATFORM /* IDF-3755 */
-        taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-        xAlreadyYielded = pdFALSE;
-    #else
-        xAlreadyYielded = xTaskResumeAll();
-    #endif // ESP_PLATFORM
+    xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );
 
     if( xTicksToWait != ( TickType_t ) 0 )
     {
@@ -581,15 +561,14 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
     pxList = &( pxEventBits->xTasksWaitingForBits );
     pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM.  This is checked and valid. */
 
-    #ifdef ESP_PLATFORM /* IDF-3755 */
-        taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
+
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
+    #if ( configNUM_CORES > 1 )
 
         /* We are about to traverse a task list which is a kernel data structure.
         * Thus we need to call vTaskTakeKernelLock() to take the kernel lock. */
         vTaskTakeKernelLock();
-    #else
-        vTaskSuspendAll();
-    #endif // ESP_PLATFORM
+    #endif /* configNUM_CORES > 1 */
     {
         traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
@@ -661,13 +640,11 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
             * bit was set in the control word. */
             pxEventBits->uxEventBits &= ~uxBitsToClear;
         }
-    #ifdef ESP_PLATFORM /* IDF-3755 */
-        /* Release the previously taken kernel lock, then release the event group spinlock. */
+    #if ( configNUM_CORES > 1 )
+        /* Release the previously taken kernel lock. */
         vTaskReleaseKernelLock();
-        taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-    #else
-        ( void ) xTaskResumeAll();
-    #endif // ESP_PLATFORM
+    #endif /* configNUM_CORES > 1 */
+    ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );
 
     return pxEventBits->uxEventBits;
 }
@@ -678,18 +655,16 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
     EventGroup_t * pxEventBits = xEventGroup;
     const List_t * pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
 
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
+    #if ( configNUM_CORES > 1 )
+
+        /* We are about to traverse a task list which is a kernel data structure.
+         * Thus we need to call vTaskTakeKernelLock() to take the kernel lock. */
+        vTaskTakeKernelLock();
+    #endif /* configNUM_CORES > 1 */
     {
         traceEVENT_GROUP_DELETE( xEventGroup );
 
-        /* IDF-3755 */
-        taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-        #ifdef ESP_PLATFORM
-
-            /* We are about to traverse a task list which is a kernel data structure.
-             * Thus we need to call vTaskTakeKernelLock() to take the kernel lock. */
-            vTaskTakeKernelLock();
-        #endif
-
         while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
         {
             /* Unblock the task, returning 0 as the event list is being deleted
@@ -697,34 +672,33 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
             configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
             vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
         }
+    }
+    #if ( configNUM_CORES > 1 )
+        /* Release the previously taken kernel lock. */
+        vTaskReleaseKernelLock();
+    #endif /* configNUM_CORES > 1 */
+    prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );
 
-        #ifdef ESP_PLATFORM
-            /* Release the previously taken kernel lock. */
-            vTaskReleaseKernelLock();
-        #endif
-        taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-
-        #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
+    #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
+    {
+        /* The event group can only have been allocated dynamically - free
+         * it again. */
+        vPortFree( pxEventBits );
+    }
+    #elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
+    {
+        /* The event group could have been allocated statically or
+         * dynamically, so check before attempting to free the memory. */
+        if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
         {
-            /* The event group can only have been allocated dynamically - free
-             * it again. */
             vPortFree( pxEventBits );
         }
-        #elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
+        else
         {
-            /* The event group could have been allocated statically or
-             * dynamically, so check before attempting to free the memory. */
-            if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
-            {
-                vPortFree( pxEventBits );
-            }
-            else
-            {
-                mtCOVERAGE_TEST_MARKER();
-            }
+            mtCOVERAGE_TEST_MARKER();
         }
-        #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
-    }
+    }
+    #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
 }
 /*-----------------------------------------------------------*/
diff --git a/components/freertos/FreeRTOS-Kernel/include/freertos/task.h b/components/freertos/FreeRTOS-Kernel/include/freertos/task.h
index 6a9879a7a55..43c5fa310e5 100644
--- a/components/freertos/FreeRTOS-Kernel/include/freertos/task.h
+++ b/components/freertos/FreeRTOS-Kernel/include/freertos/task.h
@@ -3407,6 +3407,32 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) PRIVILEGED_FUNCTION;
 *----------------------------------------------------------*/
 
 /** @cond !DOC_EXCLUDE_HEADER_SECTION */
+
+/*
+ * Various convenience macros for critical sections and scheduler suspension
+ * called by other FreeRTOS sources and not meant to be called by the
+ * application. The behavior of each macro depends on whether FreeRTOS is
+ * currently configured for SMP or single core.
+ */
+#if ( configNUM_CORES > 1 )
+    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x )    taskENTER_CRITICAL( ( x ) )
+    #define prvEXIT_CRITICAL_OR_RESUME_ALL( x )      ( { taskEXIT_CRITICAL( ( x ) ); pdFALSE; } )
+    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \
+        taskENTER_CRITICAL_ISR( ( pxLock ) );                          \
+        ( void ) ( uxInterruptStatus );
+    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \
+        taskEXIT_CRITICAL_ISR( ( pxLock ) );                            \
+        ( void ) ( uxInterruptStatus );
+#else /* configNUM_CORES > 1 */
+    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x )    ( { vTaskSuspendAll(); ( void ) ( x ); } )
+    #define prvEXIT_CRITICAL_OR_RESUME_ALL( x )      xTaskResumeAll()
+    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \
+        ( uxInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR();     \
+        ( void ) ( pxLock );
+    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \
+        portCLEAR_INTERRUPT_MASK_FROM_ISR( ( uxInterruptStatus ) );     \
+        ( void ) ( pxLock );
+#endif /* configNUM_CORES > 1 */
+
 /*
  * Return the handle of the task running on a certain CPU. Because of
  * the nature of SMP processing, there is no guarantee that this
@@ -3519,6 +3545,8 @@ void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
                                       TickType_t xTicksToWait,
                                       const BaseType_t xWaitIndefinitely ) PRIVILEGED_FUNCTION;
 
+#if ( configNUM_CORES > 1 )
+
 /*
  * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE.  IT IS AN
  * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
  *
  * Removes the calling task from the ready list and places it both
  * on the list of tasks waiting for a particular event, and the
  * list of delayed tasks.  The task will be removed from both lists
  * and replaced on the ready list should either the event occur (and
  * there be no higher priority tasks waiting on the same event) or
  * the delay period expire.
  *
  * This function is similar to vTaskPlaceOnEventList() except that this
  * function has privilege of delegating the entire responsibility to one
  * of vTask...EventList() functions).
  */
-void vTaskTakeKernelLock( void );
-void vTaskReleaseKernelLock( void );
+    void vTaskTakeKernelLock( void );
+    void vTaskReleaseKernelLock( void );
+#endif /* configNUM_CORES > 1 */
 
 /*
  * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE.  IT IS AN
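
Note: the SMP and single-core expansions of the prv*() helpers above are deliberately asymmetric. In the SMP build, prvEXIT_CRITICAL_OR_RESUME_ALL() is a GCC statement expression that releases the spinlock and evaluates to pdFALSE, so call sites written as `xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( ... );` behave as though xTaskResumeAll() reported that no yield occurred. A minimal caller sketch, using a hypothetical lock and counter that are not part of the kernel:

    static portMUX_TYPE xExampleLock = portMUX_INITIALIZER_UNLOCKED; /* hypothetical */
    static UBaseType_t uxExampleCount = 0;                           /* hypothetical */

    static void prvExampleUpdate( void )
    {
        BaseType_t xAlreadyYielded;

        /* SMP: takes xExampleLock. Single core: suspends the scheduler. */
        prvENTER_CRITICAL_OR_SUSPEND_ALL( &xExampleLock );
        {
            uxExampleCount++;
        }
        /* SMP: releases the lock and evaluates to pdFALSE. Single core: xTaskResumeAll(). */
        xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &xExampleLock );

        /* Same "force a reschedule if not already done" pattern used by the
         * xEventGroupSync() and task-delay call sites in this patch. */
        if( xAlreadyYielded == pdFALSE )
        {
            portYIELD_WITHIN_API();
        }
    }

Keeping the call sites identical in both builds is what lets event_groups.c, queue.c, tasks.c and timers.c below drop their per-configuration #ifdef blocks.
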
diff --git a/components/freertos/FreeRTOS-Kernel/portable/port_systick.c b/components/freertos/FreeRTOS-Kernel/portable/port_systick.c
index 2c12f72f893..4cdb71f9eb8 100644
--- a/components/freertos/FreeRTOS-Kernel/portable/port_systick.c
+++ b/components/freertos/FreeRTOS-Kernel/portable/port_systick.c
@@ -180,14 +180,24 @@ BaseType_t xPortSysTickHandler(void)
     // Call FreeRTOS Increment tick function
     BaseType_t xSwitchRequired;
-#if CONFIG_FREERTOS_UNICORE
-    xSwitchRequired = xTaskIncrementTick();
-#else
+#if ( configNUM_CORES > 1 )
+    /*
+    For SMP, xTaskIncrementTick() will internally enter a critical section. Only core 0 should call
+    xTaskIncrementTick(), while the other cores call xTaskIncrementTickOtherCores().
+    */
     if (xPortGetCoreID() == 0) {
         xSwitchRequired = xTaskIncrementTick();
     } else {
         xSwitchRequired = xTaskIncrementTickOtherCores();
     }
+#else // configNUM_CORES > 1
+    /*
+    Vanilla (single core) FreeRTOS expects that xTaskIncrementTick() cannot be interrupted (i.e., no nested
+    interrupts). Thus we have to disable interrupts before calling it.
+    */
+    UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+    xSwitchRequired = xTaskIncrementTick();
+    portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);
 #endif
 
     // Check if yield is required
diff --git a/components/freertos/FreeRTOS-Kernel/portable/xtensa/include/freertos/portmacro.h b/components/freertos/FreeRTOS-Kernel/portable/xtensa/include/freertos/portmacro.h
index b7f14e1e2ab..a08ae69d309 100644
--- a/components/freertos/FreeRTOS-Kernel/portable/xtensa/include/freertos/portmacro.h
+++ b/components/freertos/FreeRTOS-Kernel/portable/xtensa/include/freertos/portmacro.h
@@ -447,6 +447,13 @@ FORCE_INLINE_ATTR BaseType_t xPortGetCoreID(void);
 
 #define portASSERT_IF_IN_ISR() vPortAssertIfInISR()
 
+/**
+ * @brief Used by FreeRTOS functions to call the correct version of the critical section API
+ */
+#if ( configNUM_CORES > 1 )
+#define portCHECK_IF_IN_ISR() xPortInIsrContext()
+#endif
+
 // ------------------ Critical Sections --------------------
 
 /**
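
Note: the prv...ISR() pair introduced in task.h expects the caller itself to declare the saved-interrupt-status variable; the SMP expansion merely casts it to void, while the single-core expansion assigns and later restores it. The xQueue...FromISR() functions below all declare `UBaseType_t uxSavedInterruptStatus;` for this reason. A condensed sketch of the expected caller shape (the function and lock names are illustrative only, not part of the kernel):

    static void vExampleAccessFromISR( portMUX_TYPE * pxExampleLock )
    {
        UBaseType_t uxSavedInterruptStatus; /* Required by the single-core expansion. */

        /* SMP: taskENTER_CRITICAL_ISR( pxExampleLock ). Single core: mask interrupts. */
        prvENTER_CRITICAL_OR_MASK_ISR( pxExampleLock, uxSavedInterruptStatus );
        {
            /* ... access the data protected by pxExampleLock ... */
        }
        /* SMP: taskEXIT_CRITICAL_ISR( pxExampleLock ). Single core: restore the mask. */
        prvEXIT_CRITICAL_OR_UNMASK_ISR( pxExampleLock, uxSavedInterruptStatus );
    }
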
diff --git a/components/freertos/FreeRTOS-Kernel/queue.c b/components/freertos/FreeRTOS-Kernel/queue.c
index e5929a21fe5..5d730b6d70e 100644
--- a/components/freertos/FreeRTOS-Kernel/queue.c
+++ b/components/freertos/FreeRTOS-Kernel/queue.c
@@ -1095,12 +1095,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
      * read, instead return a flag to say whether a context switch is required or
      * not (i.e. has a task with a higher priority than us been woken by this
      * post). */
-    #if ( configNUM_CORES > 1 )
-        taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
-        ( void ) uxSavedInterruptStatus;
-    #else
-        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-    #endif
+    prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
     {
         if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
         {
@@ -1236,11 +1231,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
             xReturn = errQUEUE_FULL;
         }
     }
-    #if ( configNUM_CORES > 1 )
-        taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
-    #else
-        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-    #endif
+    prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
 
     return xReturn;
 }
@@ -1286,12 +1277,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
 
-    #if ( configNUM_CORES > 1 )
-        taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
-        ( void ) uxSavedInterruptStatus;
-    #else
-        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-    #endif
+    prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
     {
         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@@ -1422,11 +1408,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
             xReturn = errQUEUE_FULL;
         }
     }
-    #if ( configNUM_CORES > 1 )
-        taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
-    #else
-        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-    #endif
+    prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
 
     return xReturn;
 }
@@ -2094,12 +2076,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
 
-    #if ( configNUM_CORES > 1 )
-        taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
-        ( void ) uxSavedInterruptStatus;
-    #else
-        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-    #endif
+    prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
     {
         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@@ -2170,12 +2147,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
             traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
         }
     }
-    #if ( configNUM_CORES > 1 )
-        taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
-    #else
-        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-    #endif
-
+    prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
     return xReturn;
 }
@@ -2209,12 +2181,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
 
-    #if ( configNUM_CORES > 1 )
-        taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
-        ( void ) uxSavedInterruptStatus;
-    #else
-        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-    #endif
+    prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
     {
         /* Cannot block in an ISR, so check there is data available. */
         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
         {
@@ -2235,12 +2202,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
             traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
         }
     }
-    #if ( configNUM_CORES > 1 )
-        taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
-    #else
-        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-    #endif
-
+    prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
     return xReturn;
 }
@@ -3269,8 +3231,21 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         configASSERT( pxQueueSetContainer );
         configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
 
-        /* We need to also acquire the queue set's spinlock as well. */
-        taskENTER_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );
+        #if ( configNUM_CORES > 1 )
+
+            /* In SMP, queue sets have their own spinlock. Thus we need to also
+             * acquire the queue set's spinlock before accessing it. This
+             * function can also be called from an ISR context, so we need to
+             * check whether we are in an ISR. */
+            if( portCHECK_IF_IN_ISR() == pdFALSE )
+            {
+                taskENTER_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );
+            }
+            else
+            {
+                taskENTER_CRITICAL_ISR( &( pxQueueSetContainer->xQueueLock ) );
+            }
+        #endif /* configNUM_CORES > 1 */
 
         if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
         {
@@ -3321,8 +3296,17 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
             mtCOVERAGE_TEST_MARKER();
         }
 
-        /* Release the previously acquired queue set's spinlock. */
-        taskEXIT_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );
+        #if ( configNUM_CORES > 1 )
+            /* Release the previously acquired queue set's spinlock. */
+            if( portCHECK_IF_IN_ISR() == pdFALSE )
+            {
+                taskEXIT_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );
+            }
+            else
+            {
+                taskEXIT_CRITICAL_ISR( &( pxQueueSetContainer->xQueueLock ) );
+            }
+        #endif /* configNUM_CORES > 1 */
 
         return xReturn;
     }
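
Note: prvNotifyQueueSetContainer() above can run in either task or ISR context, which is why the SMP build consults portCHECK_IF_IN_ISR() before choosing between taskENTER_CRITICAL() and taskENTER_CRITICAL_ISR(); xTaskRemoveFromEventList() in tasks.c below repeats the same open-coded check. Condensed into a sketch (the helper name is illustrative only; the kernel open-codes the if/else at each call site):

    static inline void vExampleTakeLockAnyContext( portMUX_TYPE * pxLock )
    {
    #if ( configNUM_CORES > 1 )
        if( portCHECK_IF_IN_ISR() == pdFALSE )
        {
            taskENTER_CRITICAL( pxLock );     /* Task context */
        }
        else
        {
            taskENTER_CRITICAL_ISR( pxLock ); /* ISR context */
        }
    #else
        /* Single core: these call sites already run with interrupts masked
         * or the scheduler suspended, so no lock is taken. */
        ( void ) pxLock;
    #endif
    }
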
diff --git a/components/freertos/FreeRTOS-Kernel/stream_buffer.c b/components/freertos/FreeRTOS-Kernel/stream_buffer.c
index 5fb8351a879..fa9a3ce2516 100644
--- a/components/freertos/FreeRTOS-Kernel/stream_buffer.c
+++ b/components/freertos/FreeRTOS-Kernel/stream_buffer.c
@@ -60,35 +60,19 @@
  * or #defined the notification macros away, then provide default implementations
  * that uses task notifications. */
 /*lint -save -e9026 Function like macros allowed and needed here so they can be overridden. */
 #ifndef sbRECEIVE_COMPLETED
-    #ifdef ESP_PLATFORM /* IDF-3775 */
-        #define sbRECEIVE_COMPLETED( pxStreamBuffer )                             \
-    taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );                 \
-    {                                                                             \
-        if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL )                      \
-        {                                                                         \
-            ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend,         \
-                                  ( uint32_t ) 0,                                 \
-                                  eNoAction );                                    \
-            ( pxStreamBuffer )->xTaskWaitingToSend = NULL;                        \
-        }                                                                         \
-    }                                                                             \
-    taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
-    #else /* ifdef ESP_PLATFORM */
-        #define sbRECEIVE_COMPLETED( pxStreamBuffer )                             \
-    vTaskSuspendAll();                                                            \
-    {                                                                             \
-        if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL )                      \
-        {                                                                         \
-            ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend,         \
-                                  ( uint32_t ) 0,                                 \
-                                  eNoAction );                                    \
-            ( pxStreamBuffer )->xTaskWaitingToSend = NULL;                        \
-        }                                                                         \
-    }                                                                             \
-    ( void ) xTaskResumeAll();
-    #endif // ESP_PLATFORM
+    #define sbRECEIVE_COMPLETED( pxStreamBuffer )                                 \
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxStreamBuffer->xStreamBufferLock ) );   \
+    {                                                                             \
+        if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL )                      \
+        {                                                                         \
+            ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend,         \
+                                  ( uint32_t ) 0,                                 \
+                                  eNoAction );                                    \
+            ( pxStreamBuffer )->xTaskWaitingToSend = NULL;                        \
+        }                                                                         \
+    }                                                                             \
+    ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) );
 #endif /* sbRECEIVE_COMPLETED */
 
 #ifndef sbRECEIVE_COMPLETED_FROM_ISR
@@ -116,33 +100,18 @@
  * or #defined the notification macro away, them provide a default implementation
  * that uses task notifications. */
 #ifndef sbSEND_COMPLETED
-    #ifdef ESP_PLATFORM /* IDF-3755 */
-        #define sbSEND_COMPLETED( pxStreamBuffer )                                \
-    taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );                 \
-    {                                                                             \
-        if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL )                   \
-        {                                                                         \
-            ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive,      \
-                                  ( uint32_t ) 0,                                 \
-                                  eNoAction );                                    \
-            ( pxStreamBuffer )->xTaskWaitingToReceive = NULL;                     \
-        }                                                                         \
-    }                                                                             \
-    taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
-    #else /* ifdef ESP_PLATFORM */
-        #define sbSEND_COMPLETED( pxStreamBuffer )                                \
-    vTaskSuspendAll();                                                            \
-    {                                                                             \
-        if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL )                   \
-        {                                                                         \
-            ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive,      \
-                                  ( uint32_t ) 0,                                 \
-                                  eNoAction );                                    \
-            ( pxStreamBuffer )->xTaskWaitingToReceive = NULL;                     \
-        }                                                                         \
-    }                                                                             \
-    ( void ) xTaskResumeAll();
-    #endif // ESP_PLATFORM
+    #define sbSEND_COMPLETED( pxStreamBuffer )                                    \
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxStreamBuffer->xStreamBufferLock ) );   \
+    {                                                                             \
+        if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL )                   \
+        {                                                                         \
+            ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive,      \
+                                  ( uint32_t ) 0,                                 \
+                                  eNoAction );                                    \
+            ( pxStreamBuffer )->xTaskWaitingToReceive = NULL;                     \
+        }                                                                         \
+    }                                                                             \
+    ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) );
 #endif /* sbSEND_COMPLETED */
 
 #ifndef sbSEND_COMPLETE_FROM_ISR
@@ -309,7 +278,6 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
             pucAllocatedMemory = NULL;
         }
-
         if( pucAllocatedMemory != NULL )
         {
             prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pucAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */
diff --git a/components/freertos/FreeRTOS-Kernel/tasks.c b/components/freertos/FreeRTOS-Kernel/tasks.c
index 7ac5c92a4b2..efd464be3cb 100644
--- a/components/freertos/FreeRTOS-Kernel/tasks.c
+++ b/components/freertos/FreeRTOS-Kernel/tasks.c
@@ -403,12 +403,9 @@ PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Poi
 PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList;      /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
 PRIVILEGED_DATA static List_t xPendingReadyList[ configNUM_CORES ];      /*< Tasks that have been readied while the scheduler was suspended.  They will be moved to the ready list when the scheduler is resumed. */
 
-#ifdef ESP_PLATFORM
-
 /* Spinlock required for SMP critical sections. This lock protects all of the
  * kernel's data structures such as various tasks lists, flags, and tick counts. */
-    PRIVILEGED_DATA static portMUX_TYPE xKernelLock = portMUX_INITIALIZER_UNLOCKED;
-#endif // ESP_PLATFORM
+PRIVILEGED_DATA static portMUX_TYPE xKernelLock = portMUX_INITIALIZER_UNLOCKED;
 
 #if ( INCLUDE_vTaskDelete == 1 )
@@ -1537,11 +1534,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         configASSERT( ( xTimeIncrement > 0U ) );
         configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
 
-        #ifdef ESP_PLATFORM /* IDF-3755 */
-            taskENTER_CRITICAL( &xKernelLock );
-        #else
-            vTaskSuspendAll();
-        #endif // ESP_PLATFORM
+        prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
         {
             /* Minor optimisation.  The tick count cannot change in this
              * block. */
@@ -1597,12 +1590,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        #ifdef ESP_PLATFORM /* IDF-3755 */
-            taskEXIT_CRITICAL( &xKernelLock );
-            xAlreadyYielded = pdFALSE;
-        #else
-            xAlreadyYielded = xTaskResumeAll();
-        #endif // ESP_PLATFORM
+        xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
 
         /* Force a reschedule if xTaskResumeAll has not already done so, we may
          * have put ourselves to sleep. */
@@ -1631,11 +1619,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         if( xTicksToDelay > ( TickType_t ) 0U )
         {
             configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
-            #ifdef ESP_PLATFORM /* IDF-3755 */
-                taskENTER_CRITICAL( &xKernelLock );
-            #else
-                vTaskSuspendAll();
-            #endif // ESP_PLATFORM
+            prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
             {
                 traceTASK_DELAY();
@@ -1648,12 +1632,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                  * executing task. */
                 prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
             }
-            #ifdef ESP_PLATFORM /* IDF-3755 */
-                taskEXIT_CRITICAL( &xKernelLock );
-                xAlreadyYielded = pdFALSE;
-            #else
-                xAlreadyYielded = xTaskResumeAll();
-            #endif // ESP_PLATFORM
+            xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
         }
         else
         {
@@ -2836,11 +2815,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
         /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
         configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
 
-        #ifdef ESP_PLATFORM /* IDF-3755 */
-            taskENTER_CRITICAL( &xKernelLock );
-        #else
-            vTaskSuspendAll();
-        #endif // ESP_PLATFORM
+        prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
         {
             /* Search the ready lists. */
             do
@@ -2886,11 +2861,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
                 }
             #endif
         }
-        #ifdef ESP_PLATFORM /* IDF-3755 */
-            taskEXIT_CRITICAL( &xKernelLock );
-        #else
-            ( void ) xTaskResumeAll();
-        #endif // ESP_PLATFORM
+        ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
 
         return pxTCB;
     }
@@ -2906,11 +2877,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
     {
         UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
 
-        #ifdef ESP_PLATFORM /* IDF-3755 */
-            taskENTER_CRITICAL( &xKernelLock );
-        #else
-            vTaskSuspendAll();
-        #endif // ESP_PLATFORM
+        prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
         {
             /* Is there a space in the array for each task in the system? */
             if( uxArraySize >= uxCurrentNumberOfTasks )
             {
@@ -2969,11 +2936,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        #ifdef ESP_PLATFORM /* IDF-3755 */
-            taskEXIT_CRITICAL( &xKernelLock );
-        #else
-            ( void ) xTaskResumeAll();
-        #endif // ESP_PLATFORM
+        ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
 
         return uxTask;
     }
@@ -3008,10 +2971,12 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
 
     void vTaskStepTick( const TickType_t xTicksToJump )
     {
-        #ifdef ESP_PLATFORM
-            /* For SMP, we require a critical section to access xTickCount */
+        #if ( configNUM_CORES > 1 )
+
+            /* Although this is called with the scheduler suspended, for SMP we
+             * still need to take the kernel lock to access xTickCount. */
             taskENTER_CRITICAL( &xKernelLock );
-        #endif
+        #endif /* configNUM_CORES > 1 */
 
         /* Correct the tick count value after a period during which the tick
          * was suppressed.  Note this does *not* call the tick hook function for
@@ -3019,9 +2984,11 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
         configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
         xTickCount += xTicksToJump;
         traceINCREASE_TICK_COUNT( xTicksToJump );
-        #ifdef ESP_PLATFORM
+
+        #if ( configNUM_CORES > 1 )
+            /* Release the previously taken kernel lock. */
             taskEXIT_CRITICAL( &xKernelLock );
-        #endif
+        #endif /* configNUM_CORES > 1 */
     }
 
 #endif /* configUSE_TICKLESS_IDLE */
@@ -3042,16 +3009,17 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
     /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when
      * the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
     vTaskSuspendAll();
-    #ifdef ESP_PLATFORM
+    #if ( configNUM_CORES > 1 )
 
-        /* For SMP, we still require a critical section to access xPendedTicks even
-         * if the scheduler is disabled. */
+        /* Although the scheduler is suspended, for SMP we still need to take
+         * the kernel lock to access xPendedTicks. */
         taskENTER_CRITICAL( &xKernelLock );
-        xPendedTicks += xTicksToCatchUp;
+    #endif /* configNUM_CORES > 1 */
+    xPendedTicks += xTicksToCatchUp;
+    #if ( configNUM_CORES > 1 )
+        /* Release the previously taken kernel lock. */
         taskEXIT_CRITICAL( &xKernelLock );
-    #else // ESP_PLATFORM
-        xPendedTicks += xTicksToCatchUp;
-    #endif // ESP_PLATFORM
+    #endif /* configNUM_CORES > 1 */
     xYieldOccurred = xTaskResumeAll();
 
     return xYieldOccurred;
@@ -3067,11 +3035,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
 
         configASSERT( pxTCB );
 
-        #ifdef ESP_PLATFORM /* IDF-3755 */
-            taskENTER_CRITICAL( &xKernelLock );
-        #else
-            vTaskSuspendAll();
-        #endif // ESP_PLATFORM
+        prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
         {
             /* A task can only be prematurely removed from the Blocked state if
              * it is actually in the Blocked state. */
@@ -3134,11 +3098,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
                 xReturn = pdFAIL;
             }
         }
-        #ifdef ESP_PLATFORM /* IDF-3755 */
-            taskEXIT_CRITICAL( &xKernelLock );
-        #else
-            ( void ) xTaskResumeAll();
-        #endif // ESP_PLATFORM
+        ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
 
         return xReturn;
     }
@@ -3148,14 +3108,11 @@
 BaseType_t xTaskIncrementTick( void )
 {
-    #ifdef ESP_PLATFORM
-        #if ( configNUM_CORES > 1 )
-        {
-            /* Only Core 0 should ever call this function. */
-            configASSERT( xPortGetCoreID() == 0 );
-        }
-        #endif /* ( configNUM_CORES > 1 ) */
-    #endif // ESP_PLATFORM
+    #if ( configNUM_CORES > 1 )
+        /* Only Core 0 should ever call this function. */
+        configASSERT( xPortGetCoreID() == 0 );
+    #endif /* ( configNUM_CORES > 1 ) */
+
     TCB_t * pxTCB;
     TickType_t xItemValue;
     BaseType_t xSwitchRequired = pdFALSE;
@@ -3165,15 +3122,13 @@ BaseType_t xTaskIncrementTick( void )
      * tasks to be unblocked. */
     traceTASK_INCREMENT_TICK( xTickCount );
 
-    #ifdef ESP_PLATFORM
+    #if ( configNUM_CORES > 1 )
 
-        /* We need a critical section here as we are about to access kernel data
-         * structures:
-         * - Other cores could be accessing them simultaneously
-         * - Unlike other ports, we call xTaskIncrementTick() without disabling nested
-         *   interrupts, which in turn is disabled by the critical section. */
+        /* For SMP, we need to take the kernel lock here as we are about to
+         * access kernel data structures (unlike single core which calls this
+         * function with interrupts disabled). */
         taskENTER_CRITICAL_ISR( &xKernelLock );
-    #endif // ESP_PLATFORM
+    #endif /* ( configNUM_CORES > 1 ) */
 
     if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
     {
@@ -3261,16 +3216,12 @@ BaseType_t xTaskIncrementTick( void )
                         /* Preemption is on, but a context switch should
                          * only be performed if the unblocked task has a
                          * priority that is equal to or higher than the
-                         * currently executing task. */
-                        #if defined( ESP_PLATFORM ) && ( configNUM_CORES > 1 )
-
-                            /* Since this function is only run on core 0, we
-                             * only need to switch contexts if the unblocked task
-                             * can run on core 0. */
-                            if( ( ( pxTCB->xCoreID == 0 ) || ( pxTCB->xCoreID == tskNO_AFFINITY ) ) && ( pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority ) )
-                        #else
-                            if( pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority )
-                        #endif
+                         * currently executing task.
+                         *
+                         * For SMP, since this function is only run on core
+                         * 0, we only need to switch contexts if the unblocked
+                         * task can run on core 0. */
+                        if( ( taskCAN_RUN_ON_CORE( 0, pxTCB->xCoreID ) == pdTRUE ) && ( pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority ) )
                         {
                             xSwitchRequired = pdTRUE;
                         }
@@ -3300,23 +3251,22 @@ BaseType_t xTaskIncrementTick( void )
         }
         #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
 
-        #ifdef ESP_PLATFORM
-            #if ( configUSE_TICK_HOOK == 1 )
-                TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */
-            #endif /* configUSE_TICK_HOOK */
-            /* Exit the critical section as we have finished accessing the kernel data structures. */
+        #if ( configUSE_TICK_HOOK == 1 )
+            TickType_t xPendedTicksTemp = xPendedTicks; /* Non-volatile copy. */
+        #endif /* configUSE_TICK_HOOK */
+
+        #if ( configNUM_CORES > 1 )
+
+            /* Release the previously taken kernel lock as we have finished
+             * accessing the kernel data structures. */
             taskEXIT_CRITICAL_ISR( &xKernelLock );
-        #endif // ESP_PLATFORM
+        #endif /* ( configNUM_CORES > 1 ) */
 
         #if ( configUSE_TICK_HOOK == 1 )
         {
             /* Guard against the tick hook being called when the pended tick
              * count is being unwound (when the scheduler is being unlocked). */
-            #ifdef ESP_PLATFORM
-                if( xPendedCounts == ( TickType_t ) 0 )
-            #else
-                if( xPendedTicks == ( TickType_t ) 0 )
-            #endif
+            if( xPendedTicksTemp == ( TickType_t ) 0 )
             {
                 vApplicationTickHook();
             }
@@ -3343,10 +3293,12 @@ BaseType_t xTaskIncrementTick( void )
     else
     {
         ++xPendedTicks;
-        #ifdef ESP_PLATFORM
-            /* Exit the critical section as we have finished accessing the kernel data structures. */
+        #if ( configNUM_CORES > 1 )
+
+            /* Release the previously taken kernel lock as we have finished
+             * accessing the kernel data structures. */
             taskEXIT_CRITICAL_ISR( &xKernelLock );
-        #endif // ESP_PLATFORM
+        #endif /* ( configNUM_CORES > 1 ) */
 
         /* The tick hook gets called at regular intervals, even if the
          * scheduler is locked. */
@@ -3378,12 +3330,8 @@ BaseType_t xTaskIncrementTick( void )
 
     if( uxSchedulerSuspended[ xCoreID ] == ( UBaseType_t ) pdFALSE )
     {
-        /* We need a critical section here as we are about to access kernel data
-         * structures:
-         * - Other cores could be accessing them simultaneously
-         * - Unlike other ports, we call xTaskIncrementTick() without disabling
-         *   nested interrupts, which in turn is disabled by the critical
-         *   section. */
+        /* We need to take the kernel lock here as we are about to access
+         * kernel data structures. */
         taskENTER_CRITICAL_ISR( &xKernelLock );
 
         /* A task being unblocked cannot cause an immediate context switch
@@ -3419,7 +3367,8 @@ BaseType_t xTaskIncrementTick( void )
         }
         #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
 
-        /* Exit the critical section as we have finished accessing the kernel data structures. */
+        /* Release the previously taken kernel lock as we have finished
+         * accessing the kernel data structures. */
         taskEXIT_CRITICAL_ISR( &xKernelLock );
 
         #if ( configUSE_PREEMPTION == 1 )
@@ -3508,26 +3457,18 @@
     {
         TCB_t * pxTCB;
         TaskHookFunction_t xReturn;
+        UBaseType_t uxSavedInterruptStatus;
 
         /* If xTask is NULL then set the calling task's hook. */
         pxTCB = prvGetTCBFromHandle( xTask );
 
         /* Save the hook function in the TCB.  A critical section is required as
          * the value can be accessed from an interrupt. */
-        #if ( configNUM_CORES > 1 )
-            taskENTER_CRITICAL_ISR( &xKernelLock );
-        #else
-            UBaseType_t uxSavedInterruptStatus;
-            uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-        #endif
+        prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus );
         {
             xReturn = pxTCB->pxTaskTag;
         }
-        #if ( configNUM_CORES > 1 )
-            taskEXIT_CRITICAL_ISR( &xKernelLock );
-        #else
-            portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-        #endif
+        prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus );
 
         return xReturn;
     }
@@ -3660,14 +3601,14 @@
 
 void vTaskSwitchContext( void )
 {
-    #ifdef ESP_PLATFORM
+    #if ( configNUM_CORES > 1 )
 
-        /* vTaskSwitchContext is called either from:
-         * - ISR dispatcher when return from an ISR (interrupts will already be disabled)
-         * - vTaskSuspend() which is not in a critical section
-         * Therefore, we enter a critical section ISR version to ensure safety */
+        /* For SMP, we need to take the kernel lock here as we are about to
+         * access kernel data structures (unlike single core which calls this
+         * function with either interrupts disabled or when the scheduler hasn't
+         * started yet). */
         taskENTER_CRITICAL_ISR( &xKernelLock );
-    #endif // ESP_PLATFORM
+    #endif /* ( configNUM_CORES > 1 ) */
 
     if( uxSchedulerSuspended[ xPortGetCoreID() ] != ( UBaseType_t ) pdFALSE )
     {
@@ -3756,10 +3697,12 @@ void vTaskSwitchContext( void )
         #endif // ESP_PLATFORM
     }
 
-    #ifdef ESP_PLATFORM
-        /* Exit the critical section previously entered */
+    #if ( configNUM_CORES > 1 )
+
+        /* Release the previously taken kernel lock as we have finished
+         * accessing the kernel data structures. */
         taskEXIT_CRITICAL_ISR( &xKernelLock );
-    #endif // ESP_PLATFORM
+    #endif /* ( configNUM_CORES > 1 ) */
 }
 /*-----------------------------------------------------------*/
 
@@ -3768,8 +3711,12 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
 {
     configASSERT( pxEventList );
 
-    /* Take the kernel lock as we are about to access the task lists. */
-    taskENTER_CRITICAL( &xKernelLock );
+    #if ( configNUM_CORES > 1 )
+
+        /* In SMP, we need to take the kernel lock as we are about to access the
+         * task lists. */
+        taskENTER_CRITICAL( &xKernelLock );
+    #endif /* configNUM_CORES > 1 */
 
     /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
      * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
@@ -3782,7 +3729,10 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
 
     prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
 
-    taskEXIT_CRITICAL( &xKernelLock );
+    #if ( configNUM_CORES > 1 )
+        /* Release the previously taken kernel lock. */
+        taskEXIT_CRITICAL( &xKernelLock );
+    #endif /* configNUM_CORES > 1 */
 }
 /*-----------------------------------------------------------*/
 
@@ -3792,14 +3742,18 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
 {
     configASSERT( pxEventList );
 
-    /* Take the kernel lock as we are about to access the task lists. */
-    taskENTER_CRITICAL( &xKernelLock );
+    #if ( configNUM_CORES > 1 )
 
-    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED.  It is used by
-     * the event groups implementation. */
+        /* In SMP, the event group functions haven't suspended the scheduler at
+         * this point. We need to take the kernel lock instead as we are about
+         * to access the task lists. */
+        taskENTER_CRITICAL( &xKernelLock );
+    #else /* configNUM_CORES > 1 */
 
-    /* Note. We currently don't always suspend the scheduler. Todo: IDF-3755
-     * configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] != 0 ); */
+        /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED.  It is used by
+         * the event groups implementation. */
+        configASSERT( uxSchedulerSuspended[ 0 ] != 0 );
+    #endif /* configNUM_CORES > 1 */
 
     /* Store the item value in the event list item.  It is safe to access the
      * event list item here as interrupts won't access the event list item of a
@@ -3815,7 +3769,10 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
 
     prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
 
-    taskEXIT_CRITICAL( &xKernelLock );
+    #if ( configNUM_CORES > 1 )
+        /* Release the previously taken kernel lock. */
+        taskEXIT_CRITICAL( &xKernelLock );
+    #endif /* configNUM_CORES > 1 */
 }
 /*-----------------------------------------------------------*/
 
@@ -3827,8 +3784,12 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
     {
         configASSERT( pxEventList );
 
-        /* Take the kernel lock as we are about to access the task lists. */
-        taskENTER_CRITICAL( &xKernelLock );
+        #if ( configNUM_CORES > 1 )
+
+            /* In SMP, we need to take the kernel lock as we are about to access
+             * the task lists. */
+            taskENTER_CRITICAL( &xKernelLock );
+        #endif /* configNUM_CORES > 1 */
 
         /* This function should not be called by application code hence the
          * 'Restricted' in its name.  It is not part of the public API.  It is
@@ -3853,7 +3814,10 @@
         traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
         prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
 
-        taskEXIT_CRITICAL( &xKernelLock );
+        #if ( configNUM_CORES > 1 )
+            /* Release the previously taken kernel lock. */
+            taskEXIT_CRITICAL( &xKernelLock );
+        #endif /* configNUM_CORES > 1 */
     }
 
 #endif /* configUSE_TIMERS */
@@ -3865,12 +3829,24 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
     BaseType_t xReturn;
 
     /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION.  It can also be
-     * called from a critical section within an ISR.
-     *
-     * However, we still need to take the kernel lock as we are about to access
-     * kernel data structures. Note that we use the ISR version of the macro as
-     * this function could be called from an ISR critical section. */
-    taskENTER_CRITICAL_ISR( &xKernelLock );
+     * called from a critical section within an ISR. */
+
+    #if ( configNUM_CORES > 1 )
+
+        /* In SMP, we need to take the kernel lock (even if the caller is
+         * already in a critical section by taking a different lock) as we are
+         * about to access the task lists, which are protected by the kernel
+         * lock. This function can also be called from an ISR context, so we
+         * need to check whether we are in an ISR. */
+        if( portCHECK_IF_IN_ISR() == pdFALSE )
+        {
+            taskENTER_CRITICAL( &xKernelLock );
+        }
+        else
+        {
+            taskENTER_CRITICAL_ISR( &xKernelLock );
+        }
+    #endif /* configNUM_CORES > 1 */
     {
         /* Before taking the kernel lock, another task/ISR could have already
         * emptied the pxEventList. So we insert a check here to see if
@@ -3965,13 +3941,23 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
             xReturn = pdFALSE;
         }
     }
-    taskEXIT_CRITICAL_ISR( &xKernelLock );
+    #if ( configNUM_CORES > 1 )
+        /* Release the previously taken kernel lock. */
+        if( portCHECK_IF_IN_ISR() == pdFALSE )
+        {
+            taskEXIT_CRITICAL( &xKernelLock );
+        }
+        else
+        {
+            taskEXIT_CRITICAL_ISR( &xKernelLock );
+        }
+    #endif /* configNUM_CORES > 1 */
 
     return xReturn;
 }
 /*-----------------------------------------------------------*/
 
-#ifdef ESP_PLATFORM
+#if ( configNUM_CORES > 1 )
     void vTaskTakeKernelLock( void )
     {
         /* We call the tasks.c critical section macro to take xKernelLock */
         taskENTER_CRITICAL( &xKernelLock );
     }
@@ -3983,7 +3969,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
         /* We call the tasks.c critical section macro to release xKernelLock */
         taskEXIT_CRITICAL( &xKernelLock );
     }
-#endif // ESP_PLATFORM
+#endif /* configNUM_CORES > 1 */
 
 void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
                                         const TickType_t xItemValue )
 {
     TCB_t * pxUnblockedTCB;
     BaseType_t xCurCoreID = xPortGetCoreID();
 
-    /* THIS FUNCTION MUST BE CALLED WITH THE KERNEL LOCK ALREADY TAKEN.
-     * It is used by the event flags implementation, thus those functions
-     * should call vTaskTakeKernelLock() before calling this function. */
+    #if ( configNUM_CORES > 1 )
+
+        /* THIS FUNCTION MUST BE CALLED WITH THE KERNEL LOCK ALREADY TAKEN.
+         * It is used by the event flags implementation, thus those functions
+         * should call vTaskTakeKernelLock() before calling this function. */
+    #else /* configNUM_CORES > 1 */
 
-    /*
-     * Todo: IDF-5785
-     * configASSERT( uxSchedulerSuspended[ xCurCoreID ] != pdFALSE );
-     */
+        /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED.  It is used by
+         * the event flags implementation. */
+        configASSERT( uxSchedulerSuspended[ 0 ] != pdFALSE );
+    #endif /* configNUM_CORES > 1 */
 
     /* Store the new item value in the event list. */
     listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
@@ -4066,18 +4055,19 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
      * On a single core configuration, this problem doesn't appear as this function is meant to be called from
      * a critical section, disabling the (tick) interrupts.
      */
-    #if ( ( ESP_PLATFORM == 1 ) && ( configNUM_CORES > 1 ) )
+    #if ( configNUM_CORES > 1 )
         configASSERT( pxTimeOut );
         taskENTER_CRITICAL( &xKernelLock );
-    #endif // ( ( ESP_PLATFORM == 1 ) && ( configNUM_CORES > 1 ) )
+    #endif /* configNUM_CORES > 1 */
 
     /* For internal use only as it does not use a critical section. */
     pxTimeOut->xOverflowCount = xNumOfOverflows;
     pxTimeOut->xTimeOnEntering = xTickCount;
 
-    #if ( ( ESP_PLATFORM == 1 ) && ( configNUM_CORES > 1 ) )
+    #if ( configNUM_CORES > 1 )
+        /* Release the previously taken kernel lock. */
        taskEXIT_CRITICAL( &xKernelLock );
-    #endif // ( ( ESP_PLATFORM == 1 ) && ( configNUM_CORES > 1 ) )
+    #endif /* configNUM_CORES > 1 */
 }
 /*-----------------------------------------------------------*/
@@ -4288,11 +4278,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 
                 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
                 {
-                    #ifdef ESP_PLATFORM /* IDF-3755 */
-                        taskENTER_CRITICAL( &xKernelLock );
-                    #else
-                        vTaskSuspendAll();
-                    #endif // ESP_PLATFORM
+                    prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
                     {
                         /* Now the scheduler is suspended, the expected idle
                          * time can be sampled again, and this time its value can
@@ -4316,11 +4302,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
                             mtCOVERAGE_TEST_MARKER();
                         }
                     }
-                    #ifdef ESP_PLATFORM /* IDF-3755 */
-                        taskEXIT_CRITICAL( &xKernelLock );
-                    #else
-                        ( void ) xTaskResumeAll();
-                    #endif // ESP_PLATFORM
+                    ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
                 }
                 else
                 {
@@ -4389,11 +4371,22 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 
         if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
         {
-            taskENTER_CRITICAL( &xKernelLock );
+            #if ( configNUM_CORES > 1 )
+
+                /* For SMP, we need to take the kernel lock here as
+                 * another core could also update this task's TLSP at the
+                 * same time. */
+                taskENTER_CRITICAL( &xKernelLock );
+            #endif /* ( configNUM_CORES > 1 ) */
+
             pxTCB = prvGetTCBFromHandle( xTaskToSet );
             pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
             pxTCB->pvThreadLocalStoragePointersDelCallback[ xIndex ] = xDelCallback;
-            taskEXIT_CRITICAL( &xKernelLock );
+
+            #if ( configNUM_CORES > 1 )
+                /* Release the previously taken kernel lock. */
+                taskEXIT_CRITICAL( &xKernelLock );
+            #endif /* configNUM_CORES > 1 */
         }
     }
@@ -4414,10 +4407,22 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 
         if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
         {
-            taskENTER_CRITICAL( &xKernelLock );
+            #if ( configNUM_CORES > 1 )
+
+                /* For SMP, we need to take the kernel lock here as
+                 * another core could also update this task's TLSP at the
+                 * same time. */
+                taskENTER_CRITICAL( &xKernelLock );
+            #endif /* ( configNUM_CORES > 1 ) */
+
             pxTCB = prvGetTCBFromHandle( xTaskToSet );
+            configASSERT( pxTCB != NULL );
             pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
-            taskEXIT_CRITICAL( &xKernelLock );
+
+            #if ( configNUM_CORES > 1 )
+                /* Release the previously taken kernel lock. */
+                taskEXIT_CRITICAL( &xKernelLock );
+            #endif /* configNUM_CORES > 1 */
         }
     }
 
 #endif /* configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS == 1 */
@@ -4634,22 +4639,14 @@ static void prvCheckTasksWaitingTermination( void )
              * it should be reported as being in the Blocked state. */
            if( eState == eSuspended )
            {
-                #ifdef ESP_PLATFORM /* IDF-3755 */
-                    taskENTER_CRITICAL( &xKernelLock );
-                #else
-                    vTaskSuspendAll();
-                #endif // ESP_PLATFORM
+                prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
                 {
                     if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                     {
                         pxTaskStatus->eCurrentState = eBlocked;
                     }
                 }
-                #ifdef ESP_PLATFORM /* IDF-3755 */
-                    taskEXIT_CRITICAL( &xKernelLock );
-                #else
-                    ( void ) xTaskResumeAll();
-                #endif // ESP_PLATFORM
+                ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
            }
        }
        #endif /* INCLUDE_vTaskSuspend */
@@ -5006,7 +5003,12 @@ static void prvResetNextTaskUnblockTime( void )
         TCB_t * const pxMutexHolderTCB = pxMutexHolder;
         BaseType_t xReturn = pdFALSE;
 
-        taskENTER_CRITICAL( &xKernelLock );
+        #if ( configNUM_CORES > 1 )
+
+            /* For SMP, we need to take the kernel lock here as we are about to
+             * access kernel data structures. */
+            taskENTER_CRITICAL( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
 
         /* If the mutex was given back by an interrupt while the queue was
          * locked then the mutex holder might now be NULL. _RB_ Is this still
@@ -5085,7 +5087,10 @@ static void prvResetNextTaskUnblockTime( void )
             mtCOVERAGE_TEST_MARKER();
         }
 
-        taskEXIT_CRITICAL( &xKernelLock );
+        #if ( configNUM_CORES > 1 )
+            /* Release the previously taken kernel lock. */
+            taskEXIT_CRITICAL( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
 
         return xReturn;
     }
@@ -5100,7 +5105,12 @@ static void prvResetNextTaskUnblockTime( void )
         TCB_t * const pxTCB = pxMutexHolder;
         BaseType_t xReturn = pdFALSE;
 
-        taskENTER_CRITICAL( &xKernelLock );
+        #if ( configNUM_CORES > 1 )
+
+            /* For SMP, we need to take the kernel lock here as we are about to
+             * access kernel data structures. */
+            taskENTER_CRITICAL( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
 
         if( pxMutexHolder != NULL )
         {
@@ -5169,7 +5179,10 @@ static void prvResetNextTaskUnblockTime( void )
             mtCOVERAGE_TEST_MARKER();
         }
 
-        taskEXIT_CRITICAL( &xKernelLock );
+        #if ( configNUM_CORES > 1 )
+            /* Release the previously taken kernel lock. */
+            taskEXIT_CRITICAL( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
 
         return xReturn;
     }
@@ -5186,7 +5199,12 @@ static void prvResetNextTaskUnblockTime( void )
         UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
         const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;
 
-        taskENTER_CRITICAL( &xKernelLock );
+        #if ( configNUM_CORES > 1 )
+
+            /* For SMP, we need to take the kernel lock here as we are about to
+             * access kernel data structures. */
+            taskENTER_CRITICAL( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
 
        if( pxMutexHolder != NULL )
        {
@@ -5281,7 +5299,10 @@ static void prvResetNextTaskUnblockTime( void )
            mtCOVERAGE_TEST_MARKER();
        }
 
-        taskEXIT_CRITICAL( &xKernelLock );
+        #if ( configNUM_CORES > 1 )
+            /* Release the previously taken kernel lock. */
+            taskEXIT_CRITICAL( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
     }
 
 #endif /* configUSE_MUTEXES */
@@ -5615,18 +5636,27 @@ static void prvResetNextTaskUnblockTime( void )
 TickType_t uxTaskResetEventItemValue( void )
 {
     TickType_t uxReturn;
-    TCB_t * pxCurTCB;
+    BaseType_t xCoreID;
 
-    taskENTER_CRITICAL( &xKernelLock );
-    pxCurTCB = pxCurrentTCB[ xPortGetCoreID() ];
+    #if ( configNUM_CORES > 1 )
+
+        /* For SMP, we need to take the kernel lock here to ensure nothing else
+         * modifies the task's event item value simultaneously. */
+        taskENTER_CRITICAL( &xKernelLock );
+    #endif /* ( configNUM_CORES > 1 ) */
 
-    uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurTCB->xEventListItem ) );
+    xCoreID = xPortGetCoreID();
+
+    uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xCoreID ]->xEventListItem ) );
 
     /* Reset the event list item to its normal value - so it can be used with
      * queues and semaphores. */
-    listSET_LIST_ITEM_VALUE( &( pxCurTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+    listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xCoreID ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xCoreID ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
 
-    taskEXIT_CRITICAL( &xKernelLock );
+    #if ( configNUM_CORES > 1 )
+        /* Release the previously taken kernel lock. */
+        taskEXIT_CRITICAL( &xKernelLock );
+    #endif /* ( configNUM_CORES > 1 ) */
 
     return uxReturn;
 }
@@ -5636,21 +5666,31 @@ TickType_t uxTaskResetEventItemValue( void )
 
     TaskHandle_t pvTaskIncrementMutexHeldCount( void )
     {
-        TCB_t * curTCB;
+        TCB_t * pxCurTCB;
+        BaseType_t xCoreID;
+
+        #if ( configNUM_CORES > 1 )
+
+            /* For SMP, we need to take the kernel lock here as we are about to
+             * access kernel data structures. */
+            taskENTER_CRITICAL( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
+
+        xCoreID = xPortGetCoreID();
 
         /* If xSemaphoreCreateMutex() is called before any tasks have been created
          * then pxCurrentTCB will be NULL. */
-        taskENTER_CRITICAL( &xKernelLock );
-
-        if( pxCurrentTCB[ xPortGetCoreID() ] != NULL )
+        if( pxCurrentTCB[ xCoreID ] != NULL )
         {
-            ( pxCurrentTCB[ xPortGetCoreID() ]->uxMutexesHeld )++;
+            ( pxCurrentTCB[ xCoreID ]->uxMutexesHeld )++;
         }
 
-        curTCB = pxCurrentTCB[ xPortGetCoreID() ];
-        taskEXIT_CRITICAL( &xKernelLock );
+        pxCurTCB = pxCurrentTCB[ xCoreID ];
+        #if ( configNUM_CORES > 1 )
+            /* Release the previously taken kernel lock. */
+            taskEXIT_CRITICAL( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
 
-        return curTCB;
+        return pxCurTCB;
     }
 
 #endif /* configUSE_MUTEXES */
@@ -5971,6 +6011,7 @@ TickType_t uxTaskResetEventItemValue( void )
         TCB_t * pxTCB;
         uint8_t ucOriginalNotifyState;
         BaseType_t xReturn = pdPASS;
+        UBaseType_t uxSavedInterruptStatus;
 
         configASSERT( xTaskToNotify );
         configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
@@ -5995,7 +6036,7 @@ TickType_t uxTaskResetEventItemValue( void )
 
         pxTCB = xTaskToNotify;
 
-        taskENTER_CRITICAL_ISR( &xKernelLock );
+        prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus );
         {
             if( pulPreviousNotificationValue != NULL )
             {
@@ -6089,7 +6130,7 @@ TickType_t uxTaskResetEventItemValue( void )
                 }
             }
         }
-        taskEXIT_CRITICAL_ISR( &xKernelLock );
+        prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus );
 
         return xReturn;
     }
@@ -6105,7 +6146,7 @@ TickType_t uxTaskResetEventItemValue( void )
     {
         TCB_t * pxTCB;
         uint8_t ucOriginalNotifyState;
-
+        UBaseType_t uxSavedInterruptStatus;
         configASSERT( xTaskToNotify );
         configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
@@ -6130,7 +6171,7 @@ TickType_t uxTaskResetEventItemValue( void )
 
         pxTCB = xTaskToNotify;
 
-        taskENTER_CRITICAL_ISR( &xKernelLock );
+        prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus );
         {
             ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
             pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;
@@ -6180,7 +6221,7 @@ TickType_t uxTaskResetEventItemValue( void )
                 }
             }
         }
-        taskEXIT_CRITICAL_ISR( &xKernelLock );
+        prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus );
     }
 
 #endif /* configUSE_TASK_NOTIFICATIONS */
@@ -6252,11 +6293,23 @@ TickType_t uxTaskResetEventItemValue( void )
 
     uint32_t ulTaskGetIdleRunTimeCounter( void )
     {
-        taskENTER_CRITICAL( &xKernelLock );
-        tskTCB * pxTCB = ( tskTCB * ) xIdleTaskHandle[ xPortGetCoreID() ];
-        taskEXIT_CRITICAL( &xKernelLock );
+        uint32_t ulRunTimeCounter;
+
+        #if ( configNUM_CORES > 1 )
+
+            /* For SMP, we need to take the kernel lock here as we are about to
+             * access kernel data structures. */
+            taskENTER_CRITICAL( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
+
+        ulRunTimeCounter = xIdleTaskHandle[ xPortGetCoreID() ]->ulRunTimeCounter;
+
+        #if ( configNUM_CORES > 1 )
+            /* Release the previously taken kernel lock. */
+            taskEXIT_CRITICAL( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
 
-        return pxTCB->ulRunTimeCounter;
+        return ulRunTimeCounter;
     }
 
 #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
diff --git a/components/freertos/FreeRTOS-Kernel/timers.c b/components/freertos/FreeRTOS-Kernel/timers.c
index cce670e801c..448e183bb27 100644
--- a/components/freertos/FreeRTOS-Kernel/timers.c
+++ b/components/freertos/FreeRTOS-Kernel/timers.c
@@ -606,11 +606,7 @@
     TickType_t xTimeNow;
     BaseType_t xTimerListsWereSwitched;
 
-    #ifdef ESP_PLATFORM
-        taskENTER_CRITICAL( &xTimerLock );
-    #else
-        vTaskSuspendAll();
-    #endif // ESP_PLATFORM
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &xTimerLock );
     {
         /* Obtain the time now to make an assessment as to whether the timer
          * has expired or not.  If obtaining the time causes the lists to switch
@@ -624,11 +620,7 @@
             /* The tick count has not overflowed, has the timer expired? */
             if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) )
             {
-                #ifdef ESP_PLATFORM
-                    taskEXIT_CRITICAL( &xTimerLock );
-                #else
-                    ( void ) xTaskResumeAll();
-                #endif // ESP_PLATFORM
+                ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock );
                 prvProcessExpiredTimer( xNextExpireTime, xTimeNow );
             }
             else
             {
@@ -648,11 +640,7 @@
                 vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty );
 
-                #ifdef ESP_PLATFORM /* IDF-3755 */
-                    taskEXIT_CRITICAL( &xTimerLock );
-                #else
-                    if( xTaskResumeAll() == pdFALSE )
-                #endif // ESP_PLATFORM
+                if( prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock ) == pdFALSE )
                 {
                     /* Yield to wait for either a command to arrive, or the
                      * block time to expire.  If a command arrived between the
@@ -660,22 +648,15 @@
                      * will not cause the task to block. */
                     portYIELD_WITHIN_API();
                 }
-
-                #ifndef ESP_PLATFORM /* IDF-3755 */
-                    else
-                    {
-                        mtCOVERAGE_TEST_MARKER();
-                    }
-                #endif // ESP_PLATFORM
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
             }
         }
         else
         {
-            #ifdef ESP_PLATFORM /* IDF-3755 */
-                taskEXIT_CRITICAL( &xTimerLock );
-            #else
-                ( void ) xTaskResumeAll();
-            #endif // ESP_PLATFORM
+            ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock );
         }
     }
 }