diff --git a/components/esp_hw_support/esp_clk.c b/components/esp_hw_support/esp_clk.c
index 0fd5deec936..2421196646e 100644
--- a/components/esp_hw_support/esp_clk.c
+++ b/components/esp_hw_support/esp_clk.c
@@ -8,6 +8,7 @@
 #include
 #include

+#include "freertos/FreeRTOS.h"
 #include "esp_attr.h"
 #include "soc/rtc.h"
 #include "soc/soc_caps.h"
@@ -46,7 +47,7 @@ extern uint32_t g_ticks_per_us_app;
 #endif
 #endif

-static _lock_t s_esp_rtc_time_lock;
+static portMUX_TYPE s_esp_rtc_time_lock = portMUX_INITIALIZER_UNLOCKED;

 // TODO: IDF-4239
 static RTC_DATA_ATTR uint64_t s_esp_rtc_time_us = 0, s_rtc_last_ticks = 0;
@@ -94,7 +95,7 @@ uint64_t esp_rtc_get_time_us(void)
 //IDF-3901
     return 0;
 #endif
-    _lock_acquire(&s_esp_rtc_time_lock);
+    portENTER_CRITICAL_SAFE(&s_esp_rtc_time_lock);
     const uint32_t cal = esp_clk_slowclk_cal_get();
     const uint64_t rtc_this_ticks = rtc_time_get();
     const uint64_t ticks = rtc_this_ticks - s_rtc_last_ticks;
@@ -115,7 +116,7 @@ uint64_t esp_rtc_get_time_us(void)
                              ((ticks_high * cal) << (32 - RTC_CLK_CAL_FRACT));
     s_esp_rtc_time_us += delta_time_us;
     s_rtc_last_ticks = rtc_this_ticks;
-    _lock_release(&s_esp_rtc_time_lock);
+    portEXIT_CRITICAL_SAFE(&s_esp_rtc_time_lock);
     return s_esp_rtc_time_us;
 }

@@ -143,3 +144,13 @@ uint64_t esp_clk_rtc_time(void)
     return 0;
 #endif
 }
+
+void esp_clk_private_lock(void)
+{
+    portENTER_CRITICAL(&s_esp_rtc_time_lock);
+}
+
+void esp_clk_private_unlock(void)
+{
+    portEXIT_CRITICAL(&s_esp_rtc_time_lock);
+}
diff --git a/components/esp_hw_support/include/esp_private/esp_clk.h b/components/esp_hw_support/include/esp_private/esp_clk.h
index 8462b032eec..01b2849f8a0 100644
--- a/components/esp_hw_support/include/esp_private/esp_clk.h
+++ b/components/esp_hw_support/include/esp_private/esp_clk.h
@@ -82,6 +82,19 @@ int esp_clk_xtal_freq(void);
  */
 uint64_t esp_clk_rtc_time(void);

+/**
+ * @brief obtain internal critical section used in the esp_clk implementation
+ *
+ * This is used by esp_light_sleep_start() to avoid deadlocking when it
+ * calls esp_clk related APIs after stalling the other CPU.
+ */
+void esp_clk_private_lock(void);
+
+/**
+ * @brief counterpart of esp_clk_private_lock()
+ */
+void esp_clk_private_unlock(void);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/components/esp_hw_support/sleep_modes.c b/components/esp_hw_support/sleep_modes.c
index 58caee9b3e3..154b9bb6498 100644
--- a/components/esp_hw_support/sleep_modes.c
+++ b/components/esp_hw_support/sleep_modes.c
@@ -665,12 +665,29 @@ esp_err_t esp_light_sleep_start(void)
     s_config.ccount_ticks_record = esp_cpu_get_cycle_count();
     static portMUX_TYPE light_sleep_lock = portMUX_INITIALIZER_UNLOCKED;
     portENTER_CRITICAL(&light_sleep_lock);
+    /*
+    Note: We are about to stall the other CPU via esp_ipc_isr_stall_other_cpu(). However, there is a chance of
+    deadlock if, after stalling the other CPU, we attempt to take spinlocks that the other CPU already holds.
+
+    Thus, any function that we call after stalling the other CPU needs to have its locks taken first to avoid
+    deadlock.
+
+    Todo: IDF-5257
+    */
+
     /* We will be calling esp_timer_private_set inside DPORT access critical
      * section. Make sure the code on the other CPU is not holding esp_timer
      * lock, otherwise there will be deadlock.
      */
     esp_timer_private_lock();

+    /* We will be calling esp_rtc_get_time_us() below. Make sure the code on the other CPU is not holding the
+     * esp_rtc_get_time_us() lock, otherwise there will be deadlock. esp_rtc_get_time_us() is called via:
+     *
+     * - esp_clk_slowclk_cal_set() -> esp_rtc_get_time_us()
+     */
+    esp_clk_private_lock();
+
     s_config.rtc_ticks_at_sleep_start = rtc_time_get();
     uint32_t ccount_at_sleep_start = esp_cpu_get_cycle_count();
     uint64_t high_res_time_at_start = esp_timer_get_time();
@@ -793,6 +810,7 @@ esp_err_t esp_light_sleep_start(void)
     }
     esp_set_time_from_rtc();

+    esp_clk_private_unlock();
     esp_timer_private_unlock();
     esp_ipc_isr_release_other_cpu();
     if (!wdt_was_enabled) {
diff --git a/components/esp_system/include/esp_ipc_isr.h b/components/esp_system/include/esp_ipc_isr.h
index 22fa5b10b03..70290ba5618 100644
--- a/components/esp_system/include/esp_ipc_isr.h
+++ b/components/esp_system/include/esp_ipc_isr.h
@@ -62,6 +62,7 @@ void esp_ipc_isr_asm_call_blocking(esp_ipc_isr_func_t func, void* arg);
  * - If the stall feature is paused using esp_ipc_isr_stall_pause(), this function will have no effect
  *
  * @note This function is not available in single-core mode.
+ * @note It is the caller's responsibility to avoid deadlocking on spinlocks
  */
 void esp_ipc_isr_stall_other_cpu(void);

diff --git a/components/esp_timer/include/esp_private/esp_timer_private.h b/components/esp_timer/include/esp_private/esp_timer_private.h
index 4744b20a396..37aaeac03f1 100644
--- a/components/esp_timer/include/esp_private/esp_timer_private.h
+++ b/components/esp_timer/include/esp_private/esp_timer_private.h
@@ -51,11 +51,11 @@ void esp_timer_private_set(uint64_t new_us);
 void esp_timer_private_advance(int64_t time_diff_us);

 /**
- * @brief obtain internal critical section used esp_timer implementation
+ * @brief obtain internal critical section used in the esp_timer implementation
  * This can be used when a sequence of calls to esp_timer has to be made,
  * and it is necessary that the state of the timer is consistent between
  * the calls. Should be treated in the same way as a spinlock.
- * Call esp_timer_unlock to release the lock
+ * Call esp_timer_private_unlock to release the lock
  */
 void esp_timer_private_lock(void);
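
Note: below is a minimal sketch of the lock ordering this patch establishes, assuming a dual-core ESP-IDF target. The wrapper function example_stall_sequence() is hypothetical; the real call sites are inside esp_light_sleep_start(), and the acquire/release order mirrors the sleep_modes.c hunks above.

#include "esp_private/esp_clk.h"            /* esp_clk_private_lock/unlock (added by this patch) */
#include "esp_private/esp_timer_private.h"  /* esp_timer_private_lock/unlock */
#include "esp_ipc_isr.h"                    /* esp_ipc_isr_stall_other_cpu/release_other_cpu */

/* Sketch: take every spinlock the stalled section may need *before* stalling the
 * other CPU, so no lock can be left held by the CPU that is about to be stalled. */
static void example_stall_sequence(void)
{
    esp_timer_private_lock();        /* esp_timer system-time lock */
    esp_clk_private_lock();          /* s_esp_rtc_time_lock, introduced by this patch */
    esp_ipc_isr_stall_other_cpu();   /* safe: the other CPU cannot be holding the locks above */

    /* ... code that may call esp_rtc_get_time_us() or esp_timer_private_set() ... */

    esp_clk_private_unlock();        /* released in the same order as in esp_light_sleep_start() */
    esp_timer_private_unlock();
    esp_ipc_isr_release_other_cpu();
}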