From f11d579407314a3fa624520a92ce24c0af35f620 Mon Sep 17 00:00:00 2001
From: Jerome Forissier <jerome.forissier@linaro.org>
Date: Fri, 14 Sep 2018 18:26:07 +0200
Subject: [PATCH] core: introduce lockdep algorithm

This commit introduces an algorithm that may be used to detect improper
usage of locks at runtime. It can detect two kinds of errors:

1. A thread tries to release a lock it does not own,
2. A thread tries to acquire a lock and the operation could
   *potentially* result in a deadlock.

The potential deadlock detection assumes that the code adheres to a
strict locking hierarchy, in other words, that there is a partial
ordering on the locks so that no circular wait can occur. To put things
simply, any two locks should always be acquired in the same order by
all threads. This addresses the following case:

  [Thread #1]             [Thread #2]

  lock(A)
                          lock(B)
  lock(B)
                          lock(A) <-- deadlock!
  ...

The algorithm builds the lock hierarchy dynamically and reports as soon
as a violation is detected. The interface is made of two functions:
lockdep_lock_acquire() and lockdep_lock_release(), which are meant to
be introduced in the implementation of the actual lock objects. The
"acquire" hook tells the algorithm that a particular lock is about to
be requested by a particular thread, while the "release" hook is meant
to be called before the lock is actually released.
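For illustration, here is roughly how the hooks could be wired into a
lock implementation (a sketch only: "my_lock", "graph" and
thread_owned_locks() are hypothetical names, not part of this patch):

  #include <kernel/lockdep.h>

  struct my_lock {
          unsigned int state;     /* The underlying lock */
  };

  /* One global graph shared by all locks of this type */
  static struct lockdep_node_head graph = TAILQ_HEAD_INITIALIZER(graph);

  /* Hypothetical accessor for the per-thread queue of owned locks */
  struct lockdep_lock_head *thread_owned_locks(void);

  static void my_lock_acquire(struct my_lock *l)
  {
          /* Hook called before the lock is actually taken */
          lockdep_lock_acquire(&graph, thread_owned_locks(), (uintptr_t)l);
          /* ... take the lock ... */
  }

  static void my_lock_release(struct my_lock *l)
  {
          /* Hook called before the lock is actually released */
          lockdep_lock_release(thread_owned_locks(), (uintptr_t)l);
          /* ... release the lock ... */
  }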
If an error is detected, debugging information is sent to the console,
and panic() is called. The debugging information includes the lock
cycle that was detected (in the above example, {A, B}), as well as the
call stacks at the points where the locks were acquired.

The benefit of such an instrumentation of the locking code is that
there is no need to wait for an actual deadlock to occur in order to
detect potential problems. For instance, the timing of execution in the
above example could be different but the problem would still be
detected:

  [Thread #1]             [Thread #2]

  lock(A)
  lock(B)
  unlock(B)
  unlock(A)
                          lock(B)
                          lock(A) <-- error!

A pseudo-TA is added for testing (pta/core_lockdep_tests.c).

This code is based on two sources:
- A presentation called "Dl-Check: dynamic potential deadlock detection
  tool for Java programs" [1], although the somewhat complex MNR
  algorithm for topological ordering of a DAG was not used;
- A depth-first search algorithm [2] was used instead.

Link: [1] https://www.slideshare.net/IosifItkin/tmpa2017-dlcheck-dynamic-potential-deadlock-detection-tool-for-java-programs
Link: [2] https://en.wikipedia.org/wiki/Topological_sorting#Depth-first_search
Signed-off-by: Jerome Forissier <jerome.forissier@linaro.org>
---
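Note: with CFG_LOCKDEP enabled, the new self tests can be driven from
the normal world through the invoke_tests pseudo-TA, like the other PTA
self tests. A minimal client sketch (assumes the GlobalPlatform TEE
Client API, that PTA_INVOKE_TESTS_UUID is defined as in
pta_invoke_tests.h, and that the PTA is reachable from the REE; error
handling trimmed):

  #include <err.h>
  #include <inttypes.h>
  #include <pta_invoke_tests.h>
  #include <stdio.h>
  #include <tee_client_api.h>

  int main(void)
  {
          TEEC_Result res = TEEC_ERROR_GENERIC;
          TEEC_Context ctx = { };
          TEEC_Session sess = { };
          TEEC_Operation op = { };
          TEEC_UUID uuid = PTA_INVOKE_TESTS_UUID;
          uint32_t orig = 0;

          if (TEEC_InitializeContext(NULL, &ctx))
                  errx(1, "TEEC_InitializeContext failed");
          if (TEEC_OpenSession(&ctx, &sess, &uuid, TEEC_LOGIN_PUBLIC,
                               NULL, NULL, &orig))
                  errx(1, "TEEC_OpenSession failed");

          op.paramTypes = TEEC_PARAM_TYPES(TEEC_NONE, TEEC_NONE,
                                           TEEC_NONE, TEEC_NONE);
          /* TEEC_SUCCESS means all lockdep self tests passed */
          res = TEEC_InvokeCommand(&sess, PTA_INVOKE_TESTS_CMD_LOCKDEP,
                                   &op, &orig);
          printf("lockdep tests: %#" PRIx32 "\n", res);

          TEEC_CloseSession(&sess);
          TEEC_FinalizeContext(&ctx);
          return res != TEEC_SUCCESS;
  }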
 core/arch/arm/pta/core_lockdep_tests.c | 149 ++++++++++
 core/arch/arm/pta/core_self_tests.h    |  14 +
 core/arch/arm/pta/pta_invoke_tests.c   |   2 +
 core/arch/arm/pta/sub.mk               |   1 +
 core/include/kernel/lockdep.h          | 119 ++++++++
 core/kernel/lockdep.c                  | 397 +++++++++++++++++++++++++
 core/kernel/sub.mk                     |  15 +-
 lib/libutee/include/pta_invoke_tests.h |   5 +
 8 files changed, 695 insertions(+), 7 deletions(-)
 create mode 100644 core/arch/arm/pta/core_lockdep_tests.c
 create mode 100644 core/include/kernel/lockdep.h
 create mode 100644 core/kernel/lockdep.c

diff --git a/core/arch/arm/pta/core_lockdep_tests.c b/core/arch/arm/pta/core_lockdep_tests.c
new file mode 100644
index 00000000000..99fc3ea43c5
--- /dev/null
+++ b/core/arch/arm/pta/core_lockdep_tests.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright (c) 2018, Linaro Limited
+ */
+
+/*
+ * Test lockdep with hypothetical thread and lock objects
+ */
+
+#include <kernel/lockdep.h>
+#include <tee_api_types.h>
+#include <trace.h>
+
+#include "core_self_tests.h"
+
+static int __maybe_unused self_test_lockdep1(void)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	struct lockdep_node_head graph;
+	struct lockdep_lock_head thread1;
+	int count = 0;
+
+	DMSG("");
+
+	TAILQ_INIT(&thread1);
+	TAILQ_INIT(&graph);
+
+	/* Not locked, expect failure */
+	res = __lockdep_lock_release(&thread1, 1);
+	if (!res)
+		return count;
+	count++;
+
+	res = __lockdep_lock_acquire(&graph, &thread1, 1);
+	if (res)
+		return count;
+	count++;
+
+	res = __lockdep_lock_release(&thread1, 1);
+	if (res)
+		return count;
+	count++;
+
+	res = __lockdep_lock_acquire(&graph, &thread1, 1);
+	if (res)
+		return count;
+	count++;
+
+	res = __lockdep_lock_acquire(&graph, &thread1, 3);
+	if (res)
+		return count;
+	count++;
+
+	res = __lockdep_lock_acquire(&graph, &thread1, 2);
+	if (res)
+		return count;
+	count++;
+
+	res = __lockdep_lock_release(&thread1, 3);
+	if (res)
+		return count;
+	count++;
+
+	/* Already locked */
+	res = __lockdep_lock_acquire(&graph, &thread1, 2);
+	if (!res)
+		return count;
+
+	lockdep_graph_delete(&graph);
+	lockdep_queue_delete(&thread1);
+
+	return 0;
+}
+
+static int __maybe_unused self_test_lockdep2(void)
+{
+	TEE_Result res = TEE_ERROR_GENERIC;
+	struct lockdep_node_head graph;
+	struct lockdep_lock_head thread1;
+	struct lockdep_lock_head thread2;
+	struct lockdep_lock_head thread3;
+	int count = 0;
+
+	DMSG("");
+
+	TAILQ_INIT(&thread1);
+	TAILQ_INIT(&thread2);
+	TAILQ_INIT(&thread3);
+	TAILQ_INIT(&graph);
+
+	res = __lockdep_lock_acquire(&graph, &thread1, 1);
+	if (res)
+		return count;
+	count++;
+
+	res = __lockdep_lock_acquire(&graph, &thread2, 2);
+	if (res)
+		return count;
+	count++;
+
+	res = __lockdep_lock_acquire(&graph, &thread1, 2);
+	if (res)
+		return count;
+	count++;
+
+	res = __lockdep_lock_acquire(&graph, &thread3, 3);
+	if (res)
+		return count;
+	count++;
+
+	res = __lockdep_lock_acquire(&graph, &thread2, 3);
+	if (res)
+		return count;
+	count++;
+
+	/* Deadlock 1-2-3 */
+	res = __lockdep_lock_acquire(&graph, &thread3, 1);
+	if (!res)
+		return count;
+	count++;
+
+	lockdep_graph_delete(&graph);
+	lockdep_queue_delete(&thread1);
+	lockdep_queue_delete(&thread2);
+	lockdep_queue_delete(&thread3);
+
+	return 0;
+}
+
+TEE_Result core_lockdep_tests(uint32_t nParamTypes __unused,
+			      TEE_Param pParams[TEE_NUM_PARAMS] __unused)
+{
+	int count = 0;
+
+	count = self_test_lockdep1();
+	if (count)
+		goto out;
+	count = self_test_lockdep2();
+	if (count)
+		goto out;
+out:
+	if (count) {
+		DMSG("count=%d", count);
+		return TEE_ERROR_GENERIC;
+	}
+
+	return TEE_SUCCESS;
+}
diff --git a/core/arch/arm/pta/core_self_tests.h b/core/arch/arm/pta/core_self_tests.h
index c4e1b06e6c3..3f6c9718ffa 100644
--- a/core/arch/arm/pta/core_self_tests.h
+++ b/core/arch/arm/pta/core_self_tests.h
@@ -5,6 +5,7 @@
 #ifndef CORE_SELF_TESTS_H
 #define CORE_SELF_TESTS_H
 
+#include <compiler.h>
 #include <stdint.h>
 #include <tee_api_types.h>
 
@@ -18,4 +19,17 @@ TEE_Result core_fs_htree_tests(uint32_t nParamTypes,
 TEE_Result core_mutex_tests(uint32_t nParamTypes,
 			    TEE_Param pParams[TEE_NUM_PARAMS]);
 
+#ifdef CFG_LOCKDEP
+TEE_Result core_lockdep_tests(uint32_t nParamTypes,
+			      TEE_Param pParams[TEE_NUM_PARAMS]);
+#else
+static inline TEE_Result core_lockdep_tests(
+			uint32_t nParamTypes __unused,
+			TEE_Param pParams[TEE_NUM_PARAMS] __unused)
+{
+	return TEE_ERROR_NOT_SUPPORTED;
+}
+
+#endif
+
 #endif /*CORE_SELF_TESTS_H*/
diff --git a/core/arch/arm/pta/pta_invoke_tests.c b/core/arch/arm/pta/pta_invoke_tests.c
index 1706bae40f6..5215ab9f525 100644
--- a/core/arch/arm/pta/pta_invoke_tests.c
+++ b/core/arch/arm/pta/pta_invoke_tests.c
@@ -408,6 +408,8 @@ static TEE_Result invoke_command(void *pSessionContext __unused,
 #endif
 	case PTA_INVOKE_TESTS_CMD_MUTEX:
 		return core_mutex_tests(nParamTypes, pParams);
+	case PTA_INVOKE_TESTS_CMD_LOCKDEP:
+		return core_lockdep_tests(nParamTypes, pParams);
 	default:
 		break;
 	}
diff --git a/core/arch/arm/pta/sub.mk b/core/arch/arm/pta/sub.mk
index cce0f9c4404..e7e2427bd06 100644
--- a/core/arch/arm/pta/sub.mk
+++ b/core/arch/arm/pta/sub.mk
@@ -4,6 +4,7 @@ srcs-y += core_self_tests.c
 srcs-y += interrupt_tests.c
 srcs-y += core_mutex_tests.c
 srcs-$(CFG_WITH_USER_TA) += core_fs_htree_tests.c
+srcs-$(CFG_LOCKDEP) += core_lockdep_tests.c
 endif
 ifeq ($(CFG_WITH_USER_TA),y)
 srcs-$(CFG_SECSTOR_TA_MGMT_PTA) += secstor_ta_mgmt.c
diff --git a/core/include/kernel/lockdep.h b/core/include/kernel/lockdep.h
new file mode 100644
index 00000000000..4f2b440a250
--- /dev/null
+++ b/core/include/kernel/lockdep.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2018, Linaro Limited
+ */
+
+#ifndef __KERNEL_LOCKDEP_H
+#define __KERNEL_LOCKDEP_H
+
+#include <compiler.h>
+#include <kernel/panic.h>
+#include <sys/queue.h>
+#include <tee_api_types.h>
+#include <trace.h>
+#include <types_ext.h>
+
+/*
+ * Lock graph. If node A has an edge to node B, then A was locked before B in
+ * the same thread of execution.
+ */
+
+struct lockdep_edge {
+	struct lockdep_node *to;
+	uintptr_t thread_id;
+	vaddr_t *call_stack_from;
+	vaddr_t *call_stack_to;
+	STAILQ_ENTRY(lockdep_edge) link;
+};
+
+STAILQ_HEAD(lockdep_edge_head, lockdep_edge);
+
+struct lockdep_node {
+	uintptr_t lock_id; /* For instance, address of actual lock object */
+	struct lockdep_edge_head edges;
+	TAILQ_ENTRY(lockdep_node) link;
+	uint8_t flags; /* Used temporarily when walking the graph */
+};
+
+TAILQ_HEAD(lockdep_node_head, lockdep_node);
+
+/* Per-thread queue of currently owned locks (point to nodes in the graph) */
+
+struct lockdep_lock {
+	struct lockdep_node *node;
+	vaddr_t *call_stack;
+	TAILQ_ENTRY(lockdep_lock) link;
+};
+
+TAILQ_HEAD(lockdep_lock_head, lockdep_lock);
+
+#ifdef CFG_LOCKDEP
+
+/*
+ * Functions used internally and for testing the algorithm. Actual locking
+ * code should use the wrappers below (which panic in case of error).
+ */
+TEE_Result __lockdep_lock_acquire(struct lockdep_node_head *graph,
+				  struct lockdep_lock_head *owned,
+				  uintptr_t id);
+TEE_Result __lockdep_lock_release(struct lockdep_lock_head *owned,
+				  uintptr_t id);
+
+/* Delete all elements in @graph */
+void lockdep_graph_delete(struct lockdep_node_head *graph);
+
+/* Delete all elements in @queue */
+void lockdep_queue_delete(struct lockdep_lock_head *queue);
+
+/*
+ * Acquire lock @id, while already holding the locks in @owned.
+ * @owned represents the caller; there should be one instance per thread of
+ * execution. @graph is the directed acyclic graph (DAG) to be used for
+ * potential deadlock detection; use the same @graph for all the locks of the
+ * same type as lock @id.
+ *
+ * This function will panic() if the acquire operation would result in a lock
+ * hierarchy violation (potential deadlock).
+ */
+static inline void lockdep_lock_acquire(struct lockdep_node_head *graph,
+					struct lockdep_lock_head *owned,
+					uintptr_t id)
+{
+	TEE_Result res = __lockdep_lock_acquire(graph, owned, id);
+
+	if (res) {
+		EMSG("lockdep: error %#" PRIx32, res);
+		panic();
+	}
+}
+
+/*
+ * Release lock @id. The lock is removed from @owned.
+ *
+ * This function will panic() if the lock is not held by the caller.
+ */
+static inline void lockdep_lock_release(struct lockdep_lock_head *owned,
+					uintptr_t id)
+{
+	TEE_Result res = __lockdep_lock_release(owned, id);
+
+	if (res) {
+		EMSG("lockdep: error %#" PRIx32, res);
+		panic();
+	}
+}
+
+#else /* CFG_LOCKDEP */
+
+static inline void lockdep_lock_acquire(struct lockdep_node_head *g __unused,
+					struct lockdep_lock_head *o __unused,
+					uintptr_t id __unused)
+{}
+
+static inline void lockdep_lock_release(struct lockdep_lock_head *o __unused,
+					uintptr_t id __unused)
+{}
+
+#endif /* !CFG_LOCKDEP */
+
+#endif /* !__KERNEL_LOCKDEP_H */
diff --git a/core/kernel/lockdep.c b/core/kernel/lockdep.c
new file mode 100644
index 00000000000..81c562e8474
--- /dev/null
+++ b/core/kernel/lockdep.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright (c) 2018, Linaro Limited
+ */
+
+#include <assert.h>
+#include <kernel/lockdep.h>
+#include <kernel/unwind.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <util.h>
+
+/* lockdep_node::flags values */
+/* Flags used for depth-first topological sorting */
+#define LOCKDEP_NODE_TEMP_MARK		BIT(0)
+#define LOCKDEP_NODE_PERM_MARK		BIT(1)
+/* Flag used during breadth-first search (print shortest cycle) */
+#define LOCKDEP_NODE_BFS_VISITED	BIT(2)
+
+/* Find node in graph or add it */
+static struct lockdep_node *lockdep_add_to_graph(
+				struct lockdep_node_head *graph,
+				uintptr_t lock_id)
+{
+	struct lockdep_node *node = NULL;
+
+	TAILQ_FOREACH(node, graph, link)
+		if (node->lock_id == lock_id)
+			return node;
+
+	node = calloc(1, sizeof(*node));
+	if (!node)
+		return NULL;
+
+	node->lock_id = lock_id;
+	STAILQ_INIT(&node->edges);
+	TAILQ_INSERT_TAIL(graph, node, link);
+
+	return node;
+}
+
+static vaddr_t *dup_call_stack(vaddr_t *stack)
+{
+	int n = 0;
+	vaddr_t *nstack = NULL;
+
+	if (!stack)
+		return NULL;
+
+	while (stack[n])
+		n++;
+
+	/* Copy the terminating zero as well (n + 1 elements) */
+	nstack = malloc((n + 1) * sizeof(vaddr_t));
+	if (!nstack)
+		return NULL;
+
+	memcpy(nstack, stack, (n + 1) * sizeof(vaddr_t));
+
+	return nstack;
+}
+
+static void lockdep_print_call_stack(vaddr_t *stack)
+{
+	vaddr_t *p = NULL;
+
+	EMSG_RAW("Call stack:");
+	for (p = stack; p && *p; p++)
+		EMSG_RAW(" %#" PRIxPTR, *p);
+}
+
+static TEE_Result lockdep_add_edge(struct lockdep_node *from,
+				   struct lockdep_node *to,
+				   vaddr_t *call_stack_from,
+				   vaddr_t *call_stack_to,
+				   uintptr_t thread_id)
+{
+	struct lockdep_edge *edge = NULL;
+
+	STAILQ_FOREACH(edge, &from->edges, link)
+		if (edge->to == to)
+			return TEE_SUCCESS;
+
+	edge = calloc(1, sizeof(*edge));
+	if (!edge)
+		return TEE_ERROR_OUT_OF_MEMORY;
+	edge->to = to;
+	edge->call_stack_from = dup_call_stack(call_stack_from);
+	edge->call_stack_to = dup_call_stack(call_stack_to);
+	edge->thread_id = thread_id;
+	STAILQ_INSERT_TAIL(&from->edges, edge, link);
+
+	return TEE_SUCCESS;
+}
+
+struct lockdep_bfs {
+	struct lockdep_node *node;
+	uintptr_t *path;
+	int pathlen;
+	TAILQ_ENTRY(lockdep_bfs) link;
+};
+
+TAILQ_HEAD(lockdep_bfs_head, lockdep_bfs);
+
+static void lockdep_bfs_queue_delete(struct lockdep_bfs_head *queue)
+{
+	struct lockdep_bfs *cur = NULL;
+	struct lockdep_bfs *next = NULL;
+
+	TAILQ_FOREACH_SAFE(cur, queue, link, next) {
+		TAILQ_REMOVE(queue, cur, link);
+		free(cur->path);
+		free(cur);
+	}
+}
+
+/*
+ * Find the shortest cycle in the graph that contains @node.
+ * This function performs an iterative breadth-first search starting from
+ * @node, and stops when it reaches @node again. In each node we're tracking
+ * the path from the start node.
+ */
+static uintptr_t *lockdep_graph_get_shortest_cycle(struct lockdep_node *node)
+{
+	struct lockdep_bfs_head queue;
+	struct lockdep_bfs *qe = NULL;
+	uintptr_t *ret = NULL;
+
+	TAILQ_INIT(&queue);
+	node->flags |= LOCKDEP_NODE_BFS_VISITED;
+
+	qe = calloc(1, sizeof(*qe));
+	if (!qe)
+		goto out;
+	qe->node = node;
+	qe->path = malloc(sizeof(uintptr_t));
+	if (!qe->path)
+		goto out;
+	qe->path[0] = node->lock_id;
+	qe->pathlen = 1;
+	TAILQ_INSERT_TAIL(&queue, qe, link);
+
+	while (!TAILQ_EMPTY(&queue)) {
+		qe = TAILQ_FIRST(&queue);
+
+		struct lockdep_node *n = qe->node;
+
+		TAILQ_REMOVE(&queue, qe, link);
+
+		struct lockdep_edge *e = NULL;
+
+		STAILQ_FOREACH(e, &n->edges, link) {
+			if (e->to->lock_id == node->lock_id) {
+				uintptr_t *tmp = NULL;
+				size_t nlen = qe->pathlen + 1;
+
+				/*
+				 * Cycle found. Terminate cycle path with NULL
+				 * and return it.
+				 */
+				tmp = realloc(qe->path,
+					      nlen * sizeof(uintptr_t));
+				if (!tmp)
+					goto out;
+				qe->path = tmp;
+				qe->path[nlen - 1] = 0;
+				ret = qe->path;
+				goto out;
+			}
+
+			if (!(e->to->flags & LOCKDEP_NODE_BFS_VISITED)) {
+				e->to->flags |= LOCKDEP_NODE_BFS_VISITED;
+
+				size_t nlen = qe->pathlen + 1;
+				struct lockdep_bfs *nqe = calloc(1,
+								 sizeof(*nqe));
+
+				if (!nqe)
+					goto out;
+				nqe->node = e->to;
+				nqe->path = malloc(nlen * sizeof(uintptr_t));
+				if (!nqe->path)
+					goto out;
+				nqe->pathlen = nlen;
+				memcpy(nqe->path, qe->path,
+				       qe->pathlen * sizeof(uintptr_t));
+				nqe->path[nlen - 1] = e->to->lock_id;
+				TAILQ_INSERT_TAIL(&queue, nqe, link);
+			}
+		}
+		free(qe->path);
+		free(qe);
+		qe = NULL;
+	}
+
+out:
+	free(qe);
+	lockdep_bfs_queue_delete(&queue);
+	return ret;
+}
+
+static TEE_Result lockdep_visit(struct lockdep_node *node)
+{
+	if (node->flags & LOCKDEP_NODE_PERM_MARK)
+		return TEE_SUCCESS;
+
+	if (node->flags & LOCKDEP_NODE_TEMP_MARK)
+		return TEE_ERROR_BAD_STATE;	/* Not a DAG! */
+
+	node->flags |= LOCKDEP_NODE_TEMP_MARK;
+
+	struct lockdep_edge *e = NULL;
+
+	STAILQ_FOREACH(e, &node->edges, link) {
+		TEE_Result res = lockdep_visit(e->to);
+
+		if (res)
+			return res;
+	}
+
+	node->flags |= LOCKDEP_NODE_PERM_MARK;
+	return TEE_SUCCESS;
+}
+
+static TEE_Result lockdep_graph_sort(struct lockdep_node_head *graph)
+{
+	struct lockdep_node *node = NULL;
+
+	TAILQ_FOREACH(node, graph, link) {
+		if (!node->flags) {
+			/* Unmarked node */
+			TEE_Result res = lockdep_visit(node);

+			if (res)
+				return res;
+		}
+	}
+
+	TAILQ_FOREACH(node, graph, link)
+		node->flags = 0;
+
+	return TEE_SUCCESS;
+}
+
+static struct lockdep_edge *lockdep_find_edge(struct lockdep_node_head *graph,
+					      uintptr_t from, uintptr_t to)
+{
+	struct lockdep_node *node = NULL;
+	struct lockdep_edge *edge = NULL;
+
+	TAILQ_FOREACH(node, graph, link)
+		if (node->lock_id == from)
+			STAILQ_FOREACH(edge, &node->edges, link)
+				if (edge->to->lock_id == to)
+					return edge;
+	return NULL;
+}
+
+static void lockdep_print_edge_info(uintptr_t from __maybe_unused,
+				    struct lockdep_edge *edge)
+{
+	uintptr_t __maybe_unused to = edge->to->lock_id;
+
+	EMSG_RAW("-> Thread %#" PRIxPTR " acquired lock %#" PRIxPTR " at:",
+		 edge->thread_id, to);
+	lockdep_print_call_stack(edge->call_stack_to);
+	EMSG_RAW("...while holding lock %#" PRIxPTR " acquired at:",
+		 from);
+	lockdep_print_call_stack(edge->call_stack_from);
+}
+
+/*
+ * Find cycle containing @node in the lock graph, then print full debug
+ * information about each edge (thread that acquired the locks and call
+ * stacks)
+ */
+static void lockdep_print_cycle_info(struct lockdep_node_head *graph,
+				     struct lockdep_node *node)
+{
+	struct lockdep_edge *edge = NULL;
+	uintptr_t *cycle = NULL;
+	uintptr_t *p = NULL;
+	uintptr_t from = 0;
+	uintptr_t to = 0;
+
+	cycle = lockdep_graph_get_shortest_cycle(node);
+	assert(cycle && cycle[0]);
+	EMSG_RAW("-> Shortest cycle:");
+	for (p = cycle; *p; p++)
+		EMSG_RAW(" Lock %#" PRIxPTR, *p);
+	for (p = cycle; ; p++) {
+		if (!*p) {
+			assert(p != cycle);
+			from = to;
+			to = cycle[0];
+			edge = lockdep_find_edge(graph, from, to);
+			lockdep_print_edge_info(from, edge);
+			break;
+		}
+		if (p != cycle)
+			from = to;
+		to = *p;
+		if (p != cycle) {
+			edge = lockdep_find_edge(graph, from, to);
+			lockdep_print_edge_info(from, edge);
+		}
+	}
+	free(cycle);
+}
+
+TEE_Result __lockdep_lock_acquire(struct lockdep_node_head *graph,
+				  struct lockdep_lock_head *owned,
+				  uintptr_t id)
+{
+	vaddr_t *acq_stack = get_kernel_stack();
+	struct lockdep_node *node = lockdep_add_to_graph(graph, id);
+
+	if (!node)
+		return TEE_ERROR_OUT_OF_MEMORY;
+
+	struct lockdep_lock *lock = NULL;
+
+	TAILQ_FOREACH(lock, owned, link) {
+		TEE_Result res = lockdep_add_edge(lock->node, node,
+						  lock->call_stack,
+						  acq_stack,
+						  (uintptr_t)owned);
+
+		if (res)
+			return res;
+	}
+
+	TEE_Result res = lockdep_graph_sort(graph);
+
+	if (res) {
+		EMSG_RAW("Potential deadlock detected!");
+		EMSG_RAW("When trying to acquire lock %#" PRIxPTR, id);
+		lockdep_print_cycle_info(graph, node);
+		return res;
+	}
+
+	lock = calloc(1, sizeof(*lock));
+	if (!lock)
+		return TEE_ERROR_OUT_OF_MEMORY;
+
+	lock->node = node;
+	lock->call_stack = acq_stack;
+	TAILQ_INSERT_TAIL(owned, lock, link);
+
+	return TEE_SUCCESS;
+}
+
+TEE_Result __lockdep_lock_release(struct lockdep_lock_head *owned, uintptr_t id)
+{
+	struct lockdep_lock *lock = NULL;
+
+	TAILQ_FOREACH_REVERSE(lock, owned, lockdep_lock_head, link) {
+		if (lock->node->lock_id == id) {
+			TAILQ_REMOVE(owned, lock, link);
+			free(lock->call_stack);
+			free(lock);
+			return TEE_SUCCESS;
+		}
+	}
+
+	EMSG_RAW("Thread %p does not own lock %#" PRIxPTR, (void *)owned, id);
+	return TEE_ERROR_ITEM_NOT_FOUND;
+}
+
+static void lockdep_node_delete(struct lockdep_node *node)
+{
+	struct lockdep_edge *edge = NULL;
+	struct lockdep_edge *next = NULL;
+
+	STAILQ_FOREACH_SAFE(edge, &node->edges, link, next) {
+		free(edge->call_stack_from);
+		free(edge->call_stack_to);
+		free(edge);
+	}
+	free(node);
+}
+
+void lockdep_graph_delete(struct lockdep_node_head *graph)
+{
+	struct lockdep_node *node = NULL;
+	struct lockdep_node *next = NULL;
+
+	TAILQ_FOREACH_SAFE(node, graph, link, next) {
+		TAILQ_REMOVE(graph, node, link);
+		lockdep_node_delete(node);
+	}
+}
+
+void lockdep_queue_delete(struct lockdep_lock_head *owned)
+{
+	struct lockdep_lock *lock = NULL;
+	struct lockdep_lock *next = NULL;
+
+	TAILQ_FOREACH_SAFE(lock, owned, link, next) {
+		TAILQ_REMOVE(owned, lock, link);
+		free(lock);
+	}
+}
diff --git a/core/kernel/sub.mk b/core/kernel/sub.mk
index 41be95ffc8a..5124d9682d1 100644
--- a/core/kernel/sub.mk
+++ b/core/kernel/sub.mk
@@ -1,13 +1,14 @@
+srcs-$(CFG_CORE_SANITIZE_KADDRESS) += asan.c
+cflags-remove-asan.c-y += $(cflags_kasan)
 srcs-y += assert.c
 srcs-y += console.c
 srcs-$(CFG_DT) += dt.c
-srcs-y += msg_param.c
-srcs-y += tee_ta_manager.c
-srcs-y += tee_misc.c
-srcs-y += panic.c
 srcs-y += handle.c
 srcs-y += interrupt.c
-srcs-$(CFG_CORE_SANITIZE_UNDEFINED) += ubsan.c
-srcs-$(CFG_CORE_SANITIZE_KADDRESS) += asan.c
-cflags-remove-asan.c-y += $(cflags_kasan)
+srcs-$(CFG_LOCKDEP) += lockdep.c
+srcs-y += msg_param.c
+srcs-y += panic.c
 srcs-y += refcount.c
+srcs-y += tee_misc.c
+srcs-y += tee_ta_manager.c
+srcs-$(CFG_CORE_SANITIZE_UNDEFINED) += ubsan.c
diff --git a/lib/libutee/include/pta_invoke_tests.h b/lib/libutee/include/pta_invoke_tests.h
index 234a6bc709f..9653bd9b4cd 100644
--- a/lib/libutee/include/pta_invoke_tests.h
+++ b/lib/libutee/include/pta_invoke_tests.h
@@ -68,5 +68,10 @@
 #define PTA_MUTEX_TEST_READER 1
 #define PTA_INVOKE_TESTS_CMD_MUTEX 7
 
+/*
+ * Tests lock dependency checking algorithm
+ */
+#define PTA_INVOKE_TESTS_CMD_LOCKDEP 8
+
 #endif /*__PTA_INVOKE_TESTS_H*/