From 2b07dcb97c5eeb80645061e33b45f92c56dc91ca Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Wed, 16 Sep 2015 10:24:20 +0200 Subject: [PATCH] core: avoid deadlocks caused by single-instance TA Protect against deadlocks caused by single-instance TAs calling other single-instance TAs directly or indirectly. When a TA is invoked but already is busy with another operation the calling thread is suspended using condvar_wait() until the TA is available again. This is effectively a lock which can cause a deadlock if several such locks are used at the same time but in different order. This patch avoids this problem by only allowing one thread at a time to set a single-instance TA context busy. If the thread with a single-instance TA busy in the call stack tries to set an already busy TA context busy it will return TEE_ERROR_BUSY instead as there is a recursive loop in how the different TAs have invoked each other. Signed-off-by: Jens Wiklander Tested-by: Jens Wiklander (QEMU) Tested-by: Pascal Brand (QEMU - full test suite) Reviewed-by: Pascal Brand Tested-by: Jerome Forissier (HiKey - full test suite) --- core/arch/arm/include/kernel/thread.h | 3 +- core/arch/arm/kernel/tee_ta_manager.c | 102 +++++++++++++++++++++++--- core/arch/arm/kernel/thread.c | 2 +- 3 files changed, 95 insertions(+), 12 deletions(-) diff --git a/core/arch/arm/include/kernel/thread.h b/core/arch/arm/include/kernel/thread.h index c4291764443..40bccf83e96 100644 --- a/core/arch/arm/include/kernel/thread.h +++ b/core/arch/arm/include/kernel/thread.h @@ -36,6 +36,7 @@ #endif #define THREAD_ID_0 0 +#define THREAD_ID_INVALID -1 #ifndef ASM extern uint32_t thread_vector_table[]; @@ -282,7 +283,7 @@ bool thread_init_stack(uint32_t stack_id, vaddr_t sp); /* * Returns current thread id. */ -uint32_t thread_get_id(void); +int thread_get_id(void); /* * Set Thread Specific Data (TSD) pointer. 
diff --git a/core/arch/arm/kernel/tee_ta_manager.c b/core/arch/arm/kernel/tee_ta_manager.c index 7c32900b67a..a27c22ac28e 100644 --- a/core/arch/arm/kernel/tee_ta_manager.c +++ b/core/arch/arm/kernel/tee_ta_manager.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include #include @@ -100,6 +101,10 @@ struct param_ta { /* This mutex protects the critical section in tee_ta_init_session */ static struct mutex tee_ta_mutex = MUTEX_INITIALIZER; +static struct condvar tee_ta_cv = CONDVAR_INITIALIZER; +static int tee_ta_single_instance_thread = THREAD_ID_INVALID; +static size_t tee_ta_single_instance_count; + static TEE_Result tee_ta_rpc_free(uint32_t handle); /* @@ -115,21 +120,93 @@ static void set_tee_rs(struct tee_ta_session *tee_rs) thread_set_tsd(tee_rs); } -static void tee_ta_set_busy(struct tee_ta_ctx *ctx) +static void lock_single_instance(void) { + /* Requires tee_ta_mutex to be held */ + if (tee_ta_single_instance_thread != thread_get_id()) { + /* Wait until the single-instance lock is available. 
*/ + while (tee_ta_single_instance_thread != THREAD_ID_INVALID) + condvar_wait(&tee_ta_cv, &tee_ta_mutex); + + tee_ta_single_instance_thread = thread_get_id(); + assert(tee_ta_single_instance_count == 0); + } + + tee_ta_single_instance_count++; +} + +static void unlock_single_instance(void) +{ + /* Requires tee_ta_mutex to be held */ + assert(tee_ta_single_instance_thread == thread_get_id()); + assert(tee_ta_single_instance_count > 0); + + tee_ta_single_instance_count--; + if (tee_ta_single_instance_count == 0) { + tee_ta_single_instance_thread = THREAD_ID_INVALID; + condvar_signal(&tee_ta_cv); + } +} + +static bool has_single_instance_lock(void) +{ + /* Requires tee_ta_mutex to be held */ + return tee_ta_single_instance_thread == thread_get_id(); +} + +static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx) +{ + bool rc = true; + mutex_lock(&tee_ta_mutex); - while (ctx->busy) - condvar_wait(&ctx->busy_cv, &tee_ta_mutex); + + if (ctx->flags & TA_FLAG_SINGLE_INSTANCE) + lock_single_instance(); + + if (has_single_instance_lock()) { + if (ctx->busy) { + /* + * We're holding the single-instance lock and the + * TA is busy, as waiting now would only cause a + * dead-lock, we release the lock and return false. + */ + rc = false; + if (ctx->flags & TA_FLAG_SINGLE_INSTANCE) + unlock_single_instance(); + } + } else { + /* + * We're not holding the single-instance lock, we're free to + * wait for the TA to become available. 
+ */ + while (ctx->busy) + condvar_wait(&ctx->busy_cv, &tee_ta_mutex); + } + + /* Either it's already true or we should set it to true */ ctx->busy = true; + mutex_unlock(&tee_ta_mutex); + return rc; +} + +static void tee_ta_set_busy(struct tee_ta_ctx *ctx) +{ + if (!tee_ta_try_set_busy(ctx)) + panic(); } static void tee_ta_clear_busy(struct tee_ta_ctx *ctx) { mutex_lock(&tee_ta_mutex); + assert(ctx->busy); ctx->busy = false; condvar_signal(&ctx->busy_cv); + + if (ctx->flags & TA_FLAG_SINGLE_INSTANCE) + unlock_single_instance(); + mutex_unlock(&tee_ta_mutex); } @@ -1184,6 +1261,7 @@ TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err, struct tee_ta_session *s = NULL; struct tee_ta_ctx *ctx; bool panicked; + bool was_busy = false; res = tee_ta_init_session(err, open_sessions, uuid, &s); if (res != TEE_SUCCESS) { @@ -1219,12 +1297,16 @@ TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err, COMMAND_OPEN_SESSION); } } else { - tee_ta_set_busy(ctx); - res = tee_user_ta_enter( - err, s, - USER_TA_FUNC_OPEN_CLIENT_SESSION, - cancel_req_to, 0, param); - tee_ta_clear_busy(ctx); + if (tee_ta_try_set_busy(ctx)) { + res = tee_user_ta_enter(err, s, + USER_TA_FUNC_OPEN_CLIENT_SESSION, + cancel_req_to, 0, param); + tee_ta_clear_busy(ctx); + } else { + /* Deadlock avoided */ + res = TEE_ERROR_BUSY; + was_busy = true; + } } } @@ -1237,7 +1319,7 @@ TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err, * Origin error equal to TEE_ORIGIN_TRUSTED_APP for "regular" error, * apart from panicking. 
*/ - if (panicked) + if (panicked || was_busy) *err = TEE_ORIGIN_TEE; else *err = TEE_ORIGIN_TRUSTED_APP; diff --git a/core/arch/arm/kernel/thread.c b/core/arch/arm/kernel/thread.c index f4d51e7fda0..006884202bf 100644 --- a/core/arch/arm/kernel/thread.c +++ b/core/arch/arm/kernel/thread.c @@ -686,7 +686,7 @@ bool thread_init_stack(uint32_t thread_id, vaddr_t sp) return true; } -uint32_t thread_get_id(void) +int thread_get_id(void) { /* thread_get_core_local() requires IRQs to be disabled */ uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);