Skip to content

Commit

Permalink
core: avoid deadlocks caused by single-instance TA
Browse files Browse the repository at this point in the history
Protect against deadlocks caused by single-instance TAs calling other
single-instance TAs, directly or indirectly. When a TA is invoked but
is already busy with another operation, the calling thread is suspended
using condvar_wait() until the TA is available again. This is
effectively a lock, which can cause a deadlock if several such locks are
taken at the same time but in different order.

This patch avoids this problem by only allowing one thread at a time to
set a single-instance TA context busy. If a thread that already has a
single-instance TA busy in its call stack tries to set an already busy
TA context busy, it will return TEE_ERROR_BUSY instead, as there is a
recursive loop in how the different TAs have invoked each other.

Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
Tested-by: Jens Wiklander <jens.wiklander@linaro.org> (QEMU)
Tested-by: Pascal Brand <pascal.brand@linaro.org> (QEMU - full test suite)
Reviewed-by: Pascal Brand <pascal.brand@linaro.org>
Tested-by: Jerome Forissier <jerome.forissier@linaro.org> (HiKey - full test suite)
  • Loading branch information
jenswi-linaro committed Sep 30, 2015
1 parent a23dc07 commit 2b07dcb
Show file tree
Hide file tree
Showing 3 changed files with 95 additions and 12 deletions.
3 changes: 2 additions & 1 deletion core/arch/arm/include/kernel/thread.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@
#endif

#define THREAD_ID_0 0
#define THREAD_ID_INVALID -1

#ifndef ASM
extern uint32_t thread_vector_table[];
Expand Down Expand Up @@ -282,7 +283,7 @@ bool thread_init_stack(uint32_t stack_id, vaddr_t sp);
/*
* Returns current thread id.
*/
uint32_t thread_get_id(void);
int thread_get_id(void);

/*
* Set Thread Specific Data (TSD) pointer.
Expand Down
102 changes: 92 additions & 10 deletions core/arch/arm/kernel/tee_ta_manager.c
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@
#include <tee/abi.h>
#include <mm/tee_mmu.h>
#include <kernel/tee_misc.h>
#include <kernel/panic.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_cryp_provider.h>
#include <tee/tee_cryp_utl.h>
Expand Down Expand Up @@ -100,6 +101,10 @@ struct param_ta {

/* This mutex protects the critical section in tee_ta_init_session */
static struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;

static TEE_Result tee_ta_rpc_free(uint32_t handle);

/*
Expand All @@ -115,21 +120,93 @@ static void set_tee_rs(struct tee_ta_session *tee_rs)
thread_set_tsd(tee_rs);
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
/*
 * Acquire the global single-instance lock for the current thread.
 * Requires tee_ta_mutex to be held. The lock is recursive per thread:
 * tee_ta_single_instance_count tracks the nesting depth, so a thread
 * that already owns the lock only bumps the count and never waits
 * (this is what makes deadlock detection in tee_ta_try_set_busy()
 * possible).
 */
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		/* Fresh ownership: no nested holds may be outstanding. */
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

/*
 * Drop one level of the single-instance lock held by the current thread.
 * Requires tee_ta_mutex to be held; the caller must own the lock (both
 * asserted). When the outermost hold is released, ownership is cleared
 * and one thread waiting in lock_single_instance() is woken.
 */
static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

/*
 * Return true if the current thread owns the single-instance lock.
 * Requires tee_ta_mutex to be held.
 */
static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}

/*
 * Try to mark @ctx busy for the current thread.
 *
 * Returns true with @ctx marked busy on success. Returns false when
 * waiting would risk a deadlock: the current thread holds the
 * single-instance lock (a single-instance TA is busy somewhere in its
 * call stack) while @ctx is busy too, so sleeping on @ctx->busy_cv here
 * could wait forever on a thread that is in turn waiting on us.
 *
 * Note: the scraped diff interleaved the deleted pre-patch wait loop
 * right after mutex_lock(); as shown it would block before the deadlock
 * check and defeat the patch. This is the reconstructed post-commit
 * function: the wait loop belongs only in the "not holding the
 * single-instance lock" branch.
 */
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy, as waiting now would only cause a
			 * dead-lock, we release the lock and return false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, we're free to
		 * wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

/*
 * Mark @ctx busy, panicking if doing so could deadlock.
 * Callers able to recover from a busy context should call
 * tee_ta_try_set_busy() directly instead.
 */
static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	bool ok = tee_ta_try_set_busy(ctx);

	if (!ok)
		panic();
}

/*
 * Mark @ctx available again and wake one thread waiting for it.
 * Pairs with a successful tee_ta_try_set_busy()/tee_ta_set_busy().
 */
static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	/* Drop the single-instance hold taken when the context was set busy. */
	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		unlock_single_instance();

	mutex_unlock(&tee_ta_mutex);
}

Expand Down Expand Up @@ -1184,6 +1261,7 @@ TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
struct tee_ta_session *s = NULL;
struct tee_ta_ctx *ctx;
bool panicked;
bool was_busy = false;

res = tee_ta_init_session(err, open_sessions, uuid, &s);
if (res != TEE_SUCCESS) {
Expand Down Expand Up @@ -1219,12 +1297,16 @@ TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
COMMAND_OPEN_SESSION);
}
} else {
tee_ta_set_busy(ctx);
res = tee_user_ta_enter(
err, s,
USER_TA_FUNC_OPEN_CLIENT_SESSION,
cancel_req_to, 0, param);
tee_ta_clear_busy(ctx);
if (tee_ta_try_set_busy(ctx)) {
res = tee_user_ta_enter(err, s,
USER_TA_FUNC_OPEN_CLIENT_SESSION,
cancel_req_to, 0, param);
tee_ta_clear_busy(ctx);
} else {
/* Deadlock avoided */
res = TEE_ERROR_BUSY;
was_busy = true;
}
}
}

Expand All @@ -1237,7 +1319,7 @@ TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
* Origin error equal to TEE_ORIGIN_TRUSTED_APP for "regular" error,
* apart from panicking.
*/
if (panicked)
if (panicked || was_busy)
*err = TEE_ORIGIN_TEE;
else
*err = TEE_ORIGIN_TRUSTED_APP;
Expand Down
2 changes: 1 addition & 1 deletion core/arch/arm/kernel/thread.c
Original file line number Diff line number Diff line change
Expand Up @@ -686,7 +686,7 @@ bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
return true;
}

uint32_t thread_get_id(void)
int thread_get_id(void)
{
/* thread_get_core_local() requires IRQs to be disabled */
uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
Expand Down

0 comments on commit 2b07dcb

Please sign in to comment.