core: recursive mutex implementation #5731

Merged: 3 commits, Feb 21, 2017
110 changes: 110 additions & 0 deletions core/include/rmutex.h
@@ -0,0 +1,110 @@
/*
* Copyright (C) 2016 Theobroma Systems Design & Consulting GmbH
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/

/**
* @ingroup core_sync Synchronization
* @brief Recursive Mutex for thread synchronization
* @{
*
* @file
* @brief RIOT synchronization API
*
* @author Martin Elshuber <martin.elshuber@theobroma-systems.com>
*
*/

#ifndef RMUTEX_H_
#define RMUTEX_H_

#include <stdatomic.h>

#include "mutex.h"
#include "kernel_types.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
* @brief Mutex structure. Must never be modified by the user.
*/
typedef struct rmutex_t {
/* fields are managed by mutex functions, don't touch */
/**
* @brief The mutex used for locking. **Must never be changed by
* the user.**
* @internal
*/
mutex_t mutex;

/**
* @brief Number of times the owner thread has locked the mutex
* @internal
*/
uint16_t refcount;

/**
* @brief Owner thread of the mutex.
* @details The owner is written by the mutex holder and read
* concurrently by other threads. To ensure a consistent value,
* atomic_int_least16_t is used. Note that @ref kernel_pid_t is a 16-bit type.
* @internal
*/
atomic_int_least16_t owner;
} rmutex_t;

/**
* @brief Static initializer for rmutex_t.
* @details This initializer is preferable to rmutex_init().
*/
#define RMUTEX_INIT { MUTEX_INIT, 0, ATOMIC_VAR_INIT(KERNEL_PID_UNDEF) }

/**
* @brief Initializes a recursive mutex object.
* @details For initialization of variables use RMUTEX_INIT instead.
* Only use the function call for dynamically allocated mutexes.
* @param[out] rmutex pre-allocated mutex structure, must not be NULL.
*/
static inline void rmutex_init(rmutex_t *rmutex)
{
rmutex_t empty_rmutex = RMUTEX_INIT;
*rmutex = empty_rmutex;
}

/**
* @brief Tries to get a recursive mutex, non-blocking.
*
* @param[in] rmutex Recursive mutex object to lock. Has to be
* initialized first. Must not be NULL.
*
* @return 1 if the mutex was acquired (it was unlocked, or was already held by the calling thread).
* @return 0 if the mutex is held by another thread.
*/
int rmutex_trylock(rmutex_t *rmutex);

/**
* @brief Locks a recursive mutex, blocking.
*
* @param[in] rmutex Recursive mutex object to lock. Has to be
* initialized first. Must not be NULL.
*/
void rmutex_lock(rmutex_t *rmutex);

/**
* @brief Unlocks the recursive mutex.
*
* @param[in] rmutex Recursive mutex object to unlock, must not be NULL.
*/
void rmutex_unlock(rmutex_t *rmutex);

#ifdef __cplusplus
}
#endif

#endif /* RMUTEX_H_ */
/** @} */
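
A minimal usage sketch of the API declared above, added here for orientation and not part of the patch; the function names (nested_step, do_work) are illustrative only:

```c
#include "rmutex.h"

/* Statically allocated recursive mutex; RMUTEX_INIT is the preferred
 * initializer for static/global variables. */
static rmutex_t lock = RMUTEX_INIT;

static void nested_step(void)
{
    /* Relocking from the owning thread succeeds and only bumps refcount. */
    rmutex_lock(&lock);
    /* ... inner critical section ... */
    rmutex_unlock(&lock);
}

void do_work(void)
{
    rmutex_lock(&lock);   /* first lock: acquires the underlying mutex */
    nested_step();        /* nested lock by the same thread */
    rmutex_unlock(&lock); /* last unlock releases the underlying mutex */
}
```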
150 changes: 150 additions & 0 deletions core/rmutex.c
@@ -0,0 +1,150 @@
/*
* Copyright (C) 2016 Theobroma Systems Design & Consulting GmbH
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/

/**
* @ingroup core_sync Synchronization
* @brief Recursive Mutex for thread synchronization
* @{
*
* @file
* @brief RIOT synchronization API
*
* @author Martin Elshuber <martin.elshuber@theobroma-systems.com>
*
* The recursive mutex implementation is inspired by the implementation of
* Nick v. IJzendoorn <nijzendoorn@engineering-spirit.nl>
* @see https://github.com/RIOT-OS/RIOT/pull/4529/files#diff-8f48e1b9ed7a0a48d0c686a87cc5084eR35
*
*/

#include <stdio.h>
#include <inttypes.h>

#include "rmutex.h"
#include "thread.h"
#include "assert.h"

#define ENABLE_DEBUG (0)
#include "debug.h"

static int _lock(rmutex_t *rmutex, int trylock)
{
kernel_pid_t owner;

/* try to lock the mutex */
DEBUG("rmutex %" PRIi16" : trylock\n", thread_getpid());
if (mutex_trylock(&rmutex->mutex) == 0) {
DEBUG("rmutex %" PRIi16" : mutex already held\n", thread_getpid());
/* Mutex is already held
*
* Case 1: Mutex is not held by me
* Condition 1: holds
* rmutex->owner != thread_getpid()
*
* Note for Case 1:
*
* As a consequence it is necessary to call
* mutex_lock(). However, the read access to owner is not
* locked, and owner can be changed by a thread that is
* holding the lock (e.g.: the holder unlocks the mutex, a new
* holder acquires the lock). The atomic access strategy
* 'relaxed' ensures that the value of rmutex->owner is read
* consistently.
*
* It is not necessary to synchronize (make written values
* visible) read/write with other threads, because every
* write by other threads let evaluate Condition 1 to
* false. They all write either KERNEL_PID_UNDEF or the
* pid of the other thread.
*
* Other threads never set rmutex->owner to the pid of the
* current thread. Hence, it is guaranteed that mutex_lock
* is eventually called.
*
* Case 2: Mutex is held by me (relock)
* Condition 2: holds
* rmutex->owner == thread_getpid()
*
* Note for Case 2:
*
* Because rmutex->owner is only written by the
* owner (me), rmutex->owner stays constant throughout the
* complete call and rmutex->refcount is protected
* (read/write) by the mutex.
*/

/* ensure that owner is read atomically, since I need a consistent value */
owner = atomic_load_explicit(&rmutex->owner, memory_order_relaxed);
DEBUG("rmutex %" PRIi16" : mutex held by %" PRIi16" \n", thread_getpid(), owner);

/* Case 1: Mutex is not held by me */
if (owner != thread_getpid()) {
/* wait for the mutex */
DEBUG("rmutex %" PRIi16" : locking mutex\n", thread_getpid());

if (trylock) {
return 0;
}
else {
mutex_lock(&rmutex->mutex);
}
}
/* Case 2: Mutex is held by me (relock) */
/* Note: There is nothing to do for Case 2; refcount is incremented below */
}

DEBUG("rmutex %" PRIi16" : I am now holding the mutex\n", thread_getpid());

/* I am holding the recursive mutex */
DEBUG("rmutex %" PRIi16" : settting the owner\n", thread_getpid());

/* ensure that owner is written atomically, since others need a consistent value */
atomic_store_explicit(&rmutex->owner, thread_getpid(), memory_order_relaxed);

DEBUG("rmutex %" PRIi16" : increasing refs\n", thread_getpid());

/* increase the refcount */
rmutex->refcount++;

return 1;
}

void rmutex_lock(rmutex_t *rmutex)
{
_lock(rmutex, 0);
}

int rmutex_trylock(rmutex_t *rmutex)
{
return _lock(rmutex, 1);
}

void rmutex_unlock(rmutex_t *rmutex)
{
assert(atomic_load_explicit(&rmutex->owner, memory_order_relaxed) == thread_getpid());
assert(rmutex->refcount > 0);
Contributor:

Sorry for the late commenting, but I just stumbled over this:

This means calling rmutex_unlock() from a non-owner or with refcount == 0 is undefined? Wouldn't it be safer to handle those cases in some sane way?

E.g., after calling rmutex_unlock(), the mutex is guaranteed to be free. That would mean, if a non-owner calls, it needs to lock first. If the owner calls but refcount == 0, the function just returns.

What do you think?

Contributor:

I just checked, C11 rmutexes have "The behavior is undefined if the mutex is not locked by the calling thread.", so I guess we can go with that. So let's postpone this discussion.

Author:

I think calling unlock without owning the lock is an error in the application; if so, the behaviour can be left undefined. We could argue for adding a comment in the API description.


DEBUG("rmutex %" PRIi16" : decrementing refs refs\n", thread_getpid());

/* decrement refcount */
rmutex->refcount--;

/* check if I should still hold the mutex */
if (rmutex->refcount == 0) {
/* if not, release the mutex */

DEBUG("rmutex %" PRIi16" : resetting owner\n", thread_getpid());

/* ensure that owner is written only once */
atomic_store_explicit(&rmutex->owner, KERNEL_PID_UNDEF, memory_order_relaxed);

DEBUG("rmutex %" PRIi16" : releasing mutex\n", thread_getpid());

mutex_unlock(&rmutex->mutex);
}
}
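
For illustration only (not part of the patch), a sketch of how the non-blocking variant might be used; try_use_resource is a hypothetical helper. rmutex_trylock() returns 1 when the calling thread acquires or already owns the mutex, and 0 when another thread holds it:

```c
#include "rmutex.h"

static rmutex_t res_lock = RMUTEX_INIT;

/* Returns 1 if the shared resource could be used without blocking,
 * 0 if another thread currently holds the lock. */
int try_use_resource(void)
{
    if (rmutex_trylock(&res_lock) == 0) {
        /* held by another thread: give up instead of blocking */
        return 0;
    }
    /* we own the recursive mutex (freshly locked or relocked) */
    /* ... access the shared resource ... */
    rmutex_unlock(&res_lock);
    return 1;
}
```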
6 changes: 6 additions & 0 deletions tests/rmutex/Makefile
@@ -0,0 +1,6 @@
APPLICATION = rmutex
include ../Makefile.tests_common

BOARD_INSUFFICIENT_MEMORY := stm32f0discovery weio nucleo-f030 nucleo32-f042

include $(RIOTBASE)/Makefile.include
86 changes: 86 additions & 0 deletions tests/rmutex/README.md
@@ -0,0 +1,86 @@
Expected result
===============

When successful, you should see 5 different threads printing their
PID, priority and recursion depth. The thread with the lowest priority
value (i.e. the highest priority) should be able to lock (and unlock)
the mutex first, followed by the other threads in order of increasing
priority value. If two threads have the same priority, the one with the
lower thread ID should acquire the lock first. The output should look
like the following:

```
main(): This is RIOT! (Version: xxx)
Recursive Mutex test
Please refer to the README.md for more information

Recursive Mutex test
Please refer to the README.md for more information

T3 (prio 6, depth 0): trying to lock rmutex now
T4 (prio 4, depth 0): trying to lock rmutex now
T5 (prio 5, depth 0): trying to lock rmutex now
T6 (prio 2, depth 0): trying to lock rmutex now
T7 (prio 3, depth 0): trying to lock rmutex now
main: unlocking recursive mutex
T6 (prio 2, depth 0): locked rmutex now
T6 (prio 2, depth 1): trying to lock rmutex now
T6 (prio 2, depth 1): locked rmutex now
T6 (prio 2, depth 2): trying to lock rmutex now
T6 (prio 2, depth 2): locked rmutex now
T6 (prio 2, depth 3): trying to lock rmutex now
T6 (prio 2, depth 3): locked rmutex now
T6 (prio 2, depth 3): unlocked rmutex
T6 (prio 2, depth 2): unlocked rmutex
T6 (prio 2, depth 1): unlocked rmutex
T6 (prio 2, depth 0): unlocked rmutex
T7 (prio 3, depth 0): locked rmutex now
T7 (prio 3, depth 1): trying to lock rmutex now
T7 (prio 3, depth 1): locked rmutex now
T7 (prio 3, depth 2): trying to lock rmutex now
T7 (prio 3, depth 2): locked rmutex now
T7 (prio 3, depth 3): trying to lock rmutex now
T7 (prio 3, depth 3): locked rmutex now
T7 (prio 3, depth 4): trying to lock rmutex now
T7 (prio 3, depth 4): locked rmutex now
T7 (prio 3, depth 4): unlocked rmutex
T7 (prio 3, depth 3): unlocked rmutex
T7 (prio 3, depth 2): unlocked rmutex
T7 (prio 3, depth 1): unlocked rmutex
T7 (prio 3, depth 0): unlocked rmutex
T4 (prio 4, depth 0): locked rmutex now
T4 (prio 4, depth 1): trying to lock rmutex now
T4 (prio 4, depth 1): locked rmutex now
T4 (prio 4, depth 2): trying to lock rmutex now
T4 (prio 4, depth 2): locked rmutex now
T4 (prio 4, depth 2): unlocked rmutex
T4 (prio 4, depth 1): unlocked rmutex
T4 (prio 4, depth 0): unlocked rmutex
T5 (prio 5, depth 0): locked rmutex now
T5 (prio 5, depth 1): trying to lock rmutex now
T5 (prio 5, depth 1): locked rmutex now
T5 (prio 5, depth 2): trying to lock rmutex now
T5 (prio 5, depth 2): locked rmutex now
T5 (prio 5, depth 2): unlocked rmutex
T5 (prio 5, depth 1): unlocked rmutex
T5 (prio 5, depth 0): unlocked rmutex
T3 (prio 6, depth 0): locked rmutex now
T3 (prio 6, depth 1): trying to lock rmutex now
T3 (prio 6, depth 1): locked rmutex now
T3 (prio 6, depth 2): trying to lock rmutex now
T3 (prio 6, depth 2): locked rmutex now
T3 (prio 6, depth 3): trying to lock rmutex now
T3 (prio 6, depth 3): locked rmutex now
T3 (prio 6, depth 4): trying to lock rmutex now
T3 (prio 6, depth 4): locked rmutex now
T3 (prio 6, depth 4): unlocked rmutex
T3 (prio 6, depth 3): unlocked rmutex
T3 (prio 6, depth 2): unlocked rmutex
T3 (prio 6, depth 1): unlocked rmutex
T3 (prio 6, depth 0): unlocked rmutex

Test END, check the order of priorities above.
```

Background
==========
This test application stresses a recursive mutex with a number of threads waiting on it.
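
A rough sketch of what each test thread does, matching the output above; the helper name lock_recursive, the way the priority is passed in, and the exact recursion depths are assumptions of this sketch, not the actual test code:

```c
#include <stdio.h>

#include "rmutex.h"
#include "thread.h"

static rmutex_t testlock = RMUTEX_INIT;

/* Lock the recursive mutex (max_depth + 1) times, then unlock it the
 * same number of times while unwinding. `prio` is only used for the
 * log output and is passed in by the thread setup code. */
static void lock_recursive(uint8_t prio, uint8_t depth, uint8_t max_depth)
{
    kernel_pid_t pid = thread_getpid();

    printf("T%d (prio %u, depth %u): trying to lock rmutex now\n",
           (int)pid, (unsigned)prio, (unsigned)depth);
    rmutex_lock(&testlock);
    printf("T%d (prio %u, depth %u): locked rmutex now\n",
           (int)pid, (unsigned)prio, (unsigned)depth);

    if (depth < max_depth) {
        lock_recursive(prio, depth + 1, max_depth);
    }

    rmutex_unlock(&testlock);
    printf("T%d (prio %u, depth %u): unlocked rmutex\n",
           (int)pid, (unsigned)prio, (unsigned)depth);
}
```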