diff --git a/core/include/kernel/refcount.h b/core/include/kernel/refcount.h
new file mode 100644
index 00000000000..d2d08957efc
--- /dev/null
+++ b/core/include/kernel/refcount.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#ifndef __KERNEL_REFCOUNT_H
+#define __KERNEL_REFCOUNT_H
+
+#include <atomic.h>
+
+/*
+ * Reference counter
+ *
+ * When val is 0, refcount_inc() does not change the value and returns false.
+ * Otherwise, it increments the value and returns true.
+ *
+ * refcount_dec() decrements the value and returns true when the call
+ * caused the value to become 0, false otherwise.
+ *
+ * Since each call to refcount_dec() is supposed to match a call to
+ * refcount_inc(), refcount_dec() called for val == 0 should never happen.
+ *
+ * This behaviour makes this pattern possible:
+ * if (!refcount_inc(r)) {
+ *	mutex_lock(m);
+ *	// Some other thread may have initialized o by now so check that
+ *	// we still need to initialize o.
+ *	if (!o) {
+ *		o = initialize();
+ *		refcount_set(r, 1);
+ *	}
+ *	mutex_unlock(m);
+ * }
+ *
+ * or
+ * if (refcount_dec(r)) {
+ *	mutex_lock(m);
+ *	// Now that we have the mutex o can't be initialized/uninitialized
+ *	// by any other thread, check that the refcount value is still 0
+ *	// to guard against the thread above already having reinitialized o
+ *	if (!refcount_val(r) && o)
+ *		uninitialize(o)
+ *	mutex_unlock(m);
+ * }
+ *
+ * where r is the reference counter, o is the object and m the mutex
+ * protecting the object.
+ */
+
+struct refcount {
+	unsigned int val;
+};
+
+/* Increases refcount by 1, return true if val > 0 else false */
+bool refcount_inc(struct refcount *r);
+/* Decreases refcount by 1, return true if val == 0 else false */
+bool refcount_dec(struct refcount *r);
+
+static inline void refcount_set(struct refcount *r, unsigned int val)
+{
+	atomic_store_uint(&r->val, val);
+}
+
+static inline unsigned int refcount_val(struct refcount *r)
+{
+	return atomic_load_uint(&r->val);
+}
+
+#endif /*!__KERNEL_REFCOUNT_H*/
diff --git a/core/kernel/refcount.c b/core/kernel/refcount.c
new file mode 100644
index 00000000000..8efb21630cc
--- /dev/null
+++ b/core/kernel/refcount.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <assert.h>
+#include <atomic.h>
+#include <kernel/refcount.h>
+
+bool refcount_inc(struct refcount *r)
+{
+	unsigned int nval;
+	unsigned int oval = atomic_load_uint(&r->val);
+
+	while (true) {
+		nval = oval + 1;
+
+		/* r->val is 0, we can't do anything more. */
+		if (!oval)
+			return false;
+
+		/*
+		 * Note that atomic_cas_uint() updates oval to the current
+		 * r->val read, regardless of return value.
+		 */
+		if (atomic_cas_uint(&r->val, &oval, nval))
+			return true;
+	}
+}
+
+bool refcount_dec(struct refcount *r)
+{
+	unsigned int nval;
+	unsigned int oval = atomic_load_uint(&r->val);
+
+	while (true) {
+		assert(oval);
+		nval = oval - 1;
+
+		/*
+		 * Note that atomic_cas_uint() updates oval to the current
+		 * r->val read, regardless of return value.
+		 */
+		if (atomic_cas_uint(&r->val, &oval, nval)) {
+			/*
+			 * Value has been updated, if value was set to 0
+			 * return true to indicate that.
+			 */
+			return !nval;
+		}
+	}
+}
diff --git a/core/kernel/sub.mk b/core/kernel/sub.mk
index 45f54f39948..41be95ffc8a 100644
--- a/core/kernel/sub.mk
+++ b/core/kernel/sub.mk
@@ -10,3 +10,4 @@ srcs-y += interrupt.c
 srcs-$(CFG_CORE_SANITIZE_UNDEFINED) += ubsan.c
 srcs-$(CFG_CORE_SANITIZE_KADDRESS) += asan.c
 cflags-remove-asan.c-y += $(cflags_kasan)
+srcs-y += refcount.c