Skip to content

Commit

Permalink
pthread: facilitate dynamically allocated thread stacks
Browse files Browse the repository at this point in the history
This change allows users to call pthread_create(3) with
the pthread_attr_t argument equal to NULL, or with the
pthread_attr_t argument specifying a NULL stack but a
custom stack size.

If either of the above two requirements is met, then
a stack will be heap-allocated internally and
freed again after the thread has terminated.

This makes the Zephyr implementation of pthread_create(3)
more compliant with the normative spec.

Fixes zephyrproject-rtos#25973

Signed-off-by: Christopher Friedt <chrisfriedt@gmail.com>
  • Loading branch information
cfriedt committed Feb 25, 2021
1 parent b5a3fe1 commit f071306
Show file tree
Hide file tree
Showing 4 changed files with 131 additions and 40 deletions.
38 changes: 14 additions & 24 deletions include/kernel.h
Original file line number Diff line number Diff line change
Expand Up @@ -234,20 +234,17 @@ extern void k_thread_foreach_unlocked(
#define K_INHERIT_PERMS (BIT(3))

/**
* @brief Callback item state
* @brief dynamically allocated stack
*
* @details
* This is a single bit of state reserved for "callback manager"
* utilities (p4wq initially) who need to track operations invoked
* from within a user-provided callback they have been invoked.
* Effectively it serves as a tiny bit of zero-overhead TLS data.
* This flag indicates that a thread stack has been heap-allocated with
* @ref k_malloc.
*/
#define K_CALLBACK_STATE (BIT(4))
#define K_STACK_ON_HEAP (BIT(4))

#ifdef CONFIG_X86
/* x86 Bitmask definitions for threads user options */

#if defined(CONFIG_FPU_SHARING) && defined(CONFIG_X86_SSE)
#if defined(CONFIG_FPU_SHARING) && defined(CONFIG_SSE)
/* thread uses SSEx (and also FP) registers */
#define K_SSE_REGS (BIT(7))
#endif
Expand Down Expand Up @@ -420,9 +417,7 @@ void k_thread_system_pool_assign(struct k_thread *thread);
* to being aborted, self-exiting, or taking a fatal error. This API returns
* immediately if the thread isn't running.
*
* This API may only be called from ISRs with a K_NO_WAIT timeout,
* where it can be useful as a predicate to detect when a thread has
* aborted.
* This API may only be called from ISRs with a K_NO_WAIT timeout.
*
* @param thread Thread to wait to exit
* @param timeout upper bound time to wait for the thread to exit.
Expand Down Expand Up @@ -539,17 +534,6 @@ __syscall k_tid_t k_current_get(void);
* released. It is the responsibility of the caller of this routine to ensure
* all necessary cleanup is performed.
*
* After k_thread_abort() returns, the thread is guaranteed not to be
* running or to become runnable anywhere on the system. Normally
* this is done via blocking the caller (in the same manner as
* k_thread_join()), but in interrupt context on SMP systems the
* implementation is required to spin for threads that are running on
* other CPUs. Note that as specified, this means that on SMP
* platforms it is possible for application code to create a deadlock
* condition by simultaneously aborting a cycle of threads using at
* least one termination from interrupt context. Zephyr cannot detect
* all such conditions.
*
* @param thread ID of thread to abort.
*
* @return N/A
Expand Down Expand Up @@ -1969,8 +1953,8 @@ static inline void *z_impl_k_queue_peek_tail(struct k_queue *queue)
* A k_futex is a lightweight mutual exclusion primitive designed
* to minimize kernel involvement. Uncontended operation relies
* only on atomic access to shared memory. k_futex are tracked as
* kernel objects and can live in user memory so that any access
* bypasses the kernel object permission management mechanism.
 * kernel objects and can live in user memory so that any access
 * bypasses the kernel object permission management mechanism.
*/
struct k_futex {
atomic_t val;
Expand Down Expand Up @@ -4328,6 +4312,12 @@ void k_heap_free(struct k_heap *h, void *mem);
}, \
}

extern int z_mem_pool_alloc(struct k_mem_pool *pool, struct k_mem_block *block,
size_t size, k_timeout_t timeout);
extern void *z_mem_pool_malloc(struct k_mem_pool *pool, size_t size);
extern void z_mem_pool_free(struct k_mem_block *block);
extern void z_mem_pool_free_id(struct k_mem_block_id *id);

/**
* @}
*/
Expand Down
1 change: 1 addition & 0 deletions include/posix/pthread.h
Original file line number Diff line number Diff line change
Expand Up @@ -477,6 +477,7 @@ static inline int pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
return 0;
}

int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize);
int pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize);
int pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy);
int pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy);
Expand Down
24 changes: 24 additions & 0 deletions lib/posix/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,30 @@ config SEM_VALUE_MAX
help
Maximum semaphore count in POSIX compliant Application.

config PTHREAD_DYNAMIC_STACK
bool "Support for dynamic stacks"
select THREAD_STACK_INFO
default y
help
POSIX 1003.1 allows a NULL pthread_attr_t* to be passed to
pthread_create(3). However, Zephyr has traditionally required
that the caller statically allocate a stack and pass it in via the
pthread_attr_t*. With this option selected, NULL will be permitted
and a suitable stack will be automatically allocated and assigned,
inheriting permissions from the calling thread.

if PTHREAD_DYNAMIC_STACK
config PTHREAD_DYNAMIC_STACK_DEFAULT_SIZE
int "Default size for a dynamic pthread stack (in bytes)"
default 1024
help
This value is used for the default size of dynamically-allocated
stacks. However, users may still specify the size of
dynamically-allocated stacks via pthread_attr_setstacksize(3)
prior to calling pthread_create(3).

endif # PTHREAD_DYNAMIC_STACK

endif # PTHREAD_IPC

config POSIX_CLOCK
Expand Down
108 changes: 92 additions & 16 deletions lib/posix/pthread.c
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,19 @@

#include <kernel.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/atomic.h>
#include <ksched.h>
#include <wait_q.h>
#include <posix/pthread.h>
#include <sys/slist.h>

#ifdef CONFIG_PTHREAD_DYNAMIC_STACK_DEFAULT_SIZE
#define DYNAMIC_STACK_SIZE CONFIG_PTHREAD_DYNAMIC_STACK_DEFAULT_SIZE
#else
#define DYNAMIC_STACK_SIZE 0
#endif

#define PTHREAD_INIT_FLAGS PTHREAD_CANCEL_ENABLE
#define PTHREAD_CANCELED ((void *) -1)

Expand Down Expand Up @@ -115,7 +122,10 @@ int pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr,
/*
 * Entry-point trampoline for every thread created via pthread_create().
 *
 * arg1 - argument forwarded to the user-supplied start routine
 * arg2 - base pointer of the heap allocation backing this thread's stack,
 *        or NULL when the caller supplied its own stack
 * arg3 - the user-supplied start routine, of type void *(*)(void *)
 */
static void zephyr_thread_wrapper(void *arg1, void *arg2, void *arg3)
{
void * (*fun_ptr)(void *) = arg3;

/*
 * Record how far the usable stack region (stack_info.start) sits above
 * the original allocation base (arg2), so that the allocation can be
 * recovered and freed later by zephyr_pthread_stack_reclaim().
 * NOTE(review): when arg2 is NULL (user-provided stack) this stores a
 * meaningless delta; it appears to be consumed only for threads created
 * with a heap-allocated stack (K_STACK_ON_HEAP) -- confirm.
 */
struct _thread_stack_info *stack_info
= &k_current_get()->stack_info;
stack_info->delta = (size_t)
((uint8_t *)stack_info->start - (uint8_t *)arg2);
fun_ptr(arg1);
/* Returning from the start routine behaves like pthread_exit(NULL). */
pthread_exit(NULL);
}
Expand All @@ -135,17 +145,43 @@ int pthread_create(pthread_t *newthread, const pthread_attr_t *attr,
uint32_t pthread_num;
pthread_condattr_t cond_attr;
struct posix_thread *thread;
pthread_attr_t dynamic_attr;
k_thread_stack_t *dynamic_stack = NULL;
/* a non-const pthread_attr_t* that we can modify, if needed */
pthread_attr_t *mattr = (pthread_attr_t *)attr;

/*
* FIXME: Pthread attribute must be non-null and it provides stack
* pointer and stack size. So even though POSIX 1003.1 spec accepts
* attrib as NULL but zephyr needs it initialized with valid stack.
*/
if ((attr == NULL) || (attr->initialized == 0U)
|| (attr->stack == NULL) || (attr->stacksize == 0)) {
if (mattr != NULL && mattr->initialized == 0) {
return EINVAL;
}

if (mattr == NULL || mattr->stack == NULL) {
if (IS_ENABLED(CONFIG_PTHREAD_DYNAMIC_STACK)) {
/*
* We dynamically allocate space when either
* 1) attr == NULL -> use DYNAMIC_STACK_SIZE, or
* 2) attr != NULL && attr->stack == NULL
* -> allocate attr->stacksize
*/
if (mattr == NULL) {
(void) pthread_attr_init(&dynamic_attr);
dynamic_attr.stacksize = DYNAMIC_STACK_SIZE;
mattr = &dynamic_attr;
}

dynamic_stack = k_aligned_alloc(ARCH_STACK_PTR_ALIGN,
Z_KERNEL_STACK_SIZE_ADJUST(mattr->stacksize));
if (dynamic_stack == NULL) {
return EAGAIN;
}

__ASSERT_NO_MSG(dynamic_stack != NULL);
mattr->stack = dynamic_stack;
mattr->flags |= K_STACK_ON_HEAP;
} else {
return EINVAL;
}
}

pthread_mutex_lock(&pthread_pool_lock);
for (pthread_num = 0;
pthread_num < CONFIG_MAX_PTHREAD_COUNT; pthread_num++) {
Expand All @@ -158,10 +194,14 @@ int pthread_create(pthread_t *newthread, const pthread_attr_t *attr,
pthread_mutex_unlock(&pthread_pool_lock);

if (pthread_num >= CONFIG_MAX_PTHREAD_COUNT) {
if (IS_ENABLED(CONFIG_PTHREAD_DYNAMIC_STACK)
&& dynamic_stack != NULL) {
k_free(dynamic_stack);
}
return EAGAIN;
}

prio = posix_to_zephyr_priority(attr->priority, attr->schedpolicy);
prio = posix_to_zephyr_priority(mattr->priority, mattr->schedpolicy);

thread = &posix_thread_pool[pthread_num];
/*
Expand All @@ -172,25 +212,25 @@ int pthread_create(pthread_t *newthread, const pthread_attr_t *attr,
(void)pthread_mutex_init(&thread->cancel_lock, NULL);

pthread_mutex_lock(&thread->cancel_lock);
thread->cancel_state = (1 << _PTHREAD_CANCEL_POS) & attr->flags;
thread->cancel_state = (1 << _PTHREAD_CANCEL_POS) & mattr->flags;
thread->cancel_pending = 0;
pthread_mutex_unlock(&thread->cancel_lock);

pthread_mutex_lock(&thread->state_lock);
thread->state = attr->detachstate;
thread->state = mattr->detachstate;
pthread_mutex_unlock(&thread->state_lock);

pthread_cond_init(&thread->state_cond, &cond_attr);
sys_slist_init(&thread->key_list);

*newthread = (pthread_t) k_thread_create(&thread->thread, attr->stack,
attr->stacksize,
*newthread = (pthread_t) k_thread_create(&thread->thread, mattr->stack,
mattr->stacksize,
(k_thread_entry_t)
zephyr_thread_wrapper,
(void *)arg, NULL,
(void *)arg, dynamic_stack,
threadroutine, prio,
(~K_ESSENTIAL & attr->flags),
K_MSEC(attr->delayedstart));
(~K_ESSENTIAL & mattr->flags),
K_MSEC(mattr->delayedstart));
return 0;
}

Expand Down Expand Up @@ -347,6 +387,23 @@ int pthread_once(pthread_once_t *once, void (*init_func)(void))
return 0;
}

#ifdef CONFIG_PTHREAD_DYNAMIC_STACK
/*
 * Free the heap-allocated stack of a terminated pthread.
 *
 * Installed as thread->fn_abort in pthread_exit() for threads whose stack
 * was allocated internally by pthread_create() (K_STACK_ON_HEAP set), so it
 * runs after the thread has been aborted and the stack is no longer in use.
 */
static void zephyr_pthread_stack_reclaim(struct k_thread *thread)
{
uint8_t *p = (uint8_t *)thread->stack_info.start;

/*
 * stack_info.start points inside the allocation (past any reserved or
 * alignment area); back up by the delta recorded in
 * zephyr_thread_wrapper() to recover the pointer originally returned
 * by the allocator, which is what must be passed to k_free().
 */
p -= thread->stack_info.delta;
/* Scrub the stack contents before returning the memory to the heap. */
memset((void *)thread->stack_info.start, 0,
thread->stack_info.size);
k_free(p);
}
#else
/* Dynamic stacks disabled at build time: nothing to reclaim. */
static inline void zephyr_pthread_stack_reclaim(struct k_thread *thread)
{
ARG_UNUSED(thread);
}
#endif

/**
* @brief Terminate calling thread.
*
Expand Down Expand Up @@ -385,6 +442,10 @@ void pthread_exit(void *retval)
}
}

if ((self->thread.base.user_options & K_STACK_ON_HEAP) != 0) {
self->thread.fn_abort = zephyr_pthread_stack_reclaim;
}

pthread_mutex_unlock(&self->state_lock);
k_thread_abort((k_tid_t)self);
}
Expand Down Expand Up @@ -534,6 +595,21 @@ int pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
return 0;
}

/**
 * @brief Set stack size attribute in thread attributes object.
 *
 * See IEEE 1003.1
 *
 * @param attr initialized thread attributes object
 * @param stacksize requested stack size in bytes; must be non-zero
 *
 * @retval 0 on success
 * @retval EINVAL if @p attr is NULL or uninitialized, or if @p stacksize
 *         is zero (POSIX requires EINVAL for a stack size below the
 *         minimum supported size)
 */
int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
	if ((attr == NULL) || (attr->initialized == 0U)) {
		return EINVAL;
	}

	/*
	 * Reject a zero-size request up front; otherwise it would later
	 * drive a zero-byte dynamic stack allocation in pthread_create().
	 */
	if (stacksize == 0) {
		return EINVAL;
	}

	attr->stacksize = stacksize;
	return 0;
}

/**
* @brief Get stack size attribute in thread attributes object.
*
Expand Down

0 comments on commit f071306

Please sign in to comment.