kernel: remove legacy fields in _kernel
UP should just use _kernel.cpus[0].

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Andrew Boie authored and carlescufi committed May 8, 2020
1 parent b074896 commit a203d21
Showing 16 changed files with 44 additions and 61 deletions.
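The pattern the diff applies throughout, shown as a small hedged sketch (arch_is_in_isr is one of the call sites the commit touches; the before/after pair below is illustrative, not an excerpt of any single file): on uniprocessor (UP) builds, code stops reading the legacy _kernel.nested / _kernel.irq_stack / _kernel.current aliases and instead goes through the first, and only, entry of the per-CPU array.

    /* Before this commit (UP builds): legacy alias unioned into struct z_kernel */
    static ALWAYS_INLINE bool arch_is_in_isr(void)
    {
        return _kernel.nested != 0U;
    }

    /* After this commit: always use the per-CPU record; UP has exactly one */
    static ALWAYS_INLINE bool arch_is_in_isr(void)
    {
        return _kernel.cpus[0].nested != 0U;
    }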
8 changes: 4 additions & 4 deletions arch/arc/include/swap_macros.h
@@ -307,13 +307,13 @@
ld \reg2, [\reg1, ___cpu_t_nested_OFFSET]
#else
mov \reg1, _kernel
ld \reg2, [\reg1, ___kernel_t_nested_OFFSET]
ld \reg2, [\reg1, _kernel_offset_to_nested]
#endif
add \reg2, \reg2, 1
#ifdef CONFIG_SMP
st \reg2, [\reg1, ___cpu_t_nested_OFFSET]
#else
st \reg2, [\reg1, ___kernel_t_nested_OFFSET]
st \reg2, [\reg1, _kernel_offset_to_nested]
#endif
cmp \reg2, 1
.endm
@@ -329,13 +329,13 @@
ld \reg2, [\reg1, ___cpu_t_nested_OFFSET]
#else
mov \reg1, _kernel
ld \reg2, [\reg1, ___kernel_t_nested_OFFSET]
ld \reg2, [\reg1, _kernel_offset_to_nested]
#endif
sub \reg2, \reg2, 1
#ifdef CONFIG_SMP
st \reg2, [\reg1, ___cpu_t_nested_OFFSET]
#else
st \reg2, [\reg1, ___kernel_t_nested_OFFSET]
st \reg2, [\reg1, _kernel_offset_to_nested]
#endif
.endm

2 changes: 1 addition & 1 deletion arch/arm/include/aarch64/exc.h
@@ -34,7 +34,7 @@ extern void z_arm64_offload(void);

static ALWAYS_INLINE bool arch_is_in_isr(void)
{
return _kernel.nested != 0U;
return _kernel.cpus[0].nested != 0U;
}


4 changes: 2 additions & 2 deletions arch/nios2/core/irq_manage.c
@@ -85,7 +85,7 @@ void _enter_irq(u32_t ipending)
read_timer_start_of_isr();
#endif

_kernel.nested++;
_kernel.cpus[0].nested++;

#ifdef CONFIG_IRQ_OFFLOAD
z_irq_do_offload();
@@ -113,7 +113,7 @@ void _enter_irq(u32_t ipending)
#endif
}

_kernel.nested--;
_kernel.cpus[0].nested--;
#ifdef CONFIG_STACK_SENTINEL
z_check_stack_sentinel();
#endif
2 changes: 1 addition & 1 deletion arch/nios2/include/kernel_arch_func.h
@@ -43,7 +43,7 @@ FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason,

static inline bool arch_is_in_isr(void)
{
return _kernel.nested != 0U;
return _kernel.cpus[0].nested != 0U;
}

#ifdef CONFIG_IRQ_OFFLOAD
16 changes: 8 additions & 8 deletions arch/posix/core/swap.c
@@ -22,16 +22,16 @@
int arch_swap(unsigned int key)
{
/*
* struct k_thread * _kernel.current is the currently running thread
* struct k_thread * _current is the currently running thread
* struct k_thread * _kernel.ready_q.cache contains the next thread to
* run (cannot be NULL)
*
* Here a "real" arch would save all processor registers, stack pointer
* and so forth. But we do not need to do so because we use posix
* threads => those are all nicely kept by the native OS kernel
*/
_kernel.current->callee_saved.key = key;
_kernel.current->callee_saved.retval = -EAGAIN;
_current->callee_saved.key = key;
_current->callee_saved.retval = -EAGAIN;

/* retval may be modified with a call to
* arch_thread_return_value_set()
@@ -43,10 +43,10 @@ int arch_swap(unsigned int key)

posix_thread_status_t *this_thread_ptr =
(posix_thread_status_t *)
_kernel.current->callee_saved.thread_status;
_current->callee_saved.thread_status;


_kernel.current = _kernel.ready_q.cache;
_current = _kernel.ready_q.cache;

/*
* Here a "real" arch would load all processor registers for the thread
@@ -59,9 +59,9 @@

/* When we continue, _kernel->current points back to this thread */

irq_unlock(_kernel.current->callee_saved.key);
irq_unlock(_current->callee_saved.key);

return _kernel.current->callee_saved.retval;
return _current->callee_saved.retval;
}


@@ -83,7 +83,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread,

sys_trace_thread_switched_out();

_kernel.current = _kernel.ready_q.cache;
_current = _kernel.ready_q.cache;

sys_trace_thread_switched_in();

2 changes: 1 addition & 1 deletion arch/posix/include/kernel_arch_func.h
@@ -41,7 +41,7 @@ arch_thread_return_value_set(struct k_thread *thread, unsigned int value)

static inline bool arch_is_in_isr(void)
{
return _kernel.nested != 0U;
return _kernel.cpus[0].nested != 0U;
}

#endif /* _ASMLANGUAGE */
4 changes: 2 additions & 2 deletions arch/riscv/core/isr.S
@@ -270,7 +270,7 @@ is_interrupt:
RV_OP_STOREREG t0, 0x00(sp)

on_irq_stack:
/* Increment _kernel.nested variable */
/* Increment _kernel.cpus[0].nested variable */
lw t3, _kernel_offset_to_nested(t2)
addi t3, t3, 1
sw t3, _kernel_offset_to_nested(t2)
@@ -337,7 +337,7 @@ on_thread_stack:
/* Get reference to _kernel */
la t1, _kernel

/* Decrement _kernel.nested variable */
/* Decrement _kernel.cpus[0].nested variable */
lw t2, _kernel_offset_to_nested(t1)
addi t2, t2, -1
sw t2, _kernel_offset_to_nested(t1)
2 changes: 1 addition & 1 deletion arch/riscv/include/kernel_arch_func.h
@@ -37,7 +37,7 @@ FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,

static inline bool arch_is_in_isr(void)
{
return _kernel.nested != 0U;
return _kernel.cpus[0].nested != 0U;
}

#ifdef CONFIG_IRQ_OFFLOAD
2 changes: 1 addition & 1 deletion arch/x86/include/kernel_arch_func.h
@@ -31,7 +31,7 @@ static inline bool arch_is_in_isr(void)
__asm__ volatile ("popf");
return ret;
#else
return _kernel.nested != 0U;
return _kernel.cpus[0].nested != 0U;
#endif
}

6 changes: 3 additions & 3 deletions boards/posix/native_posix/irq_handler.c
@@ -76,11 +76,11 @@ void posix_irq_handler(void)
return;
}

if (_kernel.nested == 0) {
if (_kernel.cpus[0].nested == 0) {
may_swap = 0;
}

_kernel.nested++;
_kernel.cpus[0].nested++;

while ((irq_nbr = hw_irq_ctrl_get_highest_prio_irq()) != -1) {
int last_current_running_prio = hw_irq_ctrl_get_cur_prio();
@@ -96,7 +96,7 @@ void posix_irq_handler(void)
hw_irq_ctrl_set_cur_prio(last_current_running_prio);
}

_kernel.nested--;
_kernel.cpus[0].nested--;

/* Call swap if all the following is true:
* 1) may_swap was enabled
6 changes: 3 additions & 3 deletions boards/posix/nrf52_bsim/irq_handler.c
@@ -133,11 +133,11 @@ void posix_irq_handler(void)
return;
}

if (_kernel.nested == 0) {
if (_kernel.cpus[0].nested == 0) {
may_swap = 0;
}

_kernel.nested++;
_kernel.cpus[0].nested++;

while ((irq_nbr = hw_irq_ctrl_get_highest_prio_irq()) != -1) {
int last_current_running_prio = hw_irq_ctrl_get_cur_prio();
@@ -153,7 +153,7 @@ void posix_irq_handler(void)
hw_irq_ctrl_set_cur_prio(last_current_running_prio);
}

_kernel.nested--;
_kernel.cpus[0].nested--;

/* Call swap if all the following is true:
* 1) may_swap was enabled
6 changes: 3 additions & 3 deletions include/arch/x86/ia32/arch.h
@@ -253,7 +253,7 @@ static inline void arch_isr_direct_header(void)
/* We're not going to unlock IRQs, but we still need to increment this
* so that arch_is_in_isr() works
*/
++_kernel.nested;
++_kernel.cpus[0].nested;
}

/*
@@ -269,15 +269,15 @@ static inline void arch_isr_direct_footer(int swap)
#if defined(CONFIG_TRACING)
sys_trace_isr_exit();
#endif
--_kernel.nested;
--_kernel.cpus[0].nested;

/* Call swap if all the following is true:
*
* 1) swap argument was enabled to this function
* 2) We are not in a nested interrupt
* 3) Next thread to run in the ready queue is not this thread
*/
if (swap != 0 && _kernel.nested == 0 &&
if (swap != 0 && _kernel.cpus[0].nested == 0 &&
_kernel.ready_q.cache != _current) {
unsigned int flags;

22 changes: 2 additions & 20 deletions include/kernel_structs.h
@@ -133,25 +133,7 @@ struct _cpu {
typedef struct _cpu _cpu_t;

struct z_kernel {
/* For compatibility with pre-SMP code, union the first CPU
* record with the legacy fields so code can continue to use
* the "_kernel.XXX" expressions and assembly offsets.
*/
union {
struct _cpu cpus[CONFIG_MP_NUM_CPUS];
#ifndef CONFIG_SMP
struct {
/* nested interrupt count */
u32_t nested;

/* interrupt stack pointer base */
char *irq_stack;

/* currently scheduled thread */
struct k_thread *current;
};
#endif
};
struct _cpu cpus[CONFIG_MP_NUM_CPUS];

#ifdef CONFIG_SYS_CLOCK_EXISTS
/* queue of timeouts */
@@ -204,7 +186,7 @@ bool z_smp_cpu_mobile(void);

#else
#define _current_cpu (&_kernel.cpus[0])
#define _current _kernel.current
#define _current _kernel.cpus[0].current
#endif

#define _timeout_q _kernel.timeout_q
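A minimal usage sketch (hypothetical helper, not part of the commit): with the compatibility union gone, uniprocessor code that goes through the _current convenience macro still compiles unchanged, because the macro now expands to the cpus[0] field rather than the removed legacy member.

    /* Hypothetical caller, for illustration only */
    static inline struct k_thread *running_thread(void)
    {
        return _current; /* expands to _kernel.cpus[0].current on UP builds */
    }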
6 changes: 0 additions & 6 deletions kernel/include/kernel_offsets.h
@@ -23,12 +23,6 @@

GEN_ABS_SYM_BEGIN(_OffsetAbsSyms)

#ifndef CONFIG_SMP
GEN_OFFSET_SYM(_kernel_t, current);
GEN_OFFSET_SYM(_kernel_t, nested);
GEN_OFFSET_SYM(_kernel_t, irq_stack);
#endif

GEN_OFFSET_SYM(_cpu_t, current);
GEN_OFFSET_SYM(_cpu_t, nested);
GEN_OFFSET_SYM(_cpu_t, irq_stack);
11 changes: 7 additions & 4 deletions kernel/include/offsets_short.h
@@ -13,15 +13,18 @@
/* kernel */

/* main */

#ifndef CONFIG_SMP
/* Relies on _kernel.cpus being the first member of _kernel and having 1 element
*/
#define _kernel_offset_to_nested \
(___kernel_t_nested_OFFSET)
(___cpu_t_nested_OFFSET)

#define _kernel_offset_to_irq_stack \
(___kernel_t_irq_stack_OFFSET)
(___cpu_t_irq_stack_OFFSET)

#define _kernel_offset_to_current \
(___kernel_t_current_OFFSET)
(___cpu_t_current_OFFSET)
#endif /* CONFIG_SMP */

#define _kernel_offset_to_idle \
(___kernel_t_idle_OFFSET)
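The new comment relies on a layout invariant; a hedged sketch of how it could be stated with a C11 static assertion (illustrative only, not part of the commit): because cpus[] is the first member of struct z_kernel and UP builds have exactly one element, a ___cpu_t_*_OFFSET value is also a valid offset from &_kernel, which is what lets the assembly-visible _kernel_offset_to_* aliases point at the _cpu offsets.

    #include <stddef.h>

    /* Illustrative only: the invariant behind the comment in the hunk above */
    _Static_assert(offsetof(struct z_kernel, cpus) == 0,
                   "cpus[] must stay the first member of struct z_kernel");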
6 changes: 5 additions & 1 deletion subsys/debug/openocd.c
@@ -25,6 +25,10 @@ enum {
OPENOCD_OFFSET_T_COOP_FLOAT,
};

#if CONFIG_MP_NUM_CPUS > 1
#error "This code doesn't work properly with multiple CPUs enabled"
#endif

/* Forward-compatibility notes: 1) Only append items to this table; otherwise
* OpenOCD versions that expect less items will read garbage values.
* 2) Avoid incompatible changes that affect the interpretation of existing
@@ -36,7 +40,7 @@
size_t _kernel_openocd_offsets[] = {
/* Version 0 starts */
[OPENOCD_OFFSET_VERSION] = 1,
[OPENOCD_OFFSET_K_CURR_THREAD] = offsetof(struct z_kernel, current),
[OPENOCD_OFFSET_K_CURR_THREAD] = offsetof(struct _cpu, current),
[OPENOCD_OFFSET_K_THREADS] = offsetof(struct z_kernel, threads),
[OPENOCD_OFFSET_T_ENTRY] = offsetof(struct k_thread, entry),
[OPENOCD_OFFSET_T_NEXT_THREAD] = offsetof(struct k_thread, next_thread),
