unified/arc: add unified kernel support for ARC arch

- interrupt exit (both regular and fast) no longer reschedules if the
  current thread is a cooperative thread or if the scheduler is locked
  (see the C sketch after this list)

- the _nanokernel.flags cache of _current.flags is no longer used: it was
  a potential source of bugs and is not needed by the scheduling algorithm

- there is no 'task' field in the _nanokernel anymore: scheduling routines
  call _get_next_ready_thread instead

- thread initialization initializes new fields in the tcs, and does not
  initialize obsolete ones

- nano_private includes nano_internal.h from the unified directory

- the FIBER, TASK and PREEMPTIBLE flags do not exist anymore: the thread's
  priority drives its behaviour (a negative priority marks a cooperative
  thread)

- the tcs uses a dlist for queuing in both ready and wait queues instead
  of a custom singly-linked list

- other new fields in the tcs include a schedule-lock count, a
  back-pointer to init data (when the thread is static) and a pointer to
  swap data, needed when a thread pending on _Swap() must be passed more
  than just one value (e.g. k_stack_pop() needs both an error code and
  data)

- the 'fiber' and 'task' fields of _nanokernel are replaced with an O(1)
  ready queue, based on the microkernel's priority-bitmap-based one

- fiberRtnValueSet() is aliased to _set_thread_return_value since it
  also operates on preempt threads now

- _set_thread_return_value_with_data() sets the swap_data field in
  addition to a return value from _Swap()

- convenience aliases are created for shorter names:

  - _current is defined as _nanokernel.current
  - _ready_q is defined as _nanokernel.ready_q

- _Swap() sets the thread's return code to -EAGAIN before swapping it out,
  so that timeout handling does not have to set it (this solves thorny
  issues in some kernel objects).
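
In C terms, the interrupt-exit decision described in the first bullet (and
implemented in ARC assembly in the diffs below) amounts to the following
sketch. The field names and _is_next_thread_current() are taken from the
diff; must_reschedule() itself is illustrative and not part of the commit:

    /* illustrative only -- the commit implements this check in assembly */
    static inline int must_reschedule(struct tcs *current)
    {
        if (current->prio < 0) {
            return 0; /* negative priority: cooperative thread */
        }
        if (current->sched_locked > 0) {
            return 0; /* thread is holding the scheduler lock */
        }
        /* swap only if a better candidate than 'current' is now ready */
        return !_is_next_thread_current();
    }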

Change-Id: Ib9690173cbc36c36a9ec67e65590b40d758673de
Signed-off-by: Dmitriy Korovkin <dmitriy.korovkin@windriver.com>
commit 909bfffda9 by Dmitriy Korovkin, 2016-09-30 (committed by Anas Nashif)
8 changed files with 276 additions and 13 deletions


@@ -1,6 +1,10 @@
ifeq ($(CONFIG_KERNEL_V2),y)
ccflags-y += -I$(srctree)/kernel/unified/include
else
ccflags-y += -I$(srctree)/kernel/nanokernel/include
ccflags-y +=-I$(srctree)/arch/$(ARCH)/include
ccflags-y += -I$(srctree)/kernel/microkernel/include
endif
ccflags-y +=-I$(srctree)/arch/$(ARCH)/include
obj-y += thread.o thread_entry_wrapper.o \
cpu_idle.o fast_irq.o fatal.o fault.o \


@@ -37,6 +37,9 @@ GTEXT(_firq_stack_setup)
#if CONFIG_RGF_NUM_BANKS != 1
GDATA(_firq_stack)
#ifdef CONFIG_KERNEL_V2
GTEXT(_is_next_thread_current)
#endif
SECTION_VAR(NOINIT, _firq_stack)
.space CONFIG_FIRQ_STACK_SIZE
@@ -138,14 +141,37 @@ SECTION_FUNC(TEXT, _firq_exit)
* means that another bit is set so an interrupt was interrupted.
*/
breq r3, 1, _check_if_current_is_the_task
breq r3, 1, _firq_check_for_swap
b _firq_return
#endif
.balign 4
_check_if_current_is_the_task:
#ifdef CONFIG_KERNEL_V2
_firq_check_for_swap:
/* coop thread ? do not schedule */
ld_s r0, [r2, __tTCS_prio_OFFSET]
brlt r0, 0, _firq_return
/* scheduler locked ? do not schedule */
ld_s r0, [r2, __tTCS_sched_locked_OFFSET]
brgt r0, 0, _firq_return
/* check if the current thread needs to be rescheduled */
push_s r2
push_s r1
push_s blink
jl _is_next_thread_current
pop_s blink
pop_s r1
pop_s r2
breq r0, 0, _firq_reschedule
b _firq_return
#else
_firq_check_for_swap:
/* Check if the current is a task */
ld_s r0, [r2, __tTCS_flags_OFFSET]
and.f r0, r0, PREEMPTIBLE
bnz _check_if_a_fiber_is_ready
@@ -156,6 +182,7 @@ _check_if_a_fiber_is_ready:
ld_s r0, [r1, __tNANO_fiber_OFFSET] /* incoming fiber in r0 */
brne r0, 0, _firq_reschedule
b _firq_return
#endif /* CONFIG_KERNEL_V2 */
.balign 4
_firq_reschedule:
@@ -193,11 +220,27 @@ _firq_reschedule:
st _CAUSE_FIRQ, [r2, __tTCS_relinquish_cause_OFFSET]
#ifdef CONFIG_KERNEL_V2
/*
* Save needed registers to callee saved ones. It is faster than
* pushing them to stack. It is possible to do since program has
* just saved them and the calling routine will save them in turn
* if it uses them.
*/
mov_s r13, blink
mov_s r14, r1
jl _get_next_ready_thread
mov_s blink, r13
mov_s r1, r14
mov_s r2, r0
st_s r2, [r1, __tNANO_current_OFFSET]
#else
ld_s r2, [r1, __tNANO_fiber_OFFSET]
st_s r2, [r1, __tNANO_current_OFFSET]
ld_s r3, [r2, __tTCS_link_OFFSET]
st_s r3, [r1, __tNANO_fiber_OFFSET]
#endif
#ifdef CONFIG_ARC_STACK_CHECKING
/* Use stack top and down registers from restored context */
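
The _is_next_thread_current() routine called above comes from the unified
kernel's scheduler and is not defined in this diff. A plausible C
equivalent -- an assumption, since only its declaration appears here -- is
a comparison of the scheduler's pick against _current:

    /* sketch: nonzero when the current thread is still the best choice */
    static inline int _is_next_thread_current(void)
    {
        return _get_next_ready_thread() == _current;
    }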


@@ -189,7 +189,30 @@ _trap_return:
#endif
.balign 4
#ifdef CONFIG_KERNEL_V2
_trap_check_for_swap:
/* coop thread ? do not schedule */
ld_s r0, [r2, __tTCS_prio_OFFSET]
brlt r0, 0, _trap_return
/* scheduler locked ? do not schedule */
ld_s r0, [r2, __tTCS_sched_locked_OFFSET]
brgt r0, 0, _trap_return
/* check if the current thread needs to be rescheduled */
push_s r2
push_s r1
push_s blink
jl _is_next_thread_current
pop_s blink
pop_s r1
pop_s r2
brne r0, 0, _trap_return
/* go on to _trap_reschedule by default */
#else
_trap_check_for_swap:
/* Check if the current is a task */
ld_s r0, [r2, __tTCS_flags_OFFSET]
and.f r0, r0, PREEMPTIBLE
bnz _e_check_if_a_fiber_is_ready
@@ -200,6 +223,7 @@ _e_check_if_a_fiber_is_ready:
ld_s r0, [r1, __tNANO_fiber_OFFSET] /* incoming fiber in r0 */
brne r0, 0, _trap_reschedule
b _trap_return
#endif /* CONFIG_KERNEL_V2 */
.balign 4
_trap_reschedule:
@@ -209,11 +233,29 @@ _trap_reschedule:
st _CAUSE_RIRQ, [r2, __tTCS_relinquish_cause_OFFSET]
/* note: Ok to use _CAUSE_RIRQ since everything is saved */
#ifdef CONFIG_KERNEL_V2
/*
* Save needed registers to callee saved ones. It is faster than
* pushing registers to stack. It is possible to do since program has
* just saved them and the calling routine will save them in turn
* if it uses them.
*/
mov_s r13, blink
mov_s r14, r0
mov_s r15, r1
jl _get_next_ready_thread
mov_s r2, r0
mov_s r1, r15
mov_s r0, r14
mov_s blink, r13
st_s r2, [r1, __tNANO_current_OFFSET]
#else
ld_s r2, [r1, __tNANO_fiber_OFFSET]
st_s r2, [r1, __tNANO_current_OFFSET]
ld_s r3, [r2, __tTCS_link_OFFSET]
st_s r3, [r1, __tNANO_fiber_OFFSET]
#endif /* CONFIG_KERNEL_V2 */
/* clear AE bit to forget this was an exception */
lr r3, [_ARC_V2_STATUS32]


@@ -35,6 +35,9 @@
GTEXT(_rirq_enter)
GTEXT(_rirq_exit)
GTEXT(_rirq_common_interrupt_swap)
#ifdef CONFIG_KERNEL_V2
GTEXT(_is_next_thread_current)
#endif
#if CONFIG_NUM_IRQ_PRIO_LEVELS > 2
#error "NUM_IRQ_PRIO_LEVELS>2 is not supported."
@@ -118,6 +121,41 @@ SECTION_FUNC(TEXT, _rirq_exit)
* b) needs to load it to restore the interrupted context.
*/
#ifdef CONFIG_KERNEL_V2
/* coop thread ? do not schedule */
ld_s r0, [r2, __tTCS_prio_OFFSET]
cmp_s r0, 0
blt.d _rirq_no_reschedule
ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
/* scheduler locked ? do not schedule */
ld_s r0, [r2, __tTCS_sched_locked_OFFSET]
brgt.d r0, 0, _rirq_no_reschedule
ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
push_s r2
push_s r1
/* check if the current thread needs to be rescheduled */
push_s blink
jl _is_next_thread_current
pop_s blink
pop_s r1
pop_s r2
brne.d r0, 0, _rirq_no_reschedule
ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
/*
* Get the next scheduled thread. On _get_next_ready_thread
* return it is stored in r0.
*/
push_s r2
push_s r1
push_s blink
jl _get_next_ready_thread
pop_s blink
pop_s r1
pop_s r2
#else
ld_s r0, [r2, __tTCS_flags_OFFSET]
and.f r0, r0, PREEMPTIBLE
bz.d _rirq_no_reschedule
@@ -127,6 +165,7 @@ SECTION_FUNC(TEXT, _rirq_exit)
cmp r0, 0
bz.d _rirq_no_reschedule
ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
#endif /* CONFIG_KERNEL_V2 */
.balign 4
_rirq_reschedule:
@@ -139,8 +178,10 @@ _rirq_reschedule:
/* incoming fiber is in r0: it becomes the new 'current' */
mov r2, r0
st_s r2, [r1, __tNANO_current_OFFSET]
#if !defined(CONFIG_KERNEL_V2)
ld_s r3, [r2, __tTCS_link_OFFSET]
st_s r3, [r1, __tNANO_fiber_OFFSET]
#endif
_rirq_common_interrupt_swap:
/* r2 contains pointer to new thread */


@@ -35,6 +35,11 @@
GTEXT(_Swap)
#ifdef CONFIG_KERNEL_V2
GTEXT(_get_next_ready_thread)
GDATA(_k_neg_eagain)
#endif
GDATA(_nanokernel)
/**
@@ -80,6 +85,17 @@ SECTION_FUNC(TEXT, _Swap)
st_s r0, [r2, __tTCS_intlock_key_OFFSET]
st _CAUSE_COOP, [r2, __tTCS_relinquish_cause_OFFSET]
#ifdef CONFIG_KERNEL_V2
/*
* Carve space for the return value. Setting it to a default of
* -EAGAIN eliminates the need for the timeout code to set it.
* If another value is ever needed, it can be modified with
* fiberRtnValueSet().
*/
ld r3, [_k_neg_eagain]
st_s r3, [r2, __tTCS_return_value_OFFSET]
#endif /* CONFIG_KERNEL_V2 */
/*
* Save status32 and blink on the stack before the callee-saved registers.
* This is the same layout as the start of an IRQ stack frame.
@@ -95,6 +111,24 @@ SECTION_FUNC(TEXT, _Swap)
_save_callee_saved_regs
/* find out incoming thread (fiber or task) */
#ifdef CONFIG_KERNEL_V2
/*
* Save needed registers to callee saved ones. It is faster than
* pushing them to stack. It is possible to do since program has
* just saved them and the calling routine will save them in turn
* if it uses them.
*/
mov_s r13, blink
mov_s r14, r0
mov_s r15, r1
jl _get_next_ready_thread
mov_s r2, r0
mov_s r1, r15
mov_s r0, r14
mov_s blink, r13
#else
ld_s r2, [r1, __tNANO_fiber_OFFSET]
breq r2, 0, _swap_to_the_task
@@ -111,13 +145,14 @@ _swap_to_the_task:
ld_s r2, [r1, __tNANO_task_OFFSET]
/* fall through */
#endif /* CONFIG_KERNEL_V2 */
.balign 4
_finish_swapping_to_thread:
/* entering here, r2 contains the new current thread */
#if 0
#if 0 && !defined(CONFIG_KERNEL_V2)
/* don't save flags in tNANO: slower, error-prone, and might not even give
* a speed boost where it's supposed to */
ld_s r3, [r2, __tTCS_flags_OFFSET]
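
The -EAGAIN pre-set above pays off on the waker side of kernel objects. A
hypothetical sketch (pending_thread and item are made-up names; the real
k_stack_pop() wakeup path is not part of this commit):

    /* on timeout: nothing to do, _Swap() already stored -EAGAIN;
     * on success: hand back a 0 status plus the popped item
     */
    _set_thread_return_value_with_data(pending_thread, 0, (void *)item);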


@@ -25,6 +25,10 @@
#include <toolchain.h>
#include <sections.h>
#ifdef CONFIG_KERNEL_V2
#include <nano_private.h>
#endif
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
#define PRINTK(...) printk(__VA_ARGS__)
@@ -33,11 +37,16 @@
#endif
#ifdef CONFIG_MICROKERNEL
extern void _TaskAbort(void);
static inline void nonEssentialTaskAbort(void)
{
PRINTK("Fatal fault in task ! Aborting task.\n");
#if defined(CONFIG_KERNEL_V2)
k_thread_abort(_current);
#else
extern void _TaskAbort(void);
_TaskAbort();
#endif
}
#define NON_ESSENTIAL_TASK_ABORT() nonEssentialTaskAbort()
#else


@@ -96,8 +96,8 @@ static ALWAYS_INLINE void thread_monitor_init(struct tcs *tcs)
* @param parameter1 first param to entry point
* @param parameter2 second param to entry point
* @param parameter3 third param to entry point
* @param fiber priority, -1 for task
* @param options is unused (saved for future expansion)
* @param priority thread priority
* @param options thread options: ESSENTIAL
*
* @return N/A
*/
@@ -138,8 +138,19 @@ void _new_thread(char *pStackMem, unsigned stackSize,
#else
pInitCtx->status32 = _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL);
#endif
#ifdef CONFIG_KERNEL_V2
/* k_q_node initialized upon first insertion in a list */
tcs->flags = options | K_PRESTART;
tcs->sched_locked = 0;
/* static threads overwrite them afterwards with real values */
tcs->init_data = NULL;
tcs->fn_abort = NULL;
#else
tcs->link = NULL;
tcs->flags = priority == -1 ? TASK | PREEMPTIBLE : FIBER;
#endif
tcs->prio = priority;
#ifdef CONFIG_THREAD_CUSTOM_DATA
@@ -156,7 +167,7 @@
tcs->entry = (struct __thread_entry *)(pInitCtx);
#endif
#ifdef CONFIG_MICROKERNEL
#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_MICROKERNEL)
tcs->uk_task_ptr = uk_task_ptr;
#else
ARG_UNUSED(uk_task_ptr);
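
The K_PRESTART bit set above is what keeps a freshly created thread from
running before it is started. A sketch of the likely counterpart operation
(the helper name is assumed; its definition is not in this diff):

    /* starting a thread clears the 'has not yet started' state bit */
    static inline void _mark_thread_as_started(struct tcs *tcs)
    {
        tcs->flags &= ~K_PRESTART;
    }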


@@ -40,8 +40,13 @@
#include <vector_table.h>
#ifndef _ASMLANGUAGE
#ifdef CONFIG_KERNEL_V2
#include <kernel.h>
#include <../../../kernel/unified/include/nano_internal.h>
#else
#include <nanokernel.h> /* public nanokernel API */
#include <../../../kernel/nanokernel/include/nano_internal.h>
#endif
#include <stdint.h>
#include <misc/util.h>
#include <misc/dlist.h>
@@ -134,13 +139,28 @@ typedef struct callee_saved tCalleeSaved;
/* Bitmask definitions for the struct tcs->flags bit field */
#ifdef CONFIG_KERNEL_V2
#define K_STATIC 0x00000800
#define K_READY 0x00000000 /* Thread is ready to run */
#define K_TIMING 0x00001000 /* Thread is waiting on a timeout */
#define K_PENDING 0x00002000 /* Thread is waiting on an object */
#define K_PRESTART 0x00004000 /* Thread has not yet started */
#define K_DEAD 0x00008000 /* Thread has terminated */
#define K_SUSPENDED 0x00010000 /* Thread is suspended */
#define K_DUMMY 0x00020000 /* Not a real thread */
#define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \
K_DEAD | K_SUSPENDED | K_DUMMY)
#else
#define FIBER 0x000
#define TASK 0x001 /* 1 = task, 0 = fiber */
#define INT_ACTIVE 0x002 /* 1 = executing context is interrupt handler */
#define EXC_ACTIVE 0x004 /* 1 = executing context is exception handler */
#define USE_FP 0x010 /* 1 = thread uses floating point unit */
#define PREEMPTIBLE 0x020 /* 1 = preemptible thread */
#endif
#define USE_FP 0x010 /* 1 = thread uses floating point unit */
#define ESSENTIAL 0x200 /* 1 = system thread that must not abort */
#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
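
K_EXECUTION_MASK groups every state bit that prevents a thread from
executing, which reduces the readiness test to a single mask check. A
sketch (the helper name is assumed, not shown in this diff):

    /* ready = not timing out, pending, prestarted, dead, suspended or dummy */
    static inline int _is_thread_ready(struct tcs *tcs)
    {
        return (tcs->flags & K_EXECUTION_MASK) == K_READY;
    }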
@@ -162,15 +182,38 @@ typedef struct callee_saved tCalleeSaved;
#ifndef _ASMLANGUAGE
#ifdef CONFIG_KERNEL_V2
/* 'struct tcs_base' must match the beginning of 'struct tcs' */
struct tcs_base {
sys_dnode_t k_q_node;
uint32_t flags;
int prio;
void *swap_data;
#ifdef CONFIG_NANO_TIMEOUTS
struct _timeout timeout;
#endif
};
#endif
struct tcs {
#ifdef CONFIG_KERNEL_V2
sys_dnode_t k_q_node; /* node object in any kernel queue */
uint32_t flags;
int prio;
void *swap_data;
#ifdef CONFIG_NANO_TIMEOUTS
struct _timeout timeout;
#endif
#else
struct tcs *link; /* node in singly-linked list
* _nanokernel.fibers
*/
uint32_t flags; /* bitmask of flags above */
int prio; /* fiber priority, -1 for a task */
#endif /* CONFIG_KERNEL_V2 */
uint32_t intlock_key; /* interrupt key when relinquishing control */
int relinquish_cause; /* one of the _CAUSE_xxxx definitions above */
unsigned int return_value;/* return value from _Swap */
int prio; /* fiber priority, -1 for a task */
#ifdef CONFIG_THREAD_CUSTOM_DATA
void *custom_data; /* available for custom use */
#endif
@@ -180,7 +223,7 @@ struct tcs {
struct __thread_entry *entry; /* thread entry and parameters description */
struct tcs *next_thread; /* next item in list of ALL fiber+tasks */
#endif
#ifdef CONFIG_NANO_TIMEOUTS
#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_NANO_TIMEOUTS)
struct _nano_timeout nano_timeout;
#endif
#ifdef CONFIG_ERRNO
@@ -189,14 +232,29 @@
#ifdef CONFIG_ARC_STACK_CHECKING
uint32_t stack_top;
#endif
#ifdef CONFIG_MICROKERNEL
#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_MICROKERNEL)
void *uk_task_ptr;
#endif
#ifdef CONFIG_KERNEL_V2
atomic_t sched_locked;
void *init_data;
void (*fn_abort)(void);
#endif
};
#ifdef CONFIG_KERNEL_V2
struct ready_q {
struct k_thread *cache;
uint32_t prio_bmap[1];
sys_dlist_t q[K_NUM_PRIORITIES];
};
#endif
struct s_NANO {
#if !defined(CONFIG_KERNEL_V2)
struct tcs *fiber; /* singly linked list of runnable fibers */
struct tcs *task; /* current task the nanokernel knows about */
#endif
struct tcs *current; /* currently scheduled thread (fiber or task) */
#ifdef CONFIG_THREAD_MONITOR
@@ -224,6 +282,9 @@
int32_t task_timeout;
#endif
#endif
#ifdef CONFIG_KERNEL_V2
struct ready_q ready_q;
#endif
};
typedef struct s_NANO tNANO;
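
The ready_q above is what makes scheduling O(1): prio_bmap tracks which
per-priority queues are non-empty, so picking the next thread is one bit
scan plus one list-head peek. A sketch, assuming bit n of prio_bmap[0] is
set while q[n] holds at least one thread (the commit's real routine,
_get_next_ready_thread, also maintains the 'cache' pointer, which this
sketch ignores):

    /* find_lsb_set() is 1-based; bit 0 corresponds to the highest priority */
    static struct k_thread *next_ready_thread(struct ready_q *rq)
    {
        int prio = find_lsb_set(rq->prio_bmap[0]) - 1;
        sys_dnode_t *node = sys_dlist_peek_head(&rq->q[prio]);

        return CONTAINER_OF(node, struct k_thread, k_q_node);
    }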
@@ -255,13 +316,28 @@ static ALWAYS_INLINE void fiberRtnValueSet(struct tcs *fiber, unsigned int value
fiber->return_value = value;
}
#ifdef CONFIG_KERNEL_V2
#define _current _nanokernel.current
#define _ready_q _nanokernel.ready_q
#define _timeout_q _nanokernel.timeout_q
#define _set_thread_return_value fiberRtnValueSet
static ALWAYS_INLINE void
_set_thread_return_value_with_data(struct k_thread *thread, unsigned int value,
void *data)
{
_set_thread_return_value(thread, value);
thread->swap_data = data;
}
#define _IDLE_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES)
#endif /* CONFIG_KERNEL_V2 */
/**
*
* @brief Indicates if kernel is handling interrupt
*
* @return 1 if interrupt handler is executed, 0 otherwise
*/
static ALWAYS_INLINE int _IS_IN_ISR(void)
static ALWAYS_INLINE int _is_in_isr(void)
{
uint32_t act = _arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_ACT);
#if CONFIG_IRQ_OFFLOAD
@@ -274,6 +350,8 @@ static ALWAYS_INLINE int _IS_IN_ISR(void)
return ((act & 0xffff) != 0);
}
#define _IS_IN_ISR _is_in_isr
/**
*
* @brief Indicates the interrupt number of the highest priority