nios2: port to unified kernel

With this patch we introduce unified kernel support for NIOS II.
Not all test cases have been ported, but the following command
currently succeeds with 43/43 passing test cases:

 $ sanitycheck --arch=nios2 -xKERNEL_TYPE=unified \
         --tag=unified_capable

Issue: ZEP-934
Change-Id: Id8effa0369a6a22c4d0a789fa2a8e108af0e0786
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Authored by Andrew Boie on 2016-10-25 11:47:52 -07:00; committed by Benjamin Walsh
Commit 431607c20a
10 changed files with 212 additions and 54 deletions

View file

@@ -1,6 +1,10 @@
+ifeq ($(CONFIG_KERNEL_V2),y)
+ccflags-y += -I$(srctree)/kernel/unified/include
+else
 ccflags-y += -I$(srctree)/kernel/nanokernel/include
-ccflags-y +=-I$(srctree)/arch/$(ARCH)/include
 ccflags-y += -I$(srctree)/kernel/microkernel/include
+endif
+ccflags-y +=-I$(srctree)/arch/$(ARCH)/include
 
 obj-y += reset.o irq_manage.o fatal.o swap.o thread.o \
 	cpu_idle.o irq_offload.o prep_c.o crt0.o \

View file

@@ -25,6 +25,9 @@ GTEXT(_exception)
 /* import */
 GTEXT(_Fault)
 GTEXT(_Swap)
+#ifdef CONFIG_KERNEL_V2
+GTEXT(_is_next_thread_current)
+#endif
 #ifdef CONFIG_IRQ_OFFLOAD
 GTEXT(_irq_do_offload)
 GTEXT(_offload_routine)
@@ -125,6 +128,24 @@ on_irq_stack:
 	movhi r10, %hi(_nanokernel)
 	ori r10, r10, %lo(_nanokernel)
+	ldw r11, __tNANO_current_OFFSET(r10)
+
+#ifdef CONFIG_KERNEL_V2
+	/* Determine whether the execution of the ISR requires a context
+	 * switch
+	 */
+
+	/* Do not reschedule coop threads (threads that have negative prio) */
+	ldw r12, __tTCS_prio_OFFSET(r11)
+	blt r12, zero, no_reschedule
+
+	/* Do not reschedule if scheduler is locked */
+	ldw r12, __tTCS_sched_locked_OFFSET(r11)
+	bne r12, zero, no_reschedule
+
+	/* Call into the kernel to see if a scheduling decision is necessary */
+	call _is_next_thread_current
+	bne r2, zero, no_reschedule
+#else
 	/* Determine whether the execution of the ISR requires a context
 	 * switch. If the interrupted thread is PREEMPTIBLE (a task) and
 	 * _nanokernel.fiber is non-NULL, a _Swap() needs to occur.
@@ -133,7 +154,6 @@ on_irq_stack:
 	/* Check (_nanokernel.current->flags & PREEMPTIBLE), if not
 	 * goto no_reschedule
 	 */
-	ldw r11, __tNANO_current_OFFSET(r10)
 	ldw r12, __tTCS_flags_OFFSET(r11)
 	movi r13, PREEMPTIBLE
 	and r12, r13, r12
@@ -142,6 +162,7 @@ on_irq_stack:
 	/* Check _nanokernel.fiber != NULL, if NULL goto no_reschedule */
 	ldw r11, __tNANO_fiber_OFFSET(r10)
 	beq r11, zero, no_reschedule
+#endif /* CONFIG_KERNEL_V2 */
 
 	/*
 	 * A context reschedule is required: keep the volatile registers of

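Note: the CONFIG_KERNEL_V2 branch added above encodes the unified scheduler's preemption rules on interrupt exit. A rough C paraphrase of the assembly (a sketch only, not code from this patch; the struct name is hypothetical, while prio, sched_locked, and _is_next_thread_current() are taken from the diff; per the bne above, a nonzero return from _is_next_thread_current() means the current thread keeps running):

    /* Sketch only: C paraphrase of the reschedule check above. */
    struct tcs_sketch {
    	int prio;         /* negative for cooperative threads */
    	int sched_locked; /* nonzero while the scheduler is locked */
    };

    extern int _is_next_thread_current(void);

    static int needs_reschedule(const struct tcs_sketch *current)
    {
    	if (current->prio < 0)
    		return 0; /* coop thread: never preempt */
    	if (current->sched_locked)
    		return 0; /* scheduler locked: never preempt */
    	return !_is_next_thread_current(); /* ask the scheduler */
    }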
View file

@@ -234,7 +234,11 @@ FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
 	if (curCtx == NANO_CTX_TASK) {
 		extern FUNC_NORETURN void _TaskAbort(void);
 		printk("Fatal task error! Aborting task.\n");
+#ifdef CONFIG_KERNEL_V2
+		k_thread_abort(_current);
+#else
 		_TaskAbort();
+#endif
 	} else
 #endif /* CONFIG_MICROKERNEL */
 	{

View file

@@ -80,6 +80,8 @@ void _enter_irq(uint32_t ipending)
 {
 	int index;
 
+	_nanokernel.nested++;
+
 #ifdef CONFIG_IRQ_OFFLOAD
 	_irq_do_offload();
 #endif
@@ -97,5 +99,7 @@ void _enter_irq(uint32_t ipending)
 		ite = &_sw_isr_table[index];
 		ite->isr(ite->arg);
 	}
+
+	_nanokernel.nested--;
 }

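Note: the nested counter bracketing added here is what enables the cheaper interrupt-context test introduced in nano_private.h further down: instead of checking whether the stack pointer falls inside the interrupt stack, the kernel simply counts interrupt entries and exits. The pattern as a standalone C sketch (hypothetical names, not code from this patch):

    /* Sketch only: a plain counter standing in for _nanokernel.nested. */
    static volatile int nested_sketch;

    void enter_irq_sketch(void)
    {
    	nested_sketch++; /* entering interrupt context */
    	/* ... look up and run the handler from the SW ISR table ... */
    	nested_sketch--; /* leaving interrupt context */
    }

    #define IS_IN_ISR_SKETCH() (nested_sketch != 0)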
View file

@@ -25,6 +25,10 @@ GTEXT(_thread_entry_wrapper)
 /* imports */
 GTEXT(_sys_k_event_logger_context_switch)
+#ifdef CONFIG_KERNEL_V2
+GTEXT(_get_next_ready_thread)
+GTEXT(_k_neg_eagain)
+#endif
 
 /* unsigned int _Swap(unsigned int key)
  *
@@ -61,6 +65,14 @@ SECTION_FUNC(exception.other, _Swap)
 	 */
 	stw r4, __tTCS_coopReg_OFFSET + __t_coop_key_OFFSET(r11)
 
+#ifdef CONFIG_KERNEL_V2
+	/* Populate default return value */
+	movhi r5, %hi(_k_neg_eagain)
+	ori r5, r5, %lo(_k_neg_eagain)
+	ldw r4, (r5)
+	stw r4, __tTCS_coopReg_OFFSET + __t_coop_retval_OFFSET(r11)
+#endif /* CONFIG_KERNEL_V2 */
+
 #if CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
 	call _sys_k_event_logger_context_switch
 	/* Restore caller-saved r10. We could have stuck its value
@@ -68,51 +80,65 @@ SECTION_FUNC(exception.other, _Swap)
 	 */
 	movhi r10, %hi(_nanokernel)
 	ori r10, r10, %lo(_nanokernel)
-#endif
+#endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
 
+#ifdef CONFIG_KERNEL_V2
+	/* Assign to _nanokernel.current the return value of
+	 * _get_next_ready_thread()
+	 */
+	call _get_next_ready_thread
+	movhi r10, %hi(_nanokernel)
+	ori r10, r10, %lo(_nanokernel)
+	stw r2, __tNANO_current_OFFSET(r10)
+#else
 	/* Find the next context to run. Choose _nanokernel.fiber
 	 * if non-NULL */
-	ldw r11, __tNANO_fiber_OFFSET(r10)
-	beq r11, zero, not_fiber
+	ldw r2, __tNANO_fiber_OFFSET(r10)
+	beq r2, zero, not_fiber
 
 	/* _nanokernel.fiber = _nanokernel.fiber->link */
-	ldw r14, __tTCS_link_OFFSET(r11)
+	ldw r14, __tTCS_link_OFFSET(r2)
 	stw r14, __tNANO_fiber_OFFSET(r10)
 	br next_chosen
 
 not_fiber:
 	/* Fiber was NULL, we'll choose nanokernel.task */
-	ldw r11, __tNANO_task_OFFSET(r10)
+	ldw r2, __tNANO_task_OFFSET(r10)
 
 next_chosen:
-	/* Set _nanokernel.current to value we chose for r11 */
-	stw r11, __tNANO_current_OFFSET(r10)
+	/* Set _nanokernel.current to value we chose for r2 */
+	stw r2, __tNANO_current_OFFSET(r10)
+#endif /* CONFIG_KERNEL_V2 */
+
+	/* At this point r2 points to the next thread to be swapped in */
 
 	/* Restore callee-saved registers and switch to the incoming
 	 * thread's stack
 	 */
-	ldw r16, __tTCS_coopReg_OFFSET + __t_coop_r16_OFFSET(r11)
-	ldw r17, __tTCS_coopReg_OFFSET + __t_coop_r17_OFFSET(r11)
-	ldw r18, __tTCS_coopReg_OFFSET + __t_coop_r18_OFFSET(r11)
-	ldw r19, __tTCS_coopReg_OFFSET + __t_coop_r19_OFFSET(r11)
-	ldw r20, __tTCS_coopReg_OFFSET + __t_coop_r20_OFFSET(r11)
-	ldw r21, __tTCS_coopReg_OFFSET + __t_coop_r21_OFFSET(r11)
-	ldw r22, __tTCS_coopReg_OFFSET + __t_coop_r22_OFFSET(r11)
-	ldw r23, __tTCS_coopReg_OFFSET + __t_coop_r23_OFFSET(r11)
-	ldw r28, __tTCS_coopReg_OFFSET + __t_coop_r28_OFFSET(r11)
-	ldw ra, __tTCS_coopReg_OFFSET + __t_coop_ra_OFFSET(r11)
-	ldw sp, __tTCS_coopReg_OFFSET + __t_coop_sp_OFFSET(r11)
+	ldw r16, __tTCS_coopReg_OFFSET + __t_coop_r16_OFFSET(r2)
+	ldw r17, __tTCS_coopReg_OFFSET + __t_coop_r17_OFFSET(r2)
+	ldw r18, __tTCS_coopReg_OFFSET + __t_coop_r18_OFFSET(r2)
+	ldw r19, __tTCS_coopReg_OFFSET + __t_coop_r19_OFFSET(r2)
+	ldw r20, __tTCS_coopReg_OFFSET + __t_coop_r20_OFFSET(r2)
+	ldw r21, __tTCS_coopReg_OFFSET + __t_coop_r21_OFFSET(r2)
+	ldw r22, __tTCS_coopReg_OFFSET + __t_coop_r22_OFFSET(r2)
+	ldw r23, __tTCS_coopReg_OFFSET + __t_coop_r23_OFFSET(r2)
+	ldw r28, __tTCS_coopReg_OFFSET + __t_coop_r28_OFFSET(r2)
+	ldw ra, __tTCS_coopReg_OFFSET + __t_coop_ra_OFFSET(r2)
+	ldw sp, __tTCS_coopReg_OFFSET + __t_coop_sp_OFFSET(r2)
 
-	/* Load return value into r2 (return value register). garbage
+	/* We need to irq_unlock(current->coopReg.key);
+	 * key was supplied as argument to _Swap(). Fetch it.
+	 */
+	ldw r3, __tTCS_coopReg_OFFSET + __t_coop_key_OFFSET(r2)
+
+	/* Load return value into r2 (return value register). -EAGAIN
 	 * unless someone previously called fiberRtnValueSet(). Do this
 	 * before we potentially unlock interrupts.
 	 */
-	ldw r2, __tTCS_coopReg_OFFSET + __t_coop_retval_OFFSET(r11)
+	ldw r2, __tTCS_coopReg_OFFSET + __t_coop_retval_OFFSET(r2)
 
-	/* irq_unlock(fiber->coopReg.key);
-	 * key was supplied as argument to _Swap()
-	 */
-	ldw r3, __tTCS_coopReg_OFFSET + __t_coop_key_OFFSET(r11)
+	/* Now do irq_unlock(current->coopReg.key) */
 
 #if (ALT_CPU_NUM_OF_SHADOW_REG_SETS > 0) || \
     (defined ALT_CPU_EIC_PRESENT) || \
    (defined ALT_CPU_MMU_PRESENT) || \

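Note: pre-loading the saved return value with _k_neg_eagain gives _Swap() a defined default: if the thread is swapped back in without anyone having called fiberRtnValueSet() on it (such as when a timed wait expires), _Swap() now returns -EAGAIN instead of garbage. The contract in outline, as a C sketch (not code from this patch; the struct is hypothetical, _k_neg_eagain is the kernel symbol imported above):

    /* Sketch only: the save/restore contract of _Swap() under KERNEL_V2. */
    struct coop_sketch {
    	unsigned int key; /* irq lock key passed to _Swap() */
    	int retval;       /* value _Swap() will eventually return */
    };

    extern const int _k_neg_eagain; /* kernel-provided constant, holds -EAGAIN */

    int swap_sketch(struct coop_sketch *out, unsigned int key)
    {
    	out->key = key;
    	out->retval = _k_neg_eagain; /* default unless overridden */
    	/* ... save callee-saved registers, switch to the next ready
    	 * thread, run until this thread is switched back in ...
    	 */
    	return out->retval; /* -EAGAIN unless fiberRtnValueSet() ran */
    }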
View file

@@ -14,7 +14,14 @@
  * limitations under the License.
  */
 
-#include <nanokernel.h>
+#ifdef CONFIG_KERNEL_V2
+#include <kernel.h> /* public kernel API */
+#include <../../../kernel/unified/include/nano_internal.h>
+#else
+#include <nanokernel.h> /* public nanokernel API */
+#include <../../../kernel/nanokernel/include/nano_internal.h>
+#endif
 #include <nano_private.h>
 #include <wait_q.h>
 #include <string.h>
@@ -44,9 +51,7 @@ static ALWAYS_INLINE void thread_monitor_init(struct tcs *tcs)
  * to _thread_entry() since this arch puts the first four arguments
  * in r4-r7 and not on the stack
  */
-void _thread_entry_wrapper(_thread_entry_t, _thread_arg_t,
-			   _thread_arg_t, _thread_arg_t);
+void _thread_entry_wrapper(_thread_entry_t, void *, void *, void *);
 
 struct init_stack_frame {
 	/* top of the stack / most recently pushed */
@@ -55,9 +60,9 @@ struct init_stack_frame {
 	 * into argument registers before calling _thread_entry()
 	 */
 	_thread_entry_t entry_point;
-	_thread_arg_t arg1;
-	_thread_arg_t arg2;
-	_thread_arg_t arg3;
+	void *arg1;
+	void *arg2;
+	void *arg3;
 
 	/* least recently pushed */
 };
@@ -65,8 +70,7 @@ struct init_stack_frame {
 
 void _new_thread(char *stack_memory, unsigned stack_size,
 		 void *uk_task_ptr, _thread_entry_t thread_func,
-		 _thread_arg_t arg1, _thread_arg_t arg2,
-		 _thread_arg_t arg3,
+		 void *arg1, void *arg2, void *arg3,
 		 int priority, unsigned options)
 {
 	struct tcs *tcs;
@@ -87,22 +91,31 @@ void _new_thread(char *stack_memory, unsigned stack_size,
 
 	/* Initialize various struct tcs members */
 	tcs = (struct tcs *)stack_memory;
-	tcs->link = (struct tcs *)NULL;
 	tcs->prio = priority;
+
+#ifdef CONFIG_KERNEL_V2
+	/* k_q_node initialized upon first insertion in a list */
+	tcs->flags = options | K_PRESTART;
+	tcs->sched_locked = 0;
+
+	/* static threads overwrite it afterwards with real value */
+	tcs->init_data = NULL;
+	tcs->fn_abort = NULL;
+#else
 	if (priority == -1) {
 		tcs->flags = PREEMPTIBLE | TASK;
 	} else {
 		tcs->flags = FIBER;
 	}
+	tcs->link = (struct tcs *)NULL; /* thread not inserted into list yet */
+#endif /* CONFIG_KERNEL_V2 */
 
 #ifdef CONFIG_THREAD_CUSTOM_DATA
 	/* Initialize custom data field (value is opaque to kernel) */
 	tcs->custom_data = NULL;
 #endif
 
-#ifdef CONFIG_MICROKERNEL
+#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_MICROKERNEL)
 	tcs->uk_task_ptr = uk_task_ptr;
 #else
 	ARG_UNUSED(uk_task_ptr);

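Note: for reference, the plumbing these new void * prototypes feed into: _new_thread() places an init_stack_frame at the top of the new thread's stack, and _thread_entry_wrapper pops those four words into r4-r7, the Nios II argument registers, before jumping to _thread_entry(). A sketch of the frame seeding under those assumptions (hypothetical helper and type names, not code from this patch):

    /* Sketch only: how the init_stack_frame members map to registers. */
    typedef void (*thread_entry_sketch_t)(void *, void *, void *);

    struct init_frame_sketch {
    	thread_entry_sketch_t entry_point; /* popped into r4 */
    	void *arg1;                        /* popped into r5 */
    	void *arg2;                        /* popped into r6 */
    	void *arg3;                        /* popped into r7 */
    };

    static void seed_frame_sketch(struct init_frame_sketch *iframe,
    			      thread_entry_sketch_t entry,
    			      void *p1, void *p2, void *p3)
    {
    	iframe->entry_point = entry;
    	iframe->arg1 = p1;
    	iframe->arg2 = p2;
    	iframe->arg3 = p3;
    }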
View file

@@ -39,21 +39,40 @@ extern "C" {
 #include <arch/cpu.h>
 
 #ifndef _ASMLANGUAGE
+#ifdef CONFIG_KERNEL_V2
+#include <kernel.h> /* public kernel API */
+#include <../../../kernel/unified/include/nano_internal.h>
+#else
 #include <nanokernel.h> /* public nanokernel API */
 #include <../../../kernel/nanokernel/include/nano_internal.h>
+#endif
 #include <stdint.h>
 #include <misc/util.h>
 #include <misc/dlist.h>
 #endif
 
 /* Bitmask definitions for the struct tcs->flags bit field */
+#ifdef CONFIG_KERNEL_V2
+#define K_STATIC 0x00000800
+#define K_READY 0x00000000 /* Thread is ready to run */
+#define K_TIMING 0x00001000 /* Thread is waiting on a timeout */
+#define K_PENDING 0x00002000 /* Thread is waiting on an object */
+#define K_PRESTART 0x00004000 /* Thread has not yet started */
+#define K_DEAD 0x00008000 /* Thread has terminated */
+#define K_SUSPENDED 0x00010000 /* Thread is suspended */
+#define K_DUMMY 0x00020000 /* Not a real thread */
+#define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \
+			  K_DEAD | K_SUSPENDED | K_DUMMY)
+#else
 #define FIBER 0x000
 #define TASK 0x001 /* 1 = task, 0 = fiber */
+#define PREEMPTIBLE 0x020 /* 1 = preemptible thread */
+#endif
+
 #define INT_ACTIVE 0x002 /* 1 = executing context is interrupt handler */
 #define EXC_ACTIVE 0x004 /* 1 = executing context is exception handler */
 #define USE_FP 0x010 /* 1 = thread uses floating point unit */
-#define PREEMPTIBLE 0x020 /* 1 = preemptible thread */
 #define ESSENTIAL 0x200 /* 1 = system thread that must not abort */
 #define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
@@ -107,36 +126,78 @@ struct preempt {
 	 */
 };
 
+#ifdef CONFIG_KERNEL_V2
+/* 'struct tcs_base' must match the beginning of 'struct tcs' */
+struct tcs_base {
+	sys_dnode_t k_q_node;
+	uint32_t flags;
+	int prio; /* thread priority used to sort linked list */
+	void *swap_data;
+#ifdef CONFIG_NANO_TIMEOUTS
+	struct _timeout timeout;
+#endif
+};
+#endif
+
 struct tcs {
+#ifdef CONFIG_KERNEL_V2
+	sys_dnode_t k_q_node; /* node object in any kernel queue */
+	int flags;
+	int prio; /* thread priority used to sort linked list */
+	void *swap_data;
+#ifdef CONFIG_NANO_TIMEOUTS
+	struct _timeout timeout;
+#endif
+#else
 	struct tcs *link; /* node in singly-linked list
 			   * _nanokernel.fibers
 			   */
 	uint32_t flags; /* bitmask of flags above */
 	int prio; /* fiber priority, -1 for a task */
+#endif /* CONFIG_KERNEL_V2 */
 	struct preempt preempReg;
 	t_coop coopReg;
 
 #ifdef CONFIG_ERRNO
 	int errno_var;
 #endif
 
-#ifdef CONFIG_NANO_TIMEOUTS
+#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_NANO_TIMEOUTS)
 	struct _nano_timeout nano_timeout;
 #endif
 
 #if defined(CONFIG_THREAD_MONITOR)
 	struct __thread_entry *entry; /* thread entry and parameters description */
 	struct tcs *next_thread; /* next item in list of ALL fiber+tasks */
 #endif
 
-#ifdef CONFIG_MICROKERNEL
+#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_MICROKERNEL)
 	void *uk_task_ptr;
 #endif
 
 #ifdef CONFIG_THREAD_CUSTOM_DATA
 	void *custom_data; /* available for custom use */
 #endif
+
+#ifdef CONFIG_KERNEL_V2
+	atomic_t sched_locked;
+	void *init_data;
+	void (*fn_abort)(void);
+#endif
 };
 
+#ifdef CONFIG_KERNEL_V2
+struct ready_q {
+	struct k_thread *cache;
+	uint32_t prio_bmap[1];
+	sys_dlist_t q[K_NUM_PRIORITIES];
+};
+#endif
+
 struct s_NANO {
+#if !defined(CONFIG_KERNEL_V2)
 	struct tcs *fiber; /* singly linked list of runnable fibers */
 	struct tcs *task; /* current task the nanokernel knows about */
+#endif
 
 	struct tcs *current; /* currently scheduled thread (fiber or task) */
 
 #if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
@@ -148,7 +209,9 @@ struct s_NANO {
 #if defined(CONFIG_THREAD_MONITOR)
 	struct tcs *threads; /* singly linked list of ALL fiber+tasks */
 #endif
+#ifdef CONFIG_KERNEL_V2
+	struct ready_q ready_q;
+#endif
 
 	/* Nios II-specific members */
 	char *irq_sp; /* Interrupt stack pointer */
@@ -175,6 +238,22 @@ static ALWAYS_INLINE void fiberRtnValueSet(struct tcs *fiber,
 	fiber->coopReg.retval = value;
 }
 
+#ifdef CONFIG_KERNEL_V2
+#define _current _nanokernel.current
+#define _ready_q _nanokernel.ready_q
+#define _timeout_q _nanokernel.timeout_q
+#define _set_thread_return_value fiberRtnValueSet
+
+static ALWAYS_INLINE void
+_set_thread_return_value_with_data(struct k_thread *thread, unsigned int value,
+				   void *data)
+{
+	_set_thread_return_value(thread, value);
+	thread->swap_data = data;
+}
+
+#define _IDLE_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES)
+#endif /* CONFIG_KERNEL_V2 */
+
 static inline void _IntLibInit(void)
 {
 	/* No special initialization of the interrupt subsystem required */
@@ -184,18 +263,8 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
 					  const NANO_ESF *esf);
 
-static ALWAYS_INLINE int _IS_IN_ISR(void)
-{
-	char *sp = (char *)_nios2_read_sp();
-
-	/* Make sure we're on the interrupt stack somewhere */
-	if (sp < _interrupt_stack ||
-	    sp >= (char *)(STACK_ROUND_DOWN(_interrupt_stack +
-					    CONFIG_ISR_STACK_SIZE))) {
-		return 0;
-	}
-	return 1;
-}
+#define _IS_IN_ISR() (_nanokernel.nested != 0)
+#define _is_in_isr() (_nanokernel.nested != 0)
 
 #ifdef CONFIG_IRQ_OFFLOAD
 void _irq_do_offload(void);

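Note: the comment above says struct tcs_base "must match the beginning of struct tcs", but nothing in the header enforces that. One way such an invariant can be checked at compile time (a hypothetical addition, not part of this patch; it only applies when CONFIG_KERNEL_V2 defines both structs) is with negative-array-size assertions on the member offsets:

    #ifdef CONFIG_KERNEL_V2
    #include <stddef.h>

    /* Sketch only: fails to compile if a tcs_base member drifts out of
     * sync with its struct tcs counterpart.
     */
    #define TCS_OFFSET_CHECK(m) \
    	typedef char tcs_offset_check_##m[ \
    		(offsetof(struct tcs_base, m) == \
    		 offsetof(struct tcs, m)) ? 1 : -1]

    TCS_OFFSET_CHECK(k_q_node);
    TCS_OFFSET_CHECK(flags);
    TCS_OFFSET_CHECK(prio);
    TCS_OFFSET_CHECK(swap_data);
    #endif /* CONFIG_KERNEL_V2 */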
View file

@@ -242,6 +242,17 @@ SECTIONS
 		_k_event_list_end = .;
 	} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
 
+	SECTION_DATA_PROLOGUE(_k_memory_pool, (OPTIONAL),)
+	{
+		*(._k_memory_pool.struct*)
+		KEEP(*(SORT_BY_NAME("._k_memory_pool.struct*")))
+
+		_k_mem_pool_start = .;
+		*(._k_memory_pool.*)
+		KEEP(*(SORT_BY_NAME("._k_memory_pool*")))
+		_k_mem_pool_end = .;
+	} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
+
 	SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,)
 	{

View file

@@ -1774,7 +1774,11 @@ __asm__(".macro _build_mem_pool name, min_size, max_size, n_max\n\t"
 static void __attribute__ ((used)) __k_mem_pool_quad_block_size_define(void)
 {
 	__asm__(".globl __memory_pool_quad_block_size\n\t"
+#ifdef CONFIG_NIOS2
+		"__memory_pool_quad_block_size = %0\n\t"
+#else
 		"__memory_pool_quad_block_size = %c0\n\t"
+#endif
 		:
 		: "n"(sizeof(struct k_mem_pool_quad_block)));
 }

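Note: the %c0 vs %0 distinction here is GCC's "c" operand modifier, which prints a constant operand without immediate punctuation. On targets whose assembler syntax prefixes immediates (for example $ on x86), plain %0 would expand to something like "$16", which is invalid on the right-hand side of a symbol assignment; the Nios II backend apparently emits the bare constant with plain %0, hence the special case. Illustration (hypothetical symbol name, not code from this patch):

    /* Sketch only: on x86, %0 here would emit "sym_sketch = $16"
     * (invalid), while %c0 emits "sym_sketch = 16"; on Nios II, %0
     * already emits the bare constant.
     */
    static void __attribute__ ((used)) define_sym_sketch(void)
    {
    	__asm__(".globl sym_sketch\n\t"
    		"sym_sketch = %c0\n\t"
    		:
    		: "n"(16));
    }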
View file

@@ -4,6 +4,8 @@ qemu_cortex_m3
 qemu_x86
 --platform
 qemu_x86_iamcu
+--platform
+qemu_nios2
 --tag
 unified_capable
 -x