nios2: remove support for legacy kernels

Change-Id: If16533a478e5cd10f924eb6abe4b25c9902733c5
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Author: Andrew Boie, 2016-11-03 13:18:26 -07:00
Commit: a62d4be68b
6 changed files with 5 additions and 121 deletions

arch/nios2/core/Makefile

@@ -1,10 +1,5 @@
-ifeq ($(CONFIG_KERNEL_V2),y)
 ccflags-y += -I$(srctree)/kernel/unified/include
-else
-ccflags-y += -I$(srctree)/kernel/nanokernel/include
-ccflags-y += -I$(srctree)/kernel/microkernel/include
-endif
-ccflags-y +=-I$(srctree)/arch/$(ARCH)/include
+ccflags-y += -I$(srctree)/arch/$(ARCH)/include
 
 obj-y += reset.o irq_manage.o fatal.o swap.o thread.o \
 	cpu_idle.o irq_offload.o prep_c.o crt0.o \

arch/nios2/core/exception.S

@@ -25,9 +25,7 @@ GTEXT(_exception)
 /* import */
 GTEXT(_Fault)
 GTEXT(_Swap)
-#ifdef CONFIG_KERNEL_V2
 GTEXT(_is_next_thread_current)
-#endif
 #ifdef CONFIG_IRQ_OFFLOAD
 GTEXT(_irq_do_offload)
 GTEXT(_offload_routine)
@@ -129,7 +127,6 @@ on_irq_stack:
 	ori r10, r10, %lo(_nanokernel)
 	ldw r11, __tNANO_current_OFFSET(r10)
 
-#ifdef CONFIG_KERNEL_V2
 	/* Determine whether the execution of the ISR requires a context
 	 * switch
 	 */
@@ -145,24 +142,6 @@ on_irq_stack:
 	/* Call into the kernel to see if a scheduling decision is necessary */
 	call _is_next_thread_current
 	bne r2, zero, no_reschedule
-#else
-	/* Determine whether the execution of the ISR requires a context
-	 * switch. If the interrupted thread is PREEMPTIBLE (a task) and
-	 * _nanokernel.fiber is non-NULL, a _Swap() needs to occur.
-	 */
-
-	/* Check (_nanokernel.current->flags & PREEMPTIBLE), if not
-	 * goto no_reschedule
-	 */
-	ldw r12, __tTCS_flags_OFFSET(r11)
-	movi r13, PREEMPTIBLE
-	and r12, r13, r12
-	beq r12, zero, no_reschedule
-
-	/* Check _nanokernel.fiber != NULL, if NULL goto no_reschedule */
-	ldw r11, __tNANO_fiber_OFFSET(r10)
-	beq r11, zero, no_reschedule
-#endif /* CONFIG_KERNEL_V2 */
 
 	/*
 	 * A context reschedule is required: keep the volatile registers of
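For readers less fluent in Nios II assembly, the surviving unified-kernel check sketches to the following C. This is an illustrative sketch, not kernel source: only _is_next_thread_current() is a real symbol here, and the wrapper name is hypothetical.

    /* Hypothetical C rendering of the IRQ-exit decision above. */
    extern int _is_next_thread_current(void);

    static int reschedule_needed(void)
    {
    	/* The assembly skips the context switch (bne r2, zero,
    	 * no_reschedule) whenever the kernel reports that the current
    	 * thread is still the next thread to run.
    	 */
    	return !_is_next_thread_current();
    }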

arch/nios2/core/fatal.c

@@ -230,18 +230,11 @@ FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
 	ARG_UNUSED(pEsf);
 
 	if ((curCtx != NANO_CTX_ISR) && !_is_thread_essential()) {
-#ifdef CONFIG_MICROKERNEL
 		if (curCtx == NANO_CTX_TASK) {
-			extern FUNC_NORETURN void _TaskAbort(void);
 			printk("Fatal task error! Aborting task.\n");
-#ifdef CONFIG_KERNEL_V2
 			k_thread_abort(_current);
-#else
-			_TaskAbort();
-#endif
-		} else
-#endif /* CONFIG_MICROKERNEL */
-		{
+		} else {
 			printk("Fatal fiber error! Aborting fiber.\n");
 			fiber_abort();
 		}
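With the legacy branches gone, the handler reduces to a plain if/else. A condensed sketch of the surviving path follows; the wrapper name is hypothetical, while printk(), k_thread_abort() and fiber_abort() are the calls visible in the hunk, and _current resolves to the running thread (kernel headers assumed included, as in fatal.c).

    static void abort_offending_context(int curCtx)
    {
    	if (curCtx == NANO_CTX_TASK) {
    		printk("Fatal task error! Aborting task.\n");
    		k_thread_abort(_current);   /* unified-kernel thread abort */
    	} else {
    		printk("Fatal fiber error! Aborting fiber.\n");
    		fiber_abort();
    	}
    }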

arch/nios2/core/swap.S

@@ -25,10 +25,8 @@ GTEXT(_thread_entry_wrapper)
 
 /* imports */
 GTEXT(_sys_k_event_logger_context_switch)
-#ifdef CONFIG_KERNEL_V2
 GTEXT(_get_next_ready_thread)
 GTEXT(_k_neg_eagain)
-#endif
 
 /* unsigned int _Swap(unsigned int key)
  *
@@ -65,13 +63,11 @@ SECTION_FUNC(exception.other, _Swap)
 	 */
 	stw r4, __tTCS_coopReg_OFFSET + __t_coop_key_OFFSET(r11)
 
-#ifdef CONFIG_KERNEL_V2
 	/* Populate default return value */
 	movhi r5, %hi(_k_neg_eagain)
 	ori r5, r5, %lo(_k_neg_eagain)
 	ldw r4, (r5)
 	stw r4, __tTCS_coopReg_OFFSET + __t_coop_retval_OFFSET(r11)
-#endif /* CONFIG_KERNEL_V2 */
 
 #if CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
 	call _sys_k_event_logger_context_switch
@@ -82,7 +78,6 @@ SECTION_FUNC(exception.other, _Swap)
 	ori r10, r10, %lo(_nanokernel)
 #endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
 
-#ifdef CONFIG_KERNEL_V2
 	/* Assign to _nanokernel.current the return value of
 	 * _get_next_ready_thread()
 	 */
@@ -90,25 +85,6 @@ SECTION_FUNC(exception.other, _Swap)
 	movhi r10, %hi(_nanokernel)
 	ori r10, r10, %lo(_nanokernel)
 	stw r2, __tNANO_current_OFFSET(r10)
-#else
-	/* Find the next context to run. Choose _nanokernel.fiber
-	 * if non-NULL */
-	ldw r2, __tNANO_fiber_OFFSET(r10)
-	beq r2, zero, not_fiber
-
-	/* _nanokernel.fiber = _nanokernel.fiber->link */
-	ldw r14, __tTCS_link_OFFSET(r2)
-	stw r14, __tNANO_fiber_OFFSET(r10)
-	br next_chosen
-
-not_fiber:
-	/* Fiber was NULL, we'll choose nanokernel.task */
-	ldw r2, __tNANO_task_OFFSET(r10)
-
-next_chosen:
-	/* Set _nanokernel.current to value we chose for r2 */
-	stw r2, __tNANO_current_OFFSET(r10)
-#endif /* CONFIG_KERNEL_V2 */
 
 	/* At this point r2 points to the next thread to be swapped in */
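In C terms, the two unified-kernel fragments kept above amount to the outline below. _k_neg_eagain and _get_next_ready_thread() are real kernel symbols from the import list; swap_outline() and the direct field assignments are illustrative only, standing in for the register-level stores.

    extern int _k_neg_eagain;                        /* holds -EAGAIN */
    extern struct tcs *_get_next_ready_thread(void);

    static void swap_outline(struct tcs *outgoing)
    {
    	/* Seed the saved return value with -EAGAIN; a successful wakeup
    	 * path overwrites it via _set_thread_return_value() before the
    	 * thread resumes.
    	 */
    	outgoing->coopReg.retval = (unsigned int)_k_neg_eagain;

    	/* The scheduler picks the next thread; there is no more walking
    	 * of the legacy fiber list with a fallback to the task.
    	 */
    	_nanokernel.current = _get_next_ready_thread();
    }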

arch/nios2/core/thread.c

@@ -14,13 +14,8 @@
  * limitations under the License.
  */
 
-#ifdef CONFIG_KERNEL_V2
 #include <kernel.h> /* public kernel API */
 #include <../../../kernel/unified/include/nano_internal.h>
-#else
-#include <nanokernel.h> /* public nanokernel API */
-#include <../../../kernel/nanokernel/include/nano_internal.h>
-#endif
 
 #include <nano_private.h>
 #include <wait_q.h>
@@ -93,7 +88,6 @@ void _new_thread(char *stack_memory, unsigned stack_size,
 	tcs = (struct tcs *)stack_memory;
 	tcs->prio = priority;
 
-#ifdef CONFIG_KERNEL_V2
 	/* k_q_node initialized upon first insertion in a list */
 	tcs->flags = options | K_PRESTART;
 	tcs->sched_locked = 0;
@@ -101,26 +95,13 @@ void _new_thread(char *stack_memory, unsigned stack_size,
 	/* static threads overwrite it afterwards with real value */
 	tcs->init_data = NULL;
 	tcs->fn_abort = NULL;
-#else
-	if (priority == -1) {
-		tcs->flags = PREEMPTIBLE | TASK;
-	} else {
-		tcs->flags = FIBER;
-	}
-	ARG_UNUSED(options);
-	tcs->link = (struct tcs *)NULL; /* thread not inserted into list yet */
-#endif /* CONFIG_KERNEL_V2 */
 
 #ifdef CONFIG_THREAD_CUSTOM_DATA
 	/* Initialize custom data field (value is opaque to kernel) */
 	tcs->custom_data = NULL;
 #endif
 
-#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_MICROKERNEL)
-	tcs->uk_task_ptr = uk_task_ptr;
-#else
 	ARG_UNUSED(uk_task_ptr);
-#endif
 
 	tcs->coopReg.sp = (uint32_t)iframe;
 	tcs->coopReg.ra = (uint32_t)_thread_entry_wrapper;
 	tcs->coopReg.key = NIOS2_STATUS_PIE_MSK;
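Read together, the surviving unified-kernel body of _new_thread() reduces to roughly the following sequence (condensed from the hunks above as a sketch; declarations, error checking, and the iframe construction are elided):

    tcs = (struct tcs *)stack_memory;        /* TCS lives at the stack base */
    tcs->prio = priority;
    /* k_q_node is initialized upon first insertion into a list */
    tcs->flags = options | K_PRESTART;       /* created, not yet started */
    tcs->sched_locked = 0;
    tcs->init_data = NULL;                   /* static threads patch this later */
    tcs->fn_abort = NULL;
    #ifdef CONFIG_THREAD_CUSTOM_DATA
    tcs->custom_data = NULL;                 /* opaque to the kernel */
    #endif
    ARG_UNUSED(uk_task_ptr);                 /* legacy parameter, now unused */
    tcs->coopReg.sp = (uint32_t)iframe;      /* resumes into the init frame */
    tcs->coopReg.ra = (uint32_t)_thread_entry_wrapper;
    tcs->coopReg.key = NIOS2_STATUS_PIE_MSK; /* interrupts enabled at start */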

arch/nios2/include/nano_private.h

@@ -39,20 +39,14 @@ extern "C" {
 #include <arch/cpu.h>
 
 #ifndef _ASMLANGUAGE
-#ifdef CONFIG_KERNEL_V2
 #include <kernel.h> /* public kernel API */
 #include <../../../kernel/unified/include/nano_internal.h>
-#else
-#include <nanokernel.h> /* public nanokernel API */
-#include <../../../kernel/nanokernel/include/nano_internal.h>
-#endif
 #include <stdint.h>
 #include <misc/util.h>
 #include <misc/dlist.h>
 #endif
 
 /* Bitmask definitions for the struct tcs->flags bit field */
-#ifdef CONFIG_KERNEL_V2
 #define K_STATIC 0x00000800
 
 #define K_READY 0x00000000 /* Thread is ready to run */
@@ -64,11 +58,6 @@ extern "C" {
 #define K_DUMMY 0x00020000 /* Not a real thread */
 #define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \
 			  K_DEAD | K_SUSPENDED | K_DUMMY)
-#else
-#define FIBER 0x000
-#define TASK 0x001 /* 1 = task, 0 = fiber */
-#define PREEMPTIBLE 0x020 /* 1 = preemptible thread */
-#endif
 
 #define INT_ACTIVE 0x002 /* 1 = executing context is interrupt handler */
 #define EXC_ACTIVE 0x004 /* 1 = executing context is exception handler */
@@ -127,7 +116,6 @@ struct preempt {
 };
 
-#ifdef CONFIG_KERNEL_V2
 /* 'struct tcs_base' must match the beginning of 'struct tcs' */
 struct tcs_base {
 	sys_dnode_t k_q_node;
@@ -138,11 +126,9 @@ struct tcs_base {
 	struct _timeout timeout;
 #endif
 };
-#endif
 
 struct tcs {
-#ifdef CONFIG_KERNEL_V2
 	sys_dnode_t k_q_node; /* node object in any kernel queue */
 	int flags;
 	int prio; /* thread priority used to sort linked list */
@@ -150,68 +136,42 @@ struct tcs {
 #ifdef CONFIG_NANO_TIMEOUTS
 	struct _timeout timeout;
 #endif
-#else
-	struct tcs *link; /* node in singly-linked list
-			   * _nanokernel.fibers
-			   */
-	uint32_t flags; /* bitmask of flags above */
-	int prio; /* fiber priority, -1 for a task */
-#endif /* CONFIG_KERNEL_V2 */
 	struct preempt preempReg;
 	t_coop coopReg;
 
 #ifdef CONFIG_ERRNO
 	int errno_var;
 #endif
 
-#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_NANO_TIMEOUTS)
-	struct _nano_timeout nano_timeout;
-#endif
-
 #if defined(CONFIG_THREAD_MONITOR)
 	struct __thread_entry *entry; /* thread entry and parameters description */
 	struct tcs *next_thread; /* next item in list of ALL fiber+tasks */
 #endif
-#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_MICROKERNEL)
-	void *uk_task_ptr;
-#endif
 #ifdef CONFIG_THREAD_CUSTOM_DATA
 	void *custom_data; /* available for custom use */
 #endif
-#ifdef CONFIG_KERNEL_V2
 	atomic_t sched_locked;
 	void *init_data;
 	void (*fn_abort)(void);
-#endif
 };
 
-#ifdef CONFIG_KERNEL_V2
 struct ready_q {
 	struct k_thread *cache;
 	uint32_t prio_bmap[1];
 	sys_dlist_t q[K_NUM_PRIORITIES];
 };
-#endif
 
 struct s_NANO {
-#if !defined(CONFIG_KERNEL_V2)
-	struct tcs *fiber; /* singly linked list of runnable fibers */
-	struct tcs *task; /* current task the nanokernel knows about */
-#endif
 	struct tcs *current; /* currently scheduled thread (fiber or task) */
 
 #if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
 	sys_dlist_t timeout_q;
-#ifndef CONFIG_KERNEL_V2
-	int32_t task_timeout;
-#endif
 #endif
 
 #if defined(CONFIG_THREAD_MONITOR)
 	struct tcs *threads; /* singly linked list of ALL fiber+tasks */
 #endif
 
-#ifdef CONFIG_KERNEL_V2
 	struct ready_q ready_q;
-#endif
 
 	/* Nios II-specific members */
 	char *irq_sp; /* Interrupt stack pointer */
@@ -238,11 +198,11 @@ static ALWAYS_INLINE void fiberRtnValueSet(struct tcs *fiber,
 	fiber->coopReg.retval = value;
 }
 
-#ifdef CONFIG_KERNEL_V2
 #define _current _nanokernel.current
 #define _ready_q _nanokernel.ready_q
 #define _timeout_q _nanokernel.timeout_q
 #define _set_thread_return_value fiberRtnValueSet
+
 static ALWAYS_INLINE void
 _set_thread_return_value_with_data(struct k_thread *thread, unsigned int value,
 				   void *data)
@@ -250,8 +210,8 @@ _set_thread_return_value_with_data(struct k_thread *thread, unsigned int value,
 	_set_thread_return_value(thread, value);
 	thread->swap_data = data;
 }
 
 #define _IDLE_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES)
-#endif /* CONFIG_KERNEL_V2 */
 
 static inline void _IntLibInit(void)
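The ready_q retained above pairs a per-priority bitmap with one doubly-linked list per priority and a cached next-thread pointer. A minimal, self-contained sketch of the kind of lookup that layout enables (hypothetical helper, not the kernel's scheduler):

    #include <stdint.h>

    /* Bit i set in the bitmap means the list for priority i is non-empty;
     * assuming lower index means higher priority, find-first-set yields
     * the priority level to run next.
     */
    static int highest_ready_prio(uint32_t prio_bmap)
    {
    	for (int i = 0; i < 32; i++) {
    		if (prio_bmap & (1u << i)) {
    			return i;
    		}
    	}
    	return -1; /* nothing ready */
    }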