arc: remove support for legacy kernels

Change-Id: Id3d8285dc39428752758ee47fb1b826f05b1f3e0
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
Andrew Boie 2016-11-03 13:11:13 -07:00
commit 8871cf0994
9 changed files with 12 additions and 231 deletions

View file

@ -1,9 +1,4 @@
ifeq ($(CONFIG_KERNEL_V2),y)
ccflags-y += -I$(srctree)/kernel/unified/include
else
ccflags-y += -I$(srctree)/kernel/nanokernel/include
ccflags-y += -I$(srctree)/kernel/microkernel/include
endif
ccflags-y +=-I$(srctree)/arch/$(ARCH)/include
obj-y += thread.o thread_entry_wrapper.o \

View file

@ -37,9 +37,7 @@ GTEXT(_firq_stack_setup)
#if CONFIG_RGF_NUM_BANKS != 1
GDATA(_firq_stack)
#ifdef CONFIG_KERNEL_V2
GTEXT(_is_next_thread_current)
#endif
SECTION_VAR(NOINIT, _firq_stack)
.space CONFIG_FIRQ_STACK_SIZE
@ -147,7 +145,6 @@ SECTION_FUNC(TEXT, _firq_exit)
#endif
.balign 4
#ifdef CONFIG_KERNEL_V2
_firq_check_for_swap:
/* coop thread ? do not schedule */
ld_s r0, [r2, __tTCS_prio_OFFSET]
@ -178,20 +175,6 @@ _firq_check_for_swap:
#endif
breq r0, 0, _firq_reschedule
/* fall to no rescheduling */
#else
_firq_check_for_swap:
/* Check if the current is a task */
ld_s r0, [r2, __tTCS_flags_OFFSET]
and.f r0, r0, PREEMPTIBLE
bnz _check_if_a_fiber_is_ready
_firq_return
.balign 4
_check_if_a_fiber_is_ready:
ld_s r0, [r1, __tNANO_fiber_OFFSET] /* incoming fiber in r0 */
brne r0, 0, _firq_reschedule
/* fall to no rescheduling */
#endif /* CONFIG_KERNEL_V2 */
.balign 4
_firq_no_reschedule:
@ -262,7 +245,6 @@ _firq_reschedule:
st _CAUSE_FIRQ, [r2, __tTCS_relinquish_cause_OFFSET]
#ifdef CONFIG_KERNEL_V2
/*
* Save needed registers to callee saved ones. It is faster than
* pushing them to stack. It is possible to do since program has
@ -276,13 +258,6 @@ _firq_reschedule:
mov_s r1, r14
mov_s r2, r0
st_s r2, [r1, __tNANO_current_OFFSET]
#else
ld_s r2, [r1, __tNANO_fiber_OFFSET]
st_s r2, [r1, __tNANO_current_OFFSET]
ld_s r3, [r2, __tTCS_link_OFFSET]
st_s r3, [r1, __tNANO_fiber_OFFSET]
#endif
#ifdef CONFIG_ARC_STACK_CHECKING
/* Use stack top and down registers from restored context */

View file

@ -148,17 +148,6 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap)
pop_s r2
#endif
#ifndef CONFIG_MICROKERNEL
st sp, [saved_stack_pointer]
#if CONFIG_RGF_NUM_BANKS == 1
mov_s sp, _exception_stack
add sp, sp, 512
#else
mov_s sp, _firq_stack
add sp, sp, CONFIG_FIRQ_STACK_SIZE
#endif
#endif
/* save caller saved registers */
_create_irq_stack_frame
@ -169,7 +158,6 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap)
jl _irq_do_offload
#ifdef CONFIG_MICROKERNEL
mov_s r1, _nanokernel
ld_s r2, [r1, __tNANO_current_OFFSET]
#if CONFIG_NUM_IRQ_PRIO_LEVELS > 1
@ -189,7 +177,6 @@ _trap_return:
#endif
.balign 4
#ifdef CONFIG_KERNEL_V2
_trap_check_for_swap:
/* coop thread ? do not schedule */
ld_s r0, [r2, __tTCS_prio_OFFSET]
@ -208,32 +195,12 @@ _trap_check_for_swap:
pop_s r1
pop_s r2
brne r0, 0, _trap_return
/* go on to _trap_reschedule by default */
#else
_trap_check_for_swap:
/* Check if the current is a task */
ld_s r0, [r2, __tTCS_flags_OFFSET]
and.f r0, r0, PREEMPTIBLE
bnz _e_check_if_a_fiber_is_ready
b _trap_return
.balign 4
_e_check_if_a_fiber_is_ready:
ld_s r0, [r1, __tNANO_fiber_OFFSET] /* incoming fiber in r0 */
brne r0, 0, _trap_reschedule
b _trap_return
#endif /* CONFIG_KERNEL_V2 */
.balign 4
_trap_reschedule:
_save_callee_saved_regs
st _CAUSE_RIRQ, [r2, __tTCS_relinquish_cause_OFFSET]
/* note: Ok to use _CAUSE_RIRQ since everything is saved */
#ifdef CONFIG_KERNEL_V2
/*
* Save needed registers to callee saved ones. It is faster than
* pushing registers to stack. It is possible to do since program has
@ -249,13 +216,6 @@ _trap_reschedule:
mov_s r0, r14
mov_s blink, r13
st_s r2, [r1, __tNANO_current_OFFSET]
#else
ld_s r2, [r1, __tNANO_fiber_OFFSET]
st_s r2, [r1, __tNANO_current_OFFSET]
ld_s r3, [r2, __tTCS_link_OFFSET]
st_s r3, [r1, __tNANO_fiber_OFFSET]
#endif /* CONFIG_KERNEL_V2 */
/* clear AE bit to forget this was an exception */
lr r3, [_ARC_V2_STATUS32]
@ -268,15 +228,5 @@ _trap_reschedule:
/* Assumption: r2 has current thread */
b _rirq_common_interrupt_swap
#else
/* Nanokernel-only just returns from exception */
/* if _Fault returns, restore the registers */
_pop_irq_stack_frame
/* now restore the stack */
ld sp,[saved_stack_pointer]
rtie
#endif
#endif /* CONFIG_IRQ_OFFLOAD */

View file

@ -42,9 +42,7 @@ SECTION_VAR(BSS, saved_r0)
.word 0
#endif
#if defined(CONFIG_NANOKERNEL) && defined(CONFIG_TICKLESS_IDLE)
GTEXT(_power_save_idle_exit)
#elif defined(CONFIG_KERNEL_V2) && defined(CONFIG_SYS_POWER_MANAGEMENT)
#if defined(CONFIG_SYS_POWER_MANAGEMENT)
GTEXT(_sys_power_save_idle_exit)
#endif
@ -311,17 +309,7 @@ GTEXT(_sys_k_event_logger_interrupt)
#define log_interrupt_k_event
#endif
#if defined(CONFIG_NANOKERNEL) && defined(CONFIG_TICKLESS_IDLE)
.macro exit_tickless_idle
clri r0 /* do not interrupt exiting tickless idle operations */
push_s r0
push_s blink
jl _power_save_idle_exit
pop_s blink
pop_s r0
seti r0
.endm
#elif defined(CONFIG_KERNEL_V2) && defined(CONFIG_SYS_POWER_MANAGEMENT)
#if defined(CONFIG_SYS_POWER_MANAGEMENT)
.macro exit_tickless_idle
clri r0 /* do not interrupt exiting tickless idle operations */
push_s r1

View file

@ -35,9 +35,7 @@
GTEXT(_rirq_enter)
GTEXT(_rirq_exit)
GTEXT(_rirq_common_interrupt_swap)
#ifdef CONFIG_KERNEL_V2
GTEXT(_is_next_thread_current)
#endif
#if 0 /* TODO: when FIRQ is not present, all would be regular */
#define NUM_REGULAR_IRQ_PRIO_LEVELS CONFIG_NUM_IRQ_PRIO_LEVELS
@ -131,7 +129,6 @@ SECTION_FUNC(TEXT, _rirq_exit)
* b) needs to load it to restore the interrupted context.
*/
#ifdef CONFIG_KERNEL_V2
/* coop thread ? do not schedule */
ld_s r0, [r2, __tTCS_prio_OFFSET]
cmp_s r0, 0
@ -165,17 +162,6 @@ SECTION_FUNC(TEXT, _rirq_exit)
pop_s blink
pop_s r1
pop_s r2
#else
ld_s r0, [r2, __tTCS_flags_OFFSET]
and.f r0, r0, PREEMPTIBLE
bz.d _rirq_no_reschedule
ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
ld_s r0, [r1, __tNANO_fiber_OFFSET] /* incoming fiber in r0 */
cmp r0, 0
bz.d _rirq_no_reschedule
ld sp, [r2, __tTCS_preempReg_OFFSET + __tPreempt_sp_OFFSET]
#endif /* CONFIG_KERNEL_V2 */
.balign 4
_rirq_reschedule:
@ -188,11 +174,8 @@ _rirq_reschedule:
/* incoming fiber is in r0: it becomes the new 'current' */
mov r2, r0
st_s r2, [r1, __tNANO_current_OFFSET]
#if !defined(CONFIG_KERNEL_V2)
ld_s r3, [r2, __tTCS_link_OFFSET]
st_s r3, [r1, __tNANO_fiber_OFFSET]
#endif
.balign 4
_rirq_common_interrupt_swap:
/* r2 contains pointer to new thread */

View file

@ -34,12 +34,8 @@
#include "swap_macros.h"
GTEXT(_Swap)
#ifdef CONFIG_KERNEL_V2
GTEXT(_get_next_ready_thread)
GDATA(_k_neg_eagain)
#endif
GDATA(_nanokernel)
/**
@ -85,7 +81,6 @@ SECTION_FUNC(TEXT, _Swap)
st_s r0, [r2, __tTCS_intlock_key_OFFSET]
st _CAUSE_COOP, [r2, __tTCS_relinquish_cause_OFFSET]
#ifdef CONFIG_KERNEL_V2
/*
 * Carve space for the return value. Setting it to a default of
* -EAGAIN eliminates the need for the timeout code to set it.
@ -94,7 +89,6 @@ SECTION_FUNC(TEXT, _Swap)
*/
ld r3, [_k_neg_eagain]
st_s r3, [r2, __tTCS_return_value_OFFSET]
#endif /* CONFIG_KERNEL_V2 */
/*
* Save status32 and blink on the stack before the callee-saved registers.
@ -113,7 +107,6 @@ SECTION_FUNC(TEXT, _Swap)
/* find out incoming thread (fiber or task) */
#ifdef CONFIG_KERNEL_V2
/*
* Save needed registers to callee saved ones. It is faster than
* pushing them to stack. It is possible to do since program has
@ -128,36 +121,8 @@ SECTION_FUNC(TEXT, _Swap)
mov_s r1, r15
mov_s r0, r14
mov_s blink, r13
#else
ld_s r2, [r1, __tNANO_fiber_OFFSET]
breq r2, 0, _swap_to_the_task
.balign 4
_swap_to_a_fiber:
ld_s r3, [r2, __tTCS_link_OFFSET]
b.d _finish_swapping_to_thread /* always execute delay slot */
st_s r3, [r1, __tNANO_fiber_OFFSET] /* delay slot */
.balign 4
_swap_to_the_task:
ld_s r2, [r1, __tNANO_task_OFFSET]
/* fall through */
#endif /* CONFIG_KERNEL_V2 */
.balign 4
_finish_swapping_to_thread:
/* entering here, r2 contains the new current thread */
#if 0 && !defined(CONFIG_KERNEL_V2)
/* don't save flags in tNANO: slower, error-prone, and might not even give
* a speed boost where it's supposed to */
ld_s r3, [r2, __tTCS_flags_OFFSET]
st_s r3, [r1, __tNANO_flags_OFFSET]
#endif
#ifdef CONFIG_ARC_STACK_CHECKING
/* Use stack top and down registers from restored context */
add r3, r2, __tTCS_NOFLOAT_SIZEOF

View file

@ -24,10 +24,7 @@
#include <nanokernel.h>
#include <toolchain.h>
#include <sections.h>
#ifdef CONFIG_KERNEL_V2
#include <nano_private.h>
#endif
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
@ -36,25 +33,6 @@
#define PRINTK(...)
#endif
#ifdef CONFIG_MICROKERNEL
static inline void nonEssentialTaskAbort(void)
{
PRINTK("Fatal fault in task ! Aborting task.\n");
#if defined(CONFIG_KERNEL_V2)
k_thread_abort(_current);
#else
extern void _TaskAbort(void);
_TaskAbort();
#endif
}
#define NON_ESSENTIAL_TASK_ABORT() nonEssentialTaskAbort()
#else
#define NON_ESSENTIAL_TASK_ABORT() \
do {/* nothing */ \
} while ((0))
#endif
/**
*
* @brief Fatal error handler
@ -98,5 +76,6 @@ void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF * pEsf)
return;
}
NON_ESSENTIAL_TASK_ABORT();
PRINTK("Fatal fault in task ! Aborting task.\n");
k_thread_abort(_current);
}

View file

@ -124,7 +124,6 @@ void _new_thread(char *pStackMem, unsigned stackSize,
pInitCtx->status32 = _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL);
#endif
#ifdef CONFIG_KERNEL_V2
/* k_q_node initialized upon first insertion in a list */
tcs->flags = options | K_PRESTART;
tcs->sched_locked = 0;
@ -132,11 +131,6 @@ void _new_thread(char *pStackMem, unsigned stackSize,
/* static threads overwrite them afterwards with real values */
tcs->init_data = NULL;
tcs->fn_abort = NULL;
#else
tcs->link = NULL;
tcs->flags = priority == -1 ? TASK | PREEMPTIBLE : FIBER;
ARG_UNUSED(options);
#endif
tcs->prio = priority;
#ifdef CONFIG_THREAD_CUSTOM_DATA
@ -153,11 +147,7 @@ void _new_thread(char *pStackMem, unsigned stackSize,
tcs->entry = (struct __thread_entry *)(pInitCtx);
#endif
#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_MICROKERNEL)
tcs->uk_task_ptr = uk_task_ptr;
#else
ARG_UNUSED(uk_task_ptr);
#endif
/*
* intlock_key is constructed based on ARCv2 ISA Programmer's

View file

@ -40,13 +40,8 @@ extern "C" {
#include <vector_table.h>
#ifndef _ASMLANGUAGE
#ifdef CONFIG_KERNEL_V2
#include <kernel.h>
#include <../../../kernel/unified/include/nano_internal.h>
#else
#include <nanokernel.h> /* public nanokernel API */
#include <../../../kernel/nanokernel/include/nano_internal.h>
#endif
#include <stdint.h>
#include <misc/util.h>
#include <misc/dlist.h>
@ -139,7 +134,6 @@ typedef struct callee_saved tCalleeSaved;
/* Bitmask definitions for the struct tcs->flags bit field */
#ifdef CONFIG_KERNEL_V2
#define K_STATIC 0x00000800
#define K_READY 0x00000000 /* Thread is ready to run */
@ -151,15 +145,6 @@ typedef struct callee_saved tCalleeSaved;
#define K_DUMMY 0x00020000 /* Not a real thread */
#define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \
K_DEAD | K_SUSPENDED | K_DUMMY)
#else
#define FIBER 0x000
#define TASK 0x001 /* 1 = task, 0 = fiber */
#define INT_ACTIVE 0x002 /* 1 = executing context is interrupt handler */
#define EXC_ACTIVE 0x004 /* 1 = executing context is exception handler */
#define PREEMPTIBLE 0x020 /* 1 = preemptible thread */
#endif
#define USE_FP 0x010 /* 1 = thread uses floating point unit */
#define K_ESSENTIAL 0x200 /* 1 = system thread that must not abort */
#define NO_METRICS 0x400 /* 1 = _Swap() not to update task metrics */
@ -182,7 +167,6 @@ typedef struct callee_saved tCalleeSaved;
#ifndef _ASMLANGUAGE
#ifdef CONFIG_KERNEL_V2
/* 'struct tcs_base' must match the beginning of 'struct tcs' */
struct tcs_base {
sys_dnode_t k_q_node;
@ -193,10 +177,8 @@ struct tcs_base {
struct _timeout timeout;
#endif
};
#endif
struct tcs {
#ifdef CONFIG_KERNEL_V2
sys_dnode_t k_q_node; /* node object in any kernel queue */
uint32_t flags;
int prio;
@ -204,13 +186,6 @@ struct tcs {
#ifdef CONFIG_NANO_TIMEOUTS
struct _timeout timeout;
#endif
#else
struct tcs *link; /* node in singly-linked list
* _nanokernel.fibers
*/
uint32_t flags; /* bitmask of flags above */
int prio; /* fiber priority, -1 for a task */
#endif /* CONFIG_KERNEL_V2 */
uint32_t intlock_key; /* interrupt key when relinquishing control */
int relinquish_cause; /* one of the _CAUSE_xxxx definitions above */
unsigned int return_value;/* return value from _Swap */
@ -223,38 +198,24 @@ struct tcs {
struct __thread_entry *entry; /* thread entry and parameters description */
struct tcs *next_thread; /* next item in list of ALL fiber+tasks */
#endif
#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_NANO_TIMEOUTS)
struct _nano_timeout nano_timeout;
#endif
#ifdef CONFIG_ERRNO
int errno_var;
#endif
#ifdef CONFIG_ARC_STACK_CHECKING
uint32_t stack_top;
#endif
#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_MICROKERNEL)
void *uk_task_ptr;
#endif
#ifdef CONFIG_KERNEL_V2
atomic_t sched_locked;
void *init_data;
void (*fn_abort)(void);
#endif
};
#ifdef CONFIG_KERNEL_V2
struct ready_q {
struct k_thread *cache;
uint32_t prio_bmap[1];
sys_dlist_t q[K_NUM_PRIORITIES];
};
#endif
struct s_NANO {
#if !defined(CONFIG_KERNEL_V2)
struct tcs *fiber; /* singly linked list of runnable fibers */
struct tcs *task; /* current task the nanokernel knows about */
#endif
struct tcs *current; /* currently scheduled thread (fiber or task) */
#ifdef CONFIG_THREAD_MONITOR
@ -278,13 +239,8 @@ struct s_NANO {
#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
sys_dlist_t timeout_q;
#ifndef CONFIG_KERNEL_V2
int32_t task_timeout;
#endif
#endif
#ifdef CONFIG_KERNEL_V2
struct ready_q ready_q;
#endif
};
typedef struct s_NANO tNANO;
@ -316,11 +272,11 @@ static ALWAYS_INLINE void fiberRtnValueSet(struct tcs *fiber, unsigned int value
fiber->return_value = value;
}
#ifdef CONFIG_KERNEL_V2
#define _current _nanokernel.current
#define _ready_q _nanokernel.ready_q
#define _timeout_q _nanokernel.timeout_q
#define _set_thread_return_value fiberRtnValueSet
#define _current _nanokernel.current
#define _ready_q _nanokernel.ready_q
#define _timeout_q _nanokernel.timeout_q
#define _set_thread_return_value fiberRtnValueSet
static ALWAYS_INLINE void
_set_thread_return_value_with_data(struct k_thread *thread, unsigned int value,
void *data)
@ -328,8 +284,8 @@ _set_thread_return_value_with_data(struct k_thread *thread, unsigned int value,
_set_thread_return_value(thread, value);
thread->swap_data = data;
}
#define _IDLE_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES)
#endif /* CONFIG_KERNEL_V2 */
#define _IDLE_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES)
/**
*