arm: remove support for legacy kernels

Change-Id: I93c2dd6bf7286f50cb2702a94cbc85dc3bdee807
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Authored by Andrew Boie on 2016-11-03 13:00:41 -07:00; committed by Kumar Gala
commit 3a6bd2a552
10 changed files with 31 additions and 318 deletions

View file

@@ -1,10 +1,4 @@
ifeq ($(CONFIG_KERNEL_V2),y)
ccflags-y += -I$(srctree)/kernel/unified/include
else
ccflags-y += -I$(srctree)/kernel/nanokernel/include
ccflags-y += -I$(srctree)/kernel/microkernel/include
endif
asflags-y := ${ccflags-y}
obj-y = exc_exit.o irq_init.o \
@@ -12,14 +6,7 @@ obj-y = exc_exit.o irq_init.o \
fault.o \
irq_manage.o thread.o cpu_idle.o \
fault_s.o isr_wrapper.o \
fatal.o sys_fatal_error_handler.o
ifeq ($(CONFIG_KERNEL_V2),y)
obj-y += thread_abort.o
else
obj-y += fiber_abort.o
obj-$(CONFIG_MICROKERNEL) += task_abort.o
endif
fatal.o sys_fatal_error_handler.o thread_abort.o
obj-$(CONFIG_GDB_INFO) += gdb_stub_irq_vector_table.o gdb_stub.o
obj-$(CONFIG_CPLUSPLUS) += __aeabi_atexit.o

View file

@@ -1,10 +1,6 @@
ccflags-y +=-I$(srctree)/include/drivers
ccflags-y +=-I$(srctree)/arch/arm/soc/$(SOC_PATH)
ifeq ($(CONFIG_KERNEL_V2),y)
ccflags-y +=-I$(srctree)/kernel/unified/include
else
ccflags-y +=-I$(srctree)/kernel/nanokernel/include
endif
asflags-y = $(ccflags-y)

View file

@@ -36,10 +36,7 @@ _ASM_FILE_PROLOGUE
GTEXT(_ExcExit)
GTEXT(_IntExit)
GDATA(_nanokernel)
#ifdef CONFIG_KERNEL_V2
GTEXT(_is_next_thread_current)
#endif
#if CONFIG_GDB_INFO
#define _EXIT_EXC_IF_FIBER_PREEMPTED beq _ExcExitWithGdbStub
@@ -93,44 +90,28 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
ldr r1, =_nanokernel
#ifdef CONFIG_KERNEL_V2
ldr r1, [r1, #__tNANO_current_OFFSET]
ldr r1, [r1, #__tNANO_current_OFFSET]
ldr r2, [r1, #__tTCS_prio_OFFSET]
ldr r3, [r1, #__tTCS_sched_locked_OFFSET]
ldr r2, [r1, #__tTCS_prio_OFFSET]
ldr r3, [r1, #__tTCS_sched_locked_OFFSET]
/* coop thread ? do not schedule */
cmp r2, #0
blt _EXIT_EXC
/* scheduler locked ? do not schedule */
cmp r3, #0
bgt _EXIT_EXC
push {lr}
blx _is_next_thread_current
#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
pop {r1}
mov lr, r1
#else
pop {lr}
#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
cmp r0, #0
bne _EXIT_EXC
#else
/* is the current thread preemptible (task) ? */
ldr r2, [r1, #__tNANO_flags_OFFSET]
ldr r3, =PREEMPTIBLE
ands r2, r3
_EXIT_EXC_IF_FIBER_PREEMPTED
/* is there a fiber ready ? */
ldr r2, [r1, #__tNANO_fiber_OFFSET]
/* coop thread ? do not schedule */
cmp r2, #0
_EXIT_EXC_IF_FIBER_NOT_READY
blt _EXIT_EXC
#endif
/* scheduler locked ? do not schedule */
cmp r3, #0
bgt _EXIT_EXC
push {lr}
blx _is_next_thread_current
#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
pop {r1}
mov lr, r1
#else
pop {lr}
#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
cmp r0, #0
bne _EXIT_EXC
/* context switch required, pend the PendSV exception */
ldr r1, =_SCS_ICSR
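
In C terms, the reschedule check that survives in _ExcExit/_IntExit for the unified kernel amounts to roughly the sketch below. This is an illustration only, pieced together from the assembly above; the field and helper names are the ones the assembly references, and the wrapper function name is invented.

/* Illustrative C rendering of the exit-from-exception reschedule check
 * performed by the assembly above. Not code from the tree; it assumes
 * struct tcs, _nanokernel, _is_next_thread_current() and _ScbPendsvSet()
 * as provided by the ARM kernel headers elsewhere in this change.
 */
static inline void exc_exit_reschedule_sketch(void)
{
	struct tcs *current = _nanokernel.current;

	if (current->prio < 0) {
		return;          /* cooperative thread: never preempt */
	}
	if (current->sched_locked > 0) {
		return;          /* scheduler lock held: do not preempt */
	}
	if (!_is_next_thread_current()) {
		_ScbPendsvSet(); /* pend PendSV so the switch happens on exit */
	}
}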

View file

@@ -1,61 +0,0 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief ARM Cortex-M fiber_abort() routine
*
* The ARM Cortex-M architecture provides its own fiber_abort() to deal with
* different CPU modes (handler vs thread) when a fiber aborts. When its entry
* point returns or when it aborts itself, the CPU is in thread mode and must
* call _Swap() (which triggers a service call), but when in handler mode, the
* CPU must exit handler mode to cause the context switch, and thus must queue
* the PendSV exception.
*/
#ifdef CONFIG_MICROKERNEL
#include <microkernel.h>
#include <micro_private_types.h>
#endif
#include <nano_private.h>
#include <toolchain.h>
#include <sections.h>
#include <nanokernel.h>
#include <arch/cpu.h>
/**
*
* @brief Abort the currently executing fiber
*
* Possible reasons for a fiber aborting:
*
* - the fiber explicitly aborts itself by calling this routine
* - the fiber implicitly aborts by returning from its entry point
* - the fiber encounters a fatal exception
*
* @return N/A
*/
void fiber_abort(void)
{
_thread_monitor_exit(_nanokernel.current);
if (_ScbIsInThreadMode()) {
_nano_fiber_swap();
} else {
_ScbPendsvSet();
}
}
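
The Makefile hunk above replaces fiber_abort.o and task_abort.o with thread_abort.o; the same thread-mode vs. handler-mode split carries over to the unified kernel. A hypothetical sketch of that shape (not the contents of the new thread_abort.c, and with an invented function name) might be:

/* Hypothetical unified-kernel counterpart of the removed fiber_abort():
 * aborting the running thread either swaps immediately (thread mode) or
 * pends PendSV so the switch happens on exception return (handler mode).
 * Helper names are taken from the removed file above; the real
 * thread_abort.c may differ.
 */
void thread_abort_self_sketch(void)
{
	_thread_monitor_exit(_nanokernel.current);

	if (_ScbIsInThreadMode()) {
		_Swap(irq_lock()); /* thread mode: switch out right away */
	} else {
		_ScbPendsvSet();   /* handler mode: defer to exception exit */
	}
}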

View file

@@ -39,9 +39,6 @@
/* ARM-specific tNANO structure member offsets */
#if !defined(CONFIG_KERNEL_V2)
GEN_OFFSET_SYM(tNANO, flags);
#endif
#ifdef CONFIG_SYS_POWER_MANAGEMENT
GEN_OFFSET_SYM(tNANO, idle);
#endif

View file

@@ -36,10 +36,8 @@ GTEXT(_Swap)
GTEXT(__svc)
#endif
GTEXT(__pendsv)
#ifdef CONFIG_KERNEL_V2
GTEXT(_get_next_ready_thread)
GDATA(_k_neg_eagain)
#endif
GDATA(_nanokernel)
@@ -122,45 +120,14 @@ SECTION_FUNC(TEXT, __pendsv)
/* find out incoming thread (fiber or task) */
#ifdef CONFIG_KERNEL_V2
mov.n v2, lr
movs.n v1, r1
blx _get_next_ready_thread
movs.n r1, v1
mov.n lr, v2
movs.n r2, r0
#else
/* is there a fiber ready ? */
ldr r2, [r1, #__tNANO_fiber_OFFSET]
cmp r2, #0
/*
* if so, remove fiber from list
* else, the task is the thread we're switching in
*/
#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
/* branch over remove if eq otherwise we branch over switch */
beq _switch_in_task
ldr r0, [r2, #__tTCS_link_OFFSET] /* then */
str r0, [r1, #__tNANO_fiber_OFFSET] /* then */
bne _switch_in_task_endif
_switch_in_task:
ldr r2, [r1, #__tNANO_task_OFFSET] /* else */
_switch_in_task_endif:
#else /* CONFIG_CPU_CORTEX_M3_M4 */
itte ne
ldrne.w r0, [r2, #__tTCS_link_OFFSET] /* then */
strne.w r0, [r1, #__tNANO_fiber_OFFSET] /* then */
ldreq.w r2, [r1, #__tNANO_task_OFFSET] /* else */
#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
#endif /* CONFIG_KERNEL_V2 */
/* r2 contains the new thread */
#if !defined(CONFIG_KERNEL_V2)
ldr r0, [r2, #__tTCS_flags_OFFSET]
str r0, [r1, #__tNANO_flags_OFFSET]
#endif
str r2, [r1, #__tNANO_current_OFFSET]
/*
@@ -271,7 +238,6 @@ SECTION_FUNC(TEXT, __svc)
_context_switch:
#endif
#if CONFIG_KERNEL_V2
/*
* Set _Swap()'s default return code to -EAGAIN. This eliminates the
* need for the timeout code to invoke fiberRtnValueSet().
@@ -281,7 +247,6 @@ _context_switch:
ldr r3, =_k_neg_eagain
ldr r3, [r3, #0]
str r3, [r2, #__tESF_a1_OFFSET]
#endif
/*
* Unlock interrupts:

View file

@@ -22,13 +22,10 @@
* platforms.
*/
#include <nanokernel.h>
#include <kernel.h>
#include <toolchain.h>
#include <sections.h>
#ifdef CONFIG_KERNEL_V2
#include <nano_private.h>
#endif
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
@@ -37,25 +34,6 @@
#define PRINTK(...)
#endif
#ifdef CONFIG_MICROKERNEL
static inline void nonEssentialTaskAbort(void)
{
PRINTK("Fatal fault in task ! Aborting task.\n");
#if defined(CONFIG_KERNEL_V2)
k_thread_abort(_current);
#else
extern void _TaskAbort(void);
_TaskAbort();
#endif
}
#define NON_ESSENTIAL_TASK_ABORT() nonEssentialTaskAbort()
#else
#define NON_ESSENTIAL_TASK_ABORT() \
do {/* nothing */ \
} while ((0))
#endif
/**
*
* @brief Fatal error handler
@@ -99,5 +77,7 @@ void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF * pEsf)
return;
}
NON_ESSENTIAL_TASK_ABORT();
PRINTK("Fatal fault in task ! Aborting task.\n");
k_thread_abort(_current);
}
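
Pieced together from this hunk, the tail of _SysFatalErrorHandler() now ends unconditionally in k_thread_abort() rather than going through the microkernel-only nonEssentialTaskAbort() wrapper. The earlier reason-specific handling and the essential-thread check sit outside the hunk and are only summarized in this sketch:

/* Sketch of the resulting handler tail; the elided part is outside the
 * hunk above and is only indicated by a comment here.
 */
void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
{
	/* ... reason-specific reporting and the early return for
	 * essential threads (not shown in this hunk) ...
	 */
	PRINTK("Fatal fault in task ! Aborting task.\n");
	k_thread_abort(_current);
}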

View file

@@ -1,70 +0,0 @@
/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief ARM Cortex-M _TaskAbort() routine
*
* The ARM Cortex-M architecture provides its own _TaskAbort() to deal with
* different CPU modes (handler vs thread) when a task aborts. When its entry
* point returns or when it aborts itself, the CPU is in thread mode and must
* call the equivalent of task_abort(<self>), but when in handler mode, the
* CPU must queue a packet to _k_server(), then exit handler mode to queue the
* PendSV exception and cause the immediate context switch to _k_server.
*/
#ifdef CONFIG_MICROKERNEL
#include <toolchain.h>
#include <sections.h>
#include <micro_private.h>
#include <nano_private.h>
#include <microkernel.h>
#include <nanokernel.h>
#include <misc/__assert.h>
static struct k_args cmd_packet;
/**
*
* @brief Abort the current task
*
* Possible reasons for a task aborting:
*
* - the task explicitly aborts itself by calling this routine
* - the task implicitly aborts by returning from its entry point
* - the task encounters a fatal exception
*
* @return N/A
*/
void _TaskAbort(void)
{
const int taskAbortCode = 1;
if (_ScbIsInThreadMode()) {
_task_ioctl(_k_current_task->id, taskAbortCode);
} else {
cmd_packet.Comm = _K_SVC_TASK_OP;
cmd_packet.args.g1.task = _k_current_task->id;
cmd_packet.args.g1.opt = taskAbortCode;
cmd_packet.alloc = false;
_k_current_task->args = &cmd_packet;
nano_isr_stack_push(&_k_command_stack, (uint32_t) &cmd_packet);
_ScbPendsvSet();
}
}
#endif /* CONFIG_MICROKERNEL */

View file

@@ -112,7 +112,6 @@ void _new_thread(char *pStackMem, unsigned stackSize,
pInitCtx->xpsr =
0x01000000UL; /* clear all, thumb bit is 1, even if RO */
#ifdef CONFIG_KERNEL_V2
/* k_q_node initialized upon first insertion in a list */
tcs->flags = options | K_PRESTART;
tcs->sched_locked = 0;
@@ -120,11 +119,6 @@ void _new_thread(char *pStackMem, unsigned stackSize,
/* static threads overwrite it afterwards with real value */
tcs->init_data = NULL;
tcs->fn_abort = NULL;
#else
tcs->link = NULL;
tcs->flags = priority == -1 ? TASK | PREEMPTIBLE : FIBER;
ARG_UNUSED(options);
#endif
tcs->prio = priority;
#ifdef CONFIG_THREAD_CUSTOM_DATA
@@ -141,11 +135,7 @@ void _new_thread(char *pStackMem, unsigned stackSize,
tcs->entry = (struct __thread_entry *)(pInitCtx);
#endif
#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_MICROKERNEL)
tcs->uk_task_ptr = uk_task_ptr;
#else
ARG_UNUSED(uk_task_ptr);
#endif
tcs->preempReg.psp = (uint32_t)pInitCtx;
tcs->basepri = 0;

View file

@@ -39,12 +39,8 @@ extern "C" {
#include <arch/cpu.h>
#ifndef _ASMLANGUAGE
#ifdef CONFIG_KERNEL_V2
#include <kernel.h>
#include <../../../kernel/unified/include/nano_internal.h>
#else
#include <../../../kernel/nanokernel/include/nano_internal.h>
#endif
#include <stdint.h>
#include <misc/dlist.h>
#include <atomic.h>
@@ -99,7 +95,6 @@ typedef struct preempt tPreempt;
/* Bitmask definitions for the struct tcs.flags bit field */
#ifdef CONFIG_KERNEL_V2
#define K_STATIC 0x00000800
#define K_READY 0x00000000 /* Thread is ready to run */
@@ -111,18 +106,6 @@ typedef struct preempt tPreempt;
#define K_DUMMY 0x00020000 /* Not a real thread */
#define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \
K_DEAD | K_SUSPENDED | K_DUMMY)
#else
#define FIBER 0x000
#define TASK 0x001 /* 1 = task, 0 = fiber */
#define INT_ACTIVE 0x002 /* 1 = execution context is interrupt handler */
#define EXC_ACTIVE 0x004 /* 1 = execution context is exception handler */
#define PREEMPTIBLE \
0x020 /* 1 = preemptible thread \
* NOTE: the value must be < 0x100 to be able to \
* use a small thumb instr with immediate \
* when loading PREEMPTIBLE in a GPR \
*/
#endif
#define USE_FP 0x010 /* 1 = thread uses floating point unit */
#define K_ESSENTIAL 0x200 /* 1 = system thread that must not abort */
@@ -161,7 +144,6 @@ struct preemp_float {
};
#endif
#if CONFIG_KERNEL_V2
/* 'struct tcs_base' must match the beginning of 'struct tcs' */
struct tcs_base {
sys_dnode_t k_q_node;
@@ -172,21 +154,14 @@ struct tcs_base {
struct _timeout timeout;
#endif
};
#endif
struct tcs {
#if CONFIG_KERNEL_V2
sys_dnode_t k_q_node; /* node object in any kernel queue */
uint32_t flags;
int prio;
void *swap_data;
#ifdef CONFIG_NANO_TIMEOUTS
struct _timeout timeout;
#endif
#else
struct tcs *link; /* singly-linked list in _nanokernel.fibers */
uint32_t flags;
int prio;
#endif
uint32_t basepri;
#ifdef CONFIG_THREAD_CUSTOM_DATA
@@ -198,22 +173,12 @@ struct tcs {
struct __thread_entry *entry; /* thread entry and parameters description */
struct tcs *next_thread; /* next item in list of ALL fiber+tasks */
#endif
#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_NANO_TIMEOUTS)
struct _nano_timeout nano_timeout;
#endif
#ifdef CONFIG_ERRNO
int errno_var;
#endif
#if !defined(CONFIG_KERNEL_V2)
#ifdef CONFIG_MICROKERNEL
void *uk_task_ptr;
#endif
#endif
#ifdef CONFIG_KERNEL_V2
atomic_t sched_locked;
void *init_data;
void (*fn_abort)(void);
#endif
#ifdef CONFIG_FLOAT
/*
* No cooperative floating point register set structure exists for
@@ -224,23 +189,14 @@ struct tcs {
#endif
};
#ifdef CONFIG_KERNEL_V2
struct ready_q {
struct k_thread *cache;
uint32_t prio_bmap[1];
sys_dlist_t q[K_NUM_PRIORITIES];
};
#endif
struct s_NANO {
#if !defined(CONFIG_KERNEL_V2)
struct tcs *fiber; /* singly linked list of runnable fiber */
struct tcs *task; /* pointer to runnable task */
#endif
struct tcs *current; /* currently scheduled thread (fiber or task) */
#if !defined(CONFIG_KERNEL_V2)
int flags; /* struct tcs->flags of 'current' thread */
#endif
#if defined(CONFIG_THREAD_MONITOR)
struct tcs *threads; /* singly linked list of ALL fiber+tasks */
@@ -256,13 +212,8 @@ struct s_NANO {
#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
sys_dlist_t timeout_q;
#ifndef CONFIG_KERNEL_V2
int32_t task_timeout;
#endif
#endif
#ifdef CONFIG_KERNEL_V2
struct ready_q ready_q;
#endif
};
typedef struct s_NANO tNANO;
@@ -275,9 +226,6 @@ extern void _FaultInit(void);
extern void _CpuIdleInit(void);
static ALWAYS_INLINE void nanoArchInit(void)
{
#if !defined(CONFIG_KERNEL_V2)
_nanokernel.flags = FIBER;
#endif
_InterruptStackSetup();
_ExcSetup();
_FaultInit();
@@ -305,11 +253,11 @@ static ALWAYS_INLINE void fiberRtnValueSet(struct tcs *fiber,
pEsf->a1 = value;
}
#ifdef CONFIG_KERNEL_V2
#define _current _nanokernel.current
#define _ready_q _nanokernel.ready_q
#define _timeout_q _nanokernel.timeout_q
#define _set_thread_return_value fiberRtnValueSet
#define _current _nanokernel.current
#define _ready_q _nanokernel.ready_q
#define _timeout_q _nanokernel.timeout_q
#define _set_thread_return_value fiberRtnValueSet
static ALWAYS_INLINE void
_set_thread_return_value_with_data(struct k_thread *thread, unsigned int value,
void *data)
@@ -317,8 +265,8 @@ _set_thread_return_value_with_data(struct k_thread *thread, unsigned int value,
_set_thread_return_value(thread, value);
thread->swap_data = data;
}
#define _IDLE_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES)
#endif /* CONFIG_KERNEL_V2 */
#define _IDLE_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES)
extern void nano_cpu_atomic_idle(unsigned int);
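
The _set_thread_return_value_with_data() inline shown above is how unified-kernel wait-queue code can hand an object to a thread it wakes: the swap return value and the payload travel together. A hypothetical caller (names and context invented purely for illustration) might look like:

/* Hypothetical wake-up path: make the waiter's pended _Swap() return 0
 * and let it find 'item' in thread->swap_data afterwards. The caller
 * name is invented for illustration only.
 */
static void wake_waiter_sketch(struct k_thread *waiter, void *item)
{
	_set_thread_return_value_with_data(waiter, 0, item);
	/* the waiter is then made ready and picked up by the scheduler */
}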