x86: remove legacy kernel support

Change-Id: I81111a58d1305bd521ea93adc40c66b43f20977c
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
Andrew Boie 2016-11-03 12:47:32 -07:00
commit 6e172b8abd
8 changed files with 2 additions and 360 deletions

View file

@ -1,9 +1,4 @@
ifeq ($(CONFIG_KERNEL_V2),y)
ccflags-y += -I$(srctree)/kernel/unified/include
else
ccflags-y += -I$(srctree)/kernel/nanokernel/include
ccflags-y += -I$(srctree)/kernel/microkernel/include
endif
ifeq ($(COMPILER)$(CONFIG_X86_IAMCU),clang)
# We rely on GAS for assembling, so don't use the integrated assembler
@ -23,9 +18,6 @@ obj-y += cpuhalt.o \
obj-$(CONFIG_IRQ_OFFLOAD) += irq_offload.o
obj-$(CONFIG_FP_SHARING) += float.o
ifneq ($(CONFIG_KERNEL_V2),y)
obj-$(CONFIG_MICROKERNEL) += strtask.o
endif
obj-$(CONFIG_GDT_DYNAMIC) += gdt.o
obj-$(CONFIG_REBOOT_RST_CNT) += reboot_rst_cnt.o

View file

@ -76,13 +76,6 @@
* system to enable FP resource sharing on its behalf.
*/
#if !defined(CONFIG_KERNEL_V2)
#ifdef CONFIG_MICROKERNEL
#include <microkernel.h>
#include <micro_private_types.h>
#endif /* CONFIG_MICROKERNEL */
#endif
#include <nano_private.h>
#include <toolchain.h>
#include <asm_inline.h>
@ -267,7 +260,6 @@ void _FpEnable(struct tcs *tcs, unsigned int options)
irq_unlock(imask);
}
#ifdef CONFIG_KERNEL_V2
/**
*
* @brief Enable preservation of non-integer context information
@ -280,33 +272,6 @@ void _FpEnable(struct tcs *tcs, unsigned int options)
* @return N/A
*/
FUNC_ALIAS(_FpEnable, k_float_enable, void);
#else
/**
*
* @brief Enable preservation of non-integer context information
*
* This routine allows a fiber to permit a task/fiber (including itself) to
* safely share the system's floating point registers with other tasks/fibers.
*
* See the description of _FpEnable() for further details.
*
* @return N/A
*/
FUNC_ALIAS(_FpEnable, fiber_float_enable, void);
/**
*
* @brief Enable preservation of non-integer context information
*
* This routine allows a task to permit a task/fiber (including itself) to
* safely share the system's floating point registers with other tasks/fibers.
*
* See the description of _FpEnable() for further details.
*
* @return N/A
*/
FUNC_ALIAS(_FpEnable, task_float_enable, void);
#endif /* CONFIG_KERNEL_V2 */
/**
*
@ -367,7 +332,6 @@ void _FpDisable(struct tcs *tcs)
irq_unlock(imask);
}
#ifdef CONFIG_KERNEL_V2
/**
*
* @brief Disable preservation of non-integer context information
@ -383,40 +347,6 @@ void _FpDisable(struct tcs *tcs)
* @return N/A
*/
FUNC_ALIAS(_FpDisable, k_float_disable, void);
#else
/**
*
* @brief Disable preservation of non-integer context information
*
* This routine allows a fiber to disallow a task/fiber (including itself) from
* safely sharing any of the system's floating point registers with other
* tasks/fibers.
*
* WARNING
* This routine should only be used to disable floating point support for
* a task/fiber that currently has such support enabled.
*
* @return N/A
*/
FUNC_ALIAS(_FpDisable, fiber_float_disable, void);
/**
*
* @brief Disable preservation of non-integer context information
*
* This routine allows a task to disallow a task/fiber (including itself) from
* safely sharing any of the system's floating point registers with other
* tasks/fibers.
*
* WARNING
* This routine should only be used to disable floating point support for
* a task/fiber that currently has such support enabled.
*
* @return N/A
*/
FUNC_ALIAS(_FpDisable, task_float_disable, void);
#endif /* CONFIG_KERNEL_V2 */
/**
*

View file

@ -42,10 +42,7 @@
/* externs */
GTEXT(_Swap)
#ifdef CONFIG_KERNEL_V2
GTEXT(_is_next_thread_current)
#endif
#ifdef CONFIG_SYS_POWER_MANAGEMENT
#if defined(CONFIG_NANOKERNEL) && defined(CONFIG_TICKLESS_IDLE)
@ -289,7 +286,6 @@ alreadyOnIntStack:
movl __tNANO_current_OFFSET (%ecx), %edx
#ifdef CONFIG_KERNEL_V2
/*
* Determine whether the execution of the ISR requires a context
* switch. If the thread is preemptible, scheduler is not locked and
@ -309,18 +305,6 @@ alreadyOnIntStack:
call _is_next_thread_current
testl %eax, %eax
jnz noReschedule
#else
/*
* Determine whether the execution of the ISR requires a context
* switch. If the interrupted thread is PREEMPTIBLE (a task) and
* _nanokernel.fiber is non-NULL, a _Swap() needs to occur.
*/
testl $PREEMPTIBLE, __tTCS_flags_OFFSET(%edx)
je noReschedule
cmpl $0, __tNANO_fiber_OFFSET (%ecx)
je noReschedule
#endif
/*
* Set the INT_ACTIVE bit in the tTCS to allow the upcoming call to
@ -334,13 +318,11 @@ alreadyOnIntStack:
*/
#if defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)
#ifdef CONFIG_KERNEL_V2
/*
* Reload _nanokernel.current as _is_next_thread_current()
* might have clobbered it.
*/
movl _nanokernel + __tNANO_current_OFFSET, %edx
#endif
orl $INT_ACTIVE, __tTCS_flags_OFFSET(%edx)
#endif

View file

@ -1,82 +0,0 @@
/*
* Copyright (c) 2010-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Intel nanokernel APIs to start a task
*
* Intel-specific parts of start_task(). Only FP functionality currently.
*/
#if !defined(CONFIG_KERNEL_V2)
#ifdef CONFIG_MICROKERNEL
#include <start_task_arch.h>
/*
* The following IA-32-specific task group is used for tasks that use SSE
* instructions. It is *not* formally reserved by SysGen for this purpose.
* See comments in thread.c regarding the use of SSE_GROUP, and comments
* in task.h regarding task groups reserved by SysGen.
*
* This identifier corresponds to the first user-defined task group.
* It must be updated if any changes are made to the reserved groups.
*/
#define SSE_GROUP 0x10
/**
 * @brief Intel-specific parts of task initialization
 *
 * Translates a task's group membership into the floating point thread
 * option bits: membership in SSE_GROUP selects USE_SSE, otherwise
 * membership in FPU_GROUP selects USE_FP, otherwise no FP option is added.
 *
 * @param X pointer to task control block
 * @param pOpt thread options container
 *
 * @return N/A
 */
void _StartTaskArch(struct k_task *X, unsigned int *pOpt)
{
	/*
	 * The IA-32 nanokernel uses the USE_FP bit in struct tcs->flags as a
	 * "dirty bit": it is set whenever a thread uses any non-integer
	 * capability (x87 FPU, SSE, or both), while USE_SSE is set only when
	 * a thread uses SSE instructions.
	 *
	 * Callers of fiber_fiber_start(), task_fiber_start(), or _new_thread()
	 * need not follow that internal protocol: a thread using only the x87
	 * FPU specifies USE_FP, and a thread using SSE (and possibly x87)
	 * specifies only USE_SSE.
	 *
	 * Likewise for task groups: a task using only the x87 FPU need only be
	 * placed in FPU_GROUP, and a task using SSE (and possibly x87) need
	 * only be placed in SSE_GROUP.
	 */
	unsigned int fp_option = 0;

	if (X->group & SSE_GROUP) {
		fp_option = USE_SSE;
	} else if (X->group & FPU_GROUP) {
		fp_option = USE_FP;
	}

	*pOpt |= fp_option;
}
#endif /* CONFIG_MICROKERNEL */
#endif

View file

@ -38,12 +38,8 @@
GTEXT(_Swap)
/* externs */
#ifdef CONFIG_KERNEL_V2
GTEXT(_get_next_ready_thread)
GDATA(_k_neg_eagain)
#endif
/**
*
@ -120,7 +116,6 @@ SECTION_FUNC(TEXT, _Swap)
pushl %ebx
pushl %ebp
#if CONFIG_KERNEL_V2
/*
* Carve space for the return value. Setting it to a default of
* -EAGAIN eliminates the need for the timeout code to set it.
@ -129,9 +124,6 @@ SECTION_FUNC(TEXT, _Swap)
*/
pushl _k_neg_eagain
#else
pushl %ebx
#endif
/* save esp into tTCS structure */
@ -143,37 +135,7 @@ SECTION_FUNC(TEXT, _Swap)
/* Register the context switch */
call _sys_k_event_logger_context_switch
#endif
#ifdef CONFIG_KERNEL_V2
call _get_next_ready_thread
#else
/*
* Determine what thread needs to be swapped in.
* Note that the %edi still contains &_nanokernel.
*/
movl __tNANO_fiber_OFFSET (%edi), %eax
testl %eax, %eax
jz swapTask /* Jump if no ready fibers */
/* remove the head 'TCS *' from the runnable fiber list */
movl __tTCS_link_OFFSET (%eax), %ebx
movl %ebx, __tNANO_fiber_OFFSET (%edi)
jmp restoreContext
/*
* There are no fiber in the run queue, thus swap in the task
* (_nanokernel.task). The 'task' field will _never_ be NULL.
*/
swapTask:
movl __tNANO_task_OFFSET (%edi), %eax
#endif
/* fall through to 'restoreContext' */
/*
* At this point, the %eax register contains the 'tTCS *' of
@ -181,8 +143,6 @@ swapTask:
* contains &_nanokernel.
*/
restoreContext:
#ifdef CONFIG_FP_SHARING
/*
* Clear the CR0[TS] bit (in the event the current thread

View file

@ -25,13 +25,10 @@
#include <nanokernel.h>
#include <toolchain.h>
#include <sections.h>
#ifdef CONFIG_KERNEL_V2
#include <nano_private.h> /* to get access to '_current' */
#endif
/* override PRINTK from nano_private.h */
#if defined(CONFIG_KERNEL_V2) && defined(PRINTK)
#ifdef PRINTK
#undef PRINTK
#endif
@ -71,18 +68,11 @@ FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
ARG_UNUSED(pEsf);
if ((curCtx != NANO_CTX_ISR) && !_is_thread_essential()) {
#ifdef CONFIG_MICROKERNEL
if (curCtx == NANO_CTX_TASK) {
extern FUNC_NORETURN void _TaskAbort(void);
PRINTK("Fatal task error! Aborting task.\n");
#ifdef CONFIG_KERNEL_V2
k_thread_abort(_current);
#else
_TaskAbort();
#endif
} else
#endif /* CONFIG_MICROKERNEL */
{
} else {
PRINTK("Fatal fiber error! Aborting fiber.\n");
fiber_abort();
}

View file

@ -22,12 +22,6 @@
* processor architecture.
*/
#if !defined(CONFIG_KERNEL_V2)
#ifdef CONFIG_MICROKERNEL
#include <microkernel.h>
#include <micro_private_types.h>
#endif /* CONFIG_MICROKERNEL */
#endif
#ifdef CONFIG_INIT_STACKS
#include <string.h>
#endif /* CONFIG_INIT_STACKS */
@ -98,7 +92,6 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,
tcs->excNestCount = 0;
#endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
#ifdef CONFIG_KERNEL_V2
/* k_q_node initialized upon first insertion in a list */
#ifdef CONFIG_FP_SHARING
/* ensure USE_FP is set when USE_SSE is set */
@ -112,14 +105,6 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,
/* static threads overwrite it afterwards with real value */
tcs->init_data = NULL;
tcs->fn_abort = NULL;
#else
if (priority == -1)
tcs->flags = PREEMPTIBLE | TASK;
else
tcs->flags = FIBER;
ARG_UNUSED(options);
tcs->link = (struct tcs *)NULL; /* thread not inserted into list yet */
#endif
#ifdef CONFIG_THREAD_CUSTOM_DATA
/* Initialize custom data field (value is opaque to kernel) */
@ -127,11 +112,7 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,
tcs->custom_data = NULL;
#endif
#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_MICROKERNEL)
tcs->uk_task_ptr = uk_task_ptr;
#else
ARG_UNUSED(uk_task_ptr);
#endif
/*
* The creation of the initial stack for the task has already been done.
@ -163,64 +144,6 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,
tcs->coopReg.esp = (unsigned long)pInitialCtx;
PRINTK("\nInitial context ESP = 0x%x\n", tcs->coopReg.esp);
#ifndef CONFIG_KERNEL_V2
#ifdef CONFIG_FP_SHARING
/*
* Indicate if the thread is permitted to use floating point instructions.
*
* The first time the new thread is scheduled by _Swap() it is guaranteed
* to inherit an FPU that is in a "sane" state (if the most recent user of
* the FPU was cooperatively swapped out) or a completely "clean" state
* (if the most recent user of the FPU was pre-empted, or if the new thread
* is the first user of the FPU).
*
* The USE_FP flag bit is set in the struct tcs structure if a thread is
* authorized to use _any_ non-integer capability, whether it's the basic
* x87 FPU/MMX capability, SSE instructions, or a combination of both. The
* USE_SSE flag bit is set only if a thread can use SSE instructions.
*
* Note: Callers need not follow the aforementioned protocol when passing
* in thread options. It is legal for the caller to specify _only_ the
* USE_SSE option bit if a thread will be utilizing SSE instructions (and
* possibly x87 FPU/MMX instructions).
*/
/*
* Implementation Remark:
* Until SysGen reserves SSE_GROUP as 0x10, the following conditional is
* required so that at least systems configured with FLOAT will still operate
* correctly. The issue is that SysGen will utilize group 0x10 user-defined
* groups, and thus tasks placed in the user-defined group will have the
* SSE_GROUP (but not the FPU_GROUP) bit set. This results in both the USE_FP
* and USE_SSE bits being set in the struct tcs. For systems configured only with
* FLOAT, the setting of the USE_SSE is harmless, but the setting of USE_FP is
* wasteful. Thus to ensure that systems configured only with FLOAT
* behave as expected, the USE_SSE option bit is ignored.
*
* Clearly, even with the following conditional, systems configured with
* SSE will not behave as expected since tasks may still inadvertently
* have the USE_SSE+USE_FP bits set even though they are integer only.
*
* Once the generator tool has been updated to reserve the SSE_GROUP, the
* correct code to use is:
*
* options &= USE_FP | USE_SSE;
*
*/
#ifdef CONFIG_SSE
options &= USE_FP | USE_SSE;
#else
options &= USE_FP;
#endif
if (options != 0) {
tcs->flags |= (options | USE_FP);
}
#endif /* CONFIG_FP_SHARING */
#endif /* CONFIG_KERNEL_V2 */
PRINTK("\nstruct tcs * = 0x%x", tcs);
thread_monitor_init(tcs);

View file

@ -40,13 +40,8 @@
#include <exception.h>
#ifndef _ASMLANGUAGE
#ifdef CONFIG_KERNEL_V2
#include <kernel.h> /* public kernel API */
#include <../../../kernel/unified/include/nano_internal.h>
#else
#include <nanokernel.h> /* public nanokernel API */
#include <../../../kernel/nanokernel/include/nano_internal.h>
#endif
#include <stdint.h>
#include <misc/dlist.h>
#endif
@ -67,7 +62,6 @@
* nanokernel/x86/arch.h.
*/
#ifdef CONFIG_KERNEL_V2
#define K_STATIC 0x00000800
#define K_READY 0x00000000 /* Thread is ready to run */
@ -79,11 +73,6 @@
#define K_DUMMY 0x00020000 /* Not a real thread */
#define K_EXECUTION_MASK (K_TIMING | K_PENDING | K_PRESTART | \
K_DEAD | K_SUSPENDED | K_DUMMY)
#else
#define FIBER 0
#define TASK 0x1 /* 1 = task, 0 = fiber */
#define PREEMPTIBLE 0x100 /* 1 = preemptible thread */
#endif
#define INT_ACTIVE 0x2 /* 1 = executing context is interrupt handler */
#define EXC_ACTIVE 0x4 /* 1 = executing context is exception handler */
@ -624,7 +613,6 @@ typedef struct s_preempFloatReg {
} floatRegsUnion;
} tPreempFloatReg;
#if CONFIG_KERNEL_V2
/* 'struct tcs_base' must match the beginning of 'struct tcs' */
struct tcs_base {
sys_dnode_t k_q_node;
@ -635,7 +623,6 @@ struct tcs_base {
struct _timeout timeout;
#endif
};
#endif
/*
* The thread control structure definition. It contains the
@ -651,7 +638,6 @@ struct tcs {
* nanokernel FIFO).
*/
#if CONFIG_KERNEL_V2
sys_dnode_t k_q_node; /* node object in any kernel queue */
int flags;
int prio; /* thread priority used to sort linked list */
@ -659,20 +645,6 @@ struct tcs {
#ifdef CONFIG_NANO_TIMEOUTS
struct _timeout timeout;
#endif
#else
struct tcs *link;
/*
* See the flag definitions above for valid bit settings. This
* field must remain near the start of struct tcs, specifically
* before any #ifdef'ed fields since the host tools currently use a
* fixed
* offset to read the 'flags' field.
*/
int flags;
int prio; /* thread priority used to sort linked list */
#endif
/*
* Storage space for integer registers. These must also remain near
@ -704,25 +676,13 @@ struct tcs {
void *custom_data; /* available for custom use */
#endif
#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_NANO_TIMEOUTS)
struct _nano_timeout nano_timeout;
#endif
#ifdef CONFIG_ERRNO
int errno_var;
#endif
#if !defined(CONFIG_KERNEL_V2)
#ifdef CONFIG_MICROKERNEL
void *uk_task_ptr;
#endif
#endif
#ifdef CONFIG_KERNEL_V2
atomic_t sched_locked;
void *init_data;
void (*fn_abort)(void);
#endif
/*
* The location of all floating point related structures/fields MUST be
@ -743,13 +703,11 @@ struct tcs {
};
#ifdef CONFIG_KERNEL_V2
/* Scheduler ready queue: per-priority lists of runnable threads */
struct ready_q {
struct k_thread *cache; /* NOTE(review): appears to cache the next thread to schedule — confirm against scheduler code */
uint32_t prio_bmap[1]; /* presumably a bitmap marking which q[] entries are non-empty — verify */
sys_dlist_t q[K_NUM_PRIORITIES]; /* one doubly-linked list per priority level */
};
#endif
/*
@ -758,10 +716,6 @@ struct ready_q {
*/
typedef struct s_NANO {
#if !defined(CONFIG_KERNEL_V2)
struct tcs *fiber; /* singly linked list of runnable fibers */
struct tcs *task; /* pointer to runnable task */
#endif
struct tcs *current; /* currently scheduled thread (fiber or task) */
#if defined(CONFIG_THREAD_MONITOR)
struct tcs *threads; /* singly linked list of ALL fiber+tasks */
@ -792,13 +746,8 @@ typedef struct s_NANO {
#endif /* CONFIG_FP_SHARING */
#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
sys_dlist_t timeout_q;
#ifndef CONFIG_KERNEL_V2
int32_t task_timeout;
#endif
#endif
#ifdef CONFIG_KERNEL_V2
struct ready_q ready_q;
#endif
} tNANO;
/* stack alignment related macros: STACK_ALIGN_SIZE is defined above */
@ -878,7 +827,6 @@ static inline void fiberRtnValueSet(struct tcs *fiber, unsigned int value)
*(unsigned int *)(fiber->coopReg.esp) = value;
}
#ifdef CONFIG_KERNEL_V2
#define _current _nanokernel.current
#define _ready_q _nanokernel.ready_q
#define _timeout_q _nanokernel.timeout_q
@ -891,7 +839,6 @@ _set_thread_return_value_with_data(struct k_thread *thread, unsigned int value,
thread->swap_data = data;
}
#define _IDLE_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES)
#endif /* CONFIG_KERNEL_V2 */
/* function prototypes */