x86: merge IAMCU and SYS V core arch code
Having two parallel implementations is a maintenance issue, especially when
some strategically placed #ifdefs will suffice. We prefer the ASM versions
for SYS V, as we need complete control of the emitted assembly for interrupt
handling and context switching. The SYS V code is also far more mature; the
IAMCU C code has known issues with -fomit-frame-pointer.

The only difference between the two calling conventions is that the first
three function arguments are supplied in eax, edx, and ecx instead of on the
stack.

Issue: ZEP-49
Change-Id: I9245e4b0ffbeb6d890a4f08bc8a3a49faa6d8e7b
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
parent 8be61102da
commit 8c524a291e

13 changed files with 129 additions and 628 deletions
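As background for the message above: under the i386 SYS V ABI every function
argument is passed on the stack, while the IAMCU ABI passes the first three
integer arguments in eax, edx, and ecx. A minimal sketch of how a stub would
hand a single argument to a handler under each convention (hypothetical
my_isr/my_param symbols, not taken from this tree):

	/* SYS V: the argument goes on the stack; the caller cleans it up */
	sysv_stub:
		pushl	$my_param
		call	my_isr
		addl	$4, %esp
		ret

	/* IAMCU: the first argument travels in %eax (2nd: %edx, 3rd: %ecx) */
	iamcu_stub:
		movl	$my_param, %eax
		call	my_isr
		ret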
@@ -11,16 +11,11 @@ endif
 # character starts a comment
 KBUILD_AFLAGS += -Wa,--divide
 
-ifndef CONFIG_X86_IAMCU
-obj-y += i386_sysV_abi/
-else
-obj-y += iamcu_abi/
-endif
-
 obj-y += fatal.o cpuhalt.o \
 	msr.o dynamic.o intconnect.o \
 	excconnect.o sys_fatal_error_handler.o \
-	crt0.o atomic.o cache_s.o cache.o excstub.o
+	crt0.o atomic.o cache_s.o cache.o excstub.o \
+	intstub.o swap.o thread.o
 
 obj-$(CONFIG_IRQ_OFFLOAD) += irq_offload.o
 obj-$(CONFIG_FP_SHARING) += float.o
@@ -1,10 +0,0 @@
-ccflags-y += -I$(srctree)/kernel/nanokernel/include
-ccflags-y += -I$(srctree)/kernel/microkernel/include
-
-# see explanation for flags in arch/x86/core/Makefile
-ifeq ($(COMPILER)$(CONFIG_X86_IAMCU),clang)
-KBUILD_AFLAGS += -no-integrated-as
-endif
-KBUILD_AFLAGS += -Wa,--divide
-
-obj-y = intstub.o swap.o thread.o
@@ -1,6 +0,0 @@
-ccflags-y += -I$(srctree)/kernel/nanokernel/include
-ccflags-y += -I$(srctree)/kernel/microkernel/include
-
-obj-y += swap.o intstub.o thread.o \
-	iamcu.o
-
@@ -1,129 +0,0 @@
-/*
- * Copyright (c) 2015 Intel Corporation.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define _ASMLANGUAGE
-
-#include <toolchain.h>
-#include <sections.h>
-#include <nano_private.h>
-#include <arch/x86/asm.h>
-#include <offsets.h>	/* nanokernel structure offset definitions */
-#include <arch/cpu.h>	/* _NANO_ERR_SPURIOUS_INT */
-
-
-	GTEXT(_thread_entry_wrapper)
-	GTEXT(_thread_entry)
-	GTEXT(_irq_sw_handler)
-
-/*
- * @brief Wrapper for _thread_entry
- *
- * The routine pops parameters for the _thread_entry from stack frame, prepared
- * by the _new_thread() routine.
- *
- * @return N/A
- */
-
-SECTION_FUNC(TEXT, _thread_entry_wrapper)
-	popl %eax
-	popl %edx
-	popl %ecx
-	jmp _thread_entry
-
-
-#if CONFIG_IRQ_OFFLOAD
-SECTION_FUNC(TEXT, _irq_sw_handler)
-	pushl %eax
-	pushl %edx
-	pushl %ecx
-	movl $_irq_do_offload, %eax
-	call _execute_handler
-	pop %ecx
-	pop %edx
-	pop %eax
-	iret
-#endif
-
-#if ALL_DYN_IRQ_STUBS > 0
-BRANCH_LABEL(_DynIntStubCommon)
-	pushl %eax
-	pushl %ecx
-	movl $_common_dynamic_irq_handler, %eax
-	call _execute_handler
-	/* Clean up and call IRET */
-	pop %ecx
-	pop %eax
-	pop %edx
-	iret
-
-
-/* Create all the dynamic IRQ stubs
- *
- * NOTE: Please update DYN_STUB_SIZE in include/arch/x86/arch.h if you change
- * how large the generated stubs are, otherwise _get_dynamic_stub() will
- * be unable to correctly determine the offset
- */
-
-/*
- * Create nice labels for all the stubs so we can see where we
- * are in a debugger
- */
-.altmacro
-.macro __INT_STUB_NUM id
-BRANCH_LABEL(_DynIntStub\id)
-.endm
-.macro INT_STUB_NUM id
-__INT_STUB_NUM %id
-.endm
-
-GTEXT(_DynIntStubsBegin)
-SECTION_FUNC(TEXT, _DynIntStubsBegin)
-stub_num = 0
-
-.rept ((ALL_DYN_IRQ_STUBS + DYN_STUB_PER_BLOCK - 1) / DYN_STUB_PER_BLOCK)
-	block_counter = 0
-	.rept DYN_STUB_PER_BLOCK
-		.if stub_num < ALL_DYN_IRQ_STUBS
-			INT_STUB_NUM stub_num
-
-			pushl %edx
-
-			/*
-			 * _common_dynamic_irq_handler() uses this to determine
-			 * which ISR/param to use, see intconnect.c
-			 */
-			movl $stub_num, %edx
-
-			/*
-			 * Check to make sure this isn't the last stub in
-			 * a block, in which case we just fall through
-			 */
-			.if (block_counter <> (DYN_STUB_PER_BLOCK - 1) && \
-			     (stub_num <> ALL_DYN_IRQ_STUBS - 1))
-				/* This should always be a 2-byte jmp rel8 */
-				jmp 1f
-			.endif
-			stub_num = stub_num + 1
-			block_counter = block_counter + 1
-		.endif
-	.endr
-	/*
-	 * This must a 5-bvte jump rel32, which is why _DynStubCommon
-	 * is before the actual stubs
-	 */
-1:	jmp _DynIntStubCommon
-.endr
-#endif /* ALL_DYN_IRQ_STUBS */
@@ -1,152 +0,0 @@
-/*
- * Copyright (c) 2015 Intel Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <nano_private.h>
-#include <drivers/loapic.h>
-
-#ifdef CONFIG_SYS_POWER_MANAGEMENT
-
-#if defined(CONFIG_NANOKERNEL) && defined(CONFIG_TICKLESS_IDLE)
-extern void _power_save_idle_exit(void);
-#else
-extern void _sys_power_save_idle_exit(int32_t ticks);
-#endif
-
-#endif
-
-#ifdef CONFIG_NESTED_INTERRUPTS
-static inline void enable_nested_interrupts(void)
-{
-	__asm__ volatile("sti");
-}
-
-static inline void disable_nested_interrupts(void)
-{
-	__asm__ volatile("cli");
-}
-#else
-static inline void enable_nested_interrupts(void){};
-static inline void disable_nested_interrupts(void){};
-#endif
-
-#ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
-extern void _sys_k_event_logger_interrupt(void);
-#else
-#define _sys_k_event_logger_interrupt()
-#endif
-
-#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
-extern void _sys_k_event_logger_exit_sleep(void);
-#else
-#define _sys_k_event_logger_exit_sleep()
-#endif
-
-typedef void (*int_handler_t) (int context);
-
-void _execute_handler(int_handler_t function, int context)
-{
-	_int_latency_start();
-
-	_sys_k_event_logger_interrupt();
-
-	_sys_k_event_logger_exit_sleep();
-
-#ifdef CONFIG_NESTED_INTERRUPTS
-	if (!_nanokernel.nested)
-#endif
-	{
-		/* move to the interrupt stack and push current stack
-		 * pointer onto interrupt stack
-		 */
-		__asm__ volatile ("movl %%esp, %%edx \n\t"
-				  "movl %0, %%esp \n\t"
-				  "pushl %%edx \n\t"
-				  :
-				  :"m" (_nanokernel.common_isp)
-				  :"%edx"
-				  );
-	}
-	_nanokernel.nested++;
-
-#ifdef CONFIG_SYS_POWER_MANAGEMENT
-
-#if defined(CONFIG_NANOKERNEL) && defined(CONFIG_TICKLESS_IDLE)
-	_power_save_idle_exit();
-#else
-	if (_nanokernel.idle) {
-		_sys_power_save_idle_exit(_nanokernel.idle);
-		_nanokernel.idle = 0;
-	}
-#endif
-
-#endif
-	_int_latency_stop();
-	enable_nested_interrupts();
-
-	(*function)(context);
-	_loapic_eoi();
-
-	disable_nested_interrupts();
-	_nanokernel.nested--;
-
-	/* Are we returning to a task or fiber context? If so we need
-	 * to do some work based on the context that was interrupted
-	 */
-#ifdef CONFIG_NESTED_INTERRUPTS
-	if (!_nanokernel.nested)
-#endif
-	{
-		/* switch to kernel stack */
-		__asm__ volatile ("popl %esp");
-
-		/* if the interrupted context was a task we need to
-		 * swap back to the interrupted context
-		 */
-		if ((_nanokernel.current->flags & PREEMPTIBLE) &&
-		    _nanokernel.fiber) {
-			/* move flags into arg0 we can't use local
-			 * variables here since the stack may have
-			 * changed.
-			 */
-			__asm__ volatile ("pushfl \n\t"
-					  "popl %eax \n\t"
-					  "call _Swap");
-		}
-	}
-}
-
-void _SpuriousIntHandler(void)
-{
-	__asm__ volatile("cld"); /* clear direction flag */
-
-	/*
-	 * The task's regular stack is being used, but push the value of ESP
-	 * anyway so that _ExcExit can "recover the stack pointer"
-	 * without determining whether the exception occurred while CPL=3
-	 */
-	__asm__ volatile ("pushl %esp");
-
-again:
-	_NanoFatalErrorHandler(_NANO_ERR_SPURIOUS_INT, &_default_esf);
-	/* The handler should no return but if it does call it again */
-	goto again;
-}
-
-void _SpuriousIntNoErrCodeHandler(void)
-{
-	__asm__ volatile ("pushl %eax");
-	_SpuriousIntHandler();
-}
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2015 Intel Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <toolchain.h>
-#include <sections.h>
-#include <nano_private.h>
-
-#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
-extern void _sys_k_event_logger_context_switch(void);
-#else
-#define _sys_k_event_logger_context_switch()
-#endif
-
-/* Stack protector disabled here; we switch stacks, so the sentinel
- * placed by the stack protection code isn't there when it checks for it
- * at the end of the function
- */
-unsigned int __attribute__((optimize("-fno-stack-protector")))
-_Swap(unsigned int eflags)
-{
-	struct tcs *next;
-	int rv;
-
-	/* Save the current context onto the stack */
-	__asm__ volatile("pushl %eax\n\t" /* push eflags _Swap argumet*/
-			 "pushl %edi\n\t"
-			 "pushl %esi\n\t"
-			 "pushl %ebx\n\t"
-			 "pushl %ebp\n\t"
-			 "pushl %ebx\n\t"); /* eax slot for fiber return */
-
-	/* save the stack pointer into the current context structure */
-	__asm__ volatile("mov %%esp, %0"
-			 :"=m" (_nanokernel.current->coopReg.esp));
-
-	_sys_k_event_logger_context_switch();
-
-	/* find the next context to run */
-	if (_nanokernel.fiber) {
-		next = _nanokernel.fiber;
-		_nanokernel.fiber = (struct tcs *)_nanokernel.fiber->link;
-	} else {
-		next = _nanokernel.task;
-	}
-	_nanokernel.current = next;
-
-	/* recover the stack pointer for the incoming context */
-	__asm__ volatile("mov %0, %%esp"
-			 :
-			 :"m" (next->coopReg.esp));
-
-	/* restore the context */
-	__asm__ volatile("popl %eax\n\t"
-			 "popl %ebp\n\t"
-			 "popl %ebx\n\t"
-			 "popl %esi\n\t"
-			 "popl %edi\n\t"
-			 "popfl \n\t"
-			 );
-
-	/* The correct return value is already in eax but this makes
-	 * the compiler happy
-	 */
-	__asm__ volatile("mov %%eax, %0"
-			 :"=m" (rv)
-			 );
-	return rv;
-}
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2015 Intel Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifdef CONFIG_MICROKERNEL
-#include <microkernel.h>
-#include <micro_private_types.h>
-#endif /* CONFIG_MICROKERNEL */
-#ifdef CONFIG_INIT_STACKS
-#include <string.h>
-#endif /* CONFIG_INIT_STACKS */
-
-#include <toolchain.h>
-#include <sections.h>
-#include <nano_private.h>
-#include <wait_q.h>
-
-/* the one and only nanokernel control structure */
-
-tNANO _nanokernel = {0};
-
-/* forward declaration to asm function to adjust setup the arguments
- * to _thread_entry()
- */
-void _thread_entry_wrapper(_thread_entry_t, _thread_arg_t,
-			   _thread_arg_t, _thread_arg_t);
-
-/**
- *
- * @brief Create a new kernel execution context
- *
- * This function initializes a thread control structure (TCS) for a
- * new kernel execution context. A fake stack frame is created as if
- * the context had been "swapped out" via _Swap()
- *
- * @param stack_memory pointer to the context stack area
- * @param stack_size size of contexts stack area
- * @param thread_func new contexts entry function
- * @param parameter1 first entry function parameter
- * @param parameter2 second entry function parameter
- * @param parameter3 third entry function parameter
- * @param priority Priority of the new context
- * @param options Additional options for the context
- *
- * @return none
- *
- * \NOMANUAL
- */
-
-void _new_thread(char *stack_memory, unsigned stack_size,
-		 void *uk_task_ptr, _thread_entry_t thread_func,
-		 void *parameter1, void *parameter2, void *parameter3,
-		 int priority, unsigned options)
-{
-	unsigned long *thread_context;
-	struct tcs *tcs = (struct tcs *)stack_memory;
-
-	ARG_UNUSED(options);
-
-#ifdef CONFIG_INIT_STACKS
-	memset(stack_memory, 0xaa, stack_size);
-#endif
-
-	tcs->link = (struct tcs *)NULL; /* thread not inserted into list yet */
-	tcs->prio = priority;
-
-	if (priority == -1) {
-		tcs->flags = PREEMPTIBLE | TASK;
-	} else {
-		tcs->flags = FIBER;
-	}
-
-#ifdef CONFIG_THREAD_CUSTOM_DATA
-	/* Initialize custom data field (value is opaque to kernel) */
-
-	tcs->custom_data = NULL;
-#endif
-
-	/* carve the thread entry struct from the "base" of the stack */
-
-	thread_context =
-		(unsigned long *)STACK_ROUND_DOWN(stack_memory + stack_size);
-
-	/*
-	 * Create an initial context on the stack expected by the _Swap()
-	 * primitive.
-	 * Given that both task and fibers execute at privilege 0, the
-	 * setup for both threads are equivalent.
-	 */
-
-	/* push arguments required by _thread_entry() */
-
-	*--thread_context = (unsigned long)parameter3;
-	*--thread_context = (unsigned long)parameter2;
-	*--thread_context = (unsigned long)parameter1;
-	*--thread_context = (unsigned long)thread_func;
-
-	/* push initial EFLAGS; only modify IF and IOPL bits */
-
-	*--thread_context = (unsigned long)_thread_entry_wrapper;
-	*--thread_context = (EflagsGet() & ~EFLAGS_MASK) | EFLAGS_INITIAL;
-	*--thread_context = 0;
-	*--thread_context = 0;
-	*--thread_context = 0;
-	--thread_context;
-	*thread_context = (unsigned long)(thread_context + 4);
-	*--thread_context = 0;
-
-#ifdef CONFIG_MICROKERNEL
-	tcs->uk_task_ptr = uk_task_ptr;
-#else
-	ARG_UNUSED(uk_task_ptr);
-#endif
-	tcs->coopReg.esp = (unsigned long)thread_context;
-#if defined(CONFIG_THREAD_MONITOR)
-	{
-		unsigned int imask;
-
-		/*
-		 * Add the newly initialized thread to head of the
-		 * list of threads. This singly linked list of threads
-		 * maintains ALL the threads in the system: both tasks
-		 * and fibers regardless of whether they are runnable.
-		 */
-
-		imask = irq_lock();
-		tcs->next_thread = _nanokernel.threads;
-		_nanokernel.threads = tcs;
-		irq_unlock(imask);
-	}
-#endif /* CONFIG_THREAD_MONITOR */
-
-	_nano_timeout_tcs_init(tcs);
-}
@@ -307,7 +307,7 @@ int _arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
  *
  * @param stub_idx Index into the dyn_irq_list array
  */
-void _common_dynamic_irq_handler(uint32_t stub_idx)
+void _common_dynamic_irq_handler(uint8_t stub_idx)
 {
 	dyn_irq_list[stub_idx].handler(dyn_irq_list[stub_idx].param);
 }
 
@@ -245,6 +245,7 @@ BRANCH_LABEL(alreadyOnIntStack)
 
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
 BRANCH_LABEL(_HandleIdle)
+	/* Preserve eax which contains stub return address */
 #if defined(CONFIG_NANOKERNEL) && defined(CONFIG_TICKLESS_IDLE)
 	pushl %eax
 	call _power_save_idle_exit
@@ -272,15 +273,19 @@ BRANCH_LABEL(_HandleIdle)
 #endif /* CONFIG_SYS_POWER_MANAGEMENT */
 
 /**
- * @brief Perform EOI, clean up stack, and do interrupt exit
+ * @brief Perform EOI and do interrupt exit
  *
  * This is used by the interrupt stubs, which all leave the stack in
  * a particular state and need to poke the interrupt controller.
- * Prior to running the logic in _IntExit, the ISR parameter is popped off
- * the stack and EOI is set to the LOAPIC.
  */
 SECTION_FUNC(TEXT, _IntExitWithEoi)
-	popl %eax	/* Pushed onto stack by stub */
+#ifndef CONFIG_X86_IAMCU
+	/* For SYS V, the stub pushes an argument onto the stack to be
+	 * consumed by the handler, remove it since the handler is now
+	 * finished
+	 */
+	popl %eax
+#endif
 #if CONFIG_EOI_FORWARDING_BUG
 	call _lakemont_eoi
 #endif
@@ -380,8 +385,16 @@ BRANCH_LABEL(_IntExit)
 #endif
 
 	pushfl	/* push KERNEL_LOCK_KEY argument */
+#ifdef CONFIG_X86_IAMCU
+	/* IAMCU first argument goes into a register, not the stack.
+	 */
+	popl %eax
+#endif
 	call _Swap
 
+#ifndef CONFIG_X86_IAMCU
+	addl $4, %esp	/* pop KERNEL_LOCK_KEY argument */
+#endif
 	/*
 	 * The interrupted thread has now been scheduled,
 	 * as the result of a _later_ invocation of _Swap().
@@ -390,7 +403,6 @@ BRANCH_LABEL(_IntExit)
 	 * returning control to it at the point where it was interrupted ...
 	 */
 
-
 #if ( defined(CONFIG_FP_SHARING) || \
       defined(CONFIG_GDB_INFO) )
 	/*
@@ -403,12 +415,6 @@ BRANCH_LABEL(_IntExit)
 	andl $~INT_ACTIVE, __tTCS_flags_OFFSET (%eax)
 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
 
-
-	addl $4, %esp	/* pop KERNEL_LOCK_KEY argument */
-
-
-
-
 	/* Restore volatile registers and return to the interrupted thread */
 #ifdef CONFIG_INT_LATENCY_BENCHMARK
 	call _int_latency_stop
@@ -520,8 +526,11 @@ SECTION_FUNC(TEXT, _SpuriousIntHandler)
 	 * anyway so that _ExcExit can "recover the stack pointer"
 	 * without determining whether the exception occurred while CPL=3
 	 */
+#ifndef CONFIG_X86_IAMCU
 	pushl %esp	/* push cur stack pointer: pEsf arg */
+#else
+	mov %esp, %edx
+#endif
 
 BRANCH_LABEL(finishSpuriousInt)
 
@@ -530,18 +539,18 @@ BRANCH_LABEL(finishSpuriousInt)
 	sti
 
 	/* push the 'unsigned int reason' parameter */
+#ifndef CONFIG_X86_IAMCU
 	pushl $_NANO_ERR_SPURIOUS_INT
+#else
+	movl $_NANO_ERR_SPURIOUS_INT, %eax
+#endif
 
 BRANCH_LABEL(callFatalHandler)
 
 	/* call the fatal error handler */
 
 	call _NanoFatalErrorHandler
 
-	/* handler shouldn't return, but call it again if it does */
+	/* handler doesn't return */
 
-	jmp callFatalHandler
-
 
 #if ALL_DYN_IRQ_STUBS > 0
 BRANCH_LABEL(_DynIntStubCommon)
@@ -582,12 +591,22 @@ stub_num = 0
 	 * stub_num to the irq stack
 	 */
 	call _IntEnt
+#if CONFIG_X86_IAMCU
+	/*
+	 * 2-byte mov imm8 to r8
+	 * Put the stub id in the lower 8 bits of EAX,
+	 * which will be the 1st arg of
+	 * _common_dynamic_irq_handlder.
+	 */
+	movb $stub_num, %al
+#else
 	/*
 	 * 2-byte push imm8. Consumed by
 	 * common_dynamic_handler(), see intconnect.c
 	 */
 
 	push $stub_num
+#endif
 
 	/*
 	 * Check to make sure this isn't the last stub in
@@ -93,6 +93,13 @@
  */
 
 SECTION_FUNC(TEXT, _Swap)
+#ifdef CONFIG_X86_IAMCU
+	/* save EFLAGS on stack right before return address, just as SYSV would
+	 * have done
+	 */
+	pushl 0(%esp)
+	movl %eax, 4(%esp)
+#endif
 	movl $_nanokernel, %eax
 
 	/*
@@ -375,4 +382,12 @@ BRANCH_LABEL(CROHandlingDone)
 BRANCH_LABEL(skipIntLatencyStop)
 #endif
 	popfl
+#if CONFIG_X86_IAMCU
+	/* Remember that eflags we stuck into the stack before the return
+	 * address? need to get it out of there since the calling convention
+	 * will not do that for us.
+	 */
+	popl %edx
+	movl %edx, (%esp)
+#endif
 	ret
@@ -41,7 +41,8 @@ tNANO _nanokernel = {0};
 
 /* forward declaration */
 
-#if defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO)
+#if defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO) \
+	|| defined(CONFIG_X86_IAMCU)
 void _thread_entry_wrapper(_thread_entry_t, _thread_arg_t,
 			   _thread_arg_t, _thread_arg_t);
 #endif
@@ -116,10 +117,13 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,
 	tcs->entry = (struct __thread_entry *)(pInitialCtx -
 					       sizeof(struct __thread_entry));
 #endif
-	/*
-	 * We subtract 11 here to account for the thread entry routine
-	 * parameters
-	 * (4 of them), eflags, eip, and the edi/esi/ebx/ebp/eax registers.
+	/* The stack needs to be set up so that when we do an initial switch
+	 * to it in the middle of _Swap(), it needs to be set up as follows:
+	 *  - 4 thread entry routine parameters
+	 *  - eflags
+	 *  - eip (so that _Swap() "returns" to the entry point)
+	 *  - edi, esi, ebx, ebp, eax
 	 */
 	pInitialCtx -= 11;
 
@@ -204,20 +208,25 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,
 	_nano_timeout_tcs_init(tcs);
 }
 
-#if defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO)
+#if defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO) \
+	|| defined(CONFIG_X86_IAMCU)
 /**
  *
- * @brief Adjust stack before invoking _thread_entry
+ * @brief Adjust stack/parameters before invoking _thread_entry
  *
- * This function adjusts the initial stack frame created by _new_thread()
- * such that the GDB stack frame unwinders recognize it as the outermost frame
- * in the thread's stack. The function then jumps to _thread_entry().
+ * This function adjusts the initial stack frame created by _new_thread() such
+ * that the GDB stack frame unwinders recognize it as the outermost frame in
+ * the thread's stack. For targets that use the IAMCU calling convention, the
+ * first three arguments are popped into eax, edx, and ecx. The function then
+ * jumps to _thread_entry().
  *
  * GDB normally stops unwinding a stack when it detects that it has
  * reached a function called main(). Kernel tasks, however, do not have
  * a main() function, and there does not appear to be a simple way of stopping
  * the unwinding of the stack.
  *
+ * SYS V Systems:
+ *
  * Given the initial thread created by _new_thread(), GDB expects to find a
  * return address on the stack immediately above the thread entry routine
  * _thread_entry, in the location occupied by the initial EFLAGS.
@@ -229,6 +238,20 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,
  * an invalid access to address zero and returns an error, which causes the
  * GDB stack unwinder to stop somewhat gracefully.
  *
+ * The initial EFLAGS cannot be overwritten until after _Swap() has swapped in
+ * the new thread for the first time. This routine is called by _Swap() the
+ * first time that the new thread is swapped in, and it jumps to
+ * _thread_entry after it has done its work.
+ *
+ * IAMCU Systems:
+ *
+ * There is no EFLAGS on the stack when we get here. _thread_entry() takes
+ * four arguments, and we need to pop off the first three into the
+ * appropriate registers. Instead of using the 'call' instruction, we push
+ * a NULL return address onto the stack and jump into _thread_entry,
+ * ensuring the stack won't be unwound further. Placing some kind of return
+ * address on the stack is mandatory so this isn't conditionally compiled.
+ *
  *  __________________
  * |      param3      | <------ Top of the stack
  * |__________________|
@@ -236,28 +259,12 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,
 * |__________________|    |
 * |      param1      |    V
 * |__________________|
-* |      pEntry      |
-* |__________________|
-* |  initial EFLAGS  | <----   ESP when invoked by _Swap()
-* |__________________|         (Zeroed by this routine)
-* |     entryRtn     | <-----  Thread Entry Routine invoked by _Swap()
-* |__________________|         (This routine if GDB_INFO)
-* |      <edi>       | \
-* |__________________|  |
-* |      <esi>       |  |
-* |__________________|  |
-* |      <ebx>       |  |---- Initial registers restored by _Swap()
-* |__________________|  |
-* |      <ebp>       |  |
-* |__________________|  |
-* |      <eax>       | /
+* |      pEntry      | <----   ESP when invoked by _Swap() on IAMCU
 * |__________________|
+* |  initial EFLAGS  | <----   ESP when invoked by _Swap() on Sys V
+* |__________________|         (Zeroed by this routine on Sys V)
 *
 *
-* The initial EFLAGS cannot be overwritten until after _Swap() has swapped in
-* the new thread for the first time. This routine is called by _Swap() the
-* first time that the new thread is swapped in, and it jumps to
-* _thread_entry after it has done its work.
 *
 * @return this routine does NOT return.
 */
@@ -266,9 +273,20 @@ __asm__("\t.globl _thread_entry\n"
 	"_thread_entry_wrapper:\n" /* should place this func .S file and use
 				    * SECTION_FUNC
 				    */
 
+#ifdef CONFIG_X86_IAMCU
+	/* IAMCU calling convention has first 3 arguments supplied in
+	 * registers not the stack
+	 */
+	"\tpopl %eax\n"
+	"\tpopl %edx\n"
+	"\tpopl %ecx\n"
+	"\tpushl $0\n" /* Null return address */
+#elif defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO)
 	"\tmovl $0, (%esp)\n" /* zero initialEFLAGS location */
+#endif
 	"\tjmp _thread_entry\n");
-#endif /* defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO) */
+#endif /* CONFIG_GDB_INFO || CONFIG_DEBUG_INFO) || CONFIG_X86_IAMCU */
 
 /**
  *
@@ -326,8 +344,8 @@ void _new_thread(char *pStackMem, unsigned stackSize,
 
 	*--pInitialThread = (EflagsGet() & ~EFLAGS_MASK) | EFLAGS_INITIAL;
 
-#if defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO)
+#if defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO) \
+	|| defined(CONFIG_X86_IAMCU)
 	/*
 	 * Arrange for the _thread_entry_wrapper() function to be called
 	 * to adjust the stack before _thread_entry() is invoked.
@@ -161,40 +161,18 @@ typedef struct s_isrList {
 
 
 /**
- * Inline assembly code for the interrupt stub
+ * Assembly code to populate ISR's argument
  *
- * This is the actual assembly code which gets run when the interrupt
- * is triggered. Due to different calling convention semantics we have
- * different versions for IAMCU and SYSV.
- *
- * For IAMCU case, we call _execute_handler() with the isr and its argument
- * as parameters.
- *
- * For SysV case, we first call _IntEnt to properly enter Zephyr's interrupt
- * handling context, and then directly call the isr. A jump is done to
- * _IntExitWithEoi which does EOI to the interrupt controller, restores
- * context, and finally does 'iret'.
+ * IAMCU it goes in EAX. Sys V on the stack, _IntExitWithEoi pops it.
  *
  * This is only intended to be used by the IRQ_CONNECT() macro.
  */
-#if CONFIG_X86_IAMCU
-#define _IRQ_STUB_ASM \
-	"pushl %%eax\n\t" \
-	"pushl %%edx\n\t" \
-	"pushl %%ecx\n\t" \
-	"movl %[isr], %%eax\n\t" \
-	"movl %[isr_param], %%edx\n\t" \
-	"call _execute_handler\n\t" \
-	"popl %%ecx\n\t" \
-	"popl %%edx\n\t" \
-	"popl %%eax\n\t" \
-	"iret\n\t"
+#ifdef CONFIG_X86_IAMCU
+#define _ISR_ARG_ASM \
+	"movl %[isr_param], %%eax\n\t"
 #else
-#define _IRQ_STUB_ASM \
-	"call _IntEnt\n\t" \
-	"pushl %[isr_param]\n\t" \
-	"call %P[isr]\n\t" \
-	"jmp _IntExitWithEoi\n\t"
+#define _ISR_ARG_ASM \
+	"pushl %[isr_param]\n\t"
 #endif /* CONFIG_X86_IAMCU */
 
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
@@ -251,6 +229,11 @@ typedef struct s_isrList {
  * of the calling function due to the initial 'jmp' instruction at the
  * beginning of the assembly block, but a pointer to it gets saved in the IDT.
  *
+ * The stub calls _IntEnt to save interrupted context and switch to the IRQ
+ * stack. There is a calling convention specific macro to populate the
+ * argument to the ISR, and then the ISR is called. _IntExitWithEoi
+ * restores calling context and 'iret's.
+ *
  * 4. _SysIntVecProgram() is called at runtime to set the mapping between
  *    the vector and the IRQ line.
  *
@@ -275,7 +258,10 @@ typedef struct s_isrList {
 	".popsection\n\t" \
 	"1:\n\t" \
 	_IRQ_STUB_LABEL \
-	_IRQ_STUB_ASM \
+	"call _IntEnt\n\t" \
+	_ISR_ARG_ASM \
+	"call %P[isr]\n\t" \
+	"jmp _IntExitWithEoi\n\t" \
 	"2:\n\t" \
 	: \
 	: [isr] "i" (isr_p), \
@@ -49,26 +49,18 @@ SYS_NANO_CPU_EXC_CONNECT(exc_divide_error_handler,IV_DIVIDE_ERROR)
 
 
 GTEXT(nanoIntStub)
-#if !defined(CONFIG_X86_IAMCU)
 SECTION_FUNC(TEXT, nanoIntStub)
 	call _IntEnt
-	pushl $0
-	call isr_handler
-	addl $4, %esp
-	jmp _IntExit
+#ifdef CONFIG_X86_IAMCU
+	movl $0, %eax
 #else
-SECTION_FUNC(TEXT, nanoIntStub)
-	pushl %eax
-	pushl %edx
-	pushl %ecx
-	movl $isr_handler, %eax
-	movl $0, %edx
-	call _execute_handler
-	pop %ecx
-	pop %edx
-	pop %eax
-	iret
+	pushl $0
 #endif
+	call isr_handler
+#ifndef CONFIG_X86_IAMCU
+	addl $4, %esp
+#endif
+	jmp _IntExit
 #else
 
 #error Arch not supported