/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Interrupt management support for IA-32 architecture
 *
 * This module implements assembly routines to manage interrupts on
 * the Intel IA-32 architecture. More specifically, the interrupt (asynchronous
 * exception) stubs are implemented in this module. The stubs are invoked when
 * entering and exiting a C interrupt handler.
 */

#define LOAPIC_BASE_ADDRESS DT_REG_ADDR(DT_NODELABEL(intc_loapic))

#include <zephyr/arch/x86/ia32/asm.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/drivers/interrupt_controller/sysapic.h>

/* exports (internal APIs) */

	GTEXT(_interrupt_enter)
	GTEXT(z_SpuriousIntNoErrCodeHandler)
	GTEXT(z_SpuriousIntHandler)
	GTEXT(_irq_sw_handler)
	GTEXT(z_dynamic_stubs_begin)

/* externs */

	GTEXT(arch_swap)

#ifdef CONFIG_PM
	GTEXT(pm_system_resume)
#endif

/**
 *
 * @brief Inform the kernel of an interrupt
 *
 * This function is called from the interrupt stub created by IRQ_CONNECT()
 * to inform the kernel of an interrupt. This routine increments
 * _kernel.nested (to support interrupt nesting), switches to the base of
 * the interrupt stack if not already on the interrupt stack, and then saves
 * the volatile integer registers onto the stack. Finally, control is
 * returned to the interrupt stub code (which will then invoke the
 * "application" interrupt service routine).
 *
 * Only the volatile integer registers are saved since ISRs are assumed not to
 * utilize floating point (or SSE) instructions.
 *
 * WARNINGS
 *
 * Host-based tools and the target-based GDB agent depend on the stack frame
 * created by this routine to determine the locations of volatile registers.
 * These tools must be updated to reflect any changes to the stack frame.
 *
 * C function prototype:
 *
 * void _interrupt_enter(void *isr, void *isr_param);
 */
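
/*
 * For illustration only: a stub that dispatches here pushes its two
 * arguments and then jumps to _interrupt_enter, roughly like the sketch
 * below (actual stubs are generated by IRQ_CONNECT(), or appear later in
 * this file as _irq_sw_handler and the dynamic IRQ stubs):
 *
 *	push $isr_param		# argument handed to the C handler
 *	push $isr		# address of the C handler itself
 *	jmp _interrupt_enter
 */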

SECTION_FUNC(PINNED_TEXT, _interrupt_enter)
	/*
	 * Note that the processor has pushed both the EFLAGS register
	 * and the logical return address (cs:eip) onto the stack prior
	 * to invoking the handler specified in the IDT. The stack looks
	 * like this:
	 *
	 *  24 SS (only on privilege level change)
	 *  20 ESP (only on privilege level change)
	 *  16 EFLAGS
	 *  12 CS
	 *   8 EIP
	 *   4 isr_param
	 *   0 isr <-- stack pointer
	 */

	/*
	 * The gen_idt tool creates an interrupt-gate descriptor for
	 * all connections. The processor will automatically clear the IF
	 * bit in the EFLAGS register upon execution of the handler, hence
	 * there is no need to issue a 'cli' as the first instruction.
	 *
	 * Clear the direction flag. It is automatically restored when the
	 * interrupt exits via the IRET instruction.
	 */

	cld

#ifdef CONFIG_X86_KPTI
	call	z_x86_trampoline_to_kernel
#endif

	/*
	 * Swap EAX with isr_param and EDX with isr.
	 * Push ECX onto the stack.
	 */
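
	/*
	 * The exchanges below do double duty: they preserve the interrupted
	 * thread's EAX/EDX in the stack slots that held isr_param/isr, while
	 * loading those two stub arguments into registers, so no extra
	 * pushes are needed for these caller-saved registers.
	 */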
	xchgl	%eax, 4(%esp)
	xchgl	%edx, (%esp)
	pushl	%ecx

	/* Now the stack looks like:
	 *
	 * EFLAGS
	 * CS
	 * EIP
	 * saved EAX
	 * saved EDX
	 * saved ECX
	 *
	 * EAX = isr_param, EDX = isr
	 */

	/* Push EDI as we will use it for scratch space.
	 * The rest of the callee-saved regs get saved by the invocation of
	 * C functions (the isr handler, arch_swap(), etc.)
	 */
	pushl	%edi

	/* load %ecx with &_kernel */
	movl	$_kernel, %ecx

	/* switch to the interrupt stack for the non-nested case */
	incl	_kernel_offset_to_nested(%ecx)

	/* use interrupt stack if not nested */
	cmpl	$1, _kernel_offset_to_nested(%ecx)
	jne	alreadyOnIntStack

	/*
	 * switch to base of the interrupt stack: save esp in edi, then load
	 * irq_stack pointer
	 */
	movl	%esp, %edi
	movl	_kernel_offset_to_irq_stack(%ecx), %esp

	/* save thread's stack pointer onto base of interrupt stack */
	pushl	%edi		/* Save stack pointer */
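
	/*
	 * This saved pointer is what the exit paths below pop back into ESP
	 * ("popl %esp") to return to the interrupted thread's stack, on both
	 * the reschedule and the noReschedule paths.
	 */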

#ifdef CONFIG_PM
	cmpl	$0, _kernel_offset_to_idle(%ecx)
	jne	handle_idle
	/* fast path is !idle, in the pipeline */
#endif /* CONFIG_PM */

	/* fall through to nested case */

alreadyOnIntStack:

	push	%eax	/* interrupt handler argument */

#if defined(CONFIG_TRACING_ISR)
	/* Save these since we are using them to keep track of isr and isr_param */
	pushl	%eax
	pushl	%edx
	call	sys_trace_isr_enter
	popl	%edx
	popl	%eax
#endif

#ifdef CONFIG_NESTED_INTERRUPTS
	sti			/* re-enable interrupts */
#endif
	/* Now call the interrupt handler */
	call	*%edx
	/* Discard ISR argument */
	addl	$0x4, %esp
#ifdef CONFIG_NESTED_INTERRUPTS
	cli			/* disable interrupts again */
#endif

#if defined(CONFIG_TRACING_ISR)
	pushl	%eax
	call	sys_trace_isr_exit
	popl	%eax
#endif

#if defined(CONFIG_X86_RUNTIME_IRQ_STATS)
	/*
	 * The runtime_irq_stats() function should be implemented by the
	 * platform when this config option is enabled.
	 */
	pushl	%eax
	call	runtime_irq_stats
	popl	%eax
#endif
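
	/*
	 * Signal end-of-interrupt to the local APIC. EAX is zeroed and
	 * written to the EOI register: in x2APIC mode this is an MSR write
	 * (the x2APIC MSR index is the xAPIC MMIO offset shifted right by
	 * four, added to X86_X2APIC_BASE_MSR), otherwise it is an MMIO
	 * write of zero to the LOAPIC EOI register.
	 */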
	xorl	%eax, %eax
#if defined(CONFIG_X2APIC)
	xorl	%edx, %edx
	movl	$(X86_X2APIC_BASE_MSR + (LOAPIC_EOI >> 4)), %ecx
	wrmsr
#else /* xAPIC */
#ifdef DEVICE_MMIO_IS_IN_RAM
	movl	Z_TOPLEVEL_RAM_NAME(LOAPIC_REGS_STR), %edx
	movl	%eax, LOAPIC_EOI(%edx)
#else
	movl	%eax, (LOAPIC_BASE_ADDRESS + LOAPIC_EOI)
#endif /* DEVICE_MMIO_IS_IN_RAM */
#endif /* CONFIG_X2APIC */

	/* determine whether exiting from a nested interrupt */
	movl	$_kernel, %ecx
	decl	_kernel_offset_to_nested(%ecx)	/* dec interrupt nest count */
	jne	nestedInterrupt			/* 'iret' if nested case */

#ifdef CONFIG_PREEMPT_ENABLED
	movl	_kernel_offset_to_current(%ecx), %edx

	/* reschedule only if the scheduler says that we must do so */
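	/*
	 * _kernel.ready_q.cache holds the thread the scheduler has selected
	 * to run next; if it is still the interrupted thread, no context
	 * switch is needed.
	 */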
	cmpl	%edx, _kernel_offset_to_ready_q_cache(%ecx)
	je	noReschedule

	/*
	 * Set X86_THREAD_FLAG_INT bit in k_thread to allow the upcoming call
	 * to arch_swap() to determine whether non-floating registers need to
	 * be preserved using the lazy save/restore algorithm, or to indicate
	 * to debug tools that a preemptive context switch has occurred.
	 */

#if defined(CONFIG_LAZY_FPU_SHARING)
	orb	$X86_THREAD_FLAG_INT, _thread_offset_to_flags(%edx)
#endif

	/*
	 * A context reschedule is required: keep the volatile registers of
	 * the interrupted thread on the context's stack. Utilize
	 * the existing arch_swap() primitive to save the remaining
	 * thread's registers (including floating point) and perform
	 * a switch to the new thread.
	 */

	popl	%esp	/* switch back to outgoing thread's stack */

#ifdef CONFIG_STACK_SENTINEL
	call	z_check_stack_sentinel
#endif

	pushfl			/* push KERNEL_LOCK_KEY argument */
	call	arch_swap
	addl	$4, %esp	/* pop KERNEL_LOCK_KEY argument */
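
	/*
	 * In C terms the three instructions above roughly amount to calling
	 * arch_swap(eflags): the saved EFLAGS image doubles as the interrupt
	 * lock key that arch_swap() expects as its argument.
	 */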

	/*
	 * The interrupted thread has now been scheduled again,
	 * as the result of a _later_ invocation of arch_swap().
	 *
	 * Now we need to restore the interrupted thread's environment before
	 * returning control to it at the point where it was interrupted ...
	 */

#if defined(CONFIG_LAZY_FPU_SHARING)
	/*
	 * arch_swap() has restored the floating point registers, if needed.
	 * Clear X86_THREAD_FLAG_INT in the interrupted thread's state
	 * since it has served its purpose.
	 */
	movl	_kernel + _kernel_offset_to_current, %eax
	andb	$~X86_THREAD_FLAG_INT, _thread_offset_to_flags(%eax)
#endif /* CONFIG_LAZY_FPU_SHARING */

	/* Restore volatile registers and return to the interrupted thread */
	popl	%edi
	popl	%ecx
	popl	%edx
	popl	%eax

	/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
	KPTI_IRET

#endif /* CONFIG_PREEMPT_ENABLED */

noReschedule:

	/*
	 * A thread reschedule is not required; switch back to the
	 * interrupted thread's stack and restore volatile registers
	 */
	popl	%esp		/* pop thread stack pointer */

#ifdef CONFIG_STACK_SENTINEL
	call	z_check_stack_sentinel
#endif

	/* fall through to 'nestedInterrupt' */

	/*
	 * For the nested interrupt case, the interrupt stack must still be
	 * utilized, and more importantly, a rescheduling decision must
	 * not be performed.
	 */

nestedInterrupt:
	popl	%edi
	popl	%ecx		/* pop volatile registers in reverse order */
	popl	%edx
	popl	%eax

	/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
	KPTI_IRET

#ifdef CONFIG_PM
handle_idle:
	pushl	%eax
	pushl	%edx

	/* Zero out _kernel.idle */
	movl	$0, _kernel_offset_to_idle(%ecx)

	/*
	 * Beware that a timer driver's pm_system_resume() implementation might
	 * expect that interrupts are disabled when invoked. This ensures that
	 * the calculation and programming of the device for the next timer
	 * deadline is not interrupted.
	 */
	call	pm_system_resume
	popl	%edx
	popl	%eax
	jmp	alreadyOnIntStack
#endif /* CONFIG_PM */

/**
 *
 * z_SpuriousIntHandler -
 * @brief Spurious interrupt handler stubs
 *
 * Interrupt-gate descriptors are statically created for all slots in the IDT
 * that point to z_SpuriousIntHandler() or z_SpuriousIntNoErrCodeHandler(). The
 * former stub is connected to exception vectors where the processor pushes an
 * error code onto the stack (or kernel stack) in addition to the EFLAGS/CS/EIP
 * records.
 *
 * A spurious interrupt is considered a fatal condition; there is no provision
 * to return to the interrupted execution context and thus the volatile
 * registers are not saved.
 *
 * @return Never returns
 *
 * C function prototype:
 *
 * void z_SpuriousIntHandler(void);
 *
 * INTERNAL
 * The gen_idt tool creates an interrupt-gate descriptor for all
 * connections. The processor will automatically clear the IF bit
 * in the EFLAGS register upon execution of the handler,
 * thus z_SpuriousIntNoErrCodeHandler()/z_SpuriousIntHandler() shall be
 * invoked with interrupts disabled.
 */
SECTION_FUNC(PINNED_TEXT, z_SpuriousIntNoErrCodeHandler)

	pushl	$0		/* push dummy err code onto stk */

	/* fall through to z_SpuriousIntHandler */

SECTION_FUNC(PINNED_TEXT, z_SpuriousIntHandler)

	cld			/* Clear direction flag */

	/* Create the ESF */
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	%edi
	pushl	%esi
	pushl	%ebx
	pushl	%ebp

	leal	44(%esp), %ecx	/* Calculate ESP before exception occurred */
	pushl	%ecx		/* Save calculated ESP */
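
	/*
	 * The 44-byte offset above skips the seven GPRs just pushed (28
	 * bytes) plus the error code, EIP, CS and EFLAGS records already on
	 * the stack (16 bytes), recovering the stack pointer value at the
	 * time the spurious interrupt was taken (assuming no privilege
	 * level change).
	 */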

	pushl	%esp		/* push cur stack pointer: pEsf arg */

	/* re-enable interrupts */
	sti

	/* call the fatal error handler */
	call	z_x86_spurious_irq

	/* handler doesn't return */

#if CONFIG_IRQ_OFFLOAD
SECTION_FUNC(PINNED_TEXT, _irq_sw_handler)
	push	$0
	push	$z_irq_do_offload
	jmp	_interrupt_enter

#endif

#if CONFIG_X86_DYNAMIC_IRQ_STUBS > 0
z_dynamic_irq_stub_common:
	/* stub number already pushed */
	push	$z_x86_dynamic_irq_handler
	jmp	_interrupt_enter
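
	/*
	 * Each dynamic stub below pushes its own stub number before jumping
	 * here, so that number reaches z_x86_dynamic_irq_handler() as the
	 * isr_param argument described at the top of _interrupt_enter.
	 */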

/* Create all the dynamic IRQ stubs
 *
 * NOTE: Please update DYN_STUB_SIZE in include/arch/x86/ia32/arch.h if you
 * change how large the generated stubs are, otherwise _get_dynamic_stub()
 * will be unable to correctly determine the offset
 */
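
/*
 * Roughly, each generated block then looks like the sketch below (the exact
 * number of stubs per block is Z_DYN_STUB_PER_BLOCK):
 *
 *	z_dynamic_irq_stub_0:	push $0		# 2-byte push imm8
 *				jmp 1f		# 2-byte jmp rel8
 *	z_dynamic_irq_stub_1:	push $1
 *				jmp 1f
 *	...
 *	z_dynamic_irq_stub_N:	push $N		# last stub falls through
 *	1:	jmp z_dynamic_irq_stub_common	# 5-byte jmp rel32
 */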

/*
 * Create nice labels for all the stubs so we can see where we
 * are in a debugger
 */
.altmacro
.macro __INT_STUB_NUM id
z_dynamic_irq_stub_\id:
.endm
.macro INT_STUB_NUM id
__INT_STUB_NUM %id
.endm

z_dynamic_stubs_begin:
stub_num = 0
.rept ((CONFIG_X86_DYNAMIC_IRQ_STUBS + Z_DYN_STUB_PER_BLOCK - 1) / Z_DYN_STUB_PER_BLOCK)
	block_counter = 0
	.rept Z_DYN_STUB_PER_BLOCK
		.if stub_num < CONFIG_X86_DYNAMIC_IRQ_STUBS
			INT_STUB_NUM stub_num
			/*
			 * 2-byte push imm8.
			 */
			push	$stub_num

			/*
			 * Check to make sure this isn't the last stub in
			 * a block, in which case we just fall through
			 */
			.if (block_counter <> (Z_DYN_STUB_PER_BLOCK - 1) && \
			     (stub_num <> CONFIG_X86_DYNAMIC_IRQ_STUBS - 1))
				/* This should always be a 2-byte jmp rel8 */
				jmp	1f
			.endif
			stub_num = stub_num + 1
			block_counter = block_counter + 1
		.endif
	.endr
	/*
	 * This must be a 5-byte jmp rel32, which is why z_dynamic_irq_stub_common
	 * is before the actual stubs
	 */
1:	jmp	z_dynamic_irq_stub_common
.endr
#endif /* CONFIG_X86_DYNAMIC_IRQ_STUBS > 0 */