2015-04-10 16:44:37 -07:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2013-2014 Wind River Systems, Inc.
|
|
|
|
*
|
2017-01-18 17:01:01 -08:00
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
2015-04-10 16:44:37 -07:00
|
|
|
*/
|
|
|
|
|
2015-12-04 10:09:39 -05:00
|
|
|
/**
|
|
|
|
* @file
|
2017-03-11 19:33:29 +03:00
|
|
|
* @brief ARM Cortex-M interrupt management
|
2015-12-04 10:09:39 -05:00
|
|
|
*
|
2015-10-20 09:42:33 -07:00
|
|
|
*
|
|
|
|
* Interrupt management: enabling/disabling and dynamic ISR
|
|
|
|
* connecting/replacing. SW_ISR_TABLE_DYNAMIC has to be enabled for
|
|
|
|
* connecting ISRs at runtime.
|
2015-07-01 17:22:39 -04:00
|
|
|
*/
|
2015-04-10 16:44:37 -07:00
|
|
|
|
2016-12-23 08:35:34 -05:00
|
|
|
#include <kernel.h>
|
2015-05-28 10:56:47 -07:00
|
|
|
#include <arch/cpu.h>
|
2017-01-12 15:14:33 -06:00
|
|
|
#include <arch/arm/cortex_m/cmsis.h>
|
2015-04-10 16:44:37 -07:00
|
|
|
#include <misc/__assert.h>
|
|
|
|
#include <toolchain.h>
|
|
|
|
#include <sections.h>
|
|
|
|
#include <sw_isr_table.h>
|
2016-02-25 13:21:02 -08:00
|
|
|
#include <irq.h>
|
2017-02-09 12:40:08 -08:00
|
|
|
#include <kernel_structs.h>
|
|
|
|
#include <logging/kernel_event_logger.h>
|
2015-04-10 16:44:37 -07:00
|
|
|
|
|
|
|
extern void __reserved(void);
|
|
|
|
|
2017-01-12 15:14:33 -06:00
|
|
|
/* Each NVIC ISER/ICER/ISPR register covers 32 interrupt lines */
#define NUM_IRQS_PER_REG 32
/* Index of the 32-bit NVIC register holding <irq>'s bit.
 * Argument parenthesized so expressions like REG_FROM_IRQ(a + b)
 * evaluate correctly (division binds tighter than addition).
 */
#define REG_FROM_IRQ(irq) ((irq) / NUM_IRQS_PER_REG)
/* Bit position of <irq> within its NVIC register */
#define BIT_FROM_IRQ(irq) ((irq) % NUM_IRQS_PER_REG)
|
2015-04-10 16:44:37 -07:00
|
|
|
|
2015-07-01 17:22:39 -04:00
|
|
|
/**
|
|
|
|
*
|
2015-07-01 17:51:40 -04:00
|
|
|
* @brief Enable an interrupt line
|
2015-07-01 17:22:39 -04:00
|
|
|
*
|
2016-09-01 20:44:36 +02:00
|
|
|
* Enable the interrupt. After this call, the CPU will receive interrupts for
|
|
|
|
* the specified <irq>.
|
2015-07-01 17:22:39 -04:00
|
|
|
*
|
2015-07-01 17:29:04 -04:00
|
|
|
* @return N/A
|
2015-07-01 17:22:39 -04:00
|
|
|
*/
|
2016-02-25 13:21:02 -08:00
|
|
|
void _arch_irq_enable(unsigned int irq)
|
2015-04-10 16:44:37 -07:00
|
|
|
{
|
2017-01-12 15:14:33 -06:00
|
|
|
NVIC_EnableIRQ((IRQn_Type)irq);
|
2015-04-10 16:44:37 -07:00
|
|
|
}
|
|
|
|
|
2015-07-01 17:22:39 -04:00
|
|
|
/**
|
|
|
|
*
|
2015-07-01 17:51:40 -04:00
|
|
|
* @brief Disable an interrupt line
|
2015-07-01 17:22:39 -04:00
|
|
|
*
|
|
|
|
* Disable an interrupt line. After this call, the CPU will stop receiving
|
|
|
|
* interrupts for the specified <irq>.
|
|
|
|
*
|
2015-07-01 17:29:04 -04:00
|
|
|
* @return N/A
|
2015-07-01 17:22:39 -04:00
|
|
|
*/
|
2016-02-25 13:21:02 -08:00
|
|
|
void _arch_irq_disable(unsigned int irq)
|
2015-04-10 16:44:37 -07:00
|
|
|
{
|
2017-01-12 15:14:33 -06:00
|
|
|
NVIC_DisableIRQ((IRQn_Type)irq);
|
2015-04-10 16:44:37 -07:00
|
|
|
}
|
|
|
|
|
2016-09-10 12:38:36 +02:00
|
|
|
/**
|
|
|
|
* @brief Return IRQ enable state
|
|
|
|
*
|
|
|
|
* @param irq IRQ line
|
|
|
|
* @return interrupt enable state, true or false
|
|
|
|
*/
|
|
|
|
int _arch_irq_is_enabled(unsigned int irq)
|
|
|
|
{
|
2017-01-12 15:14:33 -06:00
|
|
|
return NVIC->ISER[REG_FROM_IRQ(irq)] & (1 << BIT_FROM_IRQ(irq));
|
2016-09-10 12:38:36 +02:00
|
|
|
}
|
|
|
|
|
2015-07-01 17:22:39 -04:00
|
|
|
/**
 * @internal
 *
 * @brief Set an interrupt's priority
 *
 * The priority is verified if ASSERT_ON is enabled. The maximum number
 * of priority levels is a little complex, as there are some hardware
 * priority levels which are reserved: three for various types of exceptions,
 * and possibly one additional to support zero latency interrupts.
 *
 * @param irq interrupt line number
 * @param prio requested priority level; 0 is the highest level a caller
 *             may request, before the kernel's reserved-level offset is added
 * @param flags IRQ flags (e.g. IRQ_ZERO_LATENCY when supported)
 *
 * @return N/A
 */
void _irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
{
	/* Hardware priority levels 0 and 1 reserved for Kernel use.
	 * So we add 2 to the requested priority level. If we support
	 * ZLI, 2 is also reserved so we add 3.
	 * (_IRQ_PRIO_OFFSET carries the correct amount for the active config.)
	 */

#if CONFIG_ZERO_LATENCY_IRQS
	/* If we have zero latency interrupts, that makes priority level 2
	 * a case with special semantics; it is not masked by irq_lock().
	 * Our policy is to express priority levels with special properties
	 * via flags
	 */
	if (flags & IRQ_ZERO_LATENCY) {
		/* ZLI interrupts pin to level 2, regardless of requested prio */
		prio = 2;
	} else {
		prio += _IRQ_PRIO_OFFSET;
	}
#else
	ARG_UNUSED(flags);
	prio += _IRQ_PRIO_OFFSET;
#endif
	/* The last priority level is also used by PendSV exception, but
	 * allow other interrupts to use the same level, even if it ends up
	 * affecting performance (can still be useful on systems with a
	 * reduced set of priorities, like Cortex-M0/M0+).
	 */
	__ASSERT(prio <= ((1 << CONFIG_NUM_IRQ_PRIO_BITS) - 1),
		 "invalid priority %d! values must be less than %d\n",
		 prio - _IRQ_PRIO_OFFSET,
		 (1 << CONFIG_NUM_IRQ_PRIO_BITS) - (_IRQ_PRIO_OFFSET));
	NVIC_SetPriority((IRQn_Type)irq, prio);
}
|
|
|
|
|
2015-07-01 17:22:39 -04:00
|
|
|
/**
 *
 * @brief Spurious interrupt handler
 *
 * Installed in all dynamic interrupt slots at boot time. Any invocation
 * means an interrupt fired with no real handler connected, so this simply
 * escalates to a fatal error.
 *
 * See __reserved().
 *
 * @param unused unused ISR argument
 *
 * @return N/A
 */
void _irq_spurious(void *unused)
{
	ARG_UNUSED(unused);

	__reserved();
}
|
|
|
|
|
2017-02-09 12:40:08 -08:00
|
|
|
/* FIXME: IRQ direct inline functions have to be placed here and not in
|
|
|
|
* arch/cpu.h as inline functions due to nasty circular dependency between
|
|
|
|
* arch/cpu.h and kernel_structs.h; the inline functions typically need to
|
|
|
|
* perform operations on _kernel. For now, leave as regular functions, a
|
|
|
|
* future iteration will resolve this.
|
|
|
|
* We have a similar issue with the k_event_logger functions.
|
|
|
|
*
|
|
|
|
* See https://jira.zephyrproject.org/browse/ZEP-1595
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifdef CONFIG_SYS_POWER_MANAGEMENT
|
|
|
|
void _arch_isr_direct_pm(void)
|
|
|
|
{
|
|
|
|
#if defined(CONFIG_ARMV6_M)
|
|
|
|
int key;
|
|
|
|
|
|
|
|
/* irq_lock() does what we wan for this CPU */
|
|
|
|
key = irq_lock();
|
|
|
|
#elif defined(CONFIG_ARMV7_M)
|
|
|
|
/* Lock all interrupts. irq_lock() will on this CPU only disable those
|
|
|
|
* lower than BASEPRI, which is not what we want. See comments in
|
|
|
|
* arch/arm/core/isr_wrapper.S
|
|
|
|
*/
|
|
|
|
__asm__ volatile("cpsid i" : : : "memory");
|
|
|
|
#else
|
|
|
|
#error Unknown ARM architecture
|
|
|
|
#endif /* CONFIG_ARMV6_M */
|
|
|
|
|
|
|
|
if (_kernel.idle) {
|
|
|
|
int32_t idle_val = _kernel.idle;
|
|
|
|
|
|
|
|
_kernel.idle = 0;
|
|
|
|
_sys_power_save_idle_exit(idle_val);
|
|
|
|
}
|
|
|
|
|
|
|
|
#if defined(CONFIG_ARMV6_M)
|
|
|
|
irq_unlock(key);
|
|
|
|
#elif defined(CONFIG_ARMV7_M)
|
|
|
|
__asm__ volatile("cpsie i" : : : "memory");
|
|
|
|
#else
|
|
|
|
#error Unknown ARM architecture
|
|
|
|
#endif /* CONFIG_ARMV6_M */
|
|
|
|
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if defined(CONFIG_KERNEL_EVENT_LOGGER_SLEEP) || \
	defined(CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT)
/**
 * @brief Kernel event logger hook for direct ISRs
 *
 * Records the interrupt occurrence and the exit-from-sleep event (each call
 * is a no-op internally when its corresponding logger option is disabled).
 * Invoked at the top of every direct ISR; see the circular-dependency FIXME
 * above for why this is a regular function rather than an inline.
 */
void _arch_isr_direct_header(void)
{
	_sys_k_event_logger_interrupt();
	_sys_k_event_logger_exit_sleep();
}
#endif
|
|
|
|
|