kernel: add architecture interface headers

include/sys/arch_inlines.h will contain all architecture APIs
that are used by public inline functions and macros,
with implementations deriving from include/arch/cpu.h.

kernel/include/arch_interface.h will contain everything
else, with implementations deriving from
arch/*/include/kernel_arch_func.h.

Instances of duplicate documentation for these APIs have been
removed; implementation details have been left in place.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
commit 8ffff144ea
Andrew Boie authored 2019-10-03 10:08:13 -07:00; committed by Anas Nashif
37 changed files with 925 additions and 960 deletions
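For orientation, here is a standalone mock of the layering this split establishes: a public inline kernel API calls an arch API whose prototype lives in include/sys/arch_inlines.h and whose body is supplied via the arch headers. The typedef, mock timer value, and main() are stand-ins for illustration, not Zephyr code:

```c
#include <stdint.h>

typedef uint32_t u32_t;	/* stand-in for Zephyr's fixed-width alias */

/* stand-in for the timer driver's counter read */
static u32_t z_timer_cycle_get_32(void) { return 42; }

/* arch layer: the definition behind the prototype in arch_inlines.h */
static inline u32_t z_arch_k_cycle_get_32(void)
{
	return z_timer_cycle_get_32();
}

/* public layer: an inline kernel API implemented via the arch API */
static inline u32_t k_cycle_get_32(void)
{
	return z_arch_k_cycle_get_32();
}

int main(void)
{
	return k_cycle_get_32() == 42 ? 0 : 1;
}
```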


@@ -259,12 +259,6 @@ void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread)
 }

 #if defined(CONFIG_USERSPACE)
-/**
- * @brief Get the maximum number of partitions for a memory domain
- * that is supported by the MPU hardware, and with respect
- * to the current static memory region configuration.
- */
 int z_arch_mem_domain_max_partitions_get(void)
 {
 	int available_regions = arm_core_mpu_get_max_available_dyn_regions();
@@ -280,9 +274,6 @@ int z_arch_mem_domain_max_partitions_get(void)
 	return ARM_CORE_MPU_MAX_DOMAIN_PARTITIONS_GET(available_regions);
 }

-/**
- * @brief Configure the memory domain of the thread.
- */
 void z_arch_mem_domain_thread_add(struct k_thread *thread)
 {
 	if (_current != thread) {
@@ -296,12 +287,6 @@ void z_arch_mem_domain_thread_add(struct k_thread *thread)
 	z_arm_configure_dynamic_mpu_regions(thread);
 }

-/*
- * @brief Reset the MPU configuration related to the memory domain
- * partitions
- *
- * @param domain pointer to the memory domain (must be valid)
- */
 void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
 {
 	/* This function will reset the access permission configuration
@@ -332,13 +317,6 @@ void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
 	}
 }

-/*
- * @brief Remove a partition from the memory domain
- *
- * @param domain pointer to the memory domain (must be valid
- * @param partition_id the ID (sequence) number of the memory domain
- * partition (must be a valid partition).
- */
 void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
 					u32_t partition_id)
 {
@@ -371,9 +349,6 @@ void z_arch_mem_domain_thread_remove(struct k_thread *thread)
 	z_arch_mem_domain_destroy(thread->mem_domain_info.mem_domain);
 }

-/*
- * Validate the given buffer is user accessible or not
- */
 int z_arch_buffer_validate(void *addr, size_t size, int write)
 {
 	return arm_core_mpu_buffer_validate(addr, size, write);
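z_arch_mem_domain_max_partitions_get() above derives the domain-partition limit from the MPU regions left after static configuration. A standalone mock of how a portable caller might clamp against that limit follows; the region counts and the reserved-region adjustment are invented for illustration:

```c
#include <stdio.h>

/* mock of the arch query: MPU regions left after static configuration */
static int arm_core_mpu_get_max_available_dyn_regions(void) { return 6; }

static int z_arch_mem_domain_max_partitions_get(void)
{
	/* assume one dynamic region is reserved for other uses
	 * (e.g. a stack guard); purely illustrative
	 */
	return arm_core_mpu_get_max_available_dyn_regions() - 1;
}

int main(void)
{
	int want = 8;
	int max = z_arch_mem_domain_max_partitions_get();

	/* a portable caller sizes its memory domain against the arch limit */
	printf("using %d of %d requested partitions\n",
	       want > max ? max : want, want);
	return 0;
}
```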


@@ -50,22 +50,6 @@ SECTION_FUNC(TEXT, z_arm_cpu_idle_init)
 #endif
 	bx lr

-/**
- *
- * @brief Power save idle routine for ARM Cortex-M
- *
- * This function will be called by the kernel idle loop or possibly within
- * an implementation of _sys_power_save_idle in the kernel when the
- * '_sys_power_save_flag' variable is non-zero. The ARM 'wfi' instruction
- * will be issued, causing a low-power consumption sleep mode.
- *
- * @return N/A
- *
- * C function prototype:
- *
- * void z_arch_cpu_idle (void);
- */
 SECTION_FUNC(TEXT, z_arch_cpu_idle)
 #ifdef CONFIG_TRACING
 	push {r0, lr}
@@ -93,28 +77,6 @@ SECTION_FUNC(TEXT, z_arch_cpu_idle)
 	bx lr

-/**
- *
- * @brief Atomically re-enable interrupts and enter low power mode
- *
- * INTERNAL
- * The requirements for z_arch_cpu_atomic_idle() are as follows:
- * 1) The enablement of interrupts and entering a low-power mode needs to be
- *    atomic, i.e. there should be no period of time where interrupts are
- *    enabled before the processor enters a low-power mode. See the comments
- *    in k_lifo_get(), for example, of the race condition that occurs
- *    if this requirement is not met.
- *
- * 2) After waking up from the low-power mode, the interrupt lockout state
- *    must be restored as indicated in the 'key' input parameter.
- *
- * @return N/A
- *
- * C function prototype:
- *
- * void z_arch_cpu_atomic_idle (unsigned int key);
- */
 SECTION_FUNC(TEXT, z_arch_cpu_atomic_idle)
 #ifdef CONFIG_TRACING
 	push {r0, lr}
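The removed z_arch_cpu_atomic_idle() comment states the contract that still governs the assembly below: re-enabling interrupts and entering low-power mode must be one atomic step, and the lockout state in 'key' must be restored on wakeup. A standalone mock of the calling pattern; the predicate and function bodies are stand-ins, not the kernel idle thread:

```c
#include <stdbool.h>

/* mocks standing in for the arch layer, for illustration only */
static unsigned int irq_lock(void) { return 0x10; }
static void irq_unlock(unsigned int key) { (void)key; }
static void z_arch_cpu_atomic_idle(unsigned int key) { (void)key; }
static bool work_is_pending(void) { return true; }

static void idle_once(void)
{
	unsigned int key = irq_lock();

	if (work_is_pending()) {
		irq_unlock(key);	/* something to do: don't sleep */
		return;
	}

	/* Atomically: re-enable interrupts *and* enter low power. Done as
	 * two steps, an IRQ arriving in between would be missed until the
	 * next wakeup (the race the comment cites from k_lifo_get()).
	 */
	z_arch_cpu_atomic_idle(key);
}

int main(void) { idle_once(); return 0; }
```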


@@ -37,40 +37,16 @@ extern void z_arm_reserved(void);
 #define REG_FROM_IRQ(irq) (irq / NUM_IRQS_PER_REG)
 #define BIT_FROM_IRQ(irq) (irq % NUM_IRQS_PER_REG)

-/**
- *
- * @brief Enable an interrupt line
- *
- * Enable the interrupt. After this call, the CPU will receive interrupts for
- * the specified <irq>.
- *
- * @return N/A
- */
 void z_arch_irq_enable(unsigned int irq)
 {
 	NVIC_EnableIRQ((IRQn_Type)irq);
 }

-/**
- *
- * @brief Disable an interrupt line
- *
- * Disable an interrupt line. After this call, the CPU will stop receiving
- * interrupts for the specified <irq>.
- *
- * @return N/A
- */
 void z_arch_irq_disable(unsigned int irq)
 {
 	NVIC_DisableIRQ((IRQn_Type)irq);
 }

-/**
- * @brief Return IRQ enable state
- *
- * @param irq IRQ line
- * @return interrupt enable state, true or false
- */
 int z_arch_irq_is_enabled(unsigned int irq)
 {
 	return NVIC->ISER[REG_FROM_IRQ(irq)] & BIT(BIT_FROM_IRQ(irq));
@@ -122,16 +98,6 @@ void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
 }

 #elif defined(CONFIG_CPU_CORTEX_R)
-/**
- *
- * @brief Enable an interrupt line
- *
- * Enable the interrupt. After this call, the CPU will receive interrupts for
- * the specified <irq>.
- *
- * @return N/A
- */
 void z_arch_irq_enable(unsigned int irq)
 {
 	struct device *dev = _sw_isr_table[0].arg;
@@ -139,15 +105,6 @@ void z_arch_irq_enable(unsigned int irq)
 	irq_enable_next_level(dev, (irq >> 8) - 1);
 }

-/**
- *
- * @brief Disable an interrupt line
- *
- * Disable an interrupt line. After this call, the CPU will stop receiving
- * interrupts for the specified <irq>.
- *
- * @return N/A
- */
 void z_arch_irq_disable(unsigned int irq)
 {
 	struct device *dev = _sw_isr_table[0].arg;
@@ -155,12 +112,6 @@ void z_arch_irq_disable(unsigned int irq)
 	irq_disable_next_level(dev, (irq >> 8) - 1);
 }

-/**
- * @brief Return IRQ enable state
- *
- * @param irq IRQ line
- * @return interrupt enable state, true or false
- */
 int z_arch_irq_is_enabled(unsigned int irq)
 {
 	struct device *dev = _sw_isr_table[0].arg;
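The Cortex-M variants above index a bank of 32-bit NVIC enable registers. A standalone illustration of the REG_FROM_IRQ()/BIT_FROM_IRQ() arithmetic used by z_arch_irq_is_enabled(); the IRQ number is arbitrary:

```c
#include <stdio.h>

#define NUM_IRQS_PER_REG 32
#define REG_FROM_IRQ(irq) ((irq) / NUM_IRQS_PER_REG)
#define BIT_FROM_IRQ(irq) ((irq) % NUM_IRQS_PER_REG)

int main(void)
{
	unsigned int irq = 37;

	/* IRQ 37 -> ISER[1], bit 5: the lookup z_arch_irq_is_enabled() does */
	printf("NVIC ISER[%u], bit %u\n", REG_FROM_IRQ(irq), BIT_FROM_IRQ(irq));
	return 0;
}
```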


@@ -13,14 +13,7 @@ extern void read_timer_start_of_swap(void);
 #endif

 extern const int _k_neg_eagain;

-/**
- *
- * @brief Initiate a cooperative context switch
- *
- * The z_arch_swap() routine is invoked by various kernel services to effect
- * a cooperative context context switch. Prior to invoking z_arch_swap(), the caller
- * disables interrupts via irq_lock() and the return 'key' is passed as a
- * parameter to z_arch_swap(). The 'key' actually represents the BASEPRI register
+/* The 'key' actually represents the BASEPRI register
  * prior to disabling interrupts via the BASEPRI mechanism.
  *
  * z_arch_swap() itself does not do much.
@@ -40,10 +33,6 @@ extern const int _k_neg_eagain;
  *
  * On ARMv6-M, the intlock key is represented by the PRIMASK register,
  * as BASEPRI is not available.
- *
- * @return -EAGAIN, or a return value set by a call to
- *         z_arch_thread_return_value_set()
- *
  */
 int z_arch_swap(unsigned int key)
 {
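The retained comment explains that the 'key' passed to z_arch_swap() is the saved interrupt-mask state from irq_lock(). A standalone mock of that calling convention; the bodies are stand-ins, not the ARM implementation, and -11 models -EAGAIN:

```c
#include <stdio.h>

/* stand-ins: a real port saves context and raises BASEPRI/PRIMASK */
static unsigned int irq_lock(void) { return 0u; }

static int z_arch_swap(unsigned int key)
{
	(void)key;	/* restored into BASEPRI when this thread resumes */
	return -11;	/* -EAGAIN, unless overwritten while swapped out */
}

int main(void)
{
	unsigned int key = irq_lock();	/* caller locks; key rides along */
	int ret = z_arch_swap(key);	/* returns when rescheduled */

	printf("z_arch_swap() -> %d\n", ret);
	return 0;
}
```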


@@ -21,13 +21,8 @@
 extern u8_t *z_priv_stack_find(void *obj);
 #endif

-/**
- *
- * @brief Initialize a new thread from its stack space
- *
- * The control structure (thread) is put at the lower address of the stack. An
- * initial context, to be "restored" by z_arm_pendsv(), is put at the other end
- * of the stack, and thus reusable by the stack when not needed anymore.
+/* An initial context, to be "restored" by z_arm_pendsv(), is put at the other
+ * end of the stack, and thus reusable by the stack when not needed anymore.
  *
  * The initial context is an exception stack frame (ESF) since exiting the
  * PendSV exception will want to pop an ESF. Interestingly, even if the lsb of
@@ -37,21 +32,7 @@ extern u8_t *z_priv_stack_find(void *obj);
  * halfwords). Since the compiler automatically sets the lsb of function
  * addresses, we have to unset it manually before storing it in the 'pc' field
  * of the ESF.
- *
- * <options> is currently unused.
- *
- * @param stack      pointer to the aligned stack memory
- * @param stackSize  size of the available stack memory in bytes
- * @param pEntry     the entry point
- * @param parameter1 entry point to the first param
- * @param parameter2 entry point to the second param
- * @param parameter3 entry point to the third param
- * @param priority   thread priority
- * @param options    thread options: K_ESSENTIAL, K_FP_REGS
- *
- * @return N/A
  */
 void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 		       size_t stackSize, k_thread_entry_t pEntry,
 		       void *parameter1, void *parameter2, void *parameter3,


@@ -38,16 +38,10 @@ extern volatile irq_offload_routine_t offload_routine;
  * to the Vector Key field, otherwise the writes are ignored.
  */
 #define AIRCR_VECT_KEY_PERMIT_WRITE 0x05FAUL

-/**
- *
- * @brief Find out if running in an ISR context
- *
- * The current executing vector is found in the IPSR register. We consider the
+/* The current executing vector is found in the IPSR register. We consider the
  * IRQs (exception 16 and up), and the PendSV and SYSTICK exceptions to be
  * interrupts. Taking a fault within an exception is also considered in
  * interrupt context.
- *
- * @return 1 if in ISR, 0 if not.
  */
 static ALWAYS_INLINE bool z_arch_is_in_isr(void)
 {


@@ -32,14 +32,7 @@ extern "C" {
 extern volatile irq_offload_routine_t offload_routine;
 #endif

-/**
- *
- * @brief Find out if running in an ISR context
- *
- * Check the CPSR mode bits to see if we are in IRQ or FIQ mode
- *
- * @return 1 if in ISR, 0 if not.
- */
+/* Check the CPSR mode bits to see if we are in IRQ or FIQ mode */
 static ALWAYS_INLINE bool z_arch_is_in_isr(void)
 {
 	unsigned int status;


@@ -7,16 +7,6 @@
 #include <kernel.h>
 #include <kernel_structs.h>

-/**
- *
- * @brief Power save idle routine
- *
- * This function will be called by the kernel idle loop or possibly within
- * an implementation of _sys_power_save_idle in the kernel when the
- * '_sys_power_save_flag' variable is non-zero.
- *
- * @return N/A
- */
 void z_arch_cpu_idle(void)
 {
 	/* Do nothing but unconditionally unlock interrupts and return to the
@@ -25,23 +15,6 @@ void z_arch_cpu_idle(void)
 	irq_unlock(NIOS2_STATUS_PIE_MSK);
 }

-/**
- *
- * @brief Atomically re-enable interrupts and enter low power mode
- *
- * INTERNAL
- * The requirements for z_arch_cpu_atomic_idle() are as follows:
- * 1) The enablement of interrupts and entering a low-power mode needs to be
- *    atomic, i.e. there should be no period of time where interrupts are
- *    enabled before the processor enters a low-power mode. See the comments
- *    in k_lifo_get(), for example, of the race condition that
- *    occurs if this requirement is not met.
- *
- * 2) After waking up from the low-power mode, the interrupt lockout state
- *    must be restored as indicated in the 'key' input parameter.
- *
- * @return N/A
- */
 void z_arch_cpu_atomic_idle(unsigned int key)
 {
 	/* Do nothing but restore IRQ state. This CPU does not have any
@@ -49,4 +22,3 @@ void z_arch_cpu_atomic_idle(unsigned int key)
 	 */
 	irq_unlock(key);
 }
-


@@ -24,18 +24,6 @@
 #include "posix_soc_if.h"
 #include <debug/tracing.h>

-/**
- *
- * @brief Power save idle routine for IA-32
- *
- * This function will be called by the kernel idle loop or possibly within
- * an implementation of _sys_power_save_idle in the kernel when the
- * '_sys_power_save_flag' variable is non-zero.
- *
- * This function is just a pass thru to the SOC one
- *
- * @return N/A
- */
 void z_arch_cpu_idle(void)
 {
 	sys_trace_idle();
@@ -43,25 +31,6 @@ void z_arch_cpu_idle(void)
 	posix_halt_cpu();
 }

-/**
- *
- * @brief Atomically re-enable interrupts and enter low power mode
- *
- * INTERNAL
- * The requirements for z_arch_cpu_atomic_idle() are as follows:
- * 1) The enablement of interrupts and entering a low-power mode needs to be
- *    atomic, i.e. there should be no period of time where interrupts are
- *    enabled before the processor enters a low-power mode. See the comments
- *    in k_lifo_get(), for example, of the race condition that
- *    occurs if this requirement is not met.
- *
- * 2) After waking up from the low-power mode, the interrupt lockout state
- *    must be restored as indicated in the 'key' input parameter.
- *
- * This function is just a pass thru to the SOC one
- *
- * @return N/A
- */
 void z_arch_cpu_atomic_idle(unsigned int key)
 {
 	sys_trace_idle();


@@ -19,36 +19,23 @@
 #include "irq.h"
 #include "kswap.h"

-/**
- *
- * @brief Initiate a cooperative context switch
- *
- * The z_arch_swap() routine is invoked by various kernel services to effect
- * a cooperative context switch. Prior to invoking z_arch_swap(), the
- * caller disables interrupts (via irq_lock) and the return 'key'
- * is passed as a parameter to z_arch_swap().
- *
- *
- * @return -EAGAIN, or a return value set by a call to
- *         z_arch_thread_return_value_set()
- *
- */
 int z_arch_swap(unsigned int key)
 {
 	/*
 	 * struct k_thread * _kernel.current is the currently runnig thread
-	 * struct k_thread * _kernel.ready_q.cache contains the next thread to run
-	 * (cannot be NULL)
+	 * struct k_thread * _kernel.ready_q.cache contains the next thread to
+	 * run (cannot be NULL)
 	 *
-	 * Here a "real" arch would save all processor registers, stack pointer and so
-	 * forth.
-	 * But we do not need to do so because we use posix threads => those are all
-	 * nicely kept by the native OS kernel
+	 * Here a "real" arch would save all processor registers, stack pointer
+	 * and so forth. But we do not need to do so because we use posix
+	 * threads => those are all nicely kept by the native OS kernel
 	 */
 	_kernel.current->callee_saved.key = key;
 	_kernel.current->callee_saved.retval = -EAGAIN;

-	/* retval may be modified with a call to z_arch_thread_return_value_set() */
+	/* retval may be modified with a call to
+	 * z_arch_thread_return_value_set()
+	 */

 	posix_thread_status_t *ready_thread_ptr =
 		(posix_thread_status_t *)
@@ -63,8 +50,9 @@ int z_arch_swap(unsigned int key)
 	/*
 	 * Here a "real" arch would load all processor registers for the thread
-	 * to run. In this arch case, we just block this thread until allowed to
-	 * run later, and signal to whomever is allowed to run to continue.
+	 * to run. In this arch case, we just block this thread until allowed
+	 * to run later, and signal to whomever is allowed to run to
+	 * continue.
 	 */
 	posix_swap(ready_thread_ptr->thread_idx,
 		   this_thread_ptr->thread_idx);
@@ -79,12 +67,11 @@ int z_arch_swap(unsigned int key)
 #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
-/**
- * This is just a version of z_arch_swap() in which we do not save anything about the
- * current thread.
+/* This is just a version of z_arch_swap() in which we do not save anything
+ * about the current thread.
  *
- * Note that we will never come back to this thread:
- * posix_main_thread_start() does never return
+ * Note that we will never come back to this thread: posix_main_thread_start()
+ * does never return.
  */
 void z_arch_switch_to_main_thread(struct k_thread *main_thread,
 				  k_thread_stack_t *main_stack,
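The -EAGAIN pre-load above pairs with z_arch_thread_return_value_set(): whatever readies the blocked thread may overwrite the value it will eventually see returned from z_arch_swap(). A standalone mock of that handshake; the struct and field names are illustrative stand-ins:

```c
#include <stdio.h>

struct k_thread_mock {
	int swap_retval;	/* stands in for callee_saved.retval above */
};

static void z_arch_thread_return_value_set(struct k_thread_mock *thread,
					   unsigned int value)
{
	thread->swap_retval = (int)value;
}

int main(void)
{
	struct k_thread_mock pended = { .swap_retval = -11 /* -EAGAIN */ };

	/* e.g. a k_sem_give() path marks the pended thread's swap as
	 * successful before making it runnable again
	 */
	z_arch_thread_return_value_set(&pended, 0);
	printf("thread will see z_arch_swap() == %d\n", pended.swap_retval);
	return 0;
}
```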


@@ -20,26 +20,8 @@
 #include "posix_core.h"
 #include "posix_soc_if.h"

-/**
- * @brief Create a new kernel execution thread
- *
- * Initializes the k_thread object and sets up initial stack frame.
- *
- * @param thread pointer to thread struct memory, including any space needed
- *		for extra coprocessor context
- * @param stack the pointer to aligned stack memory
- * @param stack_size the stack size in bytes
- * @param entry thread entry point routine
- * @param arg1 first param to entry point
- * @param arg2 second param to entry point
- * @param arg3 third param to entry point
- * @param priority thread priority
- * @param options thread options: K_ESSENTIAL, K_FP_REGS, K_SSE_REGS
- *
- * Note that in this arch we cheat quite a bit: we use as stack a normal
+/* Note that in this arch we cheat quite a bit: we use as stack a normal
  * pthreads stack and therefore we ignore the stack size
- *
  */
 void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 		       size_t stack_size, k_thread_entry_t thread_func,


@@ -26,23 +26,11 @@ void z_arch_switch_to_main_thread(struct k_thread *main_thread,
 			   size_t main_stack_size, k_thread_entry_t _main);
 #endif

-/**
- *
- * @brief Performs architecture-specific initialization
- *
- * This routine performs architecture-specific initialization of the kernel.
- * Trivial stuff is done inline; more complex initialization is done via
- * function calls.
- *
- * @return N/A
- */
 static inline void z_arch_kernel_init(void)
 {
 	/* Nothing to be done */
 }

 static ALWAYS_INLINE void
 z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 {


@@ -17,38 +17,11 @@
  * functions as weak functions, so that they can be replaced at the SOC-level.
  */

-/**
- *
- * @brief Power save idle routine
- *
- * This function will be called by the kernel idle loop or possibly within
- * an implementation of _sys_power_save_idle in the kernel when the
- * '_sys_power_save_flag' variable is non-zero.
- *
- * @return N/A
- */
 void __weak z_arch_cpu_idle(void)
 {
 	irq_unlock(SOC_MSTATUS_IEN);
 }

-/**
- *
- * @brief Atomically re-enable interrupts and enter low power mode
- *
- * INTERNAL
- * The requirements for z_arch_cpu_atomic_idle() are as follows:
- * 1) The enablement of interrupts and entering a low-power mode needs to be
- *    atomic, i.e. there should be no period of time where interrupts are
- *    enabled before the processor enters a low-power mode. See the comments
- *    in k_lifo_get(), for example, of the race condition that
- *    occurs if this requirement is not met.
- *
- * 2) After waking up from the low-power mode, the interrupt lockout state
- *    must be restored as indicated in the 'imask' input parameter.
- *
- * @return N/A
- */
 void __weak z_arch_cpu_atomic_idle(unsigned int key)
 {
 	irq_unlock(key);


@@ -7,17 +7,6 @@
 #include <debug/tracing.h>
 #include <arch/cpu.h>

-/**
- *
- * @brief Power save idle routine
- *
- * This function will be called by the kernel idle loop or possibly within
- * an implementation of _sys_power_save_idle in the kernel when the
- * '_sys_power_save_flag' variable is non-zero. The 'hlt' instruction
- * will be issued causing a low-power consumption sleep mode.
- *
- * @return N/A
- */
 void z_arch_cpu_idle(void)
 {
 	sys_trace_idle();
@@ -26,24 +15,6 @@ void z_arch_cpu_idle(void)
 		"hlt\n\t");
 }

-/**
- *
- * @brief Atomically re-enable interrupts and enter low power mode
- *
- * INTERNAL
- * The requirements for z_arch_cpu_atomic_idle() are as follows:
- * 1) The enablement of interrupts and entering a low-power mode needs to be
- *    atomic, i.e. there should be no period of time where interrupts are
- *    enabled before the processor enters a low-power mode. See the comments
- *    in k_lifo_get(), for example, of the race condition that
- *    occurs if this requirement is not met.
- *
- * 2) After waking up from the low-power mode, the interrupt lockout state
- *    must be restored as indicated in the 'key' input parameter.
- *
- * @return N/A
- */
 void z_arch_cpu_atomic_idle(unsigned int key)
 {
 	sys_trace_idle();


@@ -250,49 +250,6 @@ static void idt_vector_install(int vector, void *irq_handler)
 	irq_unlock(key);
 }

-/**
- *
- * @brief Connect a C routine to a hardware interrupt
- *
- * @param irq virtualized IRQ to connect to
- * @param priority requested priority of interrupt
- * @param routine the C interrupt handler
- * @param parameter parameter passed to C routine
- * @param flags IRQ flags
- *
- * This routine connects an interrupt service routine (ISR) coded in C to
- * the specified hardware <irq>. An interrupt vector will be allocated to
- * satisfy the specified <priority>.
- *
- * The specified <irq> represents a virtualized IRQ, i.e. it does not
- * necessarily represent a specific IRQ line on a given interrupt controller
- * device. The platform presents a virtualized set of IRQs from 0 to N, where
- * N is the total number of IRQs supported by all the interrupt controller
- * devices on the board. See the platform's documentation for the mapping of
- * virtualized IRQ to physical IRQ.
- *
- * When the device asserts an interrupt on the specified <irq>, a switch to
- * the interrupt stack is performed (if not already executing on the interrupt
- * stack), followed by saving the integer (i.e. non-floating point) thread of
- * the currently executing thread or ISR. The ISR specified by <routine>
- * will then be invoked with the single <parameter>. When the ISR returns, a
- * context switch may occur.
- *
- * On some platforms <flags> parameter needs to be specified to indicate if
- * the irq is triggered by low or high level or by rising or falling edge.
- *
- * The routine searches for the first available element in the dynamic_stubs
- * array and uses it for the stub.
- *
- * @return the allocated interrupt vector
- *
- * WARNINGS
- * This routine does not perform range checking on the requested <priority>
- * and thus, depending on the underlying interrupt controller, may result
- * in the assignment of an interrupt vector located in the reserved range of
- * the processor.
- */
 int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
 			       void (*routine)(void *parameter), void *parameter,
 			       u32_t flags)


@@ -27,23 +27,14 @@
 #endif

 GDATA(_k_neg_eagain)

-/**
- *
- * @brief Initiate a cooperative context switch
- *
- * The z_arch_swap() routine is invoked by various kernel services to effect
- * a cooperative context switch. Prior to invoking z_arch_swap(), the
- * caller disables interrupts (via irq_lock) and the return 'key'
- * is passed as a parameter to z_arch_swap(). The 'key' actually represents
- * the EFLAGS register prior to disabling interrupts via a 'cli' instruction.
- *
- * Given that z_arch_swap() is called to effect a cooperative context switch, only
- * the non-volatile integer registers need to be saved in the TCS of the
+/*
+ * Given that z_arch_swap() is called to effect a cooperative context switch,
+ * only the non-volatile integer registers need to be saved in the TCS of the
  * outgoing thread. The restoration of the integer registers of the incoming
  * thread depends on whether that thread was preemptively context switched out.
- * The X86_THREAD_FLAG_INT and _EXC bits in the k_thread->arch.flags field
- * will signify that the thread was preemptively context switched out, and thus
- * both the volatile and non-volatile integer registers need to be restored.
+ * The X86_THREAD_FLAG_INT and _EXC bits in the k_thread->arch.flags field will
+ * signify that the thread was preemptively context switched out, and thus both
+ * the volatile and non-volatile integer registers need to be restored.
  *
  * The non-volatile registers need to be scrubbed to ensure they contain no
  * sensitive information that could compromise system security. This is to
@@ -54,26 +45,22 @@
  * to this routine that alter the values of these registers MUST be reviewed
  * for potential security impacts.
  *
- * Floating point registers are handled using a lazy save/restore
- * mechanism since it's expected relatively few threads will be created
- * with the K_FP_REGS or K_SSE_REGS option bits. The kernel data structure
- * maintains a 'current_fp' field to keep track of the thread that "owns"
- * the floating point registers. Floating point registers consist of
- * ST0->ST7 (x87 FPU and MMX registers) and XMM0 -> XMM7.
+ * Floating point registers are handled using a lazy save/restore mechanism
+ * since it's expected relatively few threads will be created with the
+ * K_FP_REGS or K_SSE_REGS option bits. The kernel data structure maintains a
+ * 'current_fp' field to keep track of the thread that "owns" the floating
+ * point registers. Floating point registers consist of ST0->ST7 (x87 FPU and
+ * MMX registers) and XMM0 -> XMM7.
  *
- * All floating point registers are considered 'volatile' thus they will
- * only be saved/restored when a preemptive context switch occurs.
+ * All floating point registers are considered 'volatile' thus they will only
+ * be saved/restored when a preemptive context switch occurs.
  *
  * Floating point registers are currently NOT scrubbed, and are subject to
  * potential security leaks.
  *
- * @return -EAGAIN, or a return value set by a call to
- *         z_arch_thread_return_value_set()
- *
  * C function prototype:
  *
  * unsigned int z_arch_swap (unsigned int eflags);
- *
  */

 .macro read_tsc var_name


@@ -174,22 +174,6 @@ int z_arch_float_disable(struct k_thread *thread)
 }
 #endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */

-/**
- * @brief Create a new kernel execution thread
- *
- * Initializes the k_thread object and sets up initial stack frame.
- *
- * @param thread pointer to thread struct memory, including any space needed
- *		for extra coprocessor context
- * @param stack the pointer to aligned stack memory
- * @param stack_size the stack size in bytes
- * @param entry thread entry point routine
- * @param parameter1 first param to entry point
- * @param parameter2 second param to entry point
- * @param parameter3 third param to entry point
- * @param priority thread priority
- * @param options thread options: K_ESSENTIAL, K_FP_REGS, K_SSE_REGS
- */
 void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 		       size_t stack_size, k_thread_entry_t entry,
 		       void *parameter1, void *parameter2, void *parameter3,


@@ -31,34 +31,11 @@ void z_x86_early_serial_init(void);
 /* Create all page tables with boot configuration and enable paging */
 void z_x86_paging_init(void);

-/**
- *
- * @brief Performs architecture-specific initialization
- *
- * This routine performs architecture-specific initialization of the kernel.
- * Trivial stuff is done inline; more complex initialization is done via
- * function calls.
- *
- * @return N/A
- */
 static inline void z_arch_kernel_init(void)
 {
 	/* No-op on this arch */
 }

-/**
- *
- * @brief Set the return value for the specified thread (inline)
- *
- * @param thread pointer to thread
- * @param value value to set as return value
- *
- * The register used to store the return value from a function call invocation
- * is set to @a value. It is assumed that the specified @a thread is pending, and
- * thus the threads context is stored in its TCS.
- *
- * @return N/A
- */
 static ALWAYS_INLINE void
 z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 {


@@ -5,25 +5,11 @@

 #include <debug/tracing.h>

-/*
- * @brief Put the CPU in low-power mode
- *
- * This function always exits with interrupts unlocked.
- *
- * void z_arch_cpu_idle(void)
- */
 void z_arch_cpu_idle(void)
 {
 	sys_trace_idle();
 	__asm__ volatile ("waiti 0");
 }

-/*
- * @brief Put the CPU in low-power mode, entered with IRQs locked
- *
- * This function exits with interrupts restored to <key>.
- *
- * void z_arch_cpu_atomic_idle(unsigned int key)
- */
 void z_arch_cpu_atomic_idle(unsigned int key)
 {
 	sys_trace_idle();


@@ -51,16 +51,6 @@ static ALWAYS_INLINE _cpu_t *z_arch_curr_cpu(void)
 	return val;
 }

-/**
- *
- * @brief Performs architecture-specific initialization
- *
- * This routine performs architecture-specific initialization of the
- * kernel. Trivial stuff is done inline; more complex initialization is
- * done via function calls.
- *
- * @return N/A
- */
 static ALWAYS_INLINE void z_arch_kernel_init(void)
 {
 	_cpu_t *cpu0 = &_kernel.cpus[0];


@@ -226,9 +226,6 @@ extern "C" {
 /* Typedef for the k_mem_partition attribute*/
 typedef u32_t k_mem_partition_attr_t;

-/**
- * @brief Explicitly nop operation.
- */
 static ALWAYS_INLINE void z_arch_nop(void)
 {
 	__asm__ volatile("nop");


@@ -39,26 +39,13 @@ extern void z_irq_priority_set(unsigned int irq, unsigned int prio,
 extern void _isr_wrapper(void);
 extern void z_irq_spurious(void *unused);

-/**
- * Configure a static interrupt.
- *
- * All arguments must be computable by the compiler at build time.
- *
- * Z_ISR_DECLARE will populate the .intList section with the interrupt's
+/* Z_ISR_DECLARE will populate the .intList section with the interrupt's
  * parameters, which will then be used by gen_irq_tables.py to create
  * the vector table and the software ISR table. This is all done at
  * build-time.
  *
  * We additionally set the priority in the interrupt controller at
  * runtime.
- *
- * @param irq_p IRQ line number
- * @param priority_p Interrupt priority
- * @param isr_p Interrupt service routine
- * @param isr_param_p ISR parameter
- * @param flags_p IRQ options
- *
- * @return The vector assigned to this interrupt
  */
 #define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 	({ \
@@ -67,40 +54,6 @@ extern void z_irq_spurious(void *unused);
 		irq_p; \
 	})

-/**
- *
- * @brief Disable all interrupts on the local CPU
- *
- * This routine disables interrupts. It can be called from either interrupt or
- * thread level. This routine returns an architecture-dependent
- * lock-out key representing the "interrupt disable state" prior to the call;
- * this key can be passed to irq_unlock() to re-enable interrupts.
- *
- * The lock-out key should only be used as the argument to the
- * irq_unlock() API. It should never be used to manually re-enable
- * interrupts or to inspect or manipulate the contents of the source register.
- *
- * This function can be called recursively: it will return a key to return the
- * state of interrupt locking to the previous level.
- *
- * WARNINGS
- * Invoking a kernel routine with interrupts locked may result in
- * interrupts being re-enabled for an unspecified period of time. If the
- * called routine blocks, interrupts will be re-enabled while another
- * thread executes, or while the system is idle.
- *
- * The "interrupt disable state" is an attribute of a thread. Thus, if a
- * thread disables interrupts and subsequently invokes a kernel
- * routine that causes the calling thread to block, the interrupt
- * disable state will be restored when the thread is later rescheduled
- * for execution.
- *
- * @return An architecture-dependent lock-out key representing the
- *         "interrupt disable state" prior to the call.
- */
 static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
 {
 	unsigned int key;
@@ -109,28 +62,11 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
 	return key;
 }

-/**
- *
- * @brief Enable all interrupts on the local CPU
- *
- * This routine re-enables interrupts on the local CPU. The @a key parameter
- * is an architecture-dependent lock-out key that is returned by a previous
- * invocation of irq_lock().
- *
- * This routine can be called from either interrupt or thread level.
- *
- * @return N/A
- */
 static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
 {
 	__asm__ volatile("seti %0" : : "ir"(key) : "memory");
 }

-/**
- * Returns true if interrupts were unlocked prior to the
- * z_arch_irq_lock() call that produced the key argument.
- */
 static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
 {
 	/* ARC irq lock uses instruction "clri r0",
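The removed z_arch_irq_lock() documentation makes one behavioral point worth illustrating: locking is recursive, and irq_unlock() restores the previous lockout level rather than unconditionally enabling interrupts. A standalone mock of those semantics, not the ARC register-based implementation:

```c
#include <stdio.h>

static unsigned int irq_state = 1;	/* 1 = enabled; a mock, not hardware */

static unsigned int irq_lock_mock(void)
{
	unsigned int key = irq_state;	/* remember the previous level */
	irq_state = 0;
	return key;
}

static void irq_unlock_mock(unsigned int key)
{
	irq_state = key;	/* restore the *previous* level only */
}

int main(void)
{
	unsigned int outer = irq_lock_mock();	/* key says "was enabled" */
	unsigned int inner = irq_lock_mock();	/* key says "was disabled" */

	irq_unlock_mock(inner);	/* still locked: outer level restored */
	printf("after inner unlock: %u\n", irq_state);	/* prints 0 */
	irq_unlock_mock(outer);
	printf("after outer unlock: %u\n", irq_state);	/* prints 1 */
	return 0;
}
```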


@@ -26,40 +26,7 @@
 extern "C" {
 #endif

-/**
- *
- * @brief Disable all interrupts on the CPU
- *
- * This routine disables interrupts. It can be called from either interrupt or
- * thread level. This routine returns an architecture-dependent
- * lock-out key representing the "interrupt disable state" prior to the call;
- * this key can be passed to irq_unlock() to re-enable interrupts.
- *
- * The lock-out key should only be used as the argument to the irq_unlock()
- * API. It should never be used to manually re-enable interrupts or to inspect
- * or manipulate the contents of the source register.
- *
- * This function can be called recursively: it will return a key to return the
- * state of interrupt locking to the previous level.
- *
- * WARNINGS
- * Invoking a kernel routine with interrupts locked may result in
- * interrupts being re-enabled for an unspecified period of time. If the
- * called routine blocks, interrupts will be re-enabled while another
- * thread executes, or while the system is idle.
- *
- * The "interrupt disable state" is an attribute of a thread. Thus, if a
- * thread disables interrupts and subsequently invokes a kernel
- * routine that causes the calling thread to block, the interrupt
- * disable state will be restored when the thread is later rescheduled
- * for execution.
- *
- * @return An architecture-dependent lock-out key representing the
- *         "interrupt disable state" prior to the call.
- *
- * @internal
- *
- * On ARMv7-M and ARMv8-M Mainline CPUs, this function prevents regular
+/* On ARMv7-M and ARMv8-M Mainline CPUs, this function prevents regular
  * exceptions (i.e. with interrupt priority lower than or equal to
  * _EXC_IRQ_DEFAULT_PRIO) from interrupting the CPU. NMI, Faults, SVC,
  * and Zero Latency IRQs (if supported) may still interrupt the CPU.
@@ -104,23 +71,8 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
 }

-/**
- *
- * @brief Enable all interrupts on the CPU (inline)
- *
- * This routine re-enables interrupts on the CPU. The @a key parameter is an
- * architecture-dependent lock-out key that is returned by a previous
- * invocation of irq_lock().
- *
- * This routine can be called from either interrupt or thread level.
- *
- * @param key architecture-dependent lock-out key
- *
- * @return N/A
- *
- * On Cortex-M0/M0+, this enables all interrupts if they were not
+/* On Cortex-M0/M0+, this enables all interrupts if they were not
  * previously disabled.
- *
  */
 static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
@@ -148,10 +100,6 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
 }

-/**
- * Returns true if interrupts were unlocked prior to the
- * z_arch_irq_lock() call that produced the key argument.
- */
 static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
 {
 	/* This convention works for both PRIMASK and BASEPRI */


@@ -66,10 +66,7 @@ extern void z_arm_irq_priority_set(unsigned int irq, unsigned int prio,
 #endif

-/**
- * Configure a static interrupt.
- *
- * All arguments must be computable by the compiler at build time.
+/* All arguments must be computable by the compiler at build time.
  *
  * Z_ISR_DECLARE will populate the .intList section with the interrupt's
  * parameters, which will then be used by gen_irq_tables.py to create
@@ -78,14 +75,6 @@ extern void z_arm_irq_priority_set(unsigned int irq, unsigned int prio,
  *
  * We additionally set the priority in the interrupt controller at
  * runtime.
- *
- * @param irq_p IRQ line number
- * @param priority_p Interrupt priority
- * @param isr_p Interrupt service routine
- * @param isr_param_p ISR parameter
- * @param flags_p IRQ options
- *
- * @return The vector assigned to this interrupt
  */
 #define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 	({ \
@@ -94,13 +83,6 @@ extern void z_arm_irq_priority_set(unsigned int irq, unsigned int prio,
 		irq_p; \
 	})

-/**
- * Configure a 'direct' static interrupt.
- *
- * See include/irq.h for details.
- * All arguments must be computable at build time.
- */
 #define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
 	({ \
 		Z_ISR_DECLARE(irq_p, ISR_FLAG_DIRECT, isr_p, NULL); \


@@ -26,9 +26,6 @@ static inline u32_t z_arch_k_cycle_get_32(void)
 	return z_timer_cycle_get_32();
 }

-/**
- * @brief Explicitly nop operation.
- */
 static ALWAYS_INLINE void z_arch_nop(void)
 {
 	__asm__ volatile("nop");


@@ -33,33 +33,8 @@
 extern "C" {
 #endif

-/**
- * Configure a static interrupt.
- *
- * All arguments must be computable by the compiler at build time.
- *
- * Internally this function does a few things:
- *
- * 1. The enum statement has no effect but forces the compiler to only
- * accept constant values for the irq_p parameter, very important as the
- * numerical IRQ line is used to create a named section.
- *
- * 2. An instance of struct _isr_table_entry is created containing the ISR and
- * its parameter. If you look at how _sw_isr_table is created, each entry in
- * the array is in its own section named by the IRQ line number. What we are
- * doing here is to override one of the default entries (which points to the
- * spurious IRQ handler) with what was supplied here.
- *
- * There is no notion of priority with the Nios II internal interrupt
+/* There is no notion of priority with the Nios II internal interrupt
  * controller and no flags are currently supported.
- *
- * @param irq_p IRQ line number
- * @param priority_p Interrupt priority (ignored)
- * @param isr_p Interrupt service routine
- * @param isr_param_p ISR parameter
- * @param flags_p IRQ triggering options (currently unused)
- *
- * @return The vector assigned to this interrupt
  */
 #define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 	({ \
@@ -116,10 +91,6 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
 #endif
 }

-/**
- * Returns true if interrupts were unlocked prior to the
- * z_arch_irq_lock() call that produced the key argument.
- */
 static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
 {
 	return key & 1;
@@ -205,9 +176,6 @@ static inline u32_t z_arch_k_cycle_get_32(void)
 	return z_timer_cycle_get_32();
 }

-/**
- * @brief Explicitly nop operation.
- */
 static ALWAYS_INLINE void z_arch_nop(void)
 {
 	__asm__ volatile("nop");


@@ -51,9 +51,6 @@ static inline u32_t z_arch_k_cycle_get_32(void)
 	return z_timer_cycle_get_32();
 }

-/**
- * @brief Explicitly nop operation.
- */
 static ALWAYS_INLINE void z_arch_nop(void)
 {
 	__asm__ volatile("nop");


@@ -69,20 +69,6 @@ int z_arch_irq_is_enabled(unsigned int irq);
 void z_arch_irq_priority_set(unsigned int irq, unsigned int prio);
 void z_irq_spurious(void *unused);

-/**
- * Configure a static interrupt.
- *
- * All arguments must be computable by the compiler at build time.
- *
- * @param irq_p IRQ line number
- * @param priority_p Interrupt priority
- * @param isr_p Interrupt service routine
- * @param isr_param_p ISR parameter
- * @param flags_p IRQ options
- *
- * @return The vector assigned to this interrupt
- */
 #if defined(CONFIG_RISCV_HAS_PLIC)
 #define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 	({ \
@@ -130,10 +116,6 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
 	: "memory");
 }

-/**
- * Returns true if interrupts were unlocked prior to the
- * z_arch_irq_lock() call that produced the key argument.
- */
 static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
 {
 	/* FIXME: looking at z_arch_irq_lock, this should be reducable
@@ -146,15 +128,11 @@ static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
 	return (key & SOC_MSTATUS_IEN) == SOC_MSTATUS_IEN;
 }

-/**
- * @brief Explicitly nop operation.
- */
 static ALWAYS_INLINE void z_arch_nop(void)
 {
 	__asm__ volatile("nop");
 }

 extern u32_t z_timer_cycle_get_32(void);

 static inline u32_t z_arch_k_cycle_get_32(void)


@@ -217,10 +217,6 @@ static inline u32_t z_arch_k_cycle_get_32(void)
 	return z_timer_cycle_get_32();
 }

-/**
- * Returns true if interrupts were unlocked prior to the
- * z_arch_irq_lock() call that produced the key argument.
- */
 static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
 {
 	return (key & 0x200) != 0;


@@ -168,12 +168,7 @@ typedef struct s_isrList {
  */
 #define _VECTOR_ARG(irq_p)	(-1)

-/**
- * Configure a static interrupt.
- *
- * All arguments must be computable by the compiler at build time.
- *
- * Internally this function does a few things:
+/* Internally this function does a few things:
  *
  * 1. There is a declaration of the interrupt parameters in the .intList
  * section, used by gen_idt to create the IDT. This does the same thing
@@ -190,14 +185,6 @@ typedef struct s_isrList {
  *
  * 4. z_irq_controller_irq_config() is called at runtime to set the mapping
  * between the vector and the IRQ line as well as triggering flags
- *
- * @param irq_p IRQ line number
- * @param priority_p Interrupt priority
- * @param isr_p Interrupt service routine
- * @param isr_param_p ISR parameter
- * @param flags_p IRQ triggering options, as defined in sysapic.h
- *
- * @return The vector assigned to this interrupt
  */
 #define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 	({ \
@@ -228,11 +215,6 @@ typedef struct s_isrList {
 		Z_IRQ_TO_INTERRUPT_VECTOR(irq_p); \
 	})

-/** Configure a 'direct' static interrupt
- *
- * All arguments must be computable by the compiler at build time
- *
- */
 #define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
 	({ \
 		NANO_CPU_INT_REGISTER(isr_p, irq_p, priority_p, -1, 0); \
@@ -307,38 +289,6 @@ struct _x86_syscall_stack_frame {
 	u32_t ss;
 };

-/**
- * @brief Disable all interrupts on the CPU (inline)
- *
- * This routine disables interrupts. It can be called from either interrupt
- * or thread level. This routine returns an architecture-dependent
- * lock-out key representing the "interrupt disable state" prior to the call;
- * this key can be passed to irq_unlock() to re-enable interrupts.
- *
- * The lock-out key should only be used as the argument to the irq_unlock()
- * API. It should never be used to manually re-enable interrupts or to inspect
- * or manipulate the contents of the source register.
- *
- * This function can be called recursively: it will return a key to return the
- * state of interrupt locking to the previous level.
- *
- * WARNINGS
- * Invoking a kernel routine with interrupts locked may result in
- * interrupts being re-enabled for an unspecified period of time. If the
- * called routine blocks, interrupts will be re-enabled while another
- * thread executes, or while the system is idle.
- *
- * The "interrupt disable state" is an attribute of a thread. Thus, if a
- * thread disables interrupts and subsequently invokes a kernel
- * routine that causes the calling thread to block, the interrupt
- * disable state will be restored when the thread is later rescheduled
- * for execution.
- *
- * @return An architecture-dependent lock-out key representing the
- *         "interrupt disable state" prior to the call.
- *
- */
 static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
 {
 	unsigned int key;


@@ -39,36 +39,6 @@ extern "C" {
 /* internal routine documented in C file, needed by IRQ_CONNECT() macro */
 extern void z_irq_priority_set(u32_t irq, u32_t prio, u32_t flags);

-/**
- * Configure a static interrupt.
- *
- * All arguments must be computable by the compiler at build time; if this
- * can't be done use irq_connect_dynamic() instead.
- *
- * Internally this function does a few things:
- *
- * 1. The enum statement has no effect but forces the compiler to only
- * accept constant values for the irq_p parameter, very important as the
- * numerical IRQ line is used to create a named section.
- *
- * 2. An instance of _isr_table_entry is created containing the ISR and its
- * parameter. If you look at how _sw_isr_table is created, each entry in the
- * array is in its own section named by the IRQ line number. What we are doing
- * here is to override one of the default entries (which points to the
- * spurious IRQ handler) with what was supplied here.
- *
- * 3. The priority level for the interrupt is configured by a call to
- * z_irq_priority_set()
- *
- * @param irq_p IRQ line number
- * @param priority_p Interrupt priority
- * @param isr_p Interrupt service routine
- * @param isr_param_p ISR parameter
- * @param flags_p IRQ options
- *
- * @return The vector assigned to this interrupt
- */
 #define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 	({ \
 		Z_ISR_DECLARE(irq_p, flags_p, isr_p, isr_param_p); \
@@ -87,9 +57,6 @@ static inline u32_t z_arch_k_cycle_get_32(void)
 	return z_timer_cycle_get_32();
 }

-/**
- * @brief Explicitly nop operation.
- */
 static ALWAYS_INLINE void z_arch_nop(void)
 {
 	__asm__ volatile("nop");


@@ -47,30 +47,11 @@
 #endif

-/**
- *
- * @brief Enable an interrupt line
- *
- * Clear possible pending interrupts on the line, and enable the interrupt
- * line. After this call, the CPU will receive interrupts for the specified
- * IRQ.
- *
- * @return N/A
- */
 static ALWAYS_INLINE void z_xtensa_irq_enable(u32_t irq)
 {
 	z_xt_ints_on(1 << irq);
 }

-/**
- *
- * @brief Disable an interrupt line
- *
- * Disable an interrupt line. After this call, the CPU will stop receiving
- * interrupts for the specified IRQ.
- *
- * @return N/A
- */
 static ALWAYS_INLINE void z_xtensa_irq_disable(u32_t irq)
 {
 	z_xt_ints_off(1 << irq);
@@ -87,10 +68,6 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
 	XTOS_RESTORE_INTLEVEL(key);
 }

-/**
- * Returns true if interrupts were unlocked prior to the
- * z_arch_irq_lock() call that produced the key argument.
- */
 static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
 {
 	return (key & 0xf) == 0; /* INTLEVEL field */


@@ -50,6 +50,10 @@ extern "C" {
 #define IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
 	Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p)

+extern int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+				      void (*routine)(void *parameter), void *parameter,
+				      u32_t flags);
+
 /**
  * Configure a dynamic interrupt.
  *
@@ -63,10 +67,6 @@ extern "C" {
  *
  * @return The vector assigned to this interrupt
  */
-extern int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
-				      void (*routine)(void *parameter), void *parameter,
-				      u32_t flags);
-
 static inline int
 irq_connect_dynamic(unsigned int irq, unsigned int priority,
 		    void (*routine)(void *parameter), void *parameter,
@@ -187,6 +187,7 @@ irq_connect_dynamic(unsigned int irq, unsigned int priority,
 /**
  * @brief Lock interrupts.
+ * @def irq_lock()
  *
  * This routine disables all interrupts on the CPU. It returns an unsigned
  * integer "lock-out key", which is an architecture-dependent indicator of
@@ -214,7 +215,8 @@ irq_connect_dynamic(unsigned int irq, unsigned int priority,
  * The lock-out key should never be used to manually re-enable interrupts
  * or to inspect or manipulate the contents of the CPU's interrupt bits.
  *
- * @return Lock-out key.
+ * @return An architecture-dependent lock-out key representing the
+ *         "interrupt disable state" prior to the call.
  */
 #ifdef CONFIG_SMP
 unsigned int z_smp_global_lock(void);
@@ -225,6 +227,7 @@ unsigned int z_smp_global_lock(void);
 /**
  * @brief Unlock interrupts.
+ * @def irq_unlock()
  *
  * This routine reverses the effect of a previous call to irq_lock() using
  * the associated lock-out key. The caller must call the routine once for
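For contrast with the static Z_ARCH_IRQ_CONNECT() macros earlier in this commit, here is a sketch of a driver using the dynamic path declared above. It relies on the Zephyr APIs irq_connect_dynamic() and irq_enable(); MY_DEV_IRQ, MY_DEV_IRQ_PRIO, my_isr, and my_driver_init are hypothetical placeholders:

```c
#define MY_DEV_IRQ       24	/* hypothetical IRQ line */
#define MY_DEV_IRQ_PRIO   2	/* hypothetical priority */

static void my_isr(void *arg)
{
	/* acknowledge the hardware, then use 'arg' (the device pointer) */
}

void my_driver_init(void *dev)
{
	/* returns the assigned vector; ignored in this sketch */
	(void)irq_connect_dynamic(MY_DEV_IRQ, MY_DEV_IRQ_PRIO,
				  my_isr, dev, 0);
	irq_enable(MY_DEV_IRQ);
}
```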


@ -5162,28 +5162,6 @@ extern void k_mem_domain_remove_thread(k_tid_t thread);
 */
__syscall void k_str_out(char *c, size_t n);
/**
* @brief Start a numbered CPU on a MP-capable system
* This starts and initializes a specific CPU. The main thread on
* startup is running on CPU zero, other processors are numbered
* sequentially. On return from this function, the CPU is known to
* have begun operating and will enter the provided function. Its
* interrupts will be initialized but disabled such that irq_unlock()
* with the provided key will work to enable them.
*
* Normally, in SMP mode this function will be called by the kernel
* initialization and should not be used as a user API. But it is
* defined here for special-purpose apps which want Zephyr running on
* one core and to use others for design-specific processing.
*
* @param cpu_num Integer number of the CPU
* @param stack Stack memory for the CPU
* @param sz Stack buffer size, in bytes
* @param fn Function to begin running on the CPU. First argument is
* an irq_unlock() key.
* @param arg Untyped argument to be passed to "fn"
*/
extern void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
			     void (*fn)(int key, void *data), void *arg);

include/sys/arch_inlines.h (new file)

@ -0,0 +1,463 @@
/*
* Copyright (c) 2019 Intel Corporation.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Internal kernel APIs with public scope
*
* The main set of architecture APIs is specified by
* include/sys/arch_interface.h
*
* Any public kernel APIs that are implemented as inline functions and need to
 * call architecture-specific APIs will have the prototypes for the
* architecture-specific APIs here. Architecture APIs that aren't used in this
* way go in include/sys/arch_interface.h.
*
* The set of architecture-specific macros used internally by public macros
* in public headers is also specified and documented.
*
* For all macros and inline function prototypes described herein, <arch/cpu.h>
* must eventually pull in full definitions for all of them (the actual macro
 * defines and inline function bodies).
*
* include/kernel.h and other public headers depend on definitions in this
* header.
*/
#ifndef ZEPHYR_INCLUDE_SYS_ARCH_INLINES_H_
#define ZEPHYR_INCLUDE_SYS_ARCH_INLINES_H_
#ifndef _ASMLANGUAGE
#include <stdbool.h>
#include <zephyr/types.h>
#include <arch/cpu.h>
#ifdef __cplusplus
extern "C" {
#endif
/* NOTE: We cannot pull in kernel.h here, need some forward declarations */
struct k_thread;
typedef struct _k_thread_stack_element k_thread_stack_t;
/**
* @addtogroup arch-timing
* @{
*/
/**
* Obtain the current cycle count, in units that are hardware-specific
*
* @see k_cycle_get_32()
*/
static inline u32_t z_arch_k_cycle_get_32(void);
/** @} */
/**
* @addtogroup arch-threads
* @{
*/
/**
* @def Z_ARCH_THREAD_STACK_DEFINE(sym, size)
*
* @see K_THREAD_STACK_DEFINE()
*/
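/* For illustration only (not part of this header): an architecture with a
 * simple power-of-two alignment rule and no reserved area might define this
 * macro along the following lines. STACK_ALIGN is an assumed per-arch
 * constant, not something mandated here:
 *
 *   #define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
 *           struct _k_thread_stack_element __aligned(STACK_ALIGN) sym[size]
 */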
/**
* @def Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, size)
*
* @see K_THREAD_STACK_ARRAY_DEFINE()
*/
/**
* @def Z_ARCH_THREAD_STACK_LEN(size)
*
* @see K_THREAD_STACK_LEN()
*/
/**
* @def Z_ARCH_THREAD_STACK_MEMBER(sym, size)
*
* @see K_THREAD_STACK_MEMBER()
*/
/**
* @def Z_ARCH_THREAD_STACK_SIZEOF(sym)
*
* @see K_THREAD_STACK_SIZEOF()
*/
/**
* @def Z_ARCH_THREAD_STACK_RESERVED
*
* @see K_THREAD_STACK_RESERVED
*/
/**
* @def Z_ARCH_THREAD_STACK_BUFFER(sym)
*
 * @see K_THREAD_STACK_BUFFER()
*/
/** @} */
/**
* @addtogroup arch-pm
* @{
*/
/**
* @brief Power save idle routine
*
* This function will be called by the kernel idle loop or possibly within
* an implementation of z_sys_power_save_idle in the kernel when the
* '_sys_power_save_flag' variable is non-zero.
*
* Architectures that do not implement power management instructions may
* immediately return, otherwise a power-saving instruction should be
* issued to wait for an interrupt.
*
* @see k_cpu_idle()
*/
void z_arch_cpu_idle(void);
/**
* @brief Atomically re-enable interrupts and enter low power mode
*
* The requirements for z_arch_cpu_atomic_idle() are as follows:
*
* 1) Enabling interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments
 * in k_lifo_get(), for example, describing the race condition that
* occurs if this requirement is not met.
*
* 2) After waking up from the low-power mode, the interrupt lockout state
* must be restored as indicated in the 'key' input parameter.
*
* @see k_cpu_atomic_idle()
*
* @param key Lockout key returned by previous invocation of z_arch_irq_lock()
*/
void z_arch_cpu_atomic_idle(unsigned int key);
/** @} */
/**
* @addtogroup arch-smp
* @{
*/
/**
* @brief Start a numbered CPU on a MP-capable system
*
* This starts and initializes a specific CPU. The main thread on startup is
* running on CPU zero, other processors are numbered sequentially. On return
* from this function, the CPU is known to have begun operating and will enter
* the provided function. Its interrupts will be initialized but disabled such
* that irq_unlock() with the provided key will work to enable them.
*
* Normally, in SMP mode this function will be called by the kernel
* initialization and should not be used as a user API. But it is defined here
* for special-purpose apps which want Zephyr running on one core and to use
* others for design-specific processing.
*
* @param cpu_num Integer number of the CPU
* @param stack Stack memory for the CPU
* @param sz Stack buffer size, in bytes
* @param fn Function to begin running on the CPU. First argument is
* an irq_unlock() key.
* @param arg Untyped argument to be passed to "fn"
*/
void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
void (*fn)(int key, void *data), void *arg);
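/* Sketch of how a special-purpose application might bring up a second core
 * (illustrative only; the stack symbol, size, and entry function below are
 * hypothetical):
 *
 *   K_THREAD_STACK_DEFINE(cpu1_stack, 1024);
 *
 *   void cpu1_entry(int key, void *arg)
 *   {
 *           irq_unlock(key);        // enable interrupts with the given key
 *           for (;;) {
 *                   // design-specific processing
 *           }
 *   }
 *
 *   z_arch_start_cpu(1, cpu1_stack, K_THREAD_STACK_SIZEOF(cpu1_stack),
 *                    cpu1_entry, NULL);
 */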
/** @} */
/**
* @addtogroup arch-irq
* @{
*/
/**
* Lock interrupts on the current CPU
*
* @see irq_lock()
*/
static inline unsigned int z_arch_irq_lock(void);
/**
* Unlock interrupts on the current CPU
*
* @see irq_unlock()
*/
static inline void z_arch_irq_unlock(unsigned int key);
/**
* Test if calling z_arch_irq_unlock() with this key would unlock irqs
*
* @param key value returned by z_arch_irq_lock()
* @return true if interrupts were unlocked prior to the z_arch_irq_lock()
* call that produced the key argument.
*/
static inline bool z_arch_irq_unlocked(unsigned int key);
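/* Taken together, these three APIs support the canonical nestable locking
 * pattern; a minimal caller-side sketch:
 *
 *   unsigned int key = z_arch_irq_lock();
 *
 *   // ... critical section; nesting is safe because unlocking with a key
 *   // taken while interrupts were already locked leaves them locked ...
 *
 *   if (z_arch_irq_unlocked(key)) {
 *           // interrupts were enabled before we took the lock
 *   }
 *   z_arch_irq_unlock(key);
 */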
/**
* Disable the specified interrupt line
*
* @see irq_disable()
*/
void z_arch_irq_disable(unsigned int irq);
/**
* Enable the specified interrupt line
*
* @see irq_enable()
*/
void z_arch_irq_enable(unsigned int irq);
/**
* Test if an interrupt line is enabled
*
* @see irq_is_enabled()
*/
int z_arch_irq_is_enabled(unsigned int irq);
/**
* Arch-specific hook to install a dynamic interrupt.
*
* @param irq IRQ line number
* @param priority Interrupt priority
* @param routine Interrupt service routine
* @param parameter ISR parameter
* @param flags Arch-specific IRQ configuration flag
*
* @return The vector assigned to this interrupt
*/
int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
void (*routine)(void *parameter),
void *parameter, u32_t flags);
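/* Illustrative use (normally reached through irq_connect_dynamic() rather
 * than called directly); the IRQ line, priority, and handler below are
 * hypothetical:
 *
 *   void my_isr(void *arg)
 *   {
 *           // handle the interrupt
 *   }
 *
 *   int vector = z_arch_irq_connect_dynamic(5, 1, my_isr, NULL, 0);
 *   z_arch_irq_enable(5);
 */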
/**
* @def Z_ARCH_IRQ_CONNECT(irq, pri, isr, arg, flags)
*
* @see IRQ_CONNECT()
*/
/**
* @def Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p)
*
* @see IRQ_DIRECT_CONNECT()
*/
/**
* @def Z_ARCH_ISR_DIRECT_PM()
*
* @see ISR_DIRECT_PM()
*/
/**
* @def Z_ARCH_ISR_DIRECT_HEADER()
*
* @see ISR_DIRECT_HEADER()
*/
/**
* @def Z_ARCH_ISR_DIRECT_FOOTER(swap)
*
* @see ISR_DIRECT_FOOTER()
*/
/**
* @def Z_ARCH_ISR_DIRECT_DECLARE(name)
*
* @see ISR_DIRECT_DECLARE()
*/
/**
* @def Z_ARCH_EXCEPT(reason_p)
*
* Generate a software induced fatal error.
*
* If the caller is running in user mode, only K_ERR_KERNEL_OOPS or
* K_ERR_STACK_CHK_FAIL may be induced.
*
* This should ideally generate a software trap, with exception context
* indicating state when this was invoked. General purpose register state at
* the time of trap should not be disturbed from the calling context.
*
* @param reason_p K_ERR_ scoped reason code for the fatal error.
*/
#ifdef CONFIG_IRQ_OFFLOAD
typedef void (*irq_offload_routine_t)(void *parameter);
/**
* Run a function in interrupt context.
*
* Implementations should invoke an exception such that the kernel goes through
* its interrupt handling dispatch path, to include switching to the interrupt
* stack, and runs the provided routine and parameter.
*
* The only intended use-case for this function is for test code to simulate
* the correctness of kernel APIs in interrupt handling context. This API
* is not intended for real applications.
*
* @see irq_offload()
*
* @param routine Function to run in interrupt context
* @param parameter Value to pass to the function when invoked
*/
void z_arch_irq_offload(irq_offload_routine_t routine, void *parameter);
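/* Sketch of the intended test-code usage (the handler name and parameter
 * value are hypothetical):
 *
 *   static void offload_fn(void *param)
 *   {
 *           // runs in interrupt context; param == (void *)23
 *   }
 *
 *   z_arch_irq_offload(offload_fn, (void *)23);
 */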
#endif /* CONFIG_IRQ_OFFLOAD */
/** @} */
/**
* @addtogroup arch-userspace
* @{
*/
#ifdef CONFIG_USERSPACE
/**
* Invoke a system call with 0 arguments.
*
* No general-purpose register state other than return value may be preserved
* when transitioning from supervisor mode back down to user mode for
* security reasons.
*
* It is required that all arguments be stored in registers when elevating
* privileges from user to supervisor mode.
*
* Processing of the syscall takes place on a separate kernel stack. Interrupts
* should be enabled when invoking the system call marshallers from the
* dispatch table. Thread preemption may occur when handling system calls.
*
* Call ids are untrusted and must be bounds-checked, as the value is used to
* index the system call dispatch table, containing function pointers to the
* specific system call code.
*
* @param call_id System call ID
* @return Return value of the system call. Void system calls return 0 here.
*/
static inline u32_t z_arch_syscall_invoke0(u32_t call_id);
/**
* Invoke a system call with 1 argument.
*
* @see z_arch_syscall_invoke0()
*
* @param arg1 First argument to the system call.
* @param call_id System call ID, will be bounds-checked and used to reference
* kernel-side dispatch table
* @return Return value of the system call. Void system calls return 0 here.
*/
static inline u32_t z_arch_syscall_invoke1(u32_t arg1, u32_t call_id);
/**
* Invoke a system call with 2 arguments.
*
* @see z_arch_syscall_invoke0()
*
* @param arg1 First argument to the system call.
* @param arg2 Second argument to the system call.
* @param call_id System call ID, will be bounds-checked and used to reference
* kernel-side dispatch table
* @return Return value of the system call. Void system calls return 0 here.
*/
static inline u32_t z_arch_syscall_invoke2(u32_t arg1, u32_t arg2,
u32_t call_id);
/**
* Invoke a system call with 3 arguments.
*
* @see z_arch_syscall_invoke0()
*
* @param arg1 First argument to the system call.
* @param arg2 Second argument to the system call.
* @param arg3 Third argument to the system call.
* @param call_id System call ID, will be bounds-checked and used to reference
* kernel-side dispatch table
* @return Return value of the system call. Void system calls return 0 here.
*/
static inline u32_t z_arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t call_id);
/**
* Invoke a system call with 4 arguments.
*
* @see z_arch_syscall_invoke0()
*
* @param arg1 First argument to the system call.
* @param arg2 Second argument to the system call.
* @param arg3 Third argument to the system call.
* @param arg4 Fourth argument to the system call.
* @param call_id System call ID, will be bounds-checked and used to reference
* kernel-side dispatch table
* @return Return value of the system call. Void system calls return 0 here.
*/
static inline u32_t z_arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t call_id);
/**
* Invoke a system call with 5 arguments.
*
* @see z_arch_syscall_invoke0()
*
* @param arg1 First argument to the system call.
* @param arg2 Second argument to the system call.
* @param arg3 Third argument to the system call.
* @param arg4 Fourth argument to the system call.
* @param arg5 Fifth argument to the system call.
* @param call_id System call ID, will be bounds-checked and used to reference
* kernel-side dispatch table
* @return Return value of the system call. Void system calls return 0 here.
*/
static inline u32_t z_arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5,
u32_t call_id);
/**
* Invoke a system call with 6 arguments.
*
* @see z_arch_syscall_invoke0()
*
* @param arg1 First argument to the system call.
* @param arg2 Second argument to the system call.
* @param arg3 Third argument to the system call.
* @param arg4 Fourth argument to the system call.
* @param arg5 Fifth argument to the system call.
* @param arg6 Sixth argument to the system call.
* @param call_id System call ID, will be bounds-checked and used to reference
* kernel-side dispatch table
* @return Return value of the system call. Void system calls return 0 here.
*/
static inline u32_t z_arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t arg6,
u32_t call_id);
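/* For illustration, a user-mode wrapper for a one-argument system call
 * might look like this; K_SYSCALL_MY_CALL is an assumed dispatch-table ID,
 * not a real one:
 *
 *   static inline int my_call(struct my_obj *obj)
 *   {
 *           return (int)z_arch_syscall_invoke1((u32_t)obj, K_SYSCALL_MY_CALL);
 *   }
 */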
/**
* Indicate whether we are currently running in user mode
*
* @return true if the CPU is currently running with user permissions
*/
static inline bool z_arch_is_user_context(void);
#endif /* CONFIG_USERSPACE */
/** @} */
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_SYS_ARCH_INLINES_H_ */


@ -0,0 +1,408 @@
/*
* Copyright (c) 2019 Intel Corporation.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Internal kernel APIs implemented at the architecture layer.
*
* Not all architecture-specific defines are here, APIs that are used
* by public inline functions and macros are described in
* include/sys/arch_inlines.h.
*
* For all inline functions prototyped here, the implementation is expected
* to be provided by arch/ARCH/include/kernel_arch_func.h
*
* This header is not intended for general use; like kernel_arch_func.h,
* it is intended to be pulled in by internal kernel headers, specifically
* kernel/include/kernel_structs.h
*/
#ifndef ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_
#define ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_
#ifndef _ASMLANGUAGE
#include <kernel.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup arch-timing Architecture timing APIs
* @{
*/
#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
/**
* Architecture-specific implementation of busy-waiting
*
* @param usec_to_wait Wait period, in microseconds
*/
void z_arch_busy_wait(u32_t usec_to_wait);
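/* A naive, cycle-counter-based implementation sketch (illustrative only;
 * real ports often use a dedicated timer instead). Assumes the cycle counter
 * wraps cleanly and that sys_clock_hw_cycles_per_sec() is usable this early:
 *
 *   void z_arch_busy_wait(u32_t usec_to_wait)
 *   {
 *           u32_t start = z_arch_k_cycle_get_32();
 *           u32_t cycles = (u32_t)(((u64_t)usec_to_wait *
 *                                   sys_clock_hw_cycles_per_sec()) /
 *                                  USEC_PER_SEC);
 *
 *           while ((z_arch_k_cycle_get_32() - start) < cycles) {
 *           }
 *   }
 */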
#endif
/** @} */
/**
* @defgroup arch-threads Architecture thread APIs
* @{
*/
/** Handle arch-specific logic for setting up new threads
*
* The stack and arch-specific thread state variables must be set up
* such that a later attempt to switch to this thread will succeed
* and we will enter z_thread_entry with the requested thread and
* arguments as its parameters.
*
* At some point in this function's implementation, z_setup_new_thread() must
* be called with the true bounds of the available stack buffer within the
* thread's stack object.
*
* @param thread Pointer to uninitialized struct k_thread
* @param pStack Pointer to the stack space.
* @param stackSize Stack size in bytes.
* @param entry Thread entry function.
* @param p1 1st entry point parameter.
* @param p2 2nd entry point parameter.
* @param p3 3rd entry point parameter.
* @param prio Thread priority.
* @param options Thread options.
*/
void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *pStack,
size_t stackSize, k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, unsigned int options);
#ifdef CONFIG_USE_SWITCH
/**
* Cooperatively context switch
*
* The semantics of the two switch handles are entirely up to the architecture
* implementation, and are stored per thread in k_thread->switch_handle.
* The outgoing thread's switch handle pointer may be updated during this
* process.
*
* @param switch_to Incoming thread's switch handle
* @param switched_from Pointer to outgoing thread's switch handle, which
* may be updated.
*/
static inline void z_arch_switch(void *switch_to, void **switched_from);
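/* Conceptual call site in the scheduler (sketch; the switch_handle field is
 * the per-thread handle described above):
 *
 *   void *incoming = new_thread->switch_handle;
 *
 *   z_arch_switch(incoming, &old_thread->switch_handle);
 *   // when this call returns, old_thread has been switched back in
 */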
#else
/**
* Cooperatively context switch
*
* Must be called with interrupts locked with the provided key.
* This is the older-style context switching method, which is incompatible
* with SMP. New arch ports, either SMP or UP, are encouraged to implement
* z_arch_switch() instead.
*
* @param key Interrupt locking key
* @return If woken from blocking on some kernel object, the result of that
* blocking operation.
*/
int z_arch_swap(unsigned int key);
/**
* Set the return value for the specified thread.
*
* It is assumed that the specified @a thread is pending.
*
* @param thread Pointer to thread object
* @param value value to set as return value
*/
static ALWAYS_INLINE void
z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value);
#endif /* CONFIG_USE_SWITCH */
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
/**
* Custom logic for entering main thread context at early boot
*
* Used by architectures where the typical trick of setting up a dummy thread
* in early boot context to "switch out" of isn't workable.
*
* @param main_thread main thread object
* @param main_stack main thread's stack object
* @param main_stack_size Size of the stack object's buffer
* @param _main Entry point for application main function.
*/
void z_arch_switch_to_main_thread(struct k_thread *main_thread,
k_thread_stack_t *main_stack,
size_t main_stack_size,
k_thread_entry_t _main);
#endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */
#if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
/**
* @brief Disable floating point context preservation
*
* The function is used to disable the preservation of floating
* point context information for a particular thread.
*
* @note For ARM architecture, disabling floating point preservation may only
* be requested for the current thread and cannot be requested in ISRs.
*
* @retval 0 On success.
* @retval -EINVAL If the floating point disabling could not be performed.
*/
int z_arch_float_disable(struct k_thread *thread);
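/* Illustrative call, disabling FP context preservation for the calling
 * thread (the only case ARM permits, per the note above):
 *
 *   int ret = z_arch_float_disable(k_current_get());
 *
 *   if (ret == -EINVAL) {
 *           // not supported, or requested from an unsupported context
 *   }
 */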
#endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */
/** @} */
/**
* @defgroup arch-pm Architecture-specific power management APIs
* @{
*/
/** Halt the system, optionally propagating a reason code */
FUNC_NORETURN void z_arch_system_halt(unsigned int reason);
/** @} */
/**
* @defgroup arch-smp Architecture-specific SMP APIs
* @{
*/
#ifdef CONFIG_SMP
/** Return the CPU struct for the currently executing CPU */
static inline struct _cpu *z_arch_curr_cpu(void);
/**
* Broadcast an interrupt to all CPUs
*
* This will invoke z_sched_ipi() on other CPUs in the system.
*/
void z_arch_sched_ipi(void);
#endif /* CONFIG_SMP */
/** @} */
/**
* @defgroup arch-irq Architecture-specific IRQ APIs
* @{
*/
/**
* Test if the current context is in interrupt context
*
* XXX: This is inconsistently handled among arches wrt exception context
* See: #17656
*
* @return true if we are in interrupt context
*/
static inline bool z_arch_is_in_isr(void);
/** @} */
/**
* @defgroup arch-userspace Architecture-specific userspace APIs
* @{
*/
#ifdef CONFIG_USERSPACE
/**
* @brief Get the maximum number of partitions for a memory domain
*
* @return Max number of partitions, or -1 if there is no limit
*/
int z_arch_mem_domain_max_partitions_get(void);
/**
* @brief Add a thread to a memory domain (arch-specific)
*
* Architecture-specific hook to manage internal data structures or hardware
* state when the provided thread has been added to a memory domain.
*
* The thread's memory domain pointer will be set to the domain to be added
* to.
*
* @param thread Thread which needs to be configured.
*/
void z_arch_mem_domain_thread_add(struct k_thread *thread);
/**
* @brief Remove a thread from a memory domain (arch-specific)
*
* Architecture-specific hook to manage internal data structures or hardware
* state when the provided thread has been removed from a memory domain.
*
* The thread's memory domain pointer will be the domain that the thread
* is being removed from.
*
* @param thread Thread being removed from its memory domain
*/
void z_arch_mem_domain_thread_remove(struct k_thread *thread);
/**
* @brief Remove a partition from the memory domain (arch-specific)
*
* Architecture-specific hook to manage internal data structures or hardware
* state when a memory domain has had a partition removed.
*
* The partition index data, and the number of partitions configured, are not
* respectively cleared and decremented in the domain until after this function
* runs.
*
* @param domain The memory domain structure
* @param partition_id The partition index that needs to be deleted
*/
void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
u32_t partition_id);
/**
* @brief Add a partition to the memory domain
*
* Architecture-specific hook to manage internal data structures or hardware
* state when a memory domain has a partition added.
*
* @param domain The memory domain structure
* @param partition_id The partition that needs to be added
*/
void z_arch_mem_domain_partition_add(struct k_mem_domain *domain,
u32_t partition_id);
/**
* @brief Remove the memory domain
*
* Architecture-specific hook to manage internal data structures or hardware
* state when a memory domain has been destroyed.
*
* Thread assignments to the memory domain are only cleared after this function
* runs.
*
* @param domain The memory domain structure which needs to be deleted.
*/
void z_arch_mem_domain_destroy(struct k_mem_domain *domain);
/**
* @brief Check memory region permissions
*
* Given a memory region, return whether the current memory management hardware
* configuration would allow a user thread to read/write that region. Used by
* system calls to validate buffers coming in from userspace.
*
* Notes:
 * The function is guaranteed to never return validation success if the entire
* buffer area is not user accessible.
*
* The function is guaranteed to correctly validate the permissions of the
* supplied buffer, if the user access permissions of the entire buffer are
* enforced by a single, enabled memory management region.
*
* In some architectures the validation will always return failure
* if the supplied memory buffer spans multiple enabled memory management
* regions (even if all such regions permit user access).
*
* @param addr start address of the buffer
* @param size the size of the buffer
* @param write If nonzero, additionally check if the area is writable.
* Otherwise, just check if the memory can be read.
*
* @return nonzero if the permissions don't match.
*/
int z_arch_buffer_validate(void *addr, size_t size, int write);
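/* Typical use in system call handling (sketch): reject a user-supplied
 * buffer before touching it. 'ssf' is the syscall stack frame described
 * below, and user_buf/len/kernel_data are hypothetical:
 *
 *   if (z_arch_buffer_validate(user_buf, len, 1) != 0) {
 *           z_arch_syscall_oops(ssf);       // does not return
 *   }
 *   memcpy(user_buf, kernel_data, len);     // now safe to write
 */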
/**
 * Perform a one-way transition from supervisor to user mode.
*
* Implementations of this function must do the following:
*
* - Reset the thread's stack pointer to a suitable initial value. We do not
* need any prior context since this is a one-way operation.
* - Set up any kernel stack region for the CPU to use during privilege
* elevation
* - Put the CPU in whatever its equivalent of user mode is
* - Transfer execution to z_arch_new_thread() passing along all the supplied
* arguments, in user mode.
*
* @param user_entry Entry point to start executing as a user thread
* @param p1 1st parameter to user thread
* @param p2 2nd parameter to user thread
* @param p3 3rd parameter to user thread
*/
FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3);
/**
* @brief Induce a kernel oops that appears to come from a specific location
*
* Normally, k_oops() generates an exception that appears to come from the
* call site of the k_oops() itself.
*
* However, when validating arguments to a system call, if there are problems
* we want the oops to appear to come from where the system call was invoked
* and not inside the validation function.
*
* @param ssf System call stack frame pointer. This gets passed as an argument
* to _k_syscall_handler_t functions and its contents are completely
* architecture specific.
*/
FUNC_NORETURN void z_arch_syscall_oops(void *ssf);
/**
* @brief Safely take the length of a potentially bad string
*
 * This must not fault; instead, the err parameter must have -1 written to it.
* This function otherwise should work exactly like libc strnlen(). On success
* *err should be set to 0.
*
* @param s String to measure
* @param maxsize Max length of the string
* @param err Error value to write
* @return Length of the string, not counting NULL byte, up to maxsize
*/
size_t z_arch_user_string_nlen(const char *s, size_t maxsize, int *err);
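/* Caller-side sketch: a syscall handler measuring an untrusted string
 * (user_str and max_len are hypothetical):
 *
 *   int err;
 *   size_t len = z_arch_user_string_nlen(user_str, max_len, &err);
 *
 *   if (err != 0) {
 *           // the string's memory was not accessible; reject the call
 *   }
 */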
#endif /* CONFIG_USERSPACE */
/** @} */
/**
* @defgroup arch-benchmarking Architecture-specific benchmarking globals
*/
#ifdef CONFIG_EXECUTION_BENCHMARKING
extern u64_t z_arch_timing_swap_start;
extern u64_t z_arch_timing_swap_end;
extern u64_t z_arch_timing_irq_start;
extern u64_t z_arch_timing_irq_end;
extern u64_t z_arch_timing_tick_start;
extern u64_t z_arch_timing_tick_end;
extern u64_t z_arch_timing_user_mode_end;
extern u32_t z_arch_timing_value_swap_end;
extern u64_t z_arch_timing_value_swap_common;
extern u64_t z_arch_timing_value_swap_temp;
#endif /* CONFIG_EXECUTION_BENCHMARKING */
/** @} */
/**
* @defgroup arch-misc Miscellaneous architecture APIs
 * @{
 */
/**
* Architecture-specific kernel initialization hook
*
* This function is invoked near the top of _Cstart, for additional
* architecture-specific setup before the rest of the kernel is brought up.
*
* TODO: Deprecate, most arches are using a prep_c() function to do the same
* thing in a simpler way
*/
static inline void z_arch_kernel_init(void);
/** Do nothing and return. Yawn. */
static inline void z_arch_nop(void);
/** @} */
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_ */


@ -52,176 +52,33 @@ extern void z_setup_new_thread(struct k_thread *new_thread,
			       int prio, u32_t options, const char *name);
#if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
/**
* @brief Disable floating point context preservation
*
* The function is used to disable the preservation of floating
* point context information for a particular thread.
*
* @note
* For ARM architecture, disabling floating point preservation
* - may only be requested for the current thread
* - cannot be requested in ISRs.
*
* @retval 0 On success.
* @retval -EINVAL If the floating point disabling could not be performed.
*/
extern int z_arch_float_disable(struct k_thread *thread);
#endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */
#ifdef CONFIG_USERSPACE
/**
* @brief Get the maximum number of partitions for a memory domain
*
* @return Max number of partitions, or -1 if there is no limit
*/
extern int z_arch_mem_domain_max_partitions_get(void);
/**
* @brief Add a thread to a memory domain (arch-specific)
*
* Architecture-specific hook to manage internal data structures or hardware
* state when the provided thread has been added to a memory domain.
*
* The thread's memory domain pointer will be set to the domain to be added
* to.
*
* @param thread Thread which needs to be configured.
*/
extern void z_arch_mem_domain_thread_add(struct k_thread *thread);
/**
* @brief Remove a thread from a memory domain (arch-specific)
*
* Architecture-specific hook to manage internal data structures or hardware
* state when the provided thread has been removed from a memory domain.
*
* The thread's memory domain pointer will be the domain that the thread
* is being removed from.
*
* @param thread Thread being removed from its memory domain
*/
extern void z_arch_mem_domain_thread_remove(struct k_thread *thread);
/**
* @brief Remove a partition from the memory domain (arch-specific)
*
* Architecture-specific hook to manage internal data structures or hardware
* state when a memory domain has had a partition removed.
*
* The partition index data, and the number of partitions configured, are not
* respectively cleared and decremented in the domain until after this function
* runs.
*
* @param domain The memory domain structure
* @param partition_id The partition index that needs to be deleted
*/
extern void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
					       u32_t partition_id);
/**
* @brief Add a partition to the memory domain
*
* Architecture-specific hook to manage internal data structures or hardware
* state when a memory domain has a partition added.
*
* @param domain The memory domain structure
* @param partition_id The partition that needs to be added
*/
extern void z_arch_mem_domain_partition_add(struct k_mem_domain *domain,
					    u32_t partition_id);
/**
* @brief Remove the memory domain
*
* Architecture-specific hook to manage internal data structures or hardware
* state when a memory domain has been destroyed.
*
* Thread assignments to the memory domain are only cleared after this function
* runs.
*
* @param domain The memory domain structure which needs to be deleted.
*/
extern void z_arch_mem_domain_destroy(struct k_mem_domain *domain);
/**
* @brief Check memory region permissions
*
* Given a memory region, return whether the current memory management hardware
* configuration would allow a user thread to read/write that region. Used by
* system calls to validate buffers coming in from userspace.
*
* Notes:
* The function is guaranteed to never return validation success, if the entire
* buffer area is not user accessible.
*
* The function is guaranteed to correctly validate the permissions of the
* supplied buffer, if the user access permissions of the entire buffer are
* enforced by a single, enabled memory management region.
*
* In some architectures the validation will always return failure
* if the supplied memory buffer spans multiple enabled memory management
* regions (even if all such regions permit user access).
*
* @param addr start address of the buffer
* @param size the size of the buffer
* @param write If nonzero, additionally check if the area is writable.
* Otherwise, just check if the memory can be read.
*
* @return nonzero if the permissions don't match.
*/
extern int z_arch_buffer_validate(void *addr, size_t size, int write);
/**
* Perform a one-way transition from supervisor to kernel mode.
*
* Implementations of this function must do the following:
* - Reset the thread's stack pointer to a suitable initial value. We do not
* need any prior context since this is a one-way operation.
* - Set up any kernel stack region for the CPU to use during privilege
* elevation
* - Put the CPU in whatever its equivalent of user mode is
* - Transfer execution to z_arch_new_thread() passing along all the supplied
* arguments, in user mode.
*
* @param Entry point to start executing as a user thread
* @param p1 1st parameter to user thread
* @param p2 2nd parameter to user thread
* @param p3 3rd parameter to user thread
*/
extern FUNC_NORETURN
void z_arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, void *p2,
			    void *p3);
/**
* @brief Induce a kernel oops that appears to come from a specific location
*
* Normally, k_oops() generates an exception that appears to come from the
* call site of the k_oops() itself.
*
* However, when validating arguments to a system call, if there are problems
* we want the oops to appear to come from where the system call was invoked
* and not inside the validation function.
*
* @param ssf System call stack frame pointer. This gets passed as an argument
* to _k_syscall_handler_t functions and its contents are completely
* architecture specific.
*/
extern FUNC_NORETURN void z_arch_syscall_oops(void *ssf);
/**
* @brief Safely take the length of a potentially bad string
*
* This must not fault, instead the err parameter must have -1 written to it.
* This function otherwise should work exactly like libc strnlen(). On success
* *err should be set to 0.
*
* @param s String to measure
* @param maxlen Max length of the string
* @param err Error value to write
* @return Length of the string, not counting NULL byte, up to maxsize
*/
extern size_t z_arch_user_string_nlen(const char *s, size_t maxsize, int *err);
/**
@ -277,9 +134,6 @@ extern void z_arch_busy_wait(u32_t usec_to_wait);
int z_arch_swap(unsigned int key);
/**
* TODO: document
*/
extern FUNC_NORETURN void z_arch_system_halt(unsigned int reason);
#ifdef CONFIG_EXECUTION_BENCHMARKING
@ -290,8 +144,6 @@ extern u64_t z_arch_timing_irq_end;
extern u64_t z_arch_timing_tick_start;
extern u64_t z_arch_timing_tick_end;
extern u64_t z_arch_timing_user_mode_end;
/* FIXME: Document. Temporary storage, seems x86 specific? */
extern u32_t z_arch_timing_value_swap_end;
extern u64_t z_arch_timing_value_swap_common;
extern u64_t z_arch_timing_value_swap_temp;