arch: define struct arch_esf and deprecate z_arch_esf_t

Make `struct arch_esf` compulsory for all architectures by
declaring it in the `arch_interface.h` header.

After this commit, the `z_arch_esf_t` typedef is only used
internally to generate offsets and is slated to be removed
from the `arch_interface.h` header in the future.

Signed-off-by: Yong Cong Sin <ycsin@meta.com>
Authored by Yong Cong Sin on 2024-06-01 00:07:14 +08:00, committed by Maureen Helm
commit e54b27b967
105 changed files with 203 additions and 222 deletions
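
For orientation, the shape of the change is sketched below; this is a minimal summary assembled from the hunks that follow, not a verbatim copy of any one file:

    /* arch_interface.h: the frame type is now forward-declared for every architecture */
    struct arch_esf;

    /* transitional alias, marked deprecated and slated for later removal */
    __deprecated typedef struct arch_esf z_arch_esf_t;

    /* call sites switch from the typedef to the named struct, for example: */
    void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);

Per-architecture headers drop their local `typedef struct arch_esf z_arch_esf_t;`, while the IA-32 offsets generator keeps using the typedef internally for now.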

@@ -23,7 +23,7 @@
 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 #ifdef CONFIG_EXCEPTION_DEBUG
-static void dump_arc_esf(const z_arch_esf_t *esf)
+static void dump_arc_esf(const struct arch_esf *esf)
 {
 ARC_EXCEPTION_DUMP(" r0: 0x%" PRIxPTR " r1: 0x%" PRIxPTR " r2: 0x%" PRIxPTR
 " r3: 0x%" PRIxPTR "", esf->r0, esf->r1, esf->r2, esf->r3);
@@ -42,7 +42,7 @@ static void dump_arc_esf(const z_arch_esf_t *esf)
 }
 #endif
-void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
+void z_arc_fatal_error(unsigned int reason, const struct arch_esf *esf)
 {
 #ifdef CONFIG_EXCEPTION_DEBUG
 if (esf != NULL) {

@@ -346,7 +346,7 @@ static void dump_exception_info(uint32_t vector, uint32_t cause, uint32_t parame
 * invokes the user provided routine k_sys_fatal_error_handler() which is
 * responsible for implementing the error handling policy.
 */
-void _Fault(z_arch_esf_t *esf, uint32_t old_sp)
+void _Fault(struct arch_esf *esf, uint32_t old_sp)
 {
 uint32_t vector, cause, parameter;
 uint32_t exc_addr = z_arc_v2_aux_reg_read(_ARC_V2_EFA);

@@ -62,7 +62,7 @@ extern void z_arc_userspace_enter(k_thread_entry_t user_entry, void *p1,
 void *p2, void *p3, uint32_t stack, uint32_t size,
 struct k_thread *thread);
-extern void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
+extern void z_arc_fatal_error(unsigned int reason, const struct arch_esf *esf);
 extern void arch_sched_ipi(void);

@@ -206,7 +206,7 @@ bool z_arm_fault_undef_instruction_fp(void)
 *
 * @return Returns true if the fault is fatal
 */
-bool z_arm_fault_undef_instruction(z_arch_esf_t *esf)
+bool z_arm_fault_undef_instruction(struct arch_esf *esf)
 {
 #if defined(CONFIG_FPU_SHARING)
 /*
@@ -243,7 +243,7 @@ bool z_arm_fault_undef_instruction(z_arch_esf_t *esf)
 *
 * @return Returns true if the fault is fatal
 */
-bool z_arm_fault_prefetch(z_arch_esf_t *esf)
+bool z_arm_fault_prefetch(struct arch_esf *esf)
 {
 uint32_t reason = K_ERR_CPU_EXCEPTION;
@@ -299,7 +299,7 @@ static const struct z_exc_handle exceptions[] = {
 *
 * @return true if error is recoverable, otherwise return false.
 */
-static bool memory_fault_recoverable(z_arch_esf_t *esf)
+static bool memory_fault_recoverable(struct arch_esf *esf)
 {
 for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
 /* Mask out instruction mode */
@@ -321,7 +321,7 @@ static bool memory_fault_recoverable(z_arch_esf_t *esf)
 *
 * @return Returns true if the fault is fatal
 */
-bool z_arm_fault_data(z_arch_esf_t *esf)
+bool z_arm_fault_data(struct arch_esf *esf)
 {
 uint32_t reason = K_ERR_CPU_EXCEPTION;

@@ -71,7 +71,7 @@ void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
 }
 #endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
-void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
+void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);
 /**
 *

@@ -41,7 +41,7 @@ struct arm_arch_block {
 */
 static struct arm_arch_block arch_blk;
-void arch_coredump_info_dump(const z_arch_esf_t *esf)
+void arch_coredump_info_dump(const struct arch_esf *esf)
 {
 struct coredump_arch_hdr_t hdr = {
 .id = COREDUMP_ARCH_HDR_ID,

@@ -146,7 +146,7 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 */
 #if (CONFIG_FAULT_DUMP == 1)
-static void fault_show(const z_arch_esf_t *esf, int fault)
+static void fault_show(const struct arch_esf *esf, int fault)
 {
 PR_EXC("Fault! EXC #%d", fault);
@@ -165,7 +165,7 @@ static void fault_show(const z_arch_esf_t *esf, int fault)
 *
 * For Dump level 0, no information needs to be generated.
 */
-static void fault_show(const z_arch_esf_t *esf, int fault)
+static void fault_show(const struct arch_esf *esf, int fault)
 {
 (void)esf;
 (void)fault;
@@ -185,7 +185,7 @@ static const struct z_exc_handle exceptions[] = {
 *
 * @return true if error is recoverable, otherwise return false.
 */
-static bool memory_fault_recoverable(z_arch_esf_t *esf, bool synchronous)
+static bool memory_fault_recoverable(struct arch_esf *esf, bool synchronous)
 {
 #ifdef CONFIG_USERSPACE
 for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
@@ -228,7 +228,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr,
 *
 * @return error code to identify the fatal error reason
 */
-static uint32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault,
+static uint32_t mem_manage_fault(struct arch_esf *esf, int from_hard_fault,
 bool *recoverable)
 {
 uint32_t reason = K_ERR_ARM_MEM_GENERIC;
@@ -387,7 +387,7 @@ static uint32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault,
 * @return error code to identify the fatal error reason.
 *
 */
-static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable)
+static int bus_fault(struct arch_esf *esf, int from_hard_fault, bool *recoverable)
 {
 uint32_t reason = K_ERR_ARM_BUS_GENERIC;
@@ -549,7 +549,7 @@ static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable)
 *
 * @return error code to identify the fatal error reason
 */
-static uint32_t usage_fault(const z_arch_esf_t *esf)
+static uint32_t usage_fault(const struct arch_esf *esf)
 {
 uint32_t reason = K_ERR_ARM_USAGE_GENERIC;
@@ -612,7 +612,7 @@ static uint32_t usage_fault(const z_arch_esf_t *esf)
 *
 * @return error code to identify the fatal error reason
 */
-static uint32_t secure_fault(const z_arch_esf_t *esf)
+static uint32_t secure_fault(const struct arch_esf *esf)
 {
 uint32_t reason = K_ERR_ARM_SECURE_GENERIC;
@@ -661,7 +661,7 @@ static uint32_t secure_fault(const z_arch_esf_t *esf)
 * See z_arm_fault_dump() for example.
 *
 */
-static void debug_monitor(z_arch_esf_t *esf, bool *recoverable)
+static void debug_monitor(struct arch_esf *esf, bool *recoverable)
 {
 *recoverable = false;
@@ -687,7 +687,7 @@ static void debug_monitor(z_arch_esf_t *esf, bool *recoverable)
 #error Unknown ARM architecture
 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
-static inline bool z_arm_is_synchronous_svc(z_arch_esf_t *esf)
+static inline bool z_arm_is_synchronous_svc(struct arch_esf *esf)
 {
 uint16_t *ret_addr = (uint16_t *)esf->basic.pc;
 /* SVC is a 16-bit instruction. On a synchronous SVC
@@ -762,7 +762,7 @@ static inline bool z_arm_is_pc_valid(uintptr_t pc)
 *
 * @return error code to identify the fatal error reason
 */
-static uint32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
+static uint32_t hard_fault(struct arch_esf *esf, bool *recoverable)
 {
 uint32_t reason = K_ERR_CPU_EXCEPTION;
@@ -829,7 +829,7 @@ static uint32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
 * See z_arm_fault_dump() for example.
 *
 */
-static void reserved_exception(const z_arch_esf_t *esf, int fault)
+static void reserved_exception(const struct arch_esf *esf, int fault)
 {
 ARG_UNUSED(esf);
@@ -839,7 +839,7 @@ static void reserved_exception(const z_arch_esf_t *esf, int fault)
 }
 /* Handler function for ARM fault conditions. */
-static uint32_t fault_handle(z_arch_esf_t *esf, int fault, bool *recoverable)
+static uint32_t fault_handle(struct arch_esf *esf, int fault, bool *recoverable)
 {
 uint32_t reason = K_ERR_CPU_EXCEPTION;
@@ -893,7 +893,7 @@ static uint32_t fault_handle(z_arch_esf_t *esf, int fault, bool *recoverable)
 *
 * @param secure_esf Pointer to the secure stack frame.
 */
-static void secure_stack_dump(const z_arch_esf_t *secure_esf)
+static void secure_stack_dump(const struct arch_esf *secure_esf)
 {
 /*
 * In case a Non-Secure exception interrupted the Secure
@@ -918,7 +918,7 @@ static void secure_stack_dump(const z_arch_esf_t *secure_esf)
 * Non-Secure exception entry.
 */
 top_of_sec_stack += ADDITIONAL_STATE_CONTEXT_WORDS;
-secure_esf = (const z_arch_esf_t *)top_of_sec_stack;
+secure_esf = (const struct arch_esf *)top_of_sec_stack;
 sec_ret_addr = secure_esf->basic.pc;
 } else {
 /* Exception during Non-Secure function call.
@@ -947,11 +947,11 @@ static void secure_stack_dump(const z_arch_esf_t *secure_esf)
 *
 * @return ESF pointer on success, otherwise return NULL
 */
-static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return,
+static inline struct arch_esf *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return,
 bool *nested_exc)
 {
 bool alternative_state_exc = false;
-z_arch_esf_t *ptr_esf = NULL;
+struct arch_esf *ptr_esf = NULL;
 *nested_exc = false;
@@ -979,14 +979,14 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret
 alternative_state_exc = true;
 /* Dump the Secure stack before handling the actual fault. */
-z_arch_esf_t *secure_esf;
+struct arch_esf *secure_esf;
 if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
 /* Secure stack pointed by PSP */
-secure_esf = (z_arch_esf_t *)psp;
+secure_esf = (struct arch_esf *)psp;
 } else {
 /* Secure stack pointed by MSP */
-secure_esf = (z_arch_esf_t *)msp;
+secure_esf = (struct arch_esf *)msp;
 *nested_exc = true;
 }
@@ -997,9 +997,9 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret
 * and supply it to the fault handing function.
 */
 if (exc_return & EXC_RETURN_MODE_THREAD) {
-ptr_esf = (z_arch_esf_t *)__TZ_get_PSP_NS();
+ptr_esf = (struct arch_esf *)__TZ_get_PSP_NS();
 } else {
-ptr_esf = (z_arch_esf_t *)__TZ_get_MSP_NS();
+ptr_esf = (struct arch_esf *)__TZ_get_MSP_NS();
 }
 }
 #elif defined(CONFIG_ARM_NONSECURE_FIRMWARE)
@@ -1024,10 +1024,10 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret
 if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
 /* Non-Secure stack frame on PSP */
-ptr_esf = (z_arch_esf_t *)psp;
+ptr_esf = (struct arch_esf *)psp;
 } else {
 /* Non-Secure stack frame on MSP */
-ptr_esf = (z_arch_esf_t *)msp;
+ptr_esf = (struct arch_esf *)msp;
 }
 } else {
 /* Exception entry occurred in Non-Secure stack. */
@@ -1046,11 +1046,11 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret
 if (!alternative_state_exc) {
 if (exc_return & EXC_RETURN_MODE_THREAD) {
 /* Returning to thread mode */
-ptr_esf = (z_arch_esf_t *)psp;
+ptr_esf = (struct arch_esf *)psp;
 } else {
 /* Returning to handler mode */
-ptr_esf = (z_arch_esf_t *)msp;
+ptr_esf = (struct arch_esf *)msp;
 *nested_exc = true;
 }
 }
@@ -1095,12 +1095,12 @@ void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return,
 uint32_t reason = K_ERR_CPU_EXCEPTION;
 int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
 bool recoverable, nested_exc;
-z_arch_esf_t *esf;
+struct arch_esf *esf;
 /* Create a stack-ed copy of the ESF to be used during
 * the fault handling process.
 */
-z_arch_esf_t esf_copy;
+struct arch_esf esf_copy;
 /* Force unlock interrupts */
 arch_irq_unlock(0);
@@ -1123,13 +1123,13 @@ void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return,
 /* Copy ESF */
 #if !defined(CONFIG_EXTRA_EXCEPTION_INFO)
-memcpy(&esf_copy, esf, sizeof(z_arch_esf_t));
+memcpy(&esf_copy, esf, sizeof(struct arch_esf));
 ARG_UNUSED(callee_regs);
 #else
 /* the extra exception info is not present in the original esf
 * so we only copy the fields before those.
 */
-memcpy(&esf_copy, esf, offsetof(z_arch_esf_t, extra_info));
+memcpy(&esf_copy, esf, offsetof(struct arch_esf, extra_info));
 esf_copy.extra_info = (struct __extra_esf_info) {
 .callee = callee_regs,
 .exc_return = exc_return,

@@ -94,7 +94,7 @@ void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
 #endif /* !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER) */
-void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
+void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);
 /**
 *

@@ -18,7 +18,7 @@
 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 #ifdef CONFIG_EXCEPTION_DEBUG
-static void esf_dump(const z_arch_esf_t *esf)
+static void esf_dump(const struct arch_esf *esf)
 {
 LOG_ERR("r0/a1: 0x%08x r1/a2: 0x%08x r2/a3: 0x%08x",
 esf->basic.a1, esf->basic.a2, esf->basic.a3);
@@ -66,7 +66,7 @@ static void esf_dump(const z_arch_esf_t *esf)
 }
 #endif /* CONFIG_EXCEPTION_DEBUG */
-void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
+void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf)
 {
 #ifdef CONFIG_EXCEPTION_DEBUG
 if (esf != NULL) {
@@ -102,7 +102,7 @@ void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
 * @param esf exception frame
 * @param callee_regs Callee-saved registers (R4-R11)
 */
-void z_do_kernel_oops(const z_arch_esf_t *esf, _callee_saved_t *callee_regs)
+void z_do_kernel_oops(const struct arch_esf *esf, _callee_saved_t *callee_regs)
 {
 #if !(defined(CONFIG_EXTRA_EXCEPTION_INFO) && defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE))
 ARG_UNUSED(callee_regs);
@@ -130,9 +130,9 @@ void z_do_kernel_oops(const z_arch_esf_t *esf, _callee_saved_t *callee_regs)
 #if !defined(CONFIG_EXTRA_EXCEPTION_INFO)
 z_arm_fatal_error(reason, esf);
 #else
-z_arch_esf_t esf_copy;
+struct arch_esf esf_copy;
-memcpy(&esf_copy, esf, offsetof(z_arch_esf_t, extra_info));
+memcpy(&esf_copy, esf, offsetof(struct arch_esf, extra_info));
 #if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
 /* extra exception info is collected in callee_reg param
 * on CONFIG_ARMV7_M_ARMV8_M_MAINLINE
@@ -156,7 +156,7 @@ void z_do_kernel_oops(const z_arch_esf_t *esf, _callee_saved_t *callee_regs)
 FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
 {
 uint32_t *ssf_contents = ssf_ptr;
-z_arch_esf_t oops_esf = { 0 };
+struct arch_esf oops_esf = { 0 };
 /* TODO: Copy the rest of the register set out of ssf_ptr */
 oops_esf.basic.pc = ssf_contents[3];

@@ -42,7 +42,7 @@ static int is_bkpt(unsigned int exc_cause)
 }
 /* Wrapper function to save and restore execution c */
-void z_gdb_entry(z_arch_esf_t *esf, unsigned int exc_cause)
+void z_gdb_entry(struct arch_esf *esf, unsigned int exc_cause)
 {
 /* Disable the hardware breakpoint in case it was set */
 __asm__ volatile("mcr p14, 0, %0, c0, c0, 5" ::"r"(0x0) :);

@@ -38,7 +38,7 @@ static ALWAYS_INLINE bool arch_is_in_isr(void)
 return (arch_curr_cpu()->nested != 0U);
 }
-static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf)
+static ALWAYS_INLINE bool arch_is_in_nested_exception(const struct arch_esf *esf)
 {
 return (arch_curr_cpu()->arch.exc_depth > 1U) ? (true) : (false);
 }
@@ -48,7 +48,7 @@ static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf)
 * This function is used by privileged code to determine if the thread
 * associated with the stack frame is in user mode.
 */
-static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const z_arch_esf_t *esf)
+static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const struct arch_esf *esf)
 {
 return ((esf->basic.xpsr & CPSR_M_Msk) == CPSR_M_USR);
 }

@@ -59,7 +59,7 @@ extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
 uint32_t stack_end,
 uint32_t stack_start);
-extern void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
+extern void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);
 #endif /* _ASMLANGUAGE */

@@ -68,7 +68,7 @@ static ALWAYS_INLINE bool arch_is_in_isr(void)
 * @return true if execution state was in handler mode, before
 * the current exception occurred, otherwise false.
 */
-static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf)
+static ALWAYS_INLINE bool arch_is_in_nested_exception(const struct arch_esf *esf)
 {
 return (esf->basic.xpsr & IPSR_ISR_Msk) ? (true) : (false);
 }
@@ -80,7 +80,7 @@ static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf)
 * @param esf the exception stack frame (unused)
 * @return true if the current thread was in unprivileged mode
 */
-static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const z_arch_esf_t *esf)
+static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const struct arch_esf *esf)
 {
 return z_arm_thread_is_in_user_mode();
 }

@@ -76,7 +76,7 @@ extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
 uint32_t stack_end,
 uint32_t stack_start);
-extern void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
+extern void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);
 #endif /* _ASMLANGUAGE */

@@ -13,7 +13,7 @@
 #define ARCH_HDR_VER 1
 /* Structure to store the architecture registers passed arch_coredump_info_dump
-* As callee saved registers are not provided in z_arch_esf_t structure in Zephyr
+* As callee saved registers are not provided in struct arch_esf structure in Zephyr
 * we just need 22 registers.
 */
 struct arm64_arch_block {
@@ -50,7 +50,7 @@ struct arm64_arch_block {
 */
 static struct arm64_arch_block arch_blk;
-void arch_coredump_info_dump(const z_arch_esf_t *esf)
+void arch_coredump_info_dump(const struct arch_esf *esf)
 {
 /* Target architecture information header */
 /* Information just relevant to the python parser */
@@ -69,7 +69,7 @@ void arch_coredump_info_dump(const z_arch_esf_t *esf)
 /*
 * Copies the thread registers to a memory block that will be printed out
-* The thread registers are already provided by structure z_arch_esf_t
+* The thread registers are already provided by structure struct arch_esf
 */
 arch_blk.r.x0 = esf->x0;
 arch_blk.r.x1 = esf->x1;

@@ -181,7 +181,7 @@ static void dump_esr(uint64_t esr, bool *dump_far)
 LOG_ERR(" ISS: 0x%llx", GET_ESR_ISS(esr));
 }
-static void esf_dump(const z_arch_esf_t *esf)
+static void esf_dump(const struct arch_esf *esf)
 {
 LOG_ERR("x0: 0x%016llx x1: 0x%016llx", esf->x0, esf->x1);
 LOG_ERR("x2: 0x%016llx x3: 0x%016llx", esf->x2, esf->x3);
@@ -196,7 +196,7 @@ static void esf_dump(const z_arch_esf_t *esf)
 }
 #ifdef CONFIG_EXCEPTION_STACK_TRACE
-static void esf_unwind(const z_arch_esf_t *esf)
+static void esf_unwind(const struct arch_esf *esf)
 {
 /*
 * For GCC:
@@ -244,7 +244,7 @@ static void esf_unwind(const z_arch_esf_t *esf)
 #endif /* CONFIG_EXCEPTION_DEBUG */
 #ifdef CONFIG_ARM64_STACK_PROTECTION
-static bool z_arm64_stack_corruption_check(z_arch_esf_t *esf, uint64_t esr, uint64_t far)
+static bool z_arm64_stack_corruption_check(struct arch_esf *esf, uint64_t esr, uint64_t far)
 {
 uint64_t sp, sp_limit, guard_start;
 /* 0x25 means data abort from current EL */
@@ -284,7 +284,7 @@ static bool z_arm64_stack_corruption_check(z_arch_esf_t *esf, uint64_t esr, uint
 }
 #endif
-static bool is_recoverable(z_arch_esf_t *esf, uint64_t esr, uint64_t far,
+static bool is_recoverable(struct arch_esf *esf, uint64_t esr, uint64_t far,
 uint64_t elr)
 {
 if (!esf)
@@ -306,7 +306,7 @@ static bool is_recoverable(z_arch_esf_t *esf, uint64_t esr, uint64_t far,
 return false;
 }
-void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf)
+void z_arm64_fatal_error(unsigned int reason, struct arch_esf *esf)
 {
 uint64_t esr = 0;
 uint64_t elr = 0;
@@ -379,7 +379,7 @@ void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf)
 *
 * @param esf exception frame
 */
-void z_arm64_do_kernel_oops(z_arch_esf_t *esf)
+void z_arm64_do_kernel_oops(struct arch_esf *esf)
 {
 /* x8 holds the exception reason */
 unsigned int reason = esf->x8;

@@ -159,7 +159,7 @@ void z_arm64_fpu_enter_exc(void)
 * simulate them and leave the FPU access disabled. This also avoids the
 * need for disabling interrupts in syscalls and IRQ handlers as well.
 */
-static bool simulate_str_q_insn(z_arch_esf_t *esf)
+static bool simulate_str_q_insn(struct arch_esf *esf)
 {
 /*
 * Support only the "FP in exception" cases for now.
@@ -221,7 +221,7 @@ static bool simulate_str_q_insn(z_arch_esf_t *esf)
 * don't get interrupted that is. To ensure that we mask interrupts to
 * the triggering exception context.
 */
-void z_arm64_fpu_trap(z_arch_esf_t *esf)
+void z_arm64_fpu_trap(struct arch_esf *esf)
 {
 __ASSERT(read_daif() & DAIF_IRQ_BIT, "must be called with IRQs disabled");

@@ -18,7 +18,7 @@
 #include <zephyr/sw_isr_table.h>
 #include <zephyr/drivers/interrupt_controller/gic.h>
-void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf);
+void z_arm64_fatal_error(unsigned int reason, struct arch_esf *esf);
 #if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
 /*

@@ -87,7 +87,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 void *p1, void *p2, void *p3)
 {
 extern void z_arm64_exit_exc(void);
-z_arch_esf_t *pInitCtx;
+struct arch_esf *pInitCtx;
 /*
 * Clean the thread->arch to avoid unexpected behavior because the

@@ -43,7 +43,7 @@ static inline void arch_switch(void *switch_to, void **switched_from)
 z_arm64_context_switch(new, old);
 }
-extern void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf);
+extern void z_arm64_fatal_error(unsigned int reason, struct arch_esf *esf);
 extern void z_arm64_set_ttbr0(uint64_t ttbr0);
 extern void z_arm64_mem_cfg_ipi(void);

@@ -9,7 +9,7 @@
 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 FUNC_NORETURN void z_mips_fatal_error(unsigned int reason,
-const z_arch_esf_t *esf)
+const struct arch_esf *esf)
 {
 #ifdef CONFIG_EXCEPTION_DEBUG
 if (esf != NULL) {
@@ -84,7 +84,7 @@ static char *cause_str(unsigned long cause)
 }
 }
-void _Fault(z_arch_esf_t *esf)
+void _Fault(struct arch_esf *esf)
 {
 unsigned long cause;

@@ -35,7 +35,7 @@ arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 }
 FUNC_NORETURN void z_mips_fatal_error(unsigned int reason,
-const z_arch_esf_t *esf);
+const struct arch_esf *esf);
 static inline bool arch_is_in_isr(void)
 {

@@ -12,7 +12,7 @@
 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason,
-const z_arch_esf_t *esf)
+const struct arch_esf *esf)
 {
 #if CONFIG_EXCEPTION_DEBUG
 if (esf != NULL) {
@@ -102,7 +102,7 @@ static char *cause_str(uint32_t cause_code)
 }
 #endif
-FUNC_NORETURN void _Fault(const z_arch_esf_t *esf)
+FUNC_NORETURN void _Fault(const struct arch_esf *esf)
 {
 #if defined(CONFIG_PRINTK) || defined(CONFIG_LOG)
 /* Unfortunately, completely unavailable on Nios II/e cores */

@@ -39,7 +39,7 @@ arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 }
 FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason,
-const z_arch_esf_t *esf);
+const struct arch_esf *esf);
 static inline bool arch_is_in_isr(void)
 {

@@ -67,7 +67,7 @@ struct riscv_arch_block {
 */
 static struct riscv_arch_block arch_blk;
-void arch_coredump_info_dump(const z_arch_esf_t *esf)
+void arch_coredump_info_dump(const struct arch_esf *esf)
 {
 struct coredump_arch_hdr_t hdr = {
 .id = COREDUMP_ARCH_HDR_ID,

@@ -30,15 +30,15 @@ static const struct z_exc_handle exceptions[] = {
 #endif
 /* Stack trace function */
-void z_riscv_unwind_stack(const z_arch_esf_t *esf);
+void z_riscv_unwind_stack(const struct arch_esf *esf);
-uintptr_t z_riscv_get_sp_before_exc(const z_arch_esf_t *esf)
+uintptr_t z_riscv_get_sp_before_exc(const struct arch_esf *esf)
 {
 /*
 * Kernel stack pointer prior this exception i.e. before
 * storing the exception stack frame.
 */
-uintptr_t sp = (uintptr_t)esf + sizeof(z_arch_esf_t);
+uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf);
 #ifdef CONFIG_USERSPACE
 if ((esf->mstatus & MSTATUS_MPP) == PRV_U) {
@@ -54,12 +54,12 @@ uintptr_t z_riscv_get_sp_before_exc(const z_arch_esf_t *esf)
 }
 FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
-const z_arch_esf_t *esf)
+const struct arch_esf *esf)
 {
 z_riscv_fatal_error_csf(reason, esf, NULL);
 }
-FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const z_arch_esf_t *esf,
+FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const struct arch_esf *esf,
 const _callee_saved_t *csf)
 {
 #ifdef CONFIG_EXCEPTION_DEBUG
@@ -152,14 +152,14 @@ static char *cause_str(unsigned long cause)
 }
 }
-static bool bad_stack_pointer(z_arch_esf_t *esf)
+static bool bad_stack_pointer(struct arch_esf *esf)
 {
 #ifdef CONFIG_PMP_STACK_GUARD
 /*
 * Check if the kernel stack pointer prior this exception (before
 * storing the exception stack frame) was in the stack guard area.
 */
-uintptr_t sp = (uintptr_t)esf + sizeof(z_arch_esf_t);
+uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf);
 #ifdef CONFIG_USERSPACE
 if (_current->arch.priv_stack_start != 0 &&
@@ -197,7 +197,7 @@ static bool bad_stack_pointer(z_arch_esf_t *esf)
 return false;
 }
-void _Fault(z_arch_esf_t *esf)
+void _Fault(struct arch_esf *esf)
 {
 #ifdef CONFIG_USERSPACE
 /*
@@ -249,7 +249,7 @@ FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
 void z_impl_user_fault(unsigned int reason)
 {
-z_arch_esf_t *oops_esf = _current->syscall_frame;
+struct arch_esf *oops_esf = _current->syscall_frame;
 if (((_current->base.user_options & K_USER) != 0) &&
 reason != K_ERR_STACK_CHK_FAIL) {

@@ -204,7 +204,7 @@ void z_riscv_fpu_enter_exc(void)
 * Note that the exception depth count was not incremented before this call
 * as no further exceptions are expected before returning to normal mode.
 */
-void z_riscv_fpu_trap(z_arch_esf_t *esf)
+void z_riscv_fpu_trap(struct arch_esf *esf)
 {
 __ASSERT((esf->mstatus & MSTATUS_FS) == 0 &&
 (csr_read(mstatus) & MSTATUS_FS) == 0,
@@ -293,7 +293,7 @@ static bool fpu_access_allowed(unsigned int exc_update_level)
 * This is called on every exception exit except for z_riscv_fpu_trap().
 * In that case the exception level of interest is 1 (soon to be 0).
 */
-void z_riscv_fpu_exit_exc(z_arch_esf_t *esf)
+void z_riscv_fpu_exit_exc(struct arch_esf *esf)
 {
 if (fpu_access_allowed(1)) {
 esf->mstatus &= ~MSTATUS_FS;

@@ -12,7 +12,7 @@
 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
-uintptr_t z_riscv_get_sp_before_exc(const z_arch_esf_t *esf);
+uintptr_t z_riscv_get_sp_before_exc(const struct arch_esf *esf);
 #if __riscv_xlen == 32
 #define PR_REG "%08" PRIxPTR
@@ -42,7 +42,7 @@ struct stackframe {
 LOG_ERR(" %2d: " SFP_FMT PR_REG " ra: " PR_REG, idx, sfp, ra)
 #endif
-static bool in_stack_bound(uintptr_t addr, const z_arch_esf_t *esf)
+static bool in_stack_bound(uintptr_t addr, const struct arch_esf *esf)
 {
 #ifdef CONFIG_THREAD_STACK_INFO
 uintptr_t start, end;
@@ -86,7 +86,7 @@ static inline bool in_text_region(uintptr_t addr)
 }
 #ifdef CONFIG_FRAME_POINTER
-void z_riscv_unwind_stack(const z_arch_esf_t *esf)
+void z_riscv_unwind_stack(const struct arch_esf *esf)
 {
 uintptr_t fp = esf->s0;
 uintptr_t ra;
@@ -115,7 +115,7 @@ void z_riscv_unwind_stack(const z_arch_esf_t *esf)
 LOG_ERR("");
 }
 #else /* !CONFIG_FRAME_POINTER */
-void z_riscv_unwind_stack(const z_arch_esf_t *esf)
+void z_riscv_unwind_stack(const struct arch_esf *esf)
 {
 uintptr_t sp = z_riscv_get_sp_before_exc(esf);
 uintptr_t ra;

@@ -71,9 +71,9 @@ arch_switch(void *switch_to, void **switched_from)
 /* Thin wrapper around z_riscv_fatal_error_csf */
 FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
-const z_arch_esf_t *esf);
+const struct arch_esf *esf);
-FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const z_arch_esf_t *esf,
+FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const struct arch_esf *esf,
 const _callee_saved_t *csf);
 static inline bool arch_is_in_isr(void)

@@ -122,7 +122,7 @@ static const struct {
 { .tt = 0x0A, .desc = "tag_overflow", },
 };
-static void print_trap_type(const z_arch_esf_t *esf)
+static void print_trap_type(const struct arch_esf *esf)
 {
 const int tt = (esf->tbr & TBR_TT) >> TBR_TT_BIT;
 const char *desc = "unknown";
@@ -142,7 +142,7 @@ static void print_trap_type(const z_arch_esf_t *esf)
 LOG_ERR("tt = 0x%02X, %s", tt, desc);
 }
-static void print_integer_registers(const z_arch_esf_t *esf)
+static void print_integer_registers(const struct arch_esf *esf)
 {
 const struct savearea *flushed = (struct savearea *) esf->out[6];
@@ -159,7 +159,7 @@ static void print_integer_registers(const z_arch_esf_t *esf)
 }
 }
-static void print_special_registers(const z_arch_esf_t *esf)
+static void print_special_registers(const struct arch_esf *esf)
 {
 LOG_ERR(
 "psr: %08x wim: %08x tbr: %08x y: %08x",
@@ -168,7 +168,7 @@ static void print_special_registers(const z_arch_esf_t *esf)
 LOG_ERR(" pc: %08x npc: %08x", esf->pc, esf->npc);
 }
-static void print_backtrace(const z_arch_esf_t *esf)
+static void print_backtrace(const struct arch_esf *esf)
 {
 const int MAX_LOGLINES = 40;
 const struct savearea *s = (struct savearea *) esf->out[6];
@@ -190,7 +190,7 @@ static void print_backtrace(const z_arch_esf_t *esf)
 }
 }
-static void print_all(const z_arch_esf_t *esf)
+static void print_all(const struct arch_esf *esf)
 {
 LOG_ERR("");
 print_trap_type(esf);
@@ -205,7 +205,7 @@ static void print_all(const z_arch_esf_t *esf)
 #endif /* CONFIG_EXCEPTION_DEBUG */
 FUNC_NORETURN void z_sparc_fatal_error(unsigned int reason,
-const z_arch_esf_t *esf)
+const struct arch_esf *esf)
 {
 #if CONFIG_EXCEPTION_DEBUG
 if (esf != NULL) {

@@ -43,7 +43,7 @@ static inline void arch_switch(void *switch_to, void **switched_from)
 }
 FUNC_NORETURN void z_sparc_fatal_error(unsigned int reason,
-const z_arch_esf_t *esf);
+const struct arch_esf *esf);
 static inline bool arch_is_in_isr(void)
 {

@@ -35,7 +35,7 @@ FUNC_NORETURN void arch_system_halt(unsigned int reason)
 #ifdef CONFIG_THREAD_STACK_INFO
-static inline uintptr_t esf_get_sp(const z_arch_esf_t *esf)
+static inline uintptr_t esf_get_sp(const struct arch_esf *esf)
 {
 #ifdef CONFIG_X86_64
 return esf->rsp;
@@ -122,7 +122,7 @@ bool z_x86_check_guard_page(uintptr_t addr)
 #ifdef CONFIG_EXCEPTION_DEBUG
-static inline uintptr_t esf_get_code(const z_arch_esf_t *esf)
+static inline uintptr_t esf_get_code(const struct arch_esf *esf)
 {
 #ifdef CONFIG_X86_64
 return esf->code;
@@ -188,7 +188,7 @@ static void unwind_stack(uintptr_t base_ptr, uint16_t cs)
 }
 #endif /* CONFIG_EXCEPTION_STACK_TRACE */
-static inline uintptr_t get_cr3(const z_arch_esf_t *esf)
+static inline uintptr_t get_cr3(const struct arch_esf *esf)
 {
 #if defined(CONFIG_USERSPACE) && defined(CONFIG_X86_KPTI)
 /* If the interrupted thread was in user mode, we did a page table
@@ -206,14 +206,14 @@ static inline uintptr_t get_cr3(const z_arch_esf_t *esf)
 return z_x86_cr3_get();
 }
-static inline pentry_t *get_ptables(const z_arch_esf_t *esf)
+static inline pentry_t *get_ptables(const struct arch_esf *esf)
 {
 return z_mem_virt_addr(get_cr3(esf));
 }
 #ifdef CONFIG_X86_64
 __pinned_func
-static void dump_regs(const z_arch_esf_t *esf)
+static void dump_regs(const struct arch_esf *esf)
 {
 LOG_ERR("RAX: 0x%016lx RBX: 0x%016lx RCX: 0x%016lx RDX: 0x%016lx",
 esf->rax, esf->rbx, esf->rcx, esf->rdx);
@@ -236,7 +236,7 @@ static void dump_regs(const z_arch_esf_t *esf)
 }
 #else /* 32-bit */
 __pinned_func
-static void dump_regs(const z_arch_esf_t *esf)
+static void dump_regs(const struct arch_esf *esf)
 {
 LOG_ERR("EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x",
 esf->eax, esf->ebx, esf->ecx, esf->edx);
@@ -327,7 +327,7 @@ static void log_exception(uintptr_t vector, uintptr_t code)
 }
 __pinned_func
-static void dump_page_fault(z_arch_esf_t *esf)
+static void dump_page_fault(struct arch_esf *esf)
 {
 uintptr_t err;
 void *cr2;
@@ -362,7 +362,7 @@ static void dump_page_fault(z_arch_esf_t *esf)
 __pinned_func
 FUNC_NORETURN void z_x86_fatal_error(unsigned int reason,
-const z_arch_esf_t *esf)
+const struct arch_esf *esf)
 {
 if (esf != NULL) {
 #ifdef CONFIG_EXCEPTION_DEBUG
@@ -385,7 +385,7 @@ FUNC_NORETURN void z_x86_fatal_error(unsigned int reason,
 __pinned_func
 FUNC_NORETURN void z_x86_unhandled_cpu_exception(uintptr_t vector,
-const z_arch_esf_t *esf)
+const struct arch_esf *esf)
 {
 #ifdef CONFIG_EXCEPTION_DEBUG
 log_exception(vector, esf_get_code(esf));
@@ -404,7 +404,7 @@ static const struct z_exc_handle exceptions[] = {
 #endif
 __pinned_func
-void z_x86_page_fault_handler(z_arch_esf_t *esf)
+void z_x86_page_fault_handler(struct arch_esf *esf)
 {
 #ifdef CONFIG_DEMAND_PAGING
 if ((esf->errorCode & PF_P) == 0) {
@@ -488,7 +488,7 @@ void z_x86_page_fault_handler(z_arch_esf_t *esf)
 }
 __pinned_func
-void z_x86_do_kernel_oops(const z_arch_esf_t *esf)
+void z_x86_do_kernel_oops(const struct arch_esf *esf)
 {
 uintptr_t reason;

@@ -34,7 +34,7 @@ struct x86_arch_block {
 */
 static struct x86_arch_block arch_blk;
-void arch_coredump_info_dump(const z_arch_esf_t *esf)
+void arch_coredump_info_dump(const struct arch_esf *esf)
 {
 struct coredump_arch_hdr_t hdr = {
 .id = COREDUMP_ARCH_HDR_ID,

@@ -27,10 +27,10 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 unsigned int z_x86_exception_vector;
 #endif
-__weak void z_debug_fatal_hook(const z_arch_esf_t *esf) { ARG_UNUSED(esf); }
+__weak void z_debug_fatal_hook(const struct arch_esf *esf) { ARG_UNUSED(esf); }
 __pinned_func
-void z_x86_spurious_irq(const z_arch_esf_t *esf)
+void z_x86_spurious_irq(const struct arch_esf *esf)
 {
 int vector = z_irq_controller_isr_vector_get();
@@ -46,7 +46,7 @@ void arch_syscall_oops(void *ssf)
 {
 struct _x86_syscall_stack_frame *ssf_ptr =
 (struct _x86_syscall_stack_frame *)ssf;
-z_arch_esf_t oops = {
+struct arch_esf oops = {
 .eip = ssf_ptr->eip,
 .cs = ssf_ptr->cs,
 .eflags = ssf_ptr->eflags
@@ -66,7 +66,7 @@ NANO_CPU_INT_REGISTER(_kernel_oops_handler, NANO_SOFT_IRQ,
 #if CONFIG_EXCEPTION_DEBUG
 __pinned_func
 FUNC_NORETURN static void generic_exc_handle(unsigned int vector,
-const z_arch_esf_t *pEsf)
+const struct arch_esf *pEsf)
 {
 #ifdef CONFIG_DEBUG_COREDUMP
 z_x86_exception_vector = vector;
@@ -77,7 +77,7 @@ FUNC_NORETURN static void generic_exc_handle(unsigned int vector,
 #define _EXC_FUNC(vector) \
 __pinned_func \
-FUNC_NORETURN __used static void handle_exc_##vector(const z_arch_esf_t *pEsf) \
+FUNC_NORETURN __used static void handle_exc_##vector(const struct arch_esf *pEsf) \
 { \
 generic_exc_handle(vector, pEsf); \
 }
@@ -120,7 +120,7 @@ EXC_FUNC_NOCODE(IV_MACHINE_CHECK, 0);
 _EXCEPTION_CONNECT_CODE(z_x86_page_fault_handler, IV_PAGE_FAULT, 0);
 #ifdef CONFIG_X86_ENABLE_TSS
-static __pinned_noinit volatile z_arch_esf_t _df_esf;
+static __pinned_noinit volatile struct arch_esf _df_esf;
 /* Very tiny stack; just enough for the bogus error code pushed by the CPU
 * and a frame pointer push by the compiler. All df_handler_top does is
@@ -182,14 +182,14 @@ static __used void df_handler_bottom(void)
 reason = K_ERR_STACK_CHK_FAIL;
 }
 #endif
-z_x86_fatal_error(reason, (z_arch_esf_t *)&_df_esf);
+z_x86_fatal_error(reason, (struct arch_esf *)&_df_esf);
 }
 __pinned_func
 static FUNC_NORETURN __used void df_handler_top(void)
 {
 /* State of the system when the double-fault forced a task switch
-* will be in _main_tss. Set up a z_arch_esf_t and copy system state into
+* will be in _main_tss. Set up a struct arch_esf and copy system state into
 * it
 */
 _df_esf.esp = _main_tss.esp;

@@ -302,7 +302,7 @@ int z_float_disable(struct k_thread *thread)
 * instruction is executed while CR0[TS]=1. The handler then enables the
 * current thread to use all supported floating point registers.
 */
-void _FpNotAvailableExcHandler(z_arch_esf_t *pEsf)
+void _FpNotAvailableExcHandler(struct arch_esf *pEsf)
 {
 ARG_UNUSED(pEsf);

@@ -78,7 +78,7 @@ static unsigned int get_exception(unsigned int vector)
 /*
 * Debug exception handler.
 */
-static void z_gdb_interrupt(unsigned int vector, z_arch_esf_t *esf)
+static void z_gdb_interrupt(unsigned int vector, struct arch_esf *esf)
 {
 debug_ctx.exception = get_exception(vector);
@@ -212,7 +212,7 @@ size_t arch_gdb_reg_writeone(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen,
 return ret;
 }
-static __used void z_gdb_debug_isr(z_arch_esf_t *esf)
+static __used void z_gdb_debug_isr(struct arch_esf *esf)
 {
 #ifdef CONFIG_GDBSTUB_TRACE
 printk("gdbstub:enter %s (IV_DEBUG)\n", __func__);
@@ -225,7 +225,7 @@ static __used void z_gdb_debug_isr(z_arch_esf_t *esf)
 #endif
 }
-static __used void z_gdb_break_isr(z_arch_esf_t *esf)
+static __used void z_gdb_break_isr(struct arch_esf *esf)
 {
 #ifdef CONFIG_GDBSTUB_TRACE
 printk("gdbstub:enter %s (IV_BREAKPOINT)\n", __func__);

@@ -46,7 +46,7 @@ struct x86_64_arch_block {
 */
 static struct x86_64_arch_block arch_blk;
-void arch_coredump_info_dump(const z_arch_esf_t *esf)
+void arch_coredump_info_dump(const struct arch_esf *esf)
 {
 struct coredump_arch_hdr_t hdr = {
 .id = COREDUMP_ARCH_HDR_ID,

@@ -13,14 +13,14 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 /* NMI handlers should override weak implementation
 * return true if NMI is handled, false otherwise
 */
-__weak bool z_x86_do_kernel_nmi(const z_arch_esf_t *esf)
+__weak bool z_x86_do_kernel_nmi(const struct arch_esf *esf)
 {
 ARG_UNUSED(esf);
 return false;
 }
-void z_x86_exception(z_arch_esf_t *esf)
+void z_x86_exception(struct arch_esf *esf)
 {
 switch (esf->vector) {
 case Z_X86_OOPS_VECTOR:

@@ -53,6 +53,5 @@ GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF,
 GEN_OFFSET_SYM(_callee_saved_t, esp);
 /* z_arch_esf_t structure member offsets */
 GEN_OFFSET_SYM(z_arch_esf_t, eflags);
 #endif /* _X86_OFFSETS_INC_ */

@@ -62,7 +62,7 @@
 * Assign an exception handler to a particular vector in the IDT.
 *
 * @param handler A handler function of the prototype
-* void handler(const z_arch_esf_t *esf)
+* void handler(const struct arch_esf *esf)
 * @param vector Vector index in the IDT
 */
 #define _EXCEPTION_CONNECT_NOCODE(handler, vector, dpl) \
@@ -75,7 +75,7 @@
 * The error code will be accessible in esf->errorCode
 *
 * @param handler A handler function of the prototype
-* void handler(const z_arch_esf_t *esf)
+* void handler(const struct arch_esf *esf)
 * @param vector Vector index in the IDT
 */
 #define _EXCEPTION_CONNECT_CODE(handler, vector, dpl) \

@@ -36,7 +36,7 @@ void x86_sse_init(struct k_thread *thread);
 void z_x86_syscall_entry_stub(void);
-bool z_x86_do_kernel_nmi(const z_arch_esf_t *esf);
+bool z_x86_do_kernel_nmi(const struct arch_esf *esf);
 #endif /* _ASMLANGUAGE */

@@ -49,16 +49,16 @@ void z_x86_early_serial_init(void);
 * interesting info and call z_x86_fatal_error()
 */
 FUNC_NORETURN void z_x86_unhandled_cpu_exception(uintptr_t vector,
-const z_arch_esf_t *esf);
+const struct arch_esf *esf);
 /* Called upon unrecoverable error; dump registers and transfer control to
 * kernel via z_fatal_error()
 */
 FUNC_NORETURN void z_x86_fatal_error(unsigned int reason,
-const z_arch_esf_t *esf);
+const struct arch_esf *esf);
 /* Common handling for page fault exceptions */
-void z_x86_page_fault_handler(z_arch_esf_t *esf);
+void z_x86_page_fault_handler(struct arch_esf *esf);
 #ifdef CONFIG_THREAD_STACK_INFO
 /**
@@ -90,7 +90,7 @@ void *z_x86_userspace_prepare_thread(struct k_thread *thread);
 #endif /* CONFIG_USERSPACE */
-void z_x86_do_kernel_oops(const z_arch_esf_t *esf);
+void z_x86_do_kernel_oops(const struct arch_esf *esf);
 /*
 * Find a free IRQ vector at the specified priority, or return -1 if none left.

@@ -91,7 +91,7 @@ struct xtensa_arch_block {
 */
 static struct xtensa_arch_block arch_blk;
-void arch_coredump_info_dump(const z_arch_esf_t *esf)
+void arch_coredump_info_dump(const struct arch_esf *esf)
 {
 struct coredump_arch_hdr_t hdr = {
 .id = COREDUMP_ARCH_HDR_ID,

@@ -84,7 +84,7 @@ char *xtensa_exccause(unsigned int cause_code)
 #endif
 }
-void xtensa_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
+void xtensa_fatal_error(unsigned int reason, const struct arch_esf *esf)
 {
 #ifdef CONFIG_EXCEPTION_DEBUG
 if (esf) {

@@ -422,7 +422,7 @@ static unsigned int get_gdb_exception_reason(unsigned int reason)
 * @param ctx GDB context
 * @param stack Pointer to the stack frame
 */
-static void copy_to_ctx(struct gdb_ctx *ctx, const z_arch_esf_t *stack)
+static void copy_to_ctx(struct gdb_ctx *ctx, const struct arch_esf *stack)
 {
 struct xtensa_register *reg;
 int idx, num_laddr_regs;
@@ -513,7 +513,7 @@ static void copy_to_ctx(struct gdb_ctx *ctx, const z_arch_esf_t *stack)
 * @param ctx GDB context
 * @param stack Pointer to the stack frame
 */
-static void restore_from_ctx(struct gdb_ctx *ctx, const z_arch_esf_t *stack)
+static void restore_from_ctx(struct gdb_ctx *ctx, const struct arch_esf *stack)
 {
 struct xtensa_register *reg;
 int idx, num_laddr_regs;
@@ -913,7 +913,7 @@ out:
 return ret;
 }
-void z_gdb_isr(z_arch_esf_t *esf)
+void z_gdb_isr(struct arch_esf *esf)
 {
 uint32_t reg;

@@ -37,7 +37,7 @@ static const struct z_exc_handle exceptions[] = {
 };
 #endif /* CONFIG_USERSPACE */
-void xtensa_dump_stack(const z_arch_esf_t *stack)
+void xtensa_dump_stack(const void *stack)
 {
 _xtensa_irq_stack_frame_raw_t *frame = (void *)stack;
 _xtensa_irq_bsa_t *bsa = frame->ptr_to_bsa;
@@ -218,9 +218,10 @@ static inline DEF_INT_C_HANDLER(1)
 * different because exceptions and interrupts land at the same
 * vector; other interrupt levels have their own vectors.
 */
-void *xtensa_excint1_c(int *interrupted_stack)
+void *xtensa_excint1_c(void *esf)
 {
 int cause;
+int *interrupted_stack = &((struct arch_esf *)esf)->dummy;
 _xtensa_irq_bsa_t *bsa = (void *)*(int **)interrupted_stack;
 bool is_fatal_error = false;
 bool is_dblexc = false;
@@ -385,7 +386,7 @@ fixup_out:
 #if defined(CONFIG_GDBSTUB)
 void *xtensa_debugint_c(int *interrupted_stack)
 {
-extern void z_gdb_isr(z_arch_esf_t *esf);
+extern void z_gdb_isr(struct arch_esf *esf);
 z_gdb_isr((void *)interrupted_stack);


@ -25,7 +25,7 @@
* *
* @param stack Pointer to stack frame. * @param stack Pointer to stack frame.
*/ */
void xtensa_dump_stack(const z_arch_esf_t *stack); void xtensa_dump_stack(const void *stack);
/** /**
* @brief Get string description from an exception code. * @brief Get string description from an exception code.
@ -43,7 +43,7 @@ char *xtensa_exccause(unsigned int cause_code);
* @param esf Exception context, with details and partial or full register * @param esf Exception context, with details and partial or full register
* state when the error occurred. May in some cases be NULL. * state when the error occurred. May in some cases be NULL.
*/ */
void xtensa_fatal_error(unsigned int reason, const z_arch_esf_t *esf); void xtensa_fatal_error(unsigned int reason, const struct arch_esf *esf);
/** /**
* @brief Perform a one-way transition from supervisor to user mode. * @brief Perform a one-way transition from supervisor to user mode.


@ -387,7 +387,7 @@ static bool handle_nmi(void)
return true; return true;
} }
bool z_x86_do_kernel_nmi(const z_arch_esf_t *esf) bool z_x86_do_kernel_nmi(const struct arch_esf *esf)
{ {
const struct device *const dev = DEVICE_DT_GET(DEVICE_NODE); const struct device *const dev = DEVICE_DT_GET(DEVICE_NODE);
struct ibecc_data *data = dev->data; struct ibecc_data *data = dev->data;


@ -18,11 +18,6 @@
extern "C" { extern "C" {
#endif #endif
#ifdef _ASMLANGUAGE
#else
typedef struct arch_esf z_arch_esf_t;
#endif
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif


@ -39,6 +39,7 @@ extern "C" {
#endif #endif
/* NOTE: We cannot pull in kernel.h here, need some forward declarations */ /* NOTE: We cannot pull in kernel.h here, need some forward declarations */
struct arch_esf;
struct k_thread; struct k_thread;
struct k_mem_domain; struct k_mem_domain;
@ -46,6 +47,8 @@ typedef struct z_thread_stack_element k_thread_stack_t;
typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3); typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3);
__deprecated typedef struct arch_esf z_arch_esf_t;
/** /**
* @defgroup arch-timing Architecture timing APIs * @defgroup arch-timing Architecture timing APIs
* @ingroup arch-interface * @ingroup arch-interface
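
To make the effect of the __deprecated alias above concrete, here is a minimal
sketch (the helper names are invented purely for illustration; only the two
spellings matter). The old alias keeps compiling, but every use now emits a
deprecation warning; the named struct is the supported spelling going forward.

#include <zephyr/kernel.h>

/* Old spelling: still accepted, but each use now warns, e.g.
 * "warning: 'z_arch_esf_t' is deprecated".
 */
void dump_esf_legacy(const z_arch_esf_t *esf)
{
	ARG_UNUSED(esf);
}

/* New spelling: the named struct declared in arch_interface.h. */
void dump_esf(const struct arch_esf *esf)
{
	ARG_UNUSED(esf);
}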


@ -75,8 +75,6 @@ struct arch_esf {
extern uint32_t z_arm_coredump_fault_sp; extern uint32_t z_arm_coredump_fault_sp;
typedef struct arch_esf z_arch_esf_t;
extern void z_arm_exc_exit(bool fatal); extern void z_arm_exc_exit(bool fatal);
#ifdef __cplusplus #ifdef __cplusplus


@ -119,8 +119,6 @@ struct arch_esf {
extern uint32_t z_arm_coredump_fault_sp; extern uint32_t z_arm_coredump_fault_sp;
typedef struct arch_esf z_arch_esf_t;
extern void z_arm_exc_exit(void); extern void z_arm_exc_exit(void);
#ifdef __cplusplus #ifdef __cplusplus


@ -64,7 +64,7 @@ struct gdb_ctx {
unsigned int registers[GDB_NUM_REGS]; unsigned int registers[GDB_NUM_REGS];
}; };
void z_gdb_entry(z_arch_esf_t *esf, unsigned int exc_cause); void z_gdb_entry(struct arch_esf *esf, unsigned int exc_cause);
#endif #endif


@ -55,8 +55,6 @@ struct arch_esf {
#endif #endif
} __aligned(16); } __aligned(16);
typedef struct arch_esf z_arch_esf_t;
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif


@ -50,8 +50,6 @@ struct arch_esf {
unsigned long cause; unsigned long cause;
}; };
typedef struct arch_esf z_arch_esf_t;
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif


@ -101,10 +101,10 @@ void arch_irq_enable(unsigned int irq);
void arch_irq_disable(unsigned int irq); void arch_irq_disable(unsigned int irq);
FUNC_NORETURN void z_SysFatalErrorHandler(unsigned int reason, FUNC_NORETURN void z_SysFatalErrorHandler(unsigned int reason,
const z_arch_esf_t *esf); const struct arch_esf *esf);
FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason, FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
const z_arch_esf_t *esf); const struct arch_esf *esf);
enum nios2_exception_cause { enum nios2_exception_cause {
NIOS2_EXCEPTION_UNKNOWN = -1, NIOS2_EXCEPTION_UNKNOWN = -1,


@ -35,8 +35,6 @@ struct arch_esf {
uint32_t instr; /* Instruction being executed when exc occurred */ uint32_t instr; /* Instruction being executed when exc occurred */
}; };
typedef struct arch_esf z_arch_esf_t;
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif


@ -19,8 +19,6 @@ struct arch_esf {
uint32_t dummy; /*maybe we will want to add something someday*/ uint32_t dummy; /*maybe we will want to add something someday*/
}; };
typedef struct arch_esf z_arch_esf_t;
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif


@ -48,12 +48,12 @@
*/ */
#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT #ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
#define Z_RISCV_STACK_GUARD_SIZE \ #define Z_RISCV_STACK_GUARD_SIZE \
Z_POW2_CEIL(MAX(sizeof(z_arch_esf_t) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \ Z_POW2_CEIL(MAX(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
Z_RISCV_STACK_PMP_ALIGN)) Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_GUARD_SIZE #define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_GUARD_SIZE
#else #else
#define Z_RISCV_STACK_GUARD_SIZE \ #define Z_RISCV_STACK_GUARD_SIZE \
ROUND_UP(sizeof(z_arch_esf_t) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \ ROUND_UP(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
Z_RISCV_STACK_PMP_ALIGN) Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_PMP_ALIGN #define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_PMP_ALIGN
#endif #endif
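
For a feel of what the two guard-size formulas above produce, a self-contained
sketch with assumed numbers (the 124-byte frame, 1024-byte
CONFIG_PMP_STACK_GUARD_MIN_SIZE and 16-byte PMP alignment are illustrative, not
taken from any real SoC), using local stand-ins for Zephyr's rounding helpers:

#include <stdio.h>

#define ESF_SIZE        124u  /* assumed sizeof(struct arch_esf) */
#define GUARD_MIN_SIZE 1024u  /* assumed CONFIG_PMP_STACK_GUARD_MIN_SIZE */
#define PMP_ALIGN        16u  /* assumed Z_RISCV_STACK_PMP_ALIGN */

/* Local stand-ins for the MAX()/ROUND_UP()/Z_POW2_CEIL() helpers. */
#define MAX_(a, b)      ((a) > (b) ? (a) : (b))
#define ROUND_UP_(x, a) ((((x) + (a) - 1u) / (a)) * (a))

static unsigned int pow2_ceil(unsigned int x)
{
	unsigned int p = 1u;

	while (p < x) {
		p <<= 1;
	}
	return p;
}

int main(void)
{
	unsigned int base = ESF_SIZE + GUARD_MIN_SIZE; /* 1148 */

	/* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT=y: guard (and stack object
	 * alignment) round up to the next power of two -> 2048.
	 */
	printf("pow2 guard:     %u\n", pow2_ceil(MAX_(base, PMP_ALIGN)));

	/* Otherwise only round up to the PMP granule -> 1152. */
	printf("round-up guard: %u\n", ROUND_UP_(MAX_(base, PMP_ALIGN), PMP_ALIGN));

	return 0;
}

The hunk itself only swaps sizeof(z_arch_esf_t) for sizeof(struct arch_esf);
the computed guard sizes are unchanged.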


@ -87,7 +87,6 @@ struct arch_esf {
} __aligned(16); } __aligned(16);
#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */ #endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */
typedef struct arch_esf z_arch_esf_t;
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE #ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
typedef struct soc_esf soc_esf_t; typedef struct soc_esf soc_esf_t;
#endif #endif


@ -25,8 +25,6 @@ struct arch_esf {
uint32_t y; uint32_t y;
}; };
typedef struct arch_esf z_arch_esf_t;
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif


@ -27,7 +27,7 @@ extern "C" {
* Those registers are pushed onto the stack by _ExcEnt(). * Those registers are pushed onto the stack by _ExcEnt().
*/ */
typedef struct arch_esf { struct arch_esf {
#ifdef CONFIG_GDBSTUB #ifdef CONFIG_GDBSTUB
unsigned int ss; unsigned int ss;
unsigned int gs; unsigned int gs;
@ -47,7 +47,7 @@ typedef struct arch_esf {
unsigned int eip; unsigned int eip;
unsigned int cs; unsigned int cs;
unsigned int eflags; unsigned int eflags;
} z_arch_esf_t; };
extern unsigned int z_x86_exception_vector; extern unsigned int z_x86_exception_vector;


@ -53,8 +53,6 @@ struct arch_esf {
unsigned long ss; unsigned long ss;
}; };
typedef struct arch_esf z_arch_esf_t;
struct x86_ssf { struct x86_ssf {
unsigned long rip; unsigned long rip;
unsigned long rflags; unsigned long rflags;


@ -28,7 +28,6 @@ extern "C" {
struct arch_esf { struct arch_esf {
int dummy; int dummy;
}; };
typedef int z_arch_esf_t;
#endif #endif


@ -451,7 +451,7 @@ struct bt_hci_evt_mesh_scanning_report {
struct bt_hci_evt_mesh_scan_report reports[0]; struct bt_hci_evt_mesh_scan_report reports[0];
} __packed; } __packed;
struct net_buf *hci_vs_err_stack_frame(unsigned int reason, const z_arch_esf_t *esf); struct net_buf *hci_vs_err_stack_frame(unsigned int reason, const struct arch_esf *esf);
struct net_buf *hci_vs_err_trace(const char *file, uint32_t line, uint64_t pc); struct net_buf *hci_vs_err_trace(const char *file, uint32_t line, uint64_t pc);
struct net_buf *hci_vs_err_assert(const char *file, uint32_t line); struct net_buf *hci_vs_err_assert(const char *file, uint32_t line);


@ -232,7 +232,7 @@ struct coredump_backend_api {
coredump_backend_cmd_t cmd; coredump_backend_cmd_t cmd;
}; };
void coredump(unsigned int reason, const z_arch_esf_t *esf, void coredump(unsigned int reason, const struct arch_esf *esf,
struct k_thread *thread); struct k_thread *thread);
void coredump_memory_dump(uintptr_t start_addr, uintptr_t end_addr); void coredump_memory_dump(uintptr_t start_addr, uintptr_t end_addr);
void coredump_buffer_output(uint8_t *buf, size_t buflen); void coredump_buffer_output(uint8_t *buf, size_t buflen);
@ -242,7 +242,7 @@ int coredump_cmd(enum coredump_cmd_id cmd_id, void *arg);
#else #else
static inline void coredump(unsigned int reason, const z_arch_esf_t *esf, static inline void coredump(unsigned int reason, const struct arch_esf *esf,
struct k_thread *thread) struct k_thread *thread)
{ {
ARG_UNUSED(reason); ARG_UNUSED(reason);
@ -279,7 +279,7 @@ static inline int coredump_cmd(enum coredump_cmd_id query_id, void *arg)
#endif /* CONFIG_DEBUG_COREDUMP */ #endif /* CONFIG_DEBUG_COREDUMP */
/** /**
* @fn void coredump(unsigned int reason, const z_arch_esf_t *esf, struct k_thread *thread); * @fn void coredump(unsigned int reason, const struct arch_esf *esf, struct k_thread *thread);
* @brief Perform coredump. * @brief Perform coredump.
* *
* Normally, this is called inside z_fatal_error() to generate coredump * Normally, this is called inside z_fatal_error() to generate coredump


@ -65,7 +65,7 @@ FUNC_NORETURN void k_fatal_halt(unsigned int reason);
* @param esf Exception context, with details and partial or full register * @param esf Exception context, with details and partial or full register
* state when the error occurred. May in some cases be NULL. * state when the error occurred. May in some cases be NULL.
*/ */
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf); void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf);
/** /**
* Called by architecture code upon a fatal error. * Called by architecture code upon a fatal error.
@ -81,7 +81,7 @@ void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf);
* @param esf Exception context, with details and partial or full register * @param esf Exception context, with details and partial or full register
* state when the error occurred. May in some cases be NULL. * state when the error occurred. May in some cases be NULL.
*/ */
void z_fatal_error(unsigned int reason, const z_arch_esf_t *esf); void z_fatal_error(unsigned int reason, const struct arch_esf *esf);
/** @} */ /** @} */
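
As an application-side illustration of the hook documented above, a minimal
override using the updated signature (the reason filtering is an example
policy, not the default; many of the test overrides later in this diff follow
the same pattern):

#include <zephyr/fatal.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
{
	ARG_UNUSED(esf);

	printk("Caught system error -- reason %u on %p\n", reason,
	       (void *)k_current_get());

	if (reason == K_ERR_KERNEL_OOPS) {
		/* Returning lets the kernel abort only the offending
		 * (non-essential) thread and keep running.
		 */
		return;
	}

	/* Anything else is unrecoverable for this application. */
	k_fatal_halt(reason);
}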


@ -35,7 +35,7 @@ FUNC_NORETURN __weak void arch_system_halt(unsigned int reason)
/* LCOV_EXCL_START */ /* LCOV_EXCL_START */
__weak void k_sys_fatal_error_handler(unsigned int reason, __weak void k_sys_fatal_error_handler(unsigned int reason,
const z_arch_esf_t *esf) const struct arch_esf *esf)
{ {
ARG_UNUSED(esf); ARG_UNUSED(esf);
@ -82,7 +82,7 @@ FUNC_NORETURN void k_fatal_halt(unsigned int reason)
} }
/* LCOV_EXCL_STOP */ /* LCOV_EXCL_STOP */
void z_fatal_error(unsigned int reason, const z_arch_esf_t *esf) void z_fatal_error(unsigned int reason, const struct arch_esf *esf)
{ {
/* We can't allow this code to be preempted, but don't need to /* We can't allow this code to be preempted, but don't need to
* synchronize between CPUs, so an arch-layer lock is * synchronize between CPUs, so an arch-layer lock is


@ -79,6 +79,8 @@
#include <zephyr/toolchain.h> #include <zephyr/toolchain.h>
#include <stddef.h> #include <stddef.h>
typedef struct arch_esf z_arch_esf_t;
/* definition of the GEN_OFFSET_SYM() macros is toolchain independent */ /* definition of the GEN_OFFSET_SYM() macros is toolchain independent */
#define GEN_OFFSET_SYM(S, M) \ #define GEN_OFFSET_SYM(S, M) \


@ -583,7 +583,7 @@ static inline void arch_nop(void);
* *
* @param esf Exception Stack Frame (arch-specific) * @param esf Exception Stack Frame (arch-specific)
*/ */
void arch_coredump_info_dump(const z_arch_esf_t *esf); void arch_coredump_info_dump(const struct arch_esf *esf);
/** /**
* @brief Get the target code specified by the architecture. * @brief Get the target code specified by the architecture.


@ -320,7 +320,7 @@ void bt_ctlr_assert_handle(char *file, uint32_t line)
#endif /* CONFIG_BT_CTLR_ASSERT_HANDLER */ #endif /* CONFIG_BT_CTLR_ASSERT_HANDLER */
#if defined(CONFIG_BT_HCI_VS_FATAL_ERROR) #if defined(CONFIG_BT_HCI_VS_FATAL_ERROR)
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
{ {
/* Disable interrupts, this is unrecoverable */ /* Disable interrupts, this is unrecoverable */
(void)irq_lock(); (void)irq_lock();


@ -100,7 +100,7 @@ static void user_function(void *p1, void *p2, void *p3)
printk("[app]Thread %p done\n", k_current_get()); printk("[app]Thread %p done\n", k_current_get());
} }
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
{ {
int i; int i;


@ -5001,7 +5001,7 @@ NET_BUF_POOL_FIXED_DEFINE(vs_err_tx_pool, 1, BT_BUF_EVT_RX_SIZE,
typedef struct bt_hci_vs_fata_error_cpu_data_cortex_m bt_hci_vs_fatal_error_cpu_data; typedef struct bt_hci_vs_fata_error_cpu_data_cortex_m bt_hci_vs_fatal_error_cpu_data;
static void vs_err_fatal_cpu_data_fill(bt_hci_vs_fatal_error_cpu_data *cpu_data, static void vs_err_fatal_cpu_data_fill(bt_hci_vs_fatal_error_cpu_data *cpu_data,
const z_arch_esf_t *esf) const struct arch_esf *esf)
{ {
cpu_data->a1 = sys_cpu_to_le32(esf->basic.a1); cpu_data->a1 = sys_cpu_to_le32(esf->basic.a1);
cpu_data->a2 = sys_cpu_to_le32(esf->basic.a2); cpu_data->a2 = sys_cpu_to_le32(esf->basic.a2);
@ -5036,7 +5036,7 @@ static struct net_buf *vs_err_evt_create(uint8_t subevt, uint8_t len)
return buf; return buf;
} }
struct net_buf *hci_vs_err_stack_frame(unsigned int reason, const z_arch_esf_t *esf) struct net_buf *hci_vs_err_stack_frame(unsigned int reason, const struct arch_esf *esf)
{ {
/* Prepare vendor specific HCI Fatal Error event */ /* Prepare vendor specific HCI Fatal Error event */
struct bt_hci_vs_fatal_error_stack_frame *sf; struct bt_hci_vs_fatal_error_stack_frame *sf;


@ -117,7 +117,7 @@ void process_memory_region_list(void)
#endif #endif
} }
void coredump(unsigned int reason, const z_arch_esf_t *esf, void coredump(unsigned int reason, const struct arch_esf *esf,
struct k_thread *thread) struct k_thread *thread)
{ {
z_coredump_start(); z_coredump_start();


@ -29,7 +29,6 @@
extern "C" { extern "C" {
#endif #endif
struct arch_esf; struct arch_esf;
typedef struct arch_esf z_arch_esf_t;
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif


@ -38,7 +38,7 @@ __syscall void ztest_set_fault_valid(bool valid);
* By default, it will do nothing before leaving error handler. * By default, it will do nothing before leaving error handler.
*/ */
void ztest_post_fatal_error_hook(unsigned int reason, void ztest_post_fatal_error_hook(unsigned int reason,
const z_arch_esf_t *pEsf); const struct arch_esf *pEsf);
#endif #endif


@ -42,11 +42,11 @@ static inline void z_vrfy_ztest_set_fault_valid(bool valid)
#endif #endif
__weak void ztest_post_fatal_error_hook(unsigned int reason, __weak void ztest_post_fatal_error_hook(unsigned int reason,
const z_arch_esf_t *pEsf) const struct arch_esf *pEsf)
{ {
} }
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
k_tid_t curr_tid = k_current_get(); k_tid_t curr_tid = k_current_get();
bool valid_fault = (curr_tid == valid_fault_tid) || fault_in_isr; bool valid_fault = (curr_tid == valid_fault_tid) || fault_in_isr;


@ -13,7 +13,7 @@
static volatile int expected_reason = -1; static volatile int expected_reason = -1;
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
static bool triggered_synchronous_svc; static bool triggered_synchronous_svc;


@ -24,7 +24,7 @@ static struct k_thread esf_collection_thread;
/** /**
* Validates that pEsf matches state from set_regs_with_known_pattern() * Validates that pEsf matches state from set_regs_with_known_pattern()
*/ */
static int check_esf_matches_expectations(const z_arch_esf_t *pEsf) static int check_esf_matches_expectations(const struct arch_esf *pEsf)
{ {
const uint16_t expected_fault_instruction = 0xde5a; /* udf #90 */ const uint16_t expected_fault_instruction = 0xde5a; /* udf #90 */
const bool caller_regs_match_expected = const bool caller_regs_match_expected =
@ -88,7 +88,7 @@ static int check_esf_matches_expectations(const z_arch_esf_t *pEsf)
return 0; return 0;
} }
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
TC_PRINT("Caught system error -- reason %d\n", reason); TC_PRINT("Caught system error -- reason %d\n", reason);


@ -36,7 +36,7 @@ void arm_isr_handler(const void *args)
test_flag++; test_flag++;
} }
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
printk("Caught system error -- reason %d\n", reason); printk("Caught system error -- reason %d\n", reason);


@ -35,7 +35,7 @@ uint8_t *nmi_stacks[] = {
#endif #endif
}; };
bool z_x86_do_kernel_nmi(const z_arch_esf_t *esf) bool z_x86_do_kernel_nmi(const struct arch_esf *esf)
{ {
uint64_t stack; uint64_t stack;


@ -47,7 +47,7 @@ static volatile int int_handler_executed;
/* Assume the spurious interrupt handler will execute and abort the task */ /* Assume the spurious interrupt handler will execute and abort the task */
static volatile int spur_handler_aborted_thread = 1; static volatile int spur_handler_aborted_thread = 1;
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
{ {
if (reason != K_ERR_SPURIOUS_IRQ) { if (reason != K_ERR_SPURIOUS_IRQ) {
printk("wrong error reason\n"); printk("wrong error reason\n");
@ -89,7 +89,7 @@ void isr_handler(void)
* *
*/ */
void exc_divide_error_handler(z_arch_esf_t *p_esf) void exc_divide_error_handler(struct arch_esf *p_esf)
{ {
p_esf->eip += 2; p_esf->eip += 2;
/* provide evidence that the handler executed */ /* provide evidence that the handler executed */


@ -7,7 +7,7 @@
#include <zephyr/fatal.h> #include <zephyr/fatal.h>
#include <zephyr/kernel.h> #include <zephyr/kernel.h>
void z_fatal_error(unsigned int reason, const z_arch_esf_t *esf) void z_fatal_error(unsigned int reason, const struct arch_esf *esf)
{ {
ztest_test_fail(); ztest_test_fail();
} }


@ -32,7 +32,7 @@ static struct coredump_mem_region_node dump_region0 = {
.size = sizeof(values_to_dump) .size = sizeof(values_to_dump)
}; };
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
ARG_UNUSED(pEsf); ARG_UNUSED(pEsf);


@ -50,7 +50,7 @@ volatile int rv;
static ZTEST_DMEM volatile int expected_reason = -1; static ZTEST_DMEM volatile int expected_reason = -1;
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
TC_PRINT("Caught system error -- reason %d\n", reason); TC_PRINT("Caught system error -- reason %d\n", reason);


@ -12,7 +12,7 @@ static volatile int expected_reason = -1;
void z_thread_essential_clear(struct k_thread *thread); void z_thread_essential_clear(struct k_thread *thread);
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
printk("Caught system error -- reason %d\n", reason); printk("Caught system error -- reason %d\n", reason);


@ -13,7 +13,7 @@
static ZTEST_DMEM volatile int expected_reason = -1; static ZTEST_DMEM volatile int expected_reason = -1;
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
int rv = TC_PASS; int rv = TC_PASS;


@ -64,7 +64,7 @@ __pinned_bss
static bool expect_fault; static bool expect_fault;
__pinned_func __pinned_func
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
printk("Caught system error -- reason %d\n", reason); printk("Caught system error -- reason %d\n", reason);


@ -33,7 +33,7 @@ volatile bool expect_fault;
__pinned_noinit __pinned_noinit
static uint8_t __aligned(CONFIG_MMU_PAGE_SIZE) test_page[TEST_PAGE_SZ]; static uint8_t __aligned(CONFIG_MMU_PAGE_SIZE) test_page[TEST_PAGE_SZ];
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
printk("Caught system error -- reason %d\n", reason); printk("Caught system error -- reason %d\n", reason);


@ -8,7 +8,7 @@
ZTEST_BMEM volatile bool valid_fault; ZTEST_BMEM volatile bool valid_fault;
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
printk("Caught system error -- reason %d %d\n", reason, valid_fault); printk("Caught system error -- reason %d %d\n", reason, valid_fault);
if (valid_fault) { if (valid_fault) {


@ -28,7 +28,7 @@
#define INFO(fmt, ...) printk(fmt, ##__VA_ARGS__) #define INFO(fmt, ...) printk(fmt, ##__VA_ARGS__)
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
INFO("Caught system error -- reason %d\n", reason); INFO("Caught system error -- reason %d\n", reason);
ztest_test_pass(); ztest_test_pass();


@ -15,7 +15,7 @@
ZTEST_BMEM static int count; ZTEST_BMEM static int count;
ZTEST_BMEM static int ret = TC_PASS; ZTEST_BMEM static int ret = TC_PASS;
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
{ {
if (reason != K_ERR_STACK_CHK_FAIL) { if (reason != K_ERR_STACK_CHK_FAIL) {
printk("wrong error type\n"); printk("wrong error type\n");


@ -568,7 +568,7 @@ ZTEST_USER(sys_sem_1cpu, test_sem_multiple_threads_wait)
* @} * @}
*/ */
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
printk("Caught system error -- reason %d\n", reason); printk("Caught system error -- reason %d\n", reason);
printk("Unexpected fault during test\n"); printk("Unexpected fault during test\n");


@ -36,7 +36,7 @@ char kernel_string[BUF_SIZE];
char kernel_buf[BUF_SIZE]; char kernel_buf[BUF_SIZE];
ZTEST_BMEM char user_string[BUF_SIZE]; ZTEST_BMEM char user_string[BUF_SIZE];
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
printk("Caught system error -- reason %d\n", reason); printk("Caught system error -- reason %d\n", reason);
printk("Unexpected fault during test\n"); printk("Unexpected fault during test\n");


@ -76,7 +76,7 @@ static void set_fault(unsigned int reason)
compiler_barrier(); compiler_barrier();
} }
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
INFO("Caught system error -- reason %d\n", reason); INFO("Caught system error -- reason %d\n", reason);


@ -38,7 +38,7 @@ extern struct k_sem offload_sem;
/* A call back function which is hooked in default assert handler. */ /* A call back function which is hooked in default assert handler. */
void ztest_post_fatal_error_hook(unsigned int reason, void ztest_post_fatal_error_hook(unsigned int reason,
const z_arch_esf_t *pEsf) const struct arch_esf *pEsf)
{ {
/* check if expected error */ /* check if expected error */


@ -674,7 +674,7 @@ void pipe_put_get_timeout(void)
/******************************************************************************/ /******************************************************************************/
ZTEST_BMEM bool valid_fault; ZTEST_BMEM bool valid_fault;
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
printk("Caught system error -- reason %d\n", reason); printk("Caught system error -- reason %d\n", reason);
if (valid_fault) { if (valid_fault) {


@ -757,7 +757,7 @@ ZTEST(smp, test_smp_ipi)
} }
#endif #endif
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
{ {
static int trigger; static int trigger;


@ -16,7 +16,7 @@ static K_SEM_DEFINE(end_sem, 0, 1);
static ZTEST_BMEM struct k_thread *dyn_thread; static ZTEST_BMEM struct k_thread *dyn_thread;
static struct k_thread *dynamic_threads[CONFIG_MAX_THREAD_BYTES * 8]; static struct k_thread *dynamic_threads[CONFIG_MAX_THREAD_BYTES * 8];
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
{ {
if (reason != K_ERR_KERNEL_OOPS) { if (reason != K_ERR_KERNEL_OOPS) {
printk("wrong error reason\n"); printk("wrong error reason\n");


@ -179,7 +179,7 @@ static void set_fault(unsigned int reason)
compiler_barrier(); compiler_barrier();
} }
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{ {
if (expect_fault) { if (expect_fault) {
if (expected_reason == reason) { if (expected_reason == reason) {

Some files were not shown because too many files have changed in this diff.