kernel: renamespace z_is_in_isr()

This is part of the core kernel -> architecture interface
and is appropriately renamed z_arch_is_in_isr().

References from test cases changed to k_is_in_isr().

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Authored by Andrew Boie on 2019-09-21 15:58:15 -07:00; committed by Anas Nashif
commit e1ec59f9c2
22 changed files with 31 additions and 31 deletions
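
For context on the convention being applied: the z_arch_ prefix marks the private hooks each architecture implements for the kernel, while portable code and test cases go through the public k_ API. Assembled from the hunks below (the x86 definition, the kernel/thread.c wrapper, and a test-case call site), the pattern reduces to this condensed sketch, not a literal excerpt from any one file:

/* Each arch supplies z_arch_is_in_isr(); on x86 it checks the IRQ
 * nesting count maintained by the interrupt entry code.
 */
#define z_arch_is_in_isr() (_kernel.nested != 0U)

/* The kernel wraps the arch hook exactly once as the public API. */
bool k_is_in_isr(void)
{
	return z_arch_is_in_isr();
}

/* Tests and other portable code call only the k_ wrapper. */
zassert_true(k_is_in_isr(), "Not in IRQ context!");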


@@ -66,7 +66,7 @@ static ALWAYS_INLINE int Z_INTERRUPT_CAUSE(void)
 	return irq_num;
 }
-#define z_is_in_isr z_arc_v2_irq_unit_is_in_isr
+#define z_arch_is_in_isr z_arc_v2_irq_unit_is_in_isr
 extern void z_thread_entry_wrapper(void);
 extern void z_user_thread_entry_wrapper(void);


@@ -352,7 +352,7 @@ int z_arch_float_disable(struct k_thread *thread)
 		return -EINVAL;
 	}
-	if (z_is_in_isr()) {
+	if (z_arch_is_in_isr()) {
 		return -EINVAL;
 	}


@@ -143,7 +143,7 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 extern void k_cpu_atomic_idle(unsigned int key);
-#define z_is_in_isr() z_IsInIsr()
+#define z_arch_is_in_isr() z_IsInIsr()
 extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
 						void *p1, void *p2, void *p3,


@@ -44,7 +44,7 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason,
 				       const z_arch_esf_t *esf);
-#define z_is_in_isr() (_kernel.nested != 0U)
+#define z_arch_is_in_isr() (_kernel.nested != 0U)
 #ifdef CONFIG_IRQ_OFFLOAD
 void z_irq_do_offload(void);


@@ -53,7 +53,7 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 }
 #endif
-#define z_is_in_isr() (_kernel.nested != 0U)
+#define z_arch_is_in_isr() (_kernel.nested != 0U)
 #endif /* _ASMLANGUAGE */


@@ -40,7 +40,7 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
 				       const z_arch_esf_t *esf);
-#define z_is_in_isr() (_kernel.nested != 0U)
+#define z_arch_is_in_isr() (_kernel.nested != 0U)
 #ifdef CONFIG_IRQ_OFFLOAD
 int z_irq_do_offload(void);


@@ -41,7 +41,7 @@ static bool check_stack_bounds(u32_t addr, size_t size, u16_t cs)
 {
 	u32_t start, end;
-	if (z_is_in_isr()) {
+	if (z_arch_is_in_isr()) {
 		/* We were servicing an interrupt */
 		start = (u32_t)Z_ARCH_THREAD_STACK_BUFFER(_interrupt_stack);
 		end = start + CONFIG_ISR_STACK_SIZE;


@@ -64,7 +64,7 @@ void z_arch_isr_direct_header(void)
 	sys_trace_isr_enter();
 	/* We're not going to unlock IRQs, but we still need to increment this
-	 * so that z_is_in_isr() works
+	 * so that z_arch_is_in_isr() works
 	 */
 	++_kernel.nested;
 }


@@ -12,7 +12,7 @@
 #include <ia32/kernel_arch_func.h>
 #endif
-#define z_is_in_isr() (_kernel.nested != 0U)
+#define z_arch_is_in_isr() (_kernel.nested != 0U)
 #ifndef _ASMLANGUAGE


@@ -78,7 +78,7 @@ static inline unsigned int z_arch_k_cycle_get_32(void)
 #endif
 }
-#define z_is_in_isr() (z_arch_curr_cpu()->nested != 0)
+#define z_arch_is_in_isr() (z_arch_curr_cpu()->nested != 0)
 static inline void z_arch_switch(void *switch_to, void **switched_from)
 {


@@ -89,7 +89,7 @@ extern void k_cpu_atomic_idle(unsigned int key);
 }
 #endif
-#define z_is_in_isr() (z_arch_curr_cpu()->nested != 0U)
+#define z_arch_is_in_isr() (z_arch_curr_cpu()->nested != 0U)
 #endif /* _ASMLANGUAGE */


@@ -255,7 +255,7 @@ static inline void _ready_one_thread(_wait_q_t *wq)
 static inline void z_sched_lock(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
-	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(!z_arch_is_in_isr(), "");
 	__ASSERT(_current->base.sched_locked != 1, "");
 	--_current->base.sched_locked;
@@ -270,7 +270,7 @@ static inline void z_sched_lock(void)
 static ALWAYS_INLINE void z_sched_unlock_no_reschedule(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
-	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(!z_arch_is_in_isr(), "");
 	__ASSERT(_current->base.sched_locked != 0, "");
 	compiler_barrier();


@@ -52,7 +52,7 @@ int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
 	int ret;
 	s64_t end = 0;
-	__ASSERT(!(z_is_in_isr() && timeout != K_NO_WAIT), "");
+	__ASSERT(!(z_arch_is_in_isr() && timeout != K_NO_WAIT), "");
 	if (timeout > 0) {
 		end = k_uptime_get() + timeout;


@@ -110,7 +110,7 @@ void k_msgq_cleanup(struct k_msgq *msgq)
 int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout)
 {
-	__ASSERT(!z_is_in_isr() || timeout == K_NO_WAIT, "");
+	__ASSERT(!z_arch_is_in_isr() || timeout == K_NO_WAIT, "");
 	struct k_thread *pending_thread;
 	k_spinlock_key_t key;
@@ -185,7 +185,7 @@ static inline void z_vrfy_k_msgq_get_attrs(struct k_msgq *q,
 int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout)
 {
-	__ASSERT(!z_is_in_isr() || timeout == K_NO_WAIT, "");
+	__ASSERT(!z_arch_is_in_isr() || timeout == K_NO_WAIT, "");
 	k_spinlock_key_t key;
 	struct k_thread *pending_thread;


@@ -190,7 +190,7 @@ static inline void set_event_ready(struct k_poll_event *event, u32_t state)
 int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 {
-	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(!z_arch_is_in_isr(), "");
 	__ASSERT(events != NULL, "NULL events\n");
 	__ASSERT(num_events > 0, "zero events\n");


@@ -531,7 +531,7 @@ static inline int resched(u32_t key)
 	_current_cpu->swap_ok = 0;
 #endif
-	return z_arch_irq_unlocked(key) && !z_is_in_isr();
+	return z_arch_irq_unlocked(key) && !z_arch_is_in_isr();
 }
 void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
@@ -563,7 +563,7 @@ void k_sched_unlock(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(_current->base.sched_locked != 0, "");
-	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(!z_arch_is_in_isr(), "");
 	LOCKED(&sched_spinlock) {
 		++_current->base.sched_locked;
@@ -855,7 +855,7 @@ void z_impl_k_thread_priority_set(k_tid_t tid, int prio)
 	 * keep track of it) and idle cannot change its priority.
 	 */
 	Z_ASSERT_VALID_PRIO(prio, NULL);
-	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(!z_arch_is_in_isr(), "");
 	struct k_thread *thread = (struct k_thread *)tid;
@@ -909,7 +909,7 @@ static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
 void z_impl_k_yield(void)
 {
-	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(!z_arch_is_in_isr(), "");
 	if (!is_idle(_current)) {
 		LOCKED(&sched_spinlock) {
@@ -939,7 +939,7 @@ static s32_t z_tick_sleep(s32_t ticks)
 #ifdef CONFIG_MULTITHREADING
 	u32_t expected_wakeup_time;
-	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(!z_arch_is_in_isr(), "");
 	K_DEBUG("thread %p for %d ticks\n", _current, ticks);
@@ -1026,7 +1026,7 @@ void z_impl_k_wakeup(k_tid_t thread)
 	z_mark_thread_as_not_suspended(thread);
 	z_ready_thread(thread);
-	if (!z_is_in_isr()) {
+	if (!z_arch_is_in_isr()) {
 		z_reschedule_unlocked();
 	}
@@ -1113,7 +1113,7 @@ static inline k_tid_t z_vrfy_k_current_get(void)
 int z_impl_k_is_preempt_thread(void)
 {
-	return !z_is_in_isr() && is_preempt(_current);
+	return !z_arch_is_in_isr() && is_preempt(_current);
 }
 #ifdef CONFIG_USERSPACE


@@ -138,7 +138,7 @@ static inline void z_vrfy_k_sem_give(struct k_sem *sem)
 int z_impl_k_sem_take(struct k_sem *sem, s32_t timeout)
 {
-	__ASSERT(((z_is_in_isr() == false) || (timeout == K_NO_WAIT)), "");
+	__ASSERT(((z_arch_is_in_isr() == false) || (timeout == K_NO_WAIT)), "");
 	sys_trace_void(SYS_TRACE_ID_SEMA_TAKE);
 	k_spinlock_key_t key = k_spin_lock(&lock);


@@ -61,7 +61,7 @@ void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
 bool k_is_in_isr(void)
 {
-	return z_is_in_isr();
+	return z_arch_is_in_isr();
 }
 /*
@@ -531,7 +531,7 @@ k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
 			       void *p1, void *p2, void *p3,
 			       int prio, u32_t options, s32_t delay)
 {
-	__ASSERT(!z_is_in_isr(), "Threads may not be created in ISRs");
+	__ASSERT(!z_arch_is_in_isr(), "Threads may not be created in ISRs");
 	/* Special case, only for unit tests */
 #if defined(CONFIG_TEST) && defined(CONFIG_ARCH_HAS_USERSPACE) && !defined(CONFIG_USERSPACE)


@@ -43,7 +43,7 @@ void z_impl_k_thread_abort(k_tid_t thread)
 	z_thread_single_abort(thread);
 	z_thread_monitor_exit(thread);
-	if (thread == _current && !z_is_in_isr()) {
+	if (thread == _current && !z_arch_is_in_isr()) {
 		z_swap(&lock, key);
 	} else {
 		/* Really, there's no good reason for this to be a


@@ -184,7 +184,7 @@ static inline u32_t z_vrfy_k_timer_status_get(struct k_timer *timer)
 u32_t z_impl_k_timer_status_sync(struct k_timer *timer)
 {
-	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(!z_arch_is_in_isr(), "");
 	k_spinlock_key_t key = k_spin_lock(&lock);
 	u32_t result = timer->status;


@@ -25,7 +25,7 @@ static void offload_function(void *param)
 	u32_t x = POINTER_TO_INT(param);
 	/* Make sure we're in IRQ context */
-	zassert_true(z_is_in_isr(), "Not in IRQ context!");
+	zassert_true(k_is_in_isr(), "Not in IRQ context!");
 	sentinel = x;
 }


@@ -128,7 +128,7 @@ static void offload_function(void *param)
 {
 	ARG_UNUSED(param);
-	zassert_true(z_is_in_isr(), "Not in IRQ context!");
+	zassert_true(k_is_in_isr(), "Not in IRQ context!");
 	k_timer_init(&timer, timer_handler, NULL);
 	k_busy_wait(MS_TO_US(1));
 	k_timer_start(&timer, DURATION, 0);