arch/*: Add z_arch_irq_unlocked() predicate and test
It's useful to be able to inspect the key returned from z_arch_irq_lock() to see whether interrupts were enabled at the point where z_arch_irq_lock() was called. Architectures tend to represent this in a simple way that doesn't require platform assembly to inspect. Adds a simple test to kernel/common that validates this predicate with a nested lock.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
parent 76d9d7806d
commit e6af0f8caa

9 changed files with 97 additions and 0 deletions
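As a quick illustration of how the new predicate is meant to be used (this sketch is not part of the diff below; the helper name may_block_here() is hypothetical), a caller can take the lock, inspect the key, and then restore the previous interrupt state:

/* Minimal usage sketch, assuming the z_arch_irq_lock()/z_arch_irq_unlock()
 * declarations from the architecture headers touched below.
 */
#include <stdbool.h>

static bool may_block_here(void)
{
        /* The key records the interrupt state at the time of the lock call */
        unsigned int key = z_arch_irq_lock();
        bool was_unlocked = z_arch_irq_unlocked(key);

        /* Restore whatever state was in effect before the lock */
        z_arch_irq_unlock(key);

        /* Blocking is only reasonable if interrupts were enabled on entry */
        return was_unlocked;
}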
@@ -37,6 +37,15 @@ void posix_irq_full_unlock(void);
int posix_get_current_irq(void);

/* irq_offload() from irq_offload.h must also be defined by the SOC or board */

/**
 * Returns true if interrupts were unlocked prior to the
 * z_arch_irq_lock() call that produced the key argument.
 */
static inline bool z_arch_irq_unlocked(unsigned int key)
{
        return key == false;
}

#ifdef __cplusplus
}
#endif
@@ -42,6 +42,15 @@ static inline void z_arch_irq_unlock(unsigned int key)
        }
}

/**
 * Returns true if interrupts were unlocked prior to the
 * z_arch_irq_lock() call that produced the key argument.
 */
static inline bool z_arch_irq_unlocked(unsigned int key)
{
        return (key & 0x200) != 0;
}

static inline void arch_nop(void)
{
        __asm__ volatile("nop");
@@ -127,6 +127,19 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
        __asm__ volatile("seti %0" : : "ir"(key) : "memory");
}

/**
 * Returns true if interrupts were unlocked prior to the
 * z_arch_irq_lock() call that produced the key argument.
 */
static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
{
        /* ARC irq lock uses instruction "clri r0",
         * r0 == {26'd0, 1'b1, STATUS32.IE, STATUS32.E[3:0]}
         * bit4 is used to record IE (Interrupt Enable) bit
         */
        return (key & 0x10) == 0x10;
}

#endif /* _ASMLANGUAGE */

#ifdef __cplusplus
@@ -184,6 +184,15 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
}

/**
 * Returns true if interrupts were unlocked prior to the
 * z_arch_irq_lock() call that produced the key argument.
 */
static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
{
        /* This convention works for both PRIMASK and BASEPRI */
        return key == 0;
}

#endif /* _ASMLANGUAGE */
@@ -124,6 +124,15 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
#endif
}

/**
 * Returns true if interrupts were unlocked prior to the
 * z_arch_irq_lock() call that produced the key argument.
 */
static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
{
        return key & 1;
}

void z_arch_irq_enable(unsigned int irq);
void z_arch_irq_disable(unsigned int irq);
@@ -115,6 +115,22 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
                : "memory");
}

/**
 * Returns true if interrupts were unlocked prior to the
 * z_arch_irq_lock() call that produced the key argument.
 */
static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
{
        /* FIXME: looking at z_arch_irq_lock, this should be reducible
         * to just testing that key is nonzero (because it should only
         * have the single bit set).  But there is a mask applied to
         * the argument in z_arch_irq_unlock() that has me worried
         * that something elsewhere might try to set a bit?  Do it
         * the safe way for now.
         */
        return (key & SOC_MSTATUS_IEN) == SOC_MSTATUS_IEN;
}

/**
 * @brief Explicitly nop operation.
 */
@@ -441,6 +441,15 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
        z_do_irq_unlock();
}

/**
 * Returns true if interrupts were unlocked prior to the
 * z_arch_irq_lock() call that produced the key argument.
 */
static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
{
        return (key & 0x200) != 0;
}

/**
 * @brief Explicitly nop operation.
 */
@@ -73,6 +73,15 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
        XTOS_RESTORE_INTLEVEL(key);
}

/**
 * Returns true if interrupts were unlocked prior to the
 * z_arch_irq_lock() call that produced the key argument.
 */
static ALWAYS_INLINE bool z_arch_irq_unlocked(unsigned int key)
{
        return (key & 0xf) == 0; /* INTLEVEL field */
}

#include <irq.h>

#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_IRQ_H_ */
@@ -40,6 +40,20 @@ static void offload_function(void *param)
 */
void test_irq_offload(void)
{
        /* Simple validation of nested locking. */
        unsigned int key1, key2;

        key1 = z_arch_irq_lock();
        zassert_true(z_arch_irq_unlocked(key1),
                     "IRQs should have been unlocked, but key is 0x%x\n",
                     key1);
        key2 = z_arch_irq_lock();
        zassert_false(z_arch_irq_unlocked(key2),
                      "IRQs should have been locked, but key is 0x%x\n",
                      key2);
        z_arch_irq_unlock(key2);
        z_arch_irq_unlock(key1);

        /**TESTPOINT: Offload to IRQ context*/
        irq_offload(offload_function, (void *)SENTINEL_VALUE);