irq: do not use _inline versions of irq_lock/unlock

Standardize on using the irq_lock/irq_unlock (non-inline) symbols
everywhere.

The non-inline versions provide no benefit over the inline ones, so
they will be removed in a subsequent commit, and the inline versions
will then take over the irq_lock/irq_unlock names by dropping their
_inline suffix.

Change-Id: Ib0b55f450447366468723e065a60adbadf7067a9
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
Author:    Benjamin Walsh
Date:      2015-08-12 17:53:54 -04:00
Committer: Anas Nashif
Commit:    6469e578cb

19 changed files with 124 additions and 185 deletions
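For reference, the call-site pattern this change standardizes on is the key-based critical section: irq_lock() returns a key recording the previous interrupt state, and irq_unlock() restores that state, so nested lock/unlock pairs behave correctly. A minimal sketch (the shared counter and helper name are hypothetical, not part of this commit):

    #include <nanokernel.h>     /* irq_lock()/irq_unlock() in the nanokernel-era tree */

    static int shared_counter;  /* hypothetical data needing protection */

    static void shared_counter_inc(void)
    {
        int key = irq_lock();   /* key = previous interrupt state */

        shared_counter++;       /* interrupts stay locked in here */

        irq_unlock(key);        /* restore the saved state */
    }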

View file

@@ -84,10 +84,10 @@ static ALWAYS_INLINE void context_monitor_init(struct ccs *pCcs /* context */
 	 * runnable.
 	 */
 
-	key = irq_lock_inline();
+	key = irq_lock();
 
 	pCcs->next_context = _nanokernel.contexts;
 	_nanokernel.contexts = pCcs;
-	irq_unlock_inline(key);
+	irq_unlock(key);
 }
 #endif /* CONFIG_CONTEXT_MONITOR */

View file

@@ -72,7 +72,7 @@ void _irq_handler_set(
 	void *arg
 	)
 {
-	int key = irq_lock_inline();
+	int key = irq_lock();
 	int index = irq - 16;
 
 	__ASSERT(old == _sw_isr_table[index].isr,
@@ -83,7 +83,7 @@ void _irq_handler_set(
 		_sw_isr_table[index].arg = arg;
 	}
 
-	irq_unlock_inline(key);
+	irq_unlock(key);
 }
 
 /*
@@ -98,10 +98,10 @@ void _irq_handler_set(
 
 void irq_enable(unsigned int irq)
 {
-	int key = irq_lock_inline();
+	int key = irq_lock();
 
 	_arc_v2_irq_unit_int_enable(irq);
-	irq_unlock_inline(key);
+	irq_unlock(key);
 }
 
 /*
@@ -115,10 +115,10 @@ void irq_enable(unsigned int irq)
 
 void irq_disable(unsigned int irq)
 {
-	int key = irq_lock_inline();
+	int key = irq_lock();
 
 	_arc_v2_irq_unit_int_disable(irq);
-	irq_unlock_inline(key);
+	irq_unlock(key);
}
 
 /*
@@ -140,12 +140,12 @@ void _irq_priority_set(
 	unsigned int prio
 	)
 {
-	int key = irq_lock_inline();
+	int key = irq_lock();
 
 	__ASSERT(prio >= 0 && prio < CONFIG_NUM_IRQ_PRIORITIES,
 		 "invalid priority!");
 	_arc_v2_irq_unit_prio_set(irq, prio);
-	irq_unlock_inline(key);
+	irq_unlock(key);
 }
 
 /*

View file

@@ -65,7 +65,7 @@ void _irq_handler_set(unsigned int irq,
 		      void (*new)(void *arg),
 		      void *arg)
 {
-	int key = irq_lock_inline();
+	int key = irq_lock();
 
 	__ASSERT(old == _sw_isr_table[irq].isr, "expected ISR not found in table");
@@ -74,7 +74,7 @@ void _irq_handler_set(unsigned int irq,
 		_sw_isr_table[irq].arg = arg;
 	}
 
-	irq_unlock_inline(key);
+	irq_unlock(key);
 }
 
 /**

View file

@@ -77,14 +77,14 @@ int atomic_cas(
 	int key;             /* interrupt lock level */
 	atomic_val_t ovalue; /* temporary storage */
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	ovalue = *target;
 	if (ovalue != oldValue) {
-		irq_unlock_inline(key);
+		irq_unlock(key);
 		return 0;
 	}
 
 	*target = newValue;
-	irq_unlock_inline(key);
+	irq_unlock(key);
 	return 1;
 }
 
@@ -107,10 +107,10 @@ atomic_val_t atomic_add(
 	int key;             /* interrupt lock level */
 	atomic_val_t ovalue; /* previous value from <target> */
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	ovalue = *target;
 	*target = ovalue + value;
-	irq_unlock_inline(key);
+	irq_unlock(key);
 
 	return ovalue;
 }
 
@@ -133,10 +133,10 @@ atomic_val_t atomic_sub(
 	int key;             /* interrupt lock level */
 	atomic_val_t ovalue; /* previous value from <target> */
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	ovalue = *target;
 	*target = ovalue - value;
-	irq_unlock_inline(key);
+	irq_unlock(key);
 
 	return ovalue;
 }
 
@@ -157,10 +157,10 @@ atomic_val_t atomic_inc(
 	int key;             /* interrupt lock level */
 	atomic_val_t ovalue; /* value from <target> before the increment */
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	ovalue = *target;
 	*target = ovalue + 1;
-	irq_unlock_inline(key);
+	irq_unlock(key);
 
 	return ovalue;
 }
 
@@ -181,10 +181,10 @@ atomic_val_t atomic_dec(
 	int key;             /* interrupt lock level */
 	atomic_val_t ovalue; /* value from <target> prior to the decrement */
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	ovalue = *target;
 	*target = ovalue - 1;
-	irq_unlock_inline(key);
+	irq_unlock(key);
 
 	return ovalue;
 }
 
@@ -223,10 +223,10 @@ atomic_val_t atomic_set(
 	int key;             /* interrupt lock level */
 	atomic_val_t ovalue; /* previous value from <target> */
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	ovalue = *target;
 	*target = value;
-	irq_unlock_inline(key);
+	irq_unlock(key);
 
 	return ovalue;
 }
 
@@ -248,10 +248,10 @@ atomic_val_t atomic_clear(
 	int key;             /* interrupt lock level */
 	atomic_val_t ovalue; /* previous value from <target> */
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	ovalue = *target;
 	*target = 0;
-	irq_unlock_inline(key);
+	irq_unlock(key);
 
 	return ovalue;
 }
 
@@ -274,10 +274,10 @@ atomic_val_t atomic_or(
 	int key;             /* interrupt lock level */
 	atomic_val_t ovalue; /* previous value from <target> */
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	ovalue = *target;
 	*target = ovalue | value;
-	irq_unlock_inline(key);
+	irq_unlock(key);
 
 	return ovalue;
 }
 
@@ -300,10 +300,10 @@ atomic_val_t atomic_xor(
 	int key;             /* interrupt lock level */
 	atomic_val_t ovalue; /* previous value from <target> */
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	ovalue = *target;
 	*target = ovalue ^ value;
-	irq_unlock_inline(key);
+	irq_unlock(key);
 
 	return ovalue;
 }
 
@@ -326,10 +326,10 @@ atomic_val_t atomic_and(
 	int key;             /* interrupt lock level */
 	atomic_val_t ovalue; /* previous value from <target> */
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	ovalue = *target;
 	*target = ovalue & value;
-	irq_unlock_inline(key);
+	irq_unlock(key);
 
 	return ovalue;
 }
 
@@ -352,10 +352,10 @@ atomic_val_t atomic_nand(
 	int key;             /* interrupt lock level */
 	atomic_val_t ovalue; /* previous value from <target> */
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	ovalue = *target;
 	*target = ~(ovalue & value);
-	irq_unlock_inline(key);
+	irq_unlock(key);
 
 	return ovalue;
 }

View file

@@ -191,7 +191,7 @@ void _FpEnable(tCCS *ccs,
 	/* Lock interrupts to prevent a pre-emptive context switch from occuring
 	 */
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 
 	/* Indicate task/fiber requires non-integer context saving */
@@ -284,7 +284,7 @@ void _FpEnable(tCCS *ccs,
 		}
 	}
 
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 }
 
 /**
@@ -352,7 +352,7 @@ void _FpDisable(tCCS *ccs)
 	/* Lock interrupts to prevent a pre-emptive context switch from occuring
 	 */
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 
 	/*
 	 * Disable _all_ floating point capabilities for the task/fiber,
@@ -373,7 +373,7 @@ void _FpDisable(tCCS *ccs)
 		_nanokernel.current_fp = (tCCS *)0;
 	}
 
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 }
 
 /**

View file

@@ -83,12 +83,12 @@ cmdPkt_t *_cmd_pkt_get(
 	uint32_t index;      /* index into command packet array */
 	int key;             /* interrupt lock level */
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	index = pSet->index;
 	pSet->index++;
 	if (pSet->index >= pSet->nPkts)
 		pSet->index = 0;
-	irq_unlock_inline(key);
+	irq_unlock(key);
 
 	return &pSet->cmdPkt[index];
 }

View file

@@ -415,7 +415,7 @@ static void _power_save(void)
 	if (_sys_power_save_flag) {
 		for (;;) {
-			irq_lock_inline();
+			irq_lock();
 #ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
 			_sys_power_save_idle(_get_next_timer_expiry());
 #else

View file

@@ -117,10 +117,10 @@ int32_t task_tick_get_32(void)
 int64_t task_tick_get(void)
 {
 	int64_t ticks;
-	int key = irq_lock_inline();
+	int key = irq_lock();
 
 	ticks = _k_sys_clock_tick_count;
-	irq_unlock_inline(key);
+	irq_unlock(key);
 	return ticks;
 }
 
@@ -136,10 +136,10 @@ int64_t task_tick_get(void)
 
 static void sys_clock_increment(int inc)
 {
-	int key = irq_lock_inline();
+	int key = irq_lock();
 
 	_k_sys_clock_tick_count += inc;
-	irq_unlock_inline(key);
+	irq_unlock(key);
 }
 
 /**

View file

@@ -57,7 +57,7 @@ static void event_logger_put(struct event_logger *logger, uint16_t event_id,
 	int i;
 
 	/* Lock interrupt to be sure this function will be atomic */
-	key = irq_lock_inline();
+	key = irq_lock();
 
 	buffer_capacity_used = (logger->head - logger->tail +
 				logger->buffer_size) % logger->buffer_size;
@@ -90,7 +90,7 @@ static void event_logger_put(struct event_logger *logger, uint16_t event_id,
 		sem_give_fn(&(logger->sync_sema));
 	}
 
-	irq_unlock_inline(key);
+	irq_unlock(key);
 }

View file

@@ -125,7 +125,7 @@ void _fiber_start(char *pStack,
 void fiber_yield(void)
 {
-	unsigned int imask = irq_lock_inline();
+	unsigned int imask = irq_lock();
 
 	if ((_nanokernel.fiber != (tCCS *)NULL) &&
 	    (_nanokernel.current->prio >= _nanokernel.fiber->prio)) {
@@ -138,7 +138,7 @@ void fiber_yield(void)
 		_nano_fiber_schedule(_nanokernel.current);
 		_Swap(imask);
 	} else
-		irq_unlock_inline(imask);
+		irq_unlock(imask);
 }
 
 /**
@@ -197,7 +197,7 @@ void fiber_sleep(int32_t timeout_in_ticks)
 		return;
 	}
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	_nano_timeout_add(_nanokernel.current, NULL, timeout_in_ticks);
 	_Swap(key);
 }
@@ -217,11 +217,11 @@ void *fiber_delayed_start(char *stack, unsigned int stack_size_in_bytes,
 	_NewContext(stack, stack_size_in_bytes, (_ContextEntry)entry_point,
 		    (void *)param1, (void *)param2, (void *)0, priority, options);
 
-	key = irq_lock_inline();
+	key = irq_lock();
 
 	_nano_timeout_add(ccs, NULL, timeout_in_ticks);
-	irq_unlock_inline(key);
+	irq_unlock(key);
 
 	return ccs;
 }
@@ -230,11 +230,11 @@ FUNC_ALIAS(fiber_delayed_start_cancel, task_fiber_delayed_start_cancel, void);
 
 void fiber_delayed_start_cancel(void *handle)
 {
-	int key = irq_lock_inline();
+	int key = irq_lock();
 
 	_nano_timeout_abort((struct ccs *)handle);
-	irq_unlock_inline(key);
+	irq_unlock(key);
 }
 
 #endif /* CONFIG_NANO_TIMEOUTS */

View file

@@ -122,7 +122,7 @@ void _fifo_put_non_preemptible(struct nano_fifo *fifo, void *data)
 {
 	unsigned int imask;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 	fifo->stat++;
 	if (fifo->stat <= 0) {
@@ -133,14 +133,14 @@ void _fifo_put_non_preemptible(struct nano_fifo *fifo, void *data)
 		enqueue_data(fifo, data);
 	}
 
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 }
 
 void nano_task_fifo_put( struct nano_fifo *fifo, void *data)
 {
 	unsigned int imask;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 	fifo->stat++;
 	if (fifo->stat <= 0) {
@@ -153,7 +153,7 @@ void nano_task_fifo_put( struct nano_fifo *fifo, void *data)
 		enqueue_data(fifo, data);
 	}
 
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 }
@@ -207,13 +207,13 @@ void *_fifo_get(struct nano_fifo *fifo)
 	void *data = NULL;
 	unsigned int imask;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 	if (fifo->stat > 0) {
 		fifo->stat--;
 		data = dequeue_data(fifo);
 	}
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 	return data;
 }
@@ -222,7 +222,7 @@ void *nano_fiber_fifo_get_wait( struct nano_fifo *fifo)
 	void *data;
 	unsigned int imask;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 	fifo->stat--;
 	if (fifo->stat < 0) {
@@ -230,7 +230,7 @@ void *nano_fiber_fifo_get_wait( struct nano_fifo *fifo)
 		data = (void *)_Swap(imask);
 	} else {
 		data = dequeue_data(fifo);
-		irq_unlock_inline(imask);
+		irq_unlock(imask);
 	}
 
 	return data;
@@ -244,7 +244,7 @@ void *nano_task_fifo_get_wait( struct nano_fifo *fifo)
 	/* spin until data is put onto the FIFO */
 
 	while (1) {
-		imask = irq_lock_inline();
+		imask = irq_lock();
 
 		/*
 		 * Predict that the branch will be taken to break out of the loop.
@@ -261,7 +261,7 @@ void *nano_task_fifo_get_wait( struct nano_fifo *fifo)
 
 	fifo->stat--;
 	data = dequeue_data(fifo);
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 
 	return data;
 }
@@ -291,7 +291,7 @@ void *nano_fiber_fifo_get_wait_timeout(struct nano_fifo *fifo,
 		return nano_fiber_fifo_get(fifo);
 	}
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	fifo->stat--;
 	if (fifo->stat < 0) {
@@ -300,7 +300,7 @@ void *nano_fiber_fifo_get_wait_timeout(struct nano_fifo *fifo,
 		data = (void *)_Swap(key);
 	} else {
 		data = dequeue_data(fifo);
-		irq_unlock_inline(key);
+		irq_unlock(key);
 	}
 
 	return data;
@@ -321,7 +321,7 @@ void *nano_task_fifo_get_wait_timeout(struct nano_fifo *fifo,
 		return nano_task_fifo_get(fifo);
 	}
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	cur_ticks = nano_tick_get();
 	limit = cur_ticks + timeout_in_ticks;
@@ -335,7 +335,7 @@ void *nano_task_fifo_get_wait_timeout(struct nano_fifo *fifo,
 		if (likely(fifo->stat > 0)) {
 			fifo->stat--;
 			data = dequeue_data(fifo);
-			irq_unlock_inline(key);
+			irq_unlock(key);
 			return data;
 		}
@@ -343,11 +343,11 @@ void *nano_task_fifo_get_wait_timeout(struct nano_fifo *fifo,
 		nano_cpu_atomic_idle(key);
 
-		key = irq_lock_inline();
+		key = irq_lock();
 		cur_ticks = nano_tick_get();
 	}
 
-	irq_unlock_inline(key);
+	irq_unlock(key);
 	return NULL;
 }
 
 #endif /* CONFIG_NANO_TIMEOUTS */

View file

@@ -79,7 +79,7 @@ void _lifo_put_non_preemptible(struct nano_lifo *lifo, void *data)
 	tCCS *ccs;
 	unsigned int imask;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 	ccs = _nano_wait_q_remove(&lifo->wait_q);
 	if (ccs) {
 		_nano_timeout_abort(ccs);
@@ -89,7 +89,7 @@ void _lifo_put_non_preemptible(struct nano_lifo *lifo, void *data)
 		lifo->list = data;
 	}
 
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 }
 
 void nano_task_lifo_put(struct nano_lifo *lifo, void *data)
@@ -97,7 +97,7 @@ void nano_task_lifo_put(struct nano_lifo *lifo, void *data)
 	tCCS *ccs;
 	unsigned int imask;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 	ccs = _nano_wait_q_remove(&lifo->wait_q);
 	if (ccs) {
 		_nano_timeout_abort(ccs);
@@ -109,7 +109,7 @@ void nano_task_lifo_put(struct nano_lifo *lifo, void *data)
 		lifo->list = data;
 	}
 
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 }
 
 FUNC_ALIAS(_lifo_get, nano_isr_lifo_get, void *);
@@ -129,14 +129,14 @@ void *_lifo_get(struct nano_lifo *lifo)
 	void *data;
 	unsigned int imask;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 	data = lifo->list;
 	if (data) {
 		lifo->list = *(void **) data;
 	}
 
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 
 	return data;
 }
@@ -152,7 +152,7 @@ void *nano_fiber_lifo_get_wait(struct nano_lifo *lifo )
 	void *data;
 	unsigned int imask;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 	if (!lifo->list) {
 		_nano_wait_q_put(&lifo->wait_q);
@@ -160,7 +160,7 @@ void *nano_fiber_lifo_get_wait(struct nano_lifo *lifo )
 	} else {
 		data = lifo->list;
 		lifo->list = *(void **) data;
-		irq_unlock_inline(imask);
+		irq_unlock(imask);
 	}
 
 	return data;
@@ -174,7 +174,7 @@ void *nano_task_lifo_get_wait(struct nano_lifo *lifo)
 	/* spin until data is put onto the LIFO */
 
 	while (1) {
-		imask = irq_lock_inline();
+		imask = irq_lock();
 
 		/*
 		 * Predict that the branch will be taken to break out of the loop.
@@ -192,7 +192,7 @@ void *nano_task_lifo_get_wait(struct nano_lifo *lifo)
 
 	data = lifo->list;
 	lifo->list = *(void **) data;
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 
 	return data;
 }
@@ -227,12 +227,12 @@ void *_nano_fiber_lifo_get_panic(struct nano_lifo *lifo)
 void *nano_fiber_lifo_get_wait_timeout(struct nano_lifo *lifo,
 		int32_t timeout_in_ticks)
 {
-	unsigned int key = irq_lock_inline();
+	unsigned int key = irq_lock();
 	void *data;
 
 	if (!lifo->list) {
 		if (unlikely(TICKS_NONE == timeout_in_ticks)) {
-			irq_unlock_inline(key);
+			irq_unlock(key);
 			return NULL;
 		}
 		if (likely(timeout_in_ticks != TICKS_UNLIMITED)) {
@@ -244,7 +244,7 @@ void *nano_fiber_lifo_get_wait_timeout(struct nano_lifo *lifo,
 	} else {
 		data = lifo->list;
 		lifo->list = *(void **)data;
-		irq_unlock_inline(key);
+		irq_unlock(key);
 	}
 
 	return data;
@@ -265,7 +265,7 @@ void *nano_task_lifo_get_wait_timeout(struct nano_lifo *lifo,
 		return nano_task_lifo_get(lifo);
 	}
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	cur_ticks = nano_tick_get();
 	limit = cur_ticks + timeout_in_ticks;
@@ -279,7 +279,7 @@ void *nano_task_lifo_get_wait_timeout(struct nano_lifo *lifo,
 		if (likely(lifo->list)) {
 			data = lifo->list;
 			lifo->list = *(void **)data;
-			irq_unlock_inline(key);
+			irq_unlock(key);
 			return data;
 		}
@@ -287,11 +287,11 @@ void *nano_task_lifo_get_wait_timeout(struct nano_lifo *lifo,
 		nano_cpu_atomic_idle(key);
 
-		key = irq_lock_inline();
+		key = irq_lock();
 		cur_ticks = nano_tick_get();
 	}
 
-	irq_unlock_inline(key);
+	irq_unlock(key);
 	return NULL;
 }
 
 #endif /* CONFIG_NANO_TIMEOUTS */

View file

@@ -92,7 +92,7 @@ void _sem_give_non_preemptible(struct nano_sem *sem)
 	tCCS *ccs;
 	unsigned int imask;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 	ccs = _nano_wait_q_remove(&sem->wait_q);
 	if (!ccs) {
 		sem->nsig++;
@@ -101,7 +101,7 @@ void _sem_give_non_preemptible(struct nano_sem *sem)
 		set_sem_available(ccs);
 	}
 
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 }
 
 void nano_task_sem_give(struct nano_sem *sem)
@@ -109,7 +109,7 @@ void nano_task_sem_give(struct nano_sem *sem)
 	tCCS *ccs;
 	unsigned int imask;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 	ccs = _nano_wait_q_remove(&sem->wait_q);
 	if (ccs) {
 		_nano_timeout_abort(ccs);
@@ -120,7 +120,7 @@ void nano_task_sem_give(struct nano_sem *sem)
 		sem->nsig++;
 	}
 
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 }
 
 void nano_sem_give(struct nano_sem *sem)
@@ -142,10 +142,10 @@ int _sem_take(
 	unsigned int imask;
 	int avail;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 	avail = (sem->nsig > 0);
 	sem->nsig -= avail;
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 
 	return avail;
 }
@@ -160,13 +160,13 @@ void nano_fiber_sem_take_wait(struct nano_sem *sem)
 {
 	unsigned int imask;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 	if (sem->nsig == 0) {
 		_nano_wait_q_put(&sem->wait_q);
 		_Swap(imask);
 	} else {
 		sem->nsig--;
-		irq_unlock_inline(imask);
+		irq_unlock(imask);
 	}
 }
@@ -177,7 +177,7 @@ void nano_task_sem_take_wait(struct nano_sem *sem)
 	/* spin until the sempahore is signaled */
 
 	while (1) {
-		imask = irq_lock_inline();
+		imask = irq_lock();
 
 		/*
 		 * Predict that the branch will be taken to break out of the loop.
@@ -193,7 +193,7 @@ void nano_task_sem_take_wait(struct nano_sem *sem)
 	}
 
 	sem->nsig--;
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 }
 
 void nano_sem_take_wait(struct nano_sem *sem)
@@ -208,11 +208,11 @@ void nano_sem_take_wait(struct nano_sem *sem)
 
 int nano_fiber_sem_take_wait_timeout(struct nano_sem *sem, int32_t timeout_in_ticks)
 {
-	unsigned int key = irq_lock_inline();
+	unsigned int key = irq_lock();
 
 	if (sem->nsig == 0) {
 		if (unlikely(TICKS_NONE == timeout_in_ticks)) {
-			irq_unlock_inline(key);
+			irq_unlock(key);
 			return 0;
 		}
 		if (likely(timeout_in_ticks != TICKS_UNLIMITED)) {
@@ -225,7 +225,7 @@ int nano_fiber_sem_take_wait_timeout(struct nano_sem *sem, int32_t timeout_in_ti
 
 	sem->nsig--;
-	irq_unlock_inline(key);
+	irq_unlock(key);
 
 	return 1;
 }
@@ -244,7 +244,7 @@ int nano_task_sem_take_wait_timeout(struct nano_sem *sem, int32_t timeout_in_tic
 		return nano_task_sem_take(sem);
 	}
 
-	key = irq_lock_inline();
+	key = irq_lock();
 	cur_ticks = nano_tick_get();
 	limit = cur_ticks + timeout_in_ticks;
@@ -257,7 +257,7 @@ int nano_task_sem_take_wait_timeout(struct nano_sem *sem, int32_t timeout_in_tic
 		if (likely(sem->nsig > 0)) {
 			sem->nsig--;
-			irq_unlock_inline(key);
+			irq_unlock(key);
 			return 1;
 		}
@@ -265,11 +265,11 @@ int nano_task_sem_take_wait_timeout(struct nano_sem *sem, int32_t timeout_in_tic
 		nano_cpu_atomic_idle(key);
 
-		key = irq_lock_inline();
+		key = irq_lock();
 		cur_ticks = nano_tick_get();
 	}
 
-	irq_unlock_inline(key);
+	irq_unlock(key);
 	return 0;
 }

View file

@@ -103,7 +103,7 @@ void _stack_push_non_preemptible(
 	tCCS *ccs;
 	unsigned int imask;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 	ccs = stack->fiber;
 	if (ccs) {
@@ -115,7 +115,7 @@ void _stack_push_non_preemptible(
 		stack->next++;
 	}
 
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 }
 
 /**
@@ -137,7 +137,7 @@ void nano_task_stack_push(
 	tCCS *ccs;
 	unsigned int imask;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 	ccs = stack->fiber;
 	if (ccs) {
@@ -151,7 +151,7 @@ void nano_task_stack_push(
 		stack->next++;
 	}
 
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 }
 
 FUNC_ALIAS(_stack_pop, nano_isr_stack_pop, int);
@@ -187,7 +187,7 @@ int _stack_pop(
 	unsigned int imask;
 	int rv = 0;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 	if (stack->next > stack->base) {
 		stack->next--;
@@ -195,7 +195,7 @@ int _stack_pop(
 		rv = 1;
 	}
 
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 	return rv;
 }
@@ -224,7 +224,7 @@ uint32_t nano_fiber_stack_pop_wait(
 	uint32_t data;
 	unsigned int imask;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 	if (stack->next == stack->base) {
 		stack->fiber = _nanokernel.current;
@@ -232,7 +232,7 @@ uint32_t nano_fiber_stack_pop_wait(
 	} else {
 		stack->next--;
 		data = *(stack->next);
-		irq_unlock_inline(imask);
+		irq_unlock(imask);
 	}
 
 	return data;
@@ -261,7 +261,7 @@ uint32_t nano_task_stack_pop_wait(
 	/* spin until data is pushed onto the stack */
 
 	while (1) {
-		imask = irq_lock_inline();
+		imask = irq_lock();
 
 		/*
 		 * Predict that the branch will be taken to break out of the loop.
@@ -294,7 +294,7 @@ uint32_t nano_task_stack_pop_wait(
 
 	stack->next--;
 	data = *(stack->next);
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 
 	return data;
 }

View file

@@ -86,9 +86,9 @@ int64_t nano_tick_get(void)
 	 * we have to lock the timer interrupt that causes change of
 	 * _nano_ticks
 	 */
-	unsigned int imask = irq_lock_inline();
+	unsigned int imask = irq_lock();
 
 	tmp_nano_ticks = _nano_ticks;
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 	return tmp_nano_ticks;
 }
@@ -145,9 +145,9 @@ static ALWAYS_INLINE int64_t _nano_tick_delta(int64_t *reftime)
 	 * we have to lock the timer interrupt that causes change of
 	 * _nano_ticks
 	 */
-	unsigned int imask = irq_lock_inline();
+	unsigned int imask = irq_lock();
 
 	saved = _nano_ticks;
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 	delta = saved - (*reftime);
 	*reftime = saved;

View file

@@ -105,7 +105,7 @@ void _timer_start(struct nano_timer *timer, /* timer to start */
 
 	timer->ticks = ticks;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 
 	cur = _nano_timer_list;
@@ -124,7 +124,7 @@ void _timer_start(struct nano_timer *timer, /* timer to start */
 	else
 		_nano_timer_list = timer;
 
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 }
 
 /**
@@ -145,7 +145,7 @@ static void _timer_stop(struct nano_timer *timer /* timer to stop */
 	struct nano_timer *cur;
 	struct nano_timer *prev = NULL;
 
-	imask = irq_lock_inline();
+	imask = irq_lock();
 
 	cur = _nano_timer_list;
@@ -173,7 +173,7 @@ static void _timer_stop(struct nano_timer *timer /* timer to stop */
 
 	/* now the timer can't expire since it is removed from the list */
 
-	irq_unlock_inline(imask);
+	irq_unlock(imask);
 }
 
 /**

View file

@@ -64,8 +64,6 @@ int nanoIntLockUnlock(void)
 	unsigned int mask;
 
 	PRINT_FORMAT(" 5- Measure average time to lock then unlock interrupts");
-	PRINT_FORMAT(" 5.1- When each lock and unlock is executed as a function"
-		     " call");
 	bench_test_start();
 	timestamp = TIME_STAMP_DELTA_GET(0);
 	for (i = 0; i < NTESTS; i++) {
@@ -81,24 +79,5 @@ int nanoIntLockUnlock(void)
 		errorCount++;
 		PRINT_OVERFLOW_ERROR();
 	}
-	PRINT_FORMAT(" ");
-	PRINT_FORMAT(" 5.2- When each lock and unlock is executed as inline"
-		     " function call");
-	bench_test_start();
-	timestamp = TIME_STAMP_DELTA_GET(0);
-	for (i = 0; i < NTESTS; i++) {
-		mask = irq_lock_inline();
-		irq_unlock_inline(mask);
-	}
-	timestamp = TIME_STAMP_DELTA_GET(timestamp);
-	if (bench_test_end() == 0) {
-		PRINT_FORMAT(" Average time for lock then unlock "
-			     "is %lu tcs = %lu nsec",
-			     timestamp / NTESTS, SYS_CLOCK_HW_CYCLES_TO_NS_AVG(timestamp, NTESTS));
-	} else {
-		errorCount++;
-		PRINT_OVERFLOW_ERROR();
-	}
 	return 0;
 }

View file

@@ -37,11 +37,6 @@ irq_unlock
   - Continuation irq_lock: unlock interrupts, loop and verify the tick
     count changes.
 
-irq_lock_inline
-irq_unlock_inline
-  - These two tests are tested in the same way as irq_lock()
-    and irq_unlock().
-
 irq_connect
   - Used during nanokernel object initialization. Verified when triggering
     an ISR to perform ISR context work.
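Per the test plan above, irq_lock()/irq_unlock() are verified by watching the system tick count: it should not advance while interrupts are locked and should advance again once they are unlocked, and the removed entry notes the _inline variants were checked the same way. A rough sketch of that style of check (the busy-wait bound is an arbitrary assumption; nano_tick_get() and TC_PASS/TC_FAIL are names taken from the files changed by this commit):

    static int check_irq_lock_stops_ticks(void)
    {
        volatile int i;
        int key = irq_lock();
        int64_t before = nano_tick_get();

        /* burn time with interrupts locked; the tick ISR cannot run */
        for (i = 0; i < 1000000; i++) {
        }

        if (nano_tick_get() != before) {
            irq_unlock(key);
            return TC_FAIL;    /* ticks advanced while locked */
        }

        irq_unlock(key);

        /* with interrupts unlocked, the tick count should advance again */
        while (nano_tick_get() == before) {
        }

        return TC_PASS;
    }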

View file

@@ -36,7 +36,6 @@ This module tests the following CPU and context related routines:
 fiber_fiber_start(), task_fiber_start(), fiber_yield(),
 context_self_get(), context_type_get(), nano_cpu_idle(),
 irq_lock(), irq_unlock(),
-irq_lock_inline(), irq_unlock_inline(),
 irq_connect(), nanoCpuExcConnect(),
 irq_enable(), irq_disable(),
 */
@@ -255,32 +254,6 @@ void irq_unlockWrapper(int imask)
 	irq_unlock(imask);
 }
 
-/**
- *
- * @brief A wrapper for irq_lock_inline()
- *
- * @return irq_lock_inline() return value
- */
-
-int irq_lock_inlineWrapper(int unused)
-{
-	ARG_UNUSED(unused);
-
-	return irq_lock_inline();
-}
-
-/**
- *
- * @brief A wrapper for irq_unlock_inline()
- *
- * @return N/A
- */
-
-void irq_unlock_inlineWrapper(int imask)
-{
-	irq_unlock_inline(imask);
-}
-
 /**
  *
  * @brief A wrapper for irq_disable()
@@ -311,8 +284,7 @@ void irq_enableWrapper(int irq)
  * @brief Test routines for disabling and enabling ints
  *
  * This routine tests the routines for disabling and enabling interrupts. These
- * include irq_lock() and irq_unlock(), irq_lock_inline() and
- * irq_unlock_inline(), irq_disable() and irq_enable().
+ * include irq_lock() and irq_unlock(), irq_disable() and irq_enable().
 *
 * @return TC_PASS on success, TC_FAIL on failure
 */
@@ -848,13 +820,6 @@ void main(void)
 	}
 
-	TC_PRINT("Testing inline interrupt locking and unlocking\n");
-	rv = nanoCpuDisableInterruptsTest(irq_lock_inlineWrapper,
-					  irq_unlock_inlineWrapper, -1);
-	if (rv != TC_PASS) {
-		goto doneTests;
-	}
-
 	/*
 	 * The Cortex-M3/M4 use the SYSTICK exception for the system timer, which is
 	 * not considered an IRQ by the irq_enable/Disable APIs.