all: Update reserved function names

Update reserved function names starting with one underscore, replacing
them as follows:
   '_k_' with 'z_'
   '_K_' with 'Z_'
   '_handler_' with 'z_handl_'
   '_Cstart' with 'z_cstart'
   '_Swap' with 'z_swap'

This renaming is applied to both global and static function names in
kernel/include/ and include/. Other static function names in kernel/
are renamed by removing the leading underscore. Function names that do
not start with any of the prefixes listed above are renamed with a
'z_' or 'Z_' prefix.

Function names starting with two or three leading underscores are not
automatically renamed, since stripping a leading underscore would make
the resulting names collide with the existing variants that have one
or two leading underscores.

Various generator scripts have also been updated as well as perf,
linker and usb files. These are
   drivers/serial/uart_handlers.c
   include/linker/kobject-text.ld
   kernel/include/syscall_handler.h
   scripts/gen_kobject_list.py
   scripts/gen_syscall_header.py

Signed-off-by: Patrik Flykt <patrik.flykt@intel.com>
This commit is contained in:
Patrik Flykt 2019-03-08 14:19:05 -07:00 committed by Anas Nashif
commit 4344e27c26
324 changed files with 2264 additions and 2263 deletions

View file

@ -13,79 +13,79 @@
#include <stdbool.h>
#ifdef CONFIG_MULTITHREADING
/* A priority is valid if it is within the application priority range,
 * or if it is the idle priority and @a entry_point is the idle thread.
 */
#define Z_VALID_PRIO(prio, entry_point) \
	(((prio) == K_IDLE_PRIO && z_is_idle_thread(entry_point)) || \
	 (z_is_prio_higher_or_equal((prio), \
				    K_LOWEST_APPLICATION_THREAD_PRIO) && \
	  z_is_prio_lower_or_equal((prio), \
				   K_HIGHEST_APPLICATION_THREAD_PRIO)))

/* Assert (debug builds) that (prio, entry_point) is a valid pairing */
#define Z_ASSERT_VALID_PRIO(prio, entry_point) do { \
	__ASSERT(Z_VALID_PRIO((prio), (entry_point)), \
		 "invalid priority (%d); allowed range: %d to %d", \
		 (prio), \
		 K_LOWEST_APPLICATION_THREAD_PRIO, \
		 K_HIGHEST_APPLICATION_THREAD_PRIO); \
	} while (false)
#else
/* Without multithreading, only the single -1 priority is meaningful */
#define Z_VALID_PRIO(prio, entry_point) ((prio) == -1)
#define Z_ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
#endif
void _sched_init(void);
void _add_thread_to_ready_q(struct k_thread *thread);
void _move_thread_to_end_of_prio_q(struct k_thread *thread);
void _remove_thread_from_ready_q(struct k_thread *thread);
int _is_thread_time_slicing(struct k_thread *thread);
void _unpend_thread_no_timeout(struct k_thread *thread);
int _pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
void z_sched_init(void);
void z_add_thread_to_ready_q(struct k_thread *thread);
void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
void z_remove_thread_from_ready_q(struct k_thread *thread);
int z_is_thread_time_slicing(struct k_thread *thread);
void z_unpend_thread_no_timeout(struct k_thread *thread);
int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
_wait_q_t *wait_q, s32_t timeout);
int _pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout);
void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout);
void _reschedule(struct k_spinlock *lock, k_spinlock_key_t key);
void _reschedule_irqlock(u32_t key);
struct k_thread *_unpend_first_thread(_wait_q_t *wait_q);
void _unpend_thread(struct k_thread *thread);
int _unpend_all(_wait_q_t *wait_q);
void _thread_priority_set(struct k_thread *thread, int prio);
void *_get_next_switch_handle(void *interrupted);
struct k_thread *_find_first_thread_to_unpend(_wait_q_t *wait_q,
int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout);
void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout);
void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key);
void z_reschedule_irqlock(u32_t key);
struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q);
void z_unpend_thread(struct k_thread *thread);
int z_unpend_all(_wait_q_t *wait_q);
void z_thread_priority_set(struct k_thread *thread, int prio);
void *z_get_next_switch_handle(void *interrupted);
struct k_thread *z_find_first_thread_to_unpend(_wait_q_t *wait_q,
struct k_thread *from);
void idle(void *a, void *b, void *c);
void z_time_slice(int ticks);
/* Pend the current thread on @a wait_q, taking the IRQ lock locally. */
static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, s32_t timeout)
{
	(void) z_pend_curr_irqlock(z_arch_irq_lock(), wait_q, timeout);
}
/* Trigger a reschedule point, taking the IRQ lock locally. */
static inline void z_reschedule_unlocked(void)
{
	(void) z_reschedule_irqlock(z_arch_irq_lock());
}
/* find which one is the next thread to run */
/* must be called with interrupts locked */
#ifdef CONFIG_SMP
extern struct k_thread *_get_next_ready_thread(void);
extern struct k_thread *z_get_next_ready_thread(void);
#else
static ALWAYS_INLINE struct k_thread *_get_next_ready_thread(void)
static ALWAYS_INLINE struct k_thread *z_get_next_ready_thread(void)
{
return _kernel.ready_q.cache;
}
#endif
static inline bool _is_idle_thread(void *entry_point)
static inline bool z_is_idle_thread(void *entry_point)
{
return entry_point == idle;
}
static inline bool _is_thread_pending(struct k_thread *thread)
static inline bool z_is_thread_pending(struct k_thread *thread)
{
return !!(thread->base.thread_state & _THREAD_PENDING);
}
static inline int _is_thread_prevented_from_running(struct k_thread *thread)
static inline int z_is_thread_prevented_from_running(struct k_thread *thread)
{
u8_t state = thread->base.thread_state;
@ -94,143 +94,143 @@ static inline int _is_thread_prevented_from_running(struct k_thread *thread)
}
static inline bool _is_thread_timeout_active(struct k_thread *thread)
static inline bool z_is_thread_timeout_active(struct k_thread *thread)
{
return !_is_inactive_timeout(&thread->base.timeout);
return !z_is_inactive_timeout(&thread->base.timeout);
}
/* Ready means: not blocked by state bits and no timeout is armed. */
static inline bool z_is_thread_ready(struct k_thread *thread)
{
	return !((z_is_thread_prevented_from_running(thread)) != 0 ||
		 z_is_thread_timeout_active(thread));
}
static inline bool _has_thread_started(struct k_thread *thread)
static inline bool z_has_thread_started(struct k_thread *thread)
{
return (thread->base.thread_state & _THREAD_PRESTART) == 0;
}
/* True if any of the bits in @a state are set in the thread state. */
static inline bool z_is_thread_state_set(struct k_thread *thread, u32_t state)
{
	return !!(thread->base.thread_state & state);
}
static inline bool _is_thread_queued(struct k_thread *thread)
static inline bool z_is_thread_queued(struct k_thread *thread)
{
return _is_thread_state_set(thread, _THREAD_QUEUED);
return z_is_thread_state_set(thread, _THREAD_QUEUED);
}
static inline void _mark_thread_as_suspended(struct k_thread *thread)
static inline void z_mark_thread_as_suspended(struct k_thread *thread)
{
thread->base.thread_state |= _THREAD_SUSPENDED;
}
static inline void _mark_thread_as_not_suspended(struct k_thread *thread)
static inline void z_mark_thread_as_not_suspended(struct k_thread *thread)
{
thread->base.thread_state &= ~_THREAD_SUSPENDED;
}
static inline void _mark_thread_as_started(struct k_thread *thread)
static inline void z_mark_thread_as_started(struct k_thread *thread)
{
thread->base.thread_state &= ~_THREAD_PRESTART;
}
static inline void _mark_thread_as_pending(struct k_thread *thread)
static inline void z_mark_thread_as_pending(struct k_thread *thread)
{
thread->base.thread_state |= _THREAD_PENDING;
}
static inline void _mark_thread_as_not_pending(struct k_thread *thread)
static inline void z_mark_thread_as_not_pending(struct k_thread *thread)
{
thread->base.thread_state &= ~_THREAD_PENDING;
}
/* Set all bits in @a states on the thread's state word. */
static inline void z_set_thread_states(struct k_thread *thread, u32_t states)
{
	thread->base.thread_state |= states;
}
/* Clear all bits in @a states from the thread's state word. */
static inline void z_reset_thread_states(struct k_thread *thread,
					 u32_t states)
{
	thread->base.thread_state &= ~states;
}
static inline void _mark_thread_as_queued(struct k_thread *thread)
static inline void z_mark_thread_as_queued(struct k_thread *thread)
{
_set_thread_states(thread, _THREAD_QUEUED);
z_set_thread_states(thread, _THREAD_QUEUED);
}
static inline void _mark_thread_as_not_queued(struct k_thread *thread)
static inline void z_mark_thread_as_not_queued(struct k_thread *thread)
{
_reset_thread_states(thread, _THREAD_QUEUED);
z_reset_thread_states(thread, _THREAD_QUEUED);
}
static inline bool _is_under_prio_ceiling(int prio)
static inline bool z_is_under_prio_ceiling(int prio)
{
return prio >= CONFIG_PRIORITY_CEILING;
}
static inline int _get_new_prio_with_ceiling(int prio)
static inline int z_get_new_prio_with_ceiling(int prio)
{
return _is_under_prio_ceiling(prio) ? prio : CONFIG_PRIORITY_CEILING;
return z_is_under_prio_ceiling(prio) ? prio : CONFIG_PRIORITY_CEILING;
}
/* Lower numeric value means higher priority, hence the <= comparison. */
static inline bool z_is_prio1_higher_than_or_equal_to_prio2(int prio1, int prio2)
{
	return prio1 <= prio2;
}
/* Convenience alias for the "higher than or equal" priority compare. */
static inline bool z_is_prio_higher_or_equal(int prio1, int prio2)
{
	return z_is_prio1_higher_than_or_equal_to_prio2(prio1, prio2);
}
/* Lower priority means numerically greater, hence the >= comparison. */
static inline bool z_is_prio1_lower_than_or_equal_to_prio2(int prio1, int prio2)
{
	return prio1 >= prio2;
}
/* Strictly higher priority: numerically smaller value. */
static inline bool z_is_prio1_higher_than_prio2(int prio1, int prio2)
{
	return prio1 < prio2;
}
/* Convenience alias for the strict "higher than" priority compare. */
static inline bool z_is_prio_higher(int prio, int test_prio)
{
	return z_is_prio1_higher_than_prio2(prio, test_prio);
}
/* Convenience alias for the "lower than or equal" priority compare. */
static inline bool z_is_prio_lower_or_equal(int prio1, int prio2)
{
	return z_is_prio1_lower_than_or_equal_to_prio2(prio1, prio2);
}
bool z_is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2);

/* Runtime counterpart of Z_VALID_PRIO: @a prio must be the idle
 * priority paired with the idle thread entry, or fall within the
 * application priority range.
 */
static inline bool _is_valid_prio(int prio, void *entry_point)
{
	if (prio == K_IDLE_PRIO && z_is_idle_thread(entry_point)) {
		return true;
	}

	if (!z_is_prio_higher_or_equal(prio,
				       K_LOWEST_APPLICATION_THREAD_PRIO)) {
		return false;
	}

	if (!z_is_prio_lower_or_equal(prio,
				      K_HIGHEST_APPLICATION_THREAD_PRIO)) {
		return false;
	}

	return true;
}
static ALWAYS_INLINE void _ready_thread(struct k_thread *thread)
static ALWAYS_INLINE void z_ready_thread(struct k_thread *thread)
{
if (_is_thread_ready(thread)) {
_add_thread_to_ready_q(thread);
if (z_is_thread_ready(thread)) {
z_add_thread_to_ready_q(thread);
}
sys_trace_thread_ready(thread);
@ -238,17 +238,17 @@ static ALWAYS_INLINE void _ready_thread(struct k_thread *thread)
/* Unpend the first waiter on @a wq, if any, and make it ready to run. */
static inline void _ready_one_thread(_wait_q_t *wq)
{
	struct k_thread *th = z_unpend_first_thread(wq);

	if (th != NULL) {
		z_ready_thread(th);
	}
}
static inline void _sched_lock(void)
static inline void z_sched_lock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
__ASSERT(!_is_in_isr(), "");
__ASSERT(!z_is_in_isr(), "");
__ASSERT(_current->base.sched_locked != 1, "");
--_current->base.sched_locked;
@ -260,10 +260,10 @@ static inline void _sched_lock(void)
#endif
}
static ALWAYS_INLINE void _sched_unlock_no_reschedule(void)
static ALWAYS_INLINE void z_sched_unlock_no_reschedule(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
__ASSERT(!_is_in_isr(), "");
__ASSERT(!z_is_in_isr(), "");
__ASSERT(_current->base.sched_locked != 0, "");
compiler_barrier();
@ -272,7 +272,7 @@ static ALWAYS_INLINE void _sched_unlock_no_reschedule(void)
#endif
}
static ALWAYS_INLINE bool _is_thread_timeout_expired(struct k_thread *thread)
static ALWAYS_INLINE bool z_is_thread_timeout_expired(struct k_thread *thread)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
return thread->base.timeout.dticks == _EXPIRED;
@ -281,12 +281,12 @@ static ALWAYS_INLINE bool _is_thread_timeout_expired(struct k_thread *thread)
#endif
}
static inline struct k_thread *_unpend1_no_timeout(_wait_q_t *wait_q)
static inline struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
{
struct k_thread *thread = _find_first_thread_to_unpend(wait_q, NULL);
struct k_thread *thread = z_find_first_thread_to_unpend(wait_q, NULL);
if (thread != NULL) {
_unpend_thread_no_timeout(thread);
z_unpend_thread_no_timeout(thread);
}
return thread;