all: Update reserved function names
Update reserved function names starting with one underscore, replacing
them as follows:

  '_k_' with 'z_'
  '_K_' with 'Z_'
  '_handler_' with 'z_handl_'
  '_Cstart' with 'z_cstart'
  '_Swap' with 'z_swap'

This renaming is done on both global and static function names in
kernel/include and include/. Other static function names in kernel/ are
renamed by removing the leading underscore. Other function names not
starting with any of the prefixes listed above are renamed with a 'z_'
or 'Z_' prefix. Function names starting with two or three leading
underscores are not automatically renamed, since the new names would
collide with the existing two- and three-underscore variants.

Various generator scripts have also been updated, as well as perf,
linker and USB files. These are:

  drivers/serial/uart_handlers.c
  include/linker/kobject-text.ld
  kernel/include/syscall_handler.h
  scripts/gen_kobject_list.py
  scripts/gen_syscall_header.py

Signed-off-by: Patrik Flykt <patrik.flykt@intel.com>
parent cf2d57952e
commit 4344e27c26

324 changed files with 2264 additions and 2263 deletions
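The mapping above is mechanical, so it can be summarized in a few lines of
code. The sketch below is illustrative only: it is not one of the scripts
this commit updates, and the choice between 'z_' and 'Z_' is a heuristic
inferred from the renames visible in the diff (lowercase for functions,
uppercase for macro-style names).

    # Illustrative sketch of the renaming rules, not the actual tooling.
    PREFIX_MAP = [
        ("_k_", "z_"),
        ("_K_", "Z_"),
        ("_handler_", "z_handl_"),
        ("_Cstart", "z_cstart"),
        ("_Swap", "z_swap"),
    ]

    def rename(symbol: str) -> str:
        """Apply the commit's renaming rules to one identifier."""
        # Two or three leading underscores: leave alone, since the
        # renamed form would collide with those existing variants.
        if symbol.startswith("__"):
            return symbol
        for old, new in PREFIX_MAP:
            if symbol.startswith(old):
                return new + symbol[len(old):]
        # Any other reserved name: the leading underscore becomes 'z_',
        # or 'Z_' for all-uppercase, macro-style names (assumption).
        if symbol.startswith("_"):
            rest = symbol[1:]
            return ("Z_" if rest.isupper() else "z_") + rest
        return symbol

    # Spot checks against renames visible in the diff below:
    assert rename("_k_object_init") == "z_object_init"
    assert rename("_impl_k_sem_reset") == "z_impl_k_sem_reset"
    assert rename("_WAIT_Q_INIT") == "Z_WAIT_Q_INIT"
    assert rename("__syscall") == "__syscall"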
include/kernel.h: 140 changed lines
@@ -81,9 +81,9 @@ typedef struct {
     struct _priq_rb waitq;
 } _wait_q_t;
 
-extern bool _priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
+extern bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
 
-#define _WAIT_Q_INIT(wait_q) { { { .lessthan_fn = _priq_rb_lessthan } } }
+#define Z_WAIT_Q_INIT(wait_q) { { { .lessthan_fn = z_priq_rb_lessthan } } }
 
 #else
 
@@ -91,7 +91,7 @@ typedef struct {
     sys_dlist_t waitq;
 } _wait_q_t;
 
-#define _WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }
+#define Z_WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }
 
 #endif
 
@@ -157,7 +157,7 @@ enum k_objects {
 
 #ifdef CONFIG_USERSPACE
 /* Table generated by gperf, these objects are retrieved via
- * _k_object_find() */
+ * z_object_find() */
 struct _k_object {
     char *name;
     u8_t perms[CONFIG_MAX_THREAD_BYTES];
@@ -205,7 +205,7 @@ struct _k_object_assignment {
  *
  * @param obj Address of the kernel object
  */
-void _k_object_init(void *obj);
+void z_object_init(void *obj);
 #else
 
 #define K_THREAD_ACCESS_GRANT(thread, ...)
@@ -213,7 +213,7 @@ void _k_object_init(void *obj);
 /**
  * @internal
  */
-static inline void _k_object_init(void *obj)
+static inline void z_object_init(void *obj)
 {
     ARG_UNUSED(obj);
 }
@@ -221,7 +221,7 @@ static inline void _k_object_init(void *obj)
 /**
  * @internal
  */
-static inline void _impl_k_object_access_grant(void *object,
+static inline void z_impl_k_object_access_grant(void *object,
                            struct k_thread *thread)
 {
     ARG_UNUSED(object);
@@ -241,7 +241,7 @@ static inline void k_object_access_revoke(void *object,
 /**
  * @internal
 */
-static inline void _impl_k_object_release(void *object)
+static inline void z_impl_k_object_release(void *object)
 {
     ARG_UNUSED(object);
 }
@@ -326,7 +326,7 @@ __syscall void *k_object_alloc(enum k_objects otype);
  */
 void k_object_free(void *obj);
 #else
-static inline void *_impl_k_object_alloc(enum k_objects otype)
+static inline void *z_impl_k_object_alloc(enum k_objects otype)
 {
     ARG_UNUSED(otype);
 
@@ -573,7 +573,7 @@ struct k_thread {
     * become part of the core OS
     */
 
-    /** _Swap() return value */
+    /** z_swap() return value */
     int swap_retval;
 
     /** Context handle returned via _arch_switch() */
@@ -1392,9 +1392,9 @@ struct k_timer {
     .timeout = { \
         .node = {},\
        .dticks = 0, \
-       .fn = _timer_expiration_handler \
+       .fn = z_timer_expiration_handler \
    }, \
-   .wait_q = _WAIT_Q_INIT(&obj.wait_q), \
+   .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
    .expiry_fn = expiry, \
    .stop_fn = stop, \
    .period = 0, \
@@ -1559,7 +1559,7 @@ extern s32_t z_timeout_remaining(struct _timeout *timeout);
  */
 __syscall u32_t k_timer_remaining_get(struct k_timer *timer);
 
-static inline u32_t _impl_k_timer_remaining_get(struct k_timer *timer)
+static inline u32_t z_impl_k_timer_remaining_get(struct k_timer *timer)
 {
     const s32_t ticks = z_timeout_remaining(&timer->timeout);
     return (ticks > 0) ? (u32_t)__ticks_to_ms(ticks) : 0U;
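The hunk above shows the pattern repeated for the syscalls in this header:
the __syscall prototype keeps its public name, while the inline
implementation it pairs with moves from an '_impl_' to a 'z_impl_' prefix
(scripts/gen_syscall_header.py, which generates the matching glue, is
updated by this commit as well). A toy sketch of the name derivation, not
the real generator:

    import re

    # Derive the implementation-function name expected for a __syscall
    # prototype after this commit. Illustrative only.
    PROTO = "__syscall u32_t k_timer_remaining_get(struct k_timer *timer);"

    m = re.match(r"__syscall\s+(.+?)\s+(\w+)\((.*)\);", PROTO)
    ret, name, args = m.groups()
    impl = "static inline %s z_impl_%s(%s)" % (ret, name, args)
    assert impl == ("static inline u32_t "
                    "z_impl_k_timer_remaining_get(struct k_timer *timer)")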
@@ -1584,7 +1584,7 @@ __syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
 /**
  * @internal
  */
-static inline void _impl_k_timer_user_data_set(struct k_timer *timer,
+static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
                           void *user_data)
 {
     timer->user_data = user_data;
@@ -1599,7 +1599,7 @@ static inline void _impl_k_timer_user_data_set(struct k_timer *timer,
  */
 __syscall void *k_timer_user_data_get(struct k_timer *timer);
 
-static inline void *_impl_k_timer_user_data_get(struct k_timer *timer)
+static inline void *z_impl_k_timer_user_data_get(struct k_timer *timer)
 {
     return timer->user_data;
 }
@@ -1721,7 +1721,7 @@ static inline u32_t k_uptime_delta_32(s64_t *reftime)
  *
  * @return Current hardware clock up-counter (in cycles).
  */
-#define k_cycle_get_32() _arch_k_cycle_get_32()
+#define k_cycle_get_32() z_arch_k_cycle_get_32()
 
 /**
  * @}
@@ -1746,7 +1746,7 @@ struct k_queue {
 #define _K_QUEUE_INITIALIZER(obj) \
    { \
    .data_q = SYS_SLIST_STATIC_INIT(&obj.data_q), \
-   .wait_q = _WAIT_Q_INIT(&obj.wait_q), \
+   .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
    _POLL_EVENT_OBJ_INIT(obj) \
    _OBJECT_TRACING_INIT \
    }
@@ -1989,7 +1989,7 @@ static inline bool k_queue_unique_append(struct k_queue *queue, void *data)
  */
 __syscall int k_queue_is_empty(struct k_queue *queue);
 
-static inline int _impl_k_queue_is_empty(struct k_queue *queue)
+static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
 {
     return (int)sys_sflist_is_empty(&queue->data_q);
 }
@@ -2005,7 +2005,7 @@ static inline int _impl_k_queue_is_empty(struct k_queue *queue)
  */
 __syscall void *k_queue_peek_head(struct k_queue *queue);
 
-static inline void *_impl_k_queue_peek_head(struct k_queue *queue)
+static inline void *z_impl_k_queue_peek_head(struct k_queue *queue)
 {
     return z_queue_node_peek(sys_sflist_peek_head(&queue->data_q), false);
 }
@@ -2021,7 +2021,7 @@ static inline void *_impl_k_queue_peek_head(struct k_queue *queue)
  */
 __syscall void *k_queue_peek_tail(struct k_queue *queue);
 
-static inline void *_impl_k_queue_peek_tail(struct k_queue *queue)
+static inline void *z_impl_k_queue_peek_tail(struct k_queue *queue)
 {
     return z_queue_node_peek(sys_sflist_peek_tail(&queue->data_q), false);
 }
@@ -2384,7 +2384,7 @@ struct k_stack {
 
 #define _K_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
    { \
-   .wait_q = _WAIT_Q_INIT(&obj.wait_q), \
+   .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
    .base = stack_buffer, \
    .next = stack_buffer, \
    .top = stack_buffer + stack_num_entries, \
@@ -2558,14 +2558,14 @@ extern struct k_work_q k_sys_work_q;
  * INTERNAL_HIDDEN @endcond
  */
 
-#define _K_WORK_INITIALIZER(work_handler) \
+#define Z_WORK_INITIALIZER(work_handler) \
    { \
    ._reserved = NULL, \
    .handler = work_handler, \
    .flags = { 0 } \
    }
 
-#define K_WORK_INITIALIZER DEPRECATED_MACRO _K_WORK_INITIALIZER
+#define K_WORK_INITIALIZER DEPRECATED_MACRO Z_WORK_INITIALIZER
 
 /**
  * @brief Initialize a statically-defined work item.
@@ -2580,7 +2580,7 @@ extern struct k_work_q k_sys_work_q;
  * @req K-WORK-002
  */
 #define K_WORK_DEFINE(work, work_handler) \
-   struct k_work work = _K_WORK_INITIALIZER(work_handler)
+   struct k_work work = Z_WORK_INITIALIZER(work_handler)
 
 /**
  * @brief Initialize a work item.
@@ -2595,7 +2595,7 @@ extern struct k_work_q k_sys_work_q;
  */
 static inline void k_work_init(struct k_work *work, k_work_handler_t handler)
 {
-   *work = (struct k_work)_K_WORK_INITIALIZER(handler);
+   *work = (struct k_work)Z_WORK_INITIALIZER(handler);
 }
 
 /**
@@ -2910,7 +2910,7 @@ struct k_mutex {
  */
 #define _K_MUTEX_INITIALIZER(obj) \
    { \
-   .wait_q = _WAIT_Q_INIT(&obj.wait_q), \
+   .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
    .owner = NULL, \
    .lock_count = 0, \
    .owner_orig_prio = K_LOWEST_THREAD_PRIO, \
@@ -3009,7 +3009,7 @@ struct k_sem {
 
 #define _K_SEM_INITIALIZER(obj, initial_count, count_limit) \
    { \
-   .wait_q = _WAIT_Q_INIT(&obj.wait_q), \
+   .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
    .count = initial_count, \
    .limit = count_limit, \
    _POLL_EVENT_OBJ_INIT(obj) \
@@ -3097,7 +3097,7 @@ __syscall void k_sem_reset(struct k_sem *sem);
 /**
  * @internal
  */
-static inline void _impl_k_sem_reset(struct k_sem *sem)
+static inline void z_impl_k_sem_reset(struct k_sem *sem)
 {
     sem->count = 0;
 }
@@ -3117,7 +3117,7 @@ __syscall unsigned int k_sem_count_get(struct k_sem *sem);
 /**
  * @internal
  */
-static inline unsigned int _impl_k_sem_count_get(struct k_sem *sem)
+static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
 {
     return sem->count;
 }
@@ -3173,7 +3173,7 @@ struct k_msgq {
 
 #define _K_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
    { \
-   .wait_q = _WAIT_Q_INIT(&obj.wait_q), \
+   .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
    .max_msgs = q_max_msgs, \
    .msg_size = q_msg_size, \
    .buffer_start = q_buffer, \
@@ -3375,7 +3375,7 @@ __syscall u32_t k_msgq_num_free_get(struct k_msgq *q);
 __syscall void k_msgq_get_attrs(struct k_msgq *q, struct k_msgq_attrs *attrs);
 
 
-static inline u32_t _impl_k_msgq_num_free_get(struct k_msgq *q)
+static inline u32_t z_impl_k_msgq_num_free_get(struct k_msgq *q)
 {
     return q->max_msgs - q->used_msgs;
 }
@@ -3392,7 +3392,7 @@ static inline u32_t _impl_k_msgq_num_free_get(struct k_msgq *q)
  */
 __syscall u32_t k_msgq_num_used_get(struct k_msgq *q);
 
-static inline u32_t _impl_k_msgq_num_used_get(struct k_msgq *q)
+static inline u32_t z_impl_k_msgq_num_used_get(struct k_msgq *q)
 {
     return q->used_msgs;
 }
@@ -3467,8 +3467,8 @@ struct k_mbox {
 
 #define _K_MBOX_INITIALIZER(obj) \
    { \
-   .tx_msg_queue = _WAIT_Q_INIT(&obj.tx_msg_queue), \
-   .rx_msg_queue = _WAIT_Q_INIT(&obj.rx_msg_queue), \
+   .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
+   .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
    _OBJECT_TRACING_INIT \
    }
 
@@ -3662,8 +3662,8 @@ struct k_pipe {
    .write_index = 0, \
    .lock = {}, \
    .wait_q = { \
-       .readers = _WAIT_Q_INIT(&obj.wait_q.readers), \
-       .writers = _WAIT_Q_INIT(&obj.wait_q.writers) \
+       .readers = Z_WAIT_Q_INIT(&obj.wait_q.readers), \
+       .writers = Z_WAIT_Q_INIT(&obj.wait_q.writers) \
    }, \
    _OBJECT_TRACING_INIT \
    .flags = 0 \
@@ -3828,7 +3828,7 @@ struct k_mem_slab {
 #define _K_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
                slab_num_blocks) \
    { \
-   .wait_q = _WAIT_Q_INIT(&obj.wait_q), \
+   .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
    .num_blocks = slab_num_blocks, \
    .block_size = slab_block_size, \
    .buffer = slab_buffer, \
@@ -4009,13 +4009,13 @@ struct k_mem_pool {
 #define K_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align) \
    char __aligned(align) _mpool_buf_##name[_ALIGN4(maxsz * nmax) \
                + _MPOOL_BITS_SIZE(maxsz, minsz, nmax)]; \
-   struct sys_mem_pool_lvl _mpool_lvls_##name[_MPOOL_LVLS(maxsz, minsz)]; \
+   struct sys_mem_pool_lvl _mpool_lvls_##name[Z_MPOOL_LVLS(maxsz, minsz)]; \
    struct k_mem_pool name __in_section(_k_mem_pool, static, name) = { \
        .base = { \
            .buf = _mpool_buf_##name, \
            .max_sz = maxsz, \
            .n_max = nmax, \
-           .n_levels = _MPOOL_LVLS(maxsz, minsz), \
+           .n_levels = Z_MPOOL_LVLS(maxsz, minsz), \
            .levels = _mpool_lvls_##name, \
            .flags = SYS_MEM_POOL_KERNEL \
        } \
@@ -4166,7 +4166,7 @@ enum _poll_types_bits {
    _POLL_NUM_TYPES
 };
 
-#define _POLL_TYPE_BIT(type) (1 << ((type) - 1))
+#define Z_POLL_TYPE_BIT(type) (1 << ((type) - 1))
 
 /* private - states bit positions */
 enum _poll_states_bits {
@@ -4188,7 +4188,7 @@ enum _poll_states_bits {
    _POLL_NUM_STATES
 };
 
-#define _POLL_STATE_BIT(state) (1 << ((state) - 1))
+#define Z_POLL_STATE_BIT(state) (1 << ((state) - 1))
 
 #define _POLL_EVENT_NUM_UNUSED_BITS \
    (32 - (0 \
@@ -4211,9 +4211,9 @@ enum _poll_states_bits {
 
 /* public - values for k_poll_event.type bitfield */
 #define K_POLL_TYPE_IGNORE 0
-#define K_POLL_TYPE_SIGNAL _POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
-#define K_POLL_TYPE_SEM_AVAILABLE _POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
-#define K_POLL_TYPE_DATA_AVAILABLE _POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
+#define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
+#define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
+#define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
 #define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
 
 /* public - polling modes */
@@ -4226,11 +4226,11 @@ enum k_poll_modes {
 
 /* public - values for k_poll_event.state bitfield */
 #define K_POLL_STATE_NOT_READY 0
-#define K_POLL_STATE_SIGNALED _POLL_STATE_BIT(_POLL_STATE_SIGNALED)
-#define K_POLL_STATE_SEM_AVAILABLE _POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
-#define K_POLL_STATE_DATA_AVAILABLE _POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
+#define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
+#define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
+#define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
 #define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
-#define K_POLL_STATE_CANCELLED _POLL_STATE_BIT(_POLL_STATE_CANCELLED)
+#define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
 
 /* public - poll signal object */
 struct k_poll_signal {
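A note on the poll hunks just above: Z_POLL_TYPE_BIT() and
Z_POLL_STATE_BIT() change name only, not value; each still expands to
(1 << (n - 1)), so the first enumerator maps to bit 0. A quick worked
example, assuming the private enums number their members 1, 2, 3 in
declaration order (the enum bodies are not part of this diff):

    # Worked example of the unchanged bit math behind Z_POLL_TYPE_BIT().
    def z_poll_type_bit(n):
        return 1 << (n - 1)

    # Assumed enumerator values; not shown in this diff.
    K_POLL_TYPE_SIGNAL = z_poll_type_bit(1)          # 0x1
    K_POLL_TYPE_SEM_AVAILABLE = z_poll_type_bit(2)   # 0x2
    K_POLL_TYPE_DATA_AVAILABLE = z_poll_type_bit(3)  # 0x4

    # Distinct bits, so types can be OR'ed into one bitfield:
    assert K_POLL_TYPE_SIGNAL | K_POLL_TYPE_SEM_AVAILABLE == 0x3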
@@ -4396,7 +4396,7 @@ __syscall void k_poll_signal_init(struct k_poll_signal *signal);
  */
 __syscall void k_poll_signal_reset(struct k_poll_signal *signal);
 
-static inline void _impl_k_poll_signal_reset(struct k_poll_signal *signal)
+static inline void z_impl_k_poll_signal_reset(struct k_poll_signal *signal)
 {
     signal->signaled = 0;
 }
@@ -4441,7 +4441,7 @@ __syscall int k_poll_signal_raise(struct k_poll_signal *signal, int result);
 /**
  * @internal
  */
-extern void _handle_obj_poll_events(sys_dlist_t *events, u32_t state);
+extern void z_handle_obj_poll_events(sys_dlist_t *events, u32_t state);
 
 /** @} */
 
@@ -4486,23 +4486,23 @@ extern void k_cpu_atomic_idle(unsigned int key);
 /**
  * @internal
  */
-extern void _sys_power_save_idle_exit(s32_t ticks);
+extern void z_sys_power_save_idle_exit(s32_t ticks);
 
-#ifdef _ARCH_EXCEPT
+#ifdef Z_ARCH_EXCEPT
 /* This archtecture has direct support for triggering a CPU exception */
-#define _k_except_reason(reason) _ARCH_EXCEPT(reason)
+#define z_except_reason(reason) Z_ARCH_EXCEPT(reason)
 #else
 
 /* NOTE: This is the implementation for arches that do not implement
- * _ARCH_EXCEPT() to generate a real CPU exception.
+ * Z_ARCH_EXCEPT() to generate a real CPU exception.
  *
  * We won't have a real exception frame to determine the PC value when
  * the oops occurred, so print file and line number before we jump into
  * the fatal error handler.
  */
-#define _k_except_reason(reason) do { \
+#define z_except_reason(reason) do { \
        printk("@ %s:%d:\n", __FILE__, __LINE__); \
-       _NanoFatalErrorHandler(reason, &_default_esf); \
+       z_NanoFatalErrorHandler(reason, &_default_esf); \
        CODE_UNREACHABLE; \
    } while (false)
 
@@ -4520,7 +4520,7 @@ extern void _sys_power_save_idle_exit(s32_t ticks);
  * will treat it as an unrecoverable system error, just like k_panic().
  * @req K-MISC-003
  */
-#define k_oops() _k_except_reason(_NANO_ERR_KERNEL_OOPS)
+#define k_oops() z_except_reason(_NANO_ERR_KERNEL_OOPS)
 
 /**
  * @brief Fatally terminate the system
@@ -4531,7 +4531,7 @@ extern void _sys_power_save_idle_exit(s32_t ticks);
  * will be called will reason code _NANO_ERR_KERNEL_PANIC.
  * @req K-MISC-004
  */
-#define k_panic() _k_except_reason(_NANO_ERR_KERNEL_PANIC)
+#define k_panic() z_except_reason(_NANO_ERR_KERNEL_PANIC)
 
 /*
  * private APIs that are utilized by one or more public APIs
@@ -4541,22 +4541,22 @@ extern void _sys_power_save_idle_exit(s32_t ticks);
 /**
  * @internal
  */
-extern void _init_static_threads(void);
+extern void z_init_static_threads(void);
 #else
 /**
  * @internal
 */
-#define _init_static_threads() do { } while (false)
+#define z_init_static_threads() do { } while (false)
 #endif
 
 /**
  * @internal
 */
-extern bool _is_thread_essential(void);
+extern bool z_is_thread_essential(void);
 /**
  * @internal
 */
-extern void _timer_expiration_handler(struct _timeout *t);
+extern void z_timer_expiration_handler(struct _timeout *t);
 
 /* arch/cpu.h may declare an architecture or platform-specific macro
  * for properly declaring stacks, compatible with MMU/MPU constraints if
@@ -4574,16 +4574,16 @@ extern void _timer_expiration_handler(struct _timeout *t);
  */
 #define K_THREAD_STACK_EXTERN(sym) extern k_thread_stack_t sym[]
 
-#ifdef _ARCH_THREAD_STACK_DEFINE
-#define K_THREAD_STACK_DEFINE(sym, size) _ARCH_THREAD_STACK_DEFINE(sym, size)
+#ifdef Z_ARCH_THREAD_STACK_DEFINE
+#define K_THREAD_STACK_DEFINE(sym, size) Z_ARCH_THREAD_STACK_DEFINE(sym, size)
 #define K_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
-   _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size)
-#define K_THREAD_STACK_LEN(size) _ARCH_THREAD_STACK_LEN(size)
-#define K_THREAD_STACK_MEMBER(sym, size) _ARCH_THREAD_STACK_MEMBER(sym, size)
-#define K_THREAD_STACK_SIZEOF(sym) _ARCH_THREAD_STACK_SIZEOF(sym)
+   Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size)
+#define K_THREAD_STACK_LEN(size) Z_ARCH_THREAD_STACK_LEN(size)
+#define K_THREAD_STACK_MEMBER(sym, size) Z_ARCH_THREAD_STACK_MEMBER(sym, size)
+#define K_THREAD_STACK_SIZEOF(sym) Z_ARCH_THREAD_STACK_SIZEOF(sym)
 static inline char *K_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
 {
-   return _ARCH_THREAD_STACK_BUFFER(sym);
+   return Z_ARCH_THREAD_STACK_BUFFER(sym);
 }
 #else
 /**
@@ -4849,7 +4849,7 @@ __syscall void k_str_out(char *c, size_t n);
  * an irq_unlock() key.
  * @param arg Untyped argument to be passed to "fn"
 */
-extern void _arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
+extern void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
               void (*fn)(int key, void *data), void *arg);
 
 #ifdef __cplusplus