kernel: Name of static functions should not begin with an underscore

Identifiers that begin with an underscore are reserved by the C
standard (C11 §7.1.3). This patch does not change the names of
functions defined and implemented in header files.

Signed-off-by: Leandro Pereira <leandro.pereira@intel.com>
Authored by Leandro Pereira on 2018-03-06 15:08:55 -08:00; committed by Anas Nashif.
Commit a1ae8453f7
9 changed files with 75 additions and 74 deletions
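
For background, a minimal sketch of the rule being enforced (the names
below are hypothetical, not taken from the patch): C11 §7.1.3 reserves
identifiers that begin with an underscore at file scope, so even a
static helper with a leading underscore risks colliding with names used
by the compiler or C library, while "static" alone already keeps the
symbol out of the global namespace.

#include <stdio.h>

/* Risky: "_power_off" is a reserved identifier at file scope,
 * even though the function has internal linkage. */
/* static void _power_off(void) { ... } */

/* Safe: a plain name. "static" already prevents clashes with other
 * translation units, so the underscore prefix buys nothing. */
static void power_off(void)
{
	printf("powering off\n");
}

int main(void)
{
	power_off();
	return 0;
}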


@@ -63,7 +63,7 @@ static void set_kernel_idle_time_in_ticks(s32_t ticks)
#define set_kernel_idle_time_in_ticks(x) do { } while (0)
#endif
-static void _sys_power_save_idle(s32_t ticks)
+static void sys_power_save_idle(s32_t ticks)
{
#ifdef CONFIG_TICKLESS_KERNEL
if (ticks != K_FOREVER) {
@@ -180,7 +180,7 @@ void idle(void *unused1, void *unused2, void *unused3)
for (;;) {
(void)irq_lock();
-_sys_power_save_idle(_get_next_timeout_expiry());
+sys_power_save_idle(_get_next_timeout_expiry());
IDLE_YIELD_IF_COOP();
}


@@ -208,14 +208,14 @@ void _data_copy(void)
/**
*
-* @brief Mainline for kernel's background task
+* @brief Mainline for kernel's background thread
*
* This routine completes kernel initialization by invoking the remaining
* init functions, then invokes application's main() routine.
*
* @return N/A
*/
-static void _main(void *unused1, void *unused2, void *unused3)
+static void bg_thread_main(void *unused1, void *unused2, void *unused3)
{
ARG_UNUSED(unused1);
ARG_UNUSED(unused2);
@@ -348,7 +348,8 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
#endif
_setup_new_thread(_main_thread, _main_stack,
-MAIN_STACK_SIZE, _main, NULL, NULL, NULL,
+MAIN_STACK_SIZE, bg_thread_main,
+NULL, NULL, NULL,
CONFIG_MAIN_THREAD_PRIORITY, K_ESSENTIAL);
_mark_thread_as_started(_main_thread);
_add_thread_to_ready_q(_main_thread);
@@ -389,7 +390,7 @@ static void switch_to_main_thread(void)
{
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
_arch_switch_to_main_thread(_main_thread, _main_stack, MAIN_STACK_SIZE,
-_main);
+bg_thread_main);
#else
/*
* Context switch to main task (entry function is _main()): the


@@ -34,13 +34,13 @@ static struct k_mbox_async __noinit async_msg[CONFIG_NUM_MBOX_ASYNC_MSGS];
K_STACK_DEFINE(async_msg_free, CONFIG_NUM_MBOX_ASYNC_MSGS);
/* allocate an asynchronous message descriptor */
-static inline void _mbox_async_alloc(struct k_mbox_async **async)
+static inline void mbox_async_alloc(struct k_mbox_async **async)
{
k_stack_pop(&async_msg_free, (u32_t *)async, K_FOREVER);
}
/* free an asynchronous message descriptor */
-static inline void _mbox_async_free(struct k_mbox_async *async)
+static inline void mbox_async_free(struct k_mbox_async *async)
{
k_stack_push(&async_msg_free, (u32_t)async);
}
@@ -121,7 +121,7 @@ void k_mbox_init(struct k_mbox *mbox_ptr)
*
* @return 0 if successfully matched, otherwise -1.
*/
-static int _mbox_message_match(struct k_mbox_msg *tx_msg,
+static int mbox_message_match(struct k_mbox_msg *tx_msg,
struct k_mbox_msg *rx_msg)
{
u32_t temp_info;
@@ -173,7 +173,7 @@ static int _mbox_message_match(struct k_mbox_msg *tx_msg,
*
* @return N/A
*/
-static void _mbox_message_dispose(struct k_mbox_msg *rx_msg)
+static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
{
struct k_thread *sending_thread;
struct k_mbox_msg *tx_msg;
@@ -206,7 +206,7 @@ static void _mbox_message_dispose(struct k_mbox_msg *rx_msg)
if (sending_thread->base.thread_state & _THREAD_DUMMY) {
struct k_sem *async_sem = tx_msg->_async_sem;
-_mbox_async_free((struct k_mbox_async *)sending_thread);
+mbox_async_free((struct k_mbox_async *)sending_thread);
if (async_sem != NULL) {
k_sem_give(async_sem);
}
@@ -236,7 +236,7 @@ static void _mbox_message_dispose(struct k_mbox_msg *rx_msg)
*
* @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out
*/
-static int _mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
+static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
s32_t timeout)
{
struct k_thread *sending_thread;
@@ -258,7 +258,7 @@ static int _mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
next, base.k_q_node) {
rx_msg = (struct k_mbox_msg *)receiving_thread->base.swap_data;
-if (_mbox_message_match(tx_msg, rx_msg) == 0) {
+if (mbox_message_match(tx_msg, rx_msg) == 0) {
/* take receiver out of rx queue */
_unpend_thread(receiving_thread);
_abort_thread_timeout(receiving_thread);
@@ -316,7 +316,7 @@ int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, s32_t timeout)
/* configure things for a synchronous send, then send the message */
tx_msg->_syncing_thread = _current;
-return _mbox_message_put(mbox, tx_msg, timeout);
+return mbox_message_put(mbox, tx_msg, timeout);
}
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
@@ -329,7 +329,7 @@ void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
* allocate an asynchronous message descriptor, configure both parts,
* then send the message asynchronously
*/
-_mbox_async_alloc(&async);
+mbox_async_alloc(&async);
async->thread.prio = _current->base.prio;
@@ -337,7 +337,7 @@ void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
async->tx_msg._syncing_thread = (struct k_thread *)&async->thread;
async->tx_msg._async_sem = sem;
-_mbox_message_put(mbox, &async->tx_msg, K_FOREVER);
+mbox_message_put(mbox, &async->tx_msg, K_FOREVER);
}
#endif
@@ -346,7 +346,7 @@ void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
/* handle case where data is to be discarded */
if (buffer == NULL) {
rx_msg->size = 0;
-_mbox_message_dispose(rx_msg);
+mbox_message_dispose(rx_msg);
return;
}
@@ -354,7 +354,7 @@ void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
if ((rx_msg->tx_data != NULL) && (rx_msg->size > 0)) {
memcpy(buffer, rx_msg->tx_data, rx_msg->size);
}
-_mbox_message_dispose(rx_msg);
+mbox_message_dispose(rx_msg);
}
int k_mbox_data_block_get(struct k_mbox_msg *rx_msg, struct k_mem_pool *pool,
@@ -365,7 +365,7 @@ int k_mbox_data_block_get(struct k_mbox_msg *rx_msg, struct k_mem_pool *pool,
/* handle case where data is to be discarded */
if (pool == NULL) {
rx_msg->size = 0;
-_mbox_message_dispose(rx_msg);
+mbox_message_dispose(rx_msg);
return 0;
}
@@ -376,7 +376,7 @@ int k_mbox_data_block_get(struct k_mbox_msg *rx_msg, struct k_mem_pool *pool,
rx_msg->tx_block.data = NULL;
/* now dispose of message */
-_mbox_message_dispose(rx_msg);
+mbox_message_dispose(rx_msg);
return 0;
}
@@ -407,14 +407,14 @@ int k_mbox_data_block_get(struct k_mbox_msg *rx_msg, struct k_mem_pool *pool,
*
* @return 0
*/
-static int _mbox_message_data_check(struct k_mbox_msg *rx_msg, void *buffer)
+static int mbox_message_data_check(struct k_mbox_msg *rx_msg, void *buffer)
{
if (buffer != NULL) {
/* retrieve data now, then dispose of message */
k_mbox_data_get(rx_msg, buffer);
} else if (rx_msg->size == 0) {
/* there is no data to get, so just dispose of message */
-_mbox_message_dispose(rx_msg);
+mbox_message_dispose(rx_msg);
} else {
/* keep message around for later data retrieval */
}
@@ -441,7 +441,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;
-if (_mbox_message_match(tx_msg, rx_msg) == 0) {
+if (mbox_message_match(tx_msg, rx_msg) == 0) {
/* take sender out of mailbox's tx queue */
_unpend_thread(sending_thread);
_abort_thread_timeout(sending_thread);
@@ -449,7 +449,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
irq_unlock(key);
/* consume message data immediately, if needed */
-return _mbox_message_data_check(rx_msg, buffer);
+return mbox_message_data_check(rx_msg, buffer);
}
}
@@ -468,7 +468,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
/* consume message data immediately, if needed */
if (result == 0) {
-result = _mbox_message_data_check(rx_msg, buffer);
+result = mbox_message_data_check(rx_msg, buffer);
}
return result;


@@ -52,19 +52,19 @@ static struct k_pipe_async __noinit async_msg[CONFIG_NUM_PIPE_ASYNC_MSGS];
K_STACK_DEFINE(pipe_async_msgs, CONFIG_NUM_PIPE_ASYNC_MSGS);
/* Allocate an asynchronous message descriptor */
-static void _pipe_async_alloc(struct k_pipe_async **async)
+static void pipe_async_alloc(struct k_pipe_async **async)
{
k_stack_pop(&pipe_async_msgs, (u32_t *)async, K_FOREVER);
}
/* Free an asynchronous message descriptor */
-static void _pipe_async_free(struct k_pipe_async *async)
+static void pipe_async_free(struct k_pipe_async *async)
{
k_stack_push(&pipe_async_msgs, (u32_t)async);
}
/* Finish an asynchronous operation */
-static void _pipe_async_finish(struct k_pipe_async *async_desc)
+static void pipe_async_finish(struct k_pipe_async *async_desc)
{
/*
* An asynchronous operation is finished with the scheduler locked
@@ -77,7 +77,7 @@ static void _pipe_async_finish(struct k_pipe_async *async_desc)
k_sem_give(async_desc->desc.sem);
}
-_pipe_async_free(async_desc);
+pipe_async_free(async_desc);
}
#endif /* CONFIG_NUM_PIPE_ASYNC_MSGS > 0 */
@@ -158,7 +158,7 @@ _SYSCALL_HANDLER(k_pipe_init, pipe, buffer, size)
*
* @return Number of bytes copied
*/
-static size_t _pipe_xfer(unsigned char *dest, size_t dest_size,
+static size_t pipe_xfer(unsigned char *dest, size_t dest_size,
const unsigned char *src, size_t src_size)
{
size_t num_bytes = min(dest_size, src_size);
@@ -181,7 +181,7 @@ static size_t _pipe_xfer(unsigned char *dest, size_t dest_size,
*
* @return Number of bytes written to the pipe's circular buffer
*/
-static size_t _pipe_buffer_put(struct k_pipe *pipe,
+static size_t pipe_buffer_put(struct k_pipe *pipe,
const unsigned char *src, size_t src_size)
{
size_t bytes_copied;
@@ -194,7 +194,7 @@ static size_t _pipe_buffer_put(struct k_pipe *pipe,
run_length = min(pipe->size - pipe->bytes_used,
pipe->size - pipe->write_index);
-bytes_copied = _pipe_xfer(pipe->buffer + pipe->write_index,
+bytes_copied = pipe_xfer(pipe->buffer + pipe->write_index,
run_length,
src + num_bytes_written,
src_size - num_bytes_written);
@@ -218,7 +218,7 @@ static size_t _pipe_buffer_put(struct k_pipe *pipe,
*
* @return Number of bytes read from the pipe's circular buffer
*/
-static size_t _pipe_buffer_get(struct k_pipe *pipe,
+static size_t pipe_buffer_get(struct k_pipe *pipe,
unsigned char *dest, size_t dest_size)
{
size_t bytes_copied;
@@ -230,7 +230,7 @@ static size_t _pipe_buffer_get(struct k_pipe *pipe,
run_length = min(pipe->bytes_used,
pipe->size - pipe->read_index);
-bytes_copied = _pipe_xfer(dest + num_bytes_read,
+bytes_copied = pipe_xfer(dest + num_bytes_read,
dest_size - num_bytes_read,
pipe->buffer + pipe->read_index,
run_length);
@@ -276,7 +276,7 @@ static size_t _pipe_buffer_get(struct k_pipe *pipe,
*
* @return false if request is unsatisfiable, otherwise true
*/
-static bool _pipe_xfer_prepare(sys_dlist_t *xfer_list,
+static bool pipe_xfer_prepare(sys_dlist_t *xfer_list,
struct k_thread **waiter,
_wait_q_t *wait_q,
size_t pipe_space,
@@ -357,7 +357,7 @@ static bool _pipe_xfer_prepare(sys_dlist_t *xfer_list,
*
* @return See table above
*/
-static int _pipe_return_code(size_t min_xfer, size_t bytes_remaining,
+static int pipe_return_code(size_t min_xfer, size_t bytes_remaining,
size_t bytes_requested)
{
if (bytes_requested - bytes_remaining >= min_xfer) {
@@ -379,13 +379,13 @@ static int _pipe_return_code(size_t min_xfer, size_t bytes_remaining,
*
* @return N/A
*/
-static void _pipe_thread_ready(struct k_thread *thread)
+static void pipe_thread_ready(struct k_thread *thread)
{
unsigned int key;
#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
if (thread->base.thread_state & _THREAD_DUMMY) {
-_pipe_async_finish((struct k_pipe_async *)thread);
+pipe_async_finish((struct k_pipe_async *)thread);
return;
}
#endif
@@ -421,7 +421,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
* directly copied.
*/
-if (!_pipe_xfer_prepare(&xfer_list, &reader, &pipe->wait_q.readers,
+if (!pipe_xfer_prepare(&xfer_list, &reader, &pipe->wait_q.readers,
pipe->size - pipe->bytes_used, bytes_to_write,
min_xfer, timeout)) {
irq_unlock(key);
@@ -448,7 +448,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
sys_dlist_get(&xfer_list);
while (thread) {
desc = (struct k_pipe_desc *)thread->base.swap_data;
-bytes_copied = _pipe_xfer(desc->buffer, desc->bytes_to_xfer,
+bytes_copied = pipe_xfer(desc->buffer, desc->bytes_to_xfer,
data + num_bytes_written,
bytes_to_write - num_bytes_written);
@@ -470,7 +470,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
*/
if (reader) {
desc = (struct k_pipe_desc *)reader->base.swap_data;
-bytes_copied = _pipe_xfer(desc->buffer, desc->bytes_to_xfer,
+bytes_copied = pipe_xfer(desc->buffer, desc->bytes_to_xfer,
data + num_bytes_written,
bytes_to_write - num_bytes_written);
@@ -485,14 +485,14 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
*/
num_bytes_written +=
-_pipe_buffer_put(pipe, data + num_bytes_written,
+pipe_buffer_put(pipe, data + num_bytes_written,
bytes_to_write - num_bytes_written);
if (num_bytes_written == bytes_to_write) {
*bytes_written = num_bytes_written;
#if (CONFIG_NUM_PIPE_ASYNC_MSGS > 0)
if (async_desc != NULL) {
-_pipe_async_finish(async_desc);
+pipe_async_finish(async_desc);
}
#endif
k_sched_unlock();
@@ -537,7 +537,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
*bytes_written = bytes_to_write - pipe_desc.bytes_to_xfer;
-return _pipe_return_code(min_xfer, pipe_desc.bytes_to_xfer,
+return pipe_return_code(min_xfer, pipe_desc.bytes_to_xfer,
bytes_to_write);
}
@@ -561,7 +561,7 @@ int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
* directly copied.
*/
-if (!_pipe_xfer_prepare(&xfer_list, &writer, &pipe->wait_q.writers,
+if (!pipe_xfer_prepare(&xfer_list, &writer, &pipe->wait_q.writers,
pipe->bytes_used, bytes_to_read,
min_xfer, timeout)) {
irq_unlock(key);
@@ -572,7 +572,7 @@ int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
_sched_lock();
irq_unlock(key);
-num_bytes_read = _pipe_buffer_get(pipe, data, bytes_to_read);
+num_bytes_read = pipe_buffer_get(pipe, data, bytes_to_read);
/*
* 1. 'xfer_list' currently contains a list of writer threads that can
@@ -592,7 +592,7 @@ int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
sys_dlist_get(&xfer_list);
while (thread && (num_bytes_read < bytes_to_read)) {
desc = (struct k_pipe_desc *)thread->base.swap_data;
-bytes_copied = _pipe_xfer(data + num_bytes_read,
+bytes_copied = pipe_xfer(data + num_bytes_read,
bytes_to_read - num_bytes_read,
desc->buffer, desc->bytes_to_xfer);
@@ -609,14 +609,14 @@ int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
if (num_bytes_read == bytes_to_read) {
break;
}
-_pipe_thread_ready(thread);
+pipe_thread_ready(thread);
thread = (struct k_thread *)sys_dlist_get(&xfer_list);
}
if (writer && (num_bytes_read < bytes_to_read)) {
desc = (struct k_pipe_desc *)writer->base.swap_data;
-bytes_copied = _pipe_xfer(data + num_bytes_read,
+bytes_copied = pipe_xfer(data + num_bytes_read,
bytes_to_read - num_bytes_read,
desc->buffer, desc->bytes_to_xfer);
@@ -632,21 +632,21 @@ int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
while (thread) {
desc = (struct k_pipe_desc *)thread->base.swap_data;
-bytes_copied = _pipe_buffer_put(pipe, desc->buffer,
+bytes_copied = pipe_buffer_put(pipe, desc->buffer,
desc->bytes_to_xfer);
desc->buffer += bytes_copied;
desc->bytes_to_xfer -= bytes_copied;
/* Write request has been satisfied */
-_pipe_thread_ready(thread);
+pipe_thread_ready(thread);
thread = (struct k_thread *)sys_dlist_get(&xfer_list);
}
if (writer) {
desc = (struct k_pipe_desc *)writer->base.swap_data;
-bytes_copied = _pipe_buffer_put(pipe, desc->buffer,
+bytes_copied = pipe_buffer_put(pipe, desc->buffer,
desc->bytes_to_xfer);
desc->buffer += bytes_copied;
@@ -680,7 +680,7 @@ int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
*bytes_read = bytes_to_read - pipe_desc.bytes_to_xfer;
-return _pipe_return_code(min_xfer, pipe_desc.bytes_to_xfer,
+return pipe_return_code(min_xfer, pipe_desc.bytes_to_xfer,
bytes_to_read);
}
@@ -739,7 +739,7 @@ void k_pipe_block_put(struct k_pipe *pipe, struct k_mem_block *block,
size_t dummy_bytes_written;
/* For simplicity, always allocate an asynchronous descriptor */
-_pipe_async_alloc(&async_desc);
+pipe_async_alloc(&async_desc);
async_desc->desc.block = &async_desc->desc.copy_block;
async_desc->desc.copy_block = *block;


@@ -267,7 +267,7 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
}
/* must be called with interrupts locked */
-static int _signal_poll_event(struct k_poll_event *event, u32_t state,
+static int signal_poll_event(struct k_poll_event *event, u32_t state,
int *must_reschedule)
{
*must_reschedule = 0;
@@ -318,7 +318,7 @@ int _handle_obj_poll_events(sys_dlist_t *events, u32_t state)
return 0;
}
-(void)_signal_poll_event(poll_event, state, &must_reschedule);
+(void) signal_poll_event(poll_event, state, &must_reschedule);
return must_reschedule;
}
@@ -344,7 +344,7 @@ int k_poll_signal(struct k_poll_signal *signal, int result)
return 0;
}
-int rc = _signal_poll_event(poll_event, K_POLL_STATE_SIGNALED,
+int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED,
&must_reschedule);
if (must_reschedule) {


@@ -18,7 +18,7 @@ struct _kernel _kernel = {0};
/* set the bit corresponding to prio in ready q bitmap */
#if defined(CONFIG_MULTITHREADING) && !defined(CONFIG_SMP)
-static void _set_ready_q_prio_bit(int prio)
+static void set_ready_q_prio_bit(int prio)
{
int bmap_index = _get_ready_q_prio_bmap_index(prio);
u32_t *bmap = &_ready_q.prio_bmap[bmap_index];
@@ -27,7 +27,7 @@ static void _set_ready_q_prio_bit(int prio)
}
/* clear the bit corresponding to prio in ready q bitmap */
-static void _clear_ready_q_prio_bit(int prio)
+static void clear_ready_q_prio_bit(int prio)
{
int bmap_index = _get_ready_q_prio_bmap_index(prio);
u32_t *bmap = &_ready_q.prio_bmap[bmap_index];
@@ -41,7 +41,7 @@ static void _clear_ready_q_prio_bit(int prio)
* Find the next thread to run when there is no thread in the cache and update
* the cache.
*/
-static struct k_thread *_get_ready_q_head(void)
+static struct k_thread *get_ready_q_head(void)
{
int prio = _get_highest_ready_prio();
int q_index = _get_ready_q_q_index(prio);
@@ -75,7 +75,7 @@ void _add_thread_to_ready_q(struct k_thread *thread)
sys_dlist_t *q = &_ready_q.q[q_index];
# ifndef CONFIG_SMP
-_set_ready_q_prio_bit(thread->base.prio);
+set_ready_q_prio_bit(thread->base.prio);
# endif
sys_dlist_append(q, &thread->base.k_q_node);
@@ -108,12 +108,12 @@ void _remove_thread_from_ready_q(struct k_thread *thread)
sys_dlist_remove(&thread->base.k_q_node);
if (sys_dlist_is_empty(q)) {
-_clear_ready_q_prio_bit(thread->base.prio);
+clear_ready_q_prio_bit(thread->base.prio);
}
struct k_thread **cache = &_ready_q.cache;
-*cache = *cache == thread ? _get_ready_q_head() : *cache;
+*cache = *cache == thread ? get_ready_q_head() : *cache;
#else
# if !defined(CONFIG_SMP)
_ready_q.prio_bmap[0] = 0;
@@ -217,7 +217,7 @@ void _pend_current_thread(_wait_q_t *wait_q, s32_t timeout)
#if defined(CONFIG_PREEMPT_ENABLED) && defined(CONFIG_KERNEL_DEBUG)
/* debug aid */
-static void _dump_ready_q(void)
+static void dump_ready_q(void)
{
K_DEBUG("bitmaps: ");
for (int bitmap = 0; bitmap < K_NUM_PRIO_BITMAPS; bitmap++) {
@@ -243,7 +243,7 @@ int __must_switch_threads(void)
_current->base.prio, _get_highest_ready_prio());
#ifdef CONFIG_KERNEL_DEBUG
-_dump_ready_q();
+dump_ready_q();
#endif /* CONFIG_KERNEL_DEBUG */
return _is_prio_higher(_get_highest_ready_prio(), _current->base.prio);
@@ -318,7 +318,7 @@ void _move_thread_to_end_of_prio_q(struct k_thread *thread)
# ifndef CONFIG_SMP
struct k_thread **cache = &_ready_q.cache;
-*cache = *cache == thread ? _get_ready_q_head() : *cache;
+*cache = *cache == thread ? get_ready_q_head() : *cache;
# endif
#endif
}


@@ -56,7 +56,7 @@ extern k_thread_stack_t _interrupt_stack2[];
extern k_thread_stack_t _interrupt_stack3[];
#ifdef CONFIG_SMP
-static void _smp_init_top(int key, void *arg)
+static void smp_init_top(int key, void *arg)
{
atomic_t *start_flag = arg;
@@ -90,17 +90,17 @@ void smp_init(void)
#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1
_arch_start_cpu(1, _interrupt_stack1, CONFIG_ISR_STACK_SIZE,
-_smp_init_top, &start_flag);
+smp_init_top, &start_flag);
#endif
#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 2
_arch_start_cpu(2, _interrupt_stack2, CONFIG_ISR_STACK_SIZE,
-_smp_init_top, &start_flag);
+smp_init_top, &start_flag);
#endif
#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 3
_arch_start_cpu(3, _interrupt_stack3, CONFIG_ISR_STACK_SIZE,
-_smp_init_top, &start_flag);
+smp_init_top, &start_flag);
#endif
atomic_set(&start_flag, 1);


@@ -304,7 +304,7 @@ void _k_object_uninit(void *object)
ko->flags &= ~K_OBJ_FLAG_INITIALIZED;
}
-static u32_t _handler_bad_syscall(u32_t bad_id, u32_t arg2, u32_t arg3,
+static u32_t handler_bad_syscall(u32_t bad_id, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t arg6, void *ssf)
{
printk("Bad system call id %u invoked\n", bad_id);
@@ -312,7 +312,7 @@ static u32_t _handler_bad_syscall(u32_t bad_id, u32_t arg2, u32_t arg3,
CODE_UNREACHABLE;
}
-static u32_t _handler_no_syscall(u32_t arg1, u32_t arg2, u32_t arg3,
+static u32_t handler_no_syscall(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t arg6, void *ssf)
{
printk("Unimplemented system call\n");


@@ -75,7 +75,7 @@ extern u32_t %s(u32_t arg1, u32_t arg2, u32_t arg3,
"""
weak_template = """
-__weak ALIAS_OF(_handler_no_syscall)
+__weak ALIAS_OF(handler_no_syscall)
u32_t %s(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t arg6, void *ssf);
"""
@@ -117,7 +117,7 @@ def main():
handlers.append(handler)
with open(args.syscall_dispatch, "w") as fp:
-table_entries.append("[K_SYSCALL_BAD] = _handler_bad_syscall")
+table_entries.append("[K_SYSCALL_BAD] = handler_bad_syscall")
weak_defines = "".join([weak_template % name for name in handlers])
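
For reference, a sketch of the stub that weak_template generates after
this rename (the handler name _handler_k_foo is hypothetical, and
ALIAS_OF is assumed to expand to GCC's alias attribute in Zephyr's
toolchain headers): any syscall lacking a real handler falls back to
handler_no_syscall(), which no longer carries a reserved name.

/* Hypothetical expansion of the template above for one syscall. */
__attribute__((weak, alias("handler_no_syscall")))
u32_t _handler_k_foo(u32_t arg1, u32_t arg2, u32_t arg3,
		     u32_t arg4, u32_t arg5, u32_t arg6, void *ssf);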