kernel: Change k_poll_signal API

k_poll_signal was being used as the name of both a struct and a
function. Besides being extremely error prone, this is also a MISRA-C
violation. The function is renamed to contain a verb, since it performs
an action, while the struct keeps the noun. This pattern must be
formalized and followed across the project.

MISRA-C rules 5.7 and 5.9

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
Authored by Flavio Ceolin on 2018-11-02 12:35:30 -07:00, committed by Anas Nashif
commit aecd4ecb8d
13 changed files with 32 additions and 32 deletions


@@ -172,8 +172,8 @@ to :c:macro:`K_POLL_STATE_NOT_READY` by the user.
}
}
-Using k_poll_signal()
-=====================
+Using k_poll_signal_raise()
+===========================
One of the types of events is :c:macro:`K_POLL_TYPE_SIGNAL`: this is a "direct"
signal to a poll event. This can be seen as a lightweight binary semaphore only
@@ -192,7 +192,7 @@ first be initialized either via :c:macro:`K_POLL_SIGNAL_INITIALIZER()` or
k_poll_signal_init(&signal);
}
-It is signaled via the :cpp:func:`k_poll_signal()` function. This function
+It is signaled via the :cpp:func:`k_poll_signal_raise()` function. This function
takes a user **result** parameter that is opaque to the API and can be used to
pass extra information to the thread waiting on the event.
@@ -223,7 +223,7 @@ pass extra information to the thread waiting on the event.
// thread B
void signal_do_stuff(void)
{
-k_poll_signal(&signal, 0x1337);
+k_poll_signal_raise(&signal, 0x1337);
}
If the signal is to be polled in a loop, *both* its event state and its
@@ -290,4 +290,4 @@ The following polling APIs are provided by :file:`kernel.h`:
* :cpp:func:`k_poll_event_init()`
* :cpp:func:`k_poll()`
* :cpp:func:`k_poll_signal_init()`
-* :cpp:func:`k_poll_signal()`
+* :cpp:func:`k_poll_signal_raise()`
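For reference, below is a minimal end-to-end sketch of the renamed API as documented above; the names my_signal, wait_for_signal() and raise_signal() are illustrative, not part of the patch.

#include <kernel.h>

static struct k_poll_signal my_signal =
        K_POLL_SIGNAL_INITIALIZER(my_signal);

/* thread A: wait for the signal and read the result passed by the raiser */
void wait_for_signal(void)
{
        struct k_poll_event events[1] = {
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
                                         K_POLL_MODE_NOTIFY_ONLY,
                                         &my_signal),
        };
        unsigned int signaled;
        int result;

        k_poll(events, 1, K_FOREVER);

        /* result is 0x1337 if raised as in raise_signal() below */
        k_poll_signal_check(&my_signal, &signaled, &result);
}

/* thread B: raise the signal, passing an opaque result value */
void raise_signal(void)
{
        k_poll_signal_raise(&my_signal, 0x1337);
}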


@@ -159,7 +159,7 @@ static inline void adc_context_complete(struct adc_context *ctx, int status)
#ifdef CONFIG_ADC_ASYNC
if (ctx->asynchronous) {
if (ctx->signal) {
-k_poll_signal(ctx->signal, status);
+k_poll_signal_raise(ctx->signal, status);
}
k_sem_give(&ctx->lock);
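On the consuming side of this helper, a driver user passes the signal into the asynchronous read and then waits on it. A rough sketch, assuming the adc_read_async() signature and <adc.h> header path of this era; the function and variable names are illustrative:

#include <kernel.h>
#include <adc.h>

static struct k_poll_signal adc_done;

int sample_async(struct device *adc_dev, const struct adc_sequence *seq)
{
        struct k_poll_event evt =
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
                                         K_POLL_MODE_NOTIFY_ONLY,
                                         &adc_done);
        unsigned int signaled;
        int status;
        int err;

        k_poll_signal_init(&adc_done);

        /* adc_context_complete() above ends up calling
         * k_poll_signal_raise(ctx->signal, status) when sampling finishes
         */
        err = adc_read_async(adc_dev, seq, &adc_done);
        if (err) {
                return err;
        }

        k_poll(&evt, 1, K_FOREVER);
        k_poll_signal_check(&adc_done, &signaled, &status);

        return status;
}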


@@ -141,7 +141,7 @@ static inline void spi_context_complete(struct spi_context *ctx, int status)
status = ctx->recv_frames;
}
#endif /* CONFIG_SPI_SLAVE */
-k_poll_signal(ctx->signal, status);
+k_poll_signal_raise(ctx->signal, status);
}
if (!(ctx->config->operation & SPI_LOCK_ON)) {
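The SPI counterpart follows the same pattern: with CONFIG_SPI_ASYNC enabled, the caller hands a signal to spi_transceive_async() and polls on it. A sketch under the same assumptions as the ADC example (API of this era, illustrative names):

#include <kernel.h>
#include <spi.h>

static struct k_poll_signal spi_done;

int transceive_async(struct device *spi_dev, const struct spi_config *cfg,
                     const struct spi_buf_set *tx, const struct spi_buf_set *rx)
{
        struct k_poll_event evt =
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
                                         K_POLL_MODE_NOTIFY_ONLY,
                                         &spi_done);
        unsigned int signaled;
        int status;
        int err;

        k_poll_signal_init(&spi_done);

        /* spi_context_complete() above raises the signal with the transfer
         * status (or, for a slave, the number of received frames)
         */
        err = spi_transceive_async(spi_dev, cfg, tx, rx, &spi_done);
        if (err) {
                return err;
        }

        k_poll(&evt, 1, K_FOREVER);
        k_poll_signal_check(&spi_done, &signaled, &status);

        return status;
}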


@@ -4141,7 +4141,7 @@ enum _poll_types_bits {
/* can be used to ignore an event */
_POLL_TYPE_IGNORE,
-/* to be signaled by k_poll_signal() */
+/* to be signaled by k_poll_signal_raise() */
_POLL_TYPE_SIGNAL,
/* semaphore availability */
@@ -4160,7 +4160,7 @@ enum _poll_states_bits {
/* default state when creating event */
_POLL_STATE_NOT_READY,
-/* signaled by k_poll_signal() */
+/* signaled by k_poll_signal_raise() */
_POLL_STATE_SIGNALED,
/* semaphore is available */
@@ -4230,7 +4230,7 @@ struct k_poll_signal {
*/
unsigned int signaled;
-/* custom result value passed to k_poll_signal() if needed */
+/* custom result value passed to k_poll_signal_raise() if needed */
int result;
};
@@ -4365,7 +4365,7 @@ __syscall int k_poll(struct k_poll_event *events, int num_events,
/**
* @brief Initialize a poll signal object.
*
-* Ready a poll signal object to be signaled via k_poll_signal().
+* Ready a poll signal object to be signaled via k_poll_signal_raise().
*
* @param signal A poll signal.
*
@@ -4410,7 +4410,7 @@ __syscall void k_poll_signal_check(struct k_poll_signal *signal,
* made ready to run. A @a result value can be specified.
*
* The poll signal contains a 'signaled' field that, when set by
-* k_poll_signal(), stays set until the user sets it back to 0 with
+* k_poll_signal_raise(), stays set until the user sets it back to 0 with
* k_poll_signal_reset(). It thus has to be reset by the user before being
* passed again to k_poll() or k_poll() will consider it being signaled, and
* will return immediately.
@@ -4423,7 +4423,7 @@ __syscall void k_poll_signal_check(struct k_poll_signal *signal,
* @req K-POLL-001
*/
-__syscall int k_poll_signal(struct k_poll_signal *signal, int result);
+__syscall int k_poll_signal_raise(struct k_poll_signal *signal, int result);
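The reset requirement described in the comment above matters most when the same signal is polled in a loop; a minimal sketch (the sig object and consume_signals() name are illustrative):

static struct k_poll_signal sig;

void consume_signals(void)
{
        struct k_poll_event evt =
                K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
                                         K_POLL_MODE_NOTIFY_ONLY, &sig);
        unsigned int signaled;
        int result;

        k_poll_signal_init(&sig);

        while (1) {
                k_poll(&evt, 1, K_FOREVER);
                k_poll_signal_check(&sig, &signaled, &result);
                /* ... act on result ... */

                /* reset both the signal and the event state, otherwise the
                 * next k_poll() returns immediately with the stale state
                 */
                k_poll_signal_reset(&sig);
                evt.state = K_POLL_STATE_NOT_READY;
        }
}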
/**
* @internal


@@ -434,7 +434,7 @@ config POLL
bool "Async I/O Framework"
help
Asynchronous notification framework. Enable the k_poll() and
-k_poll_signal() APIs. The former can wait on multiple events
+k_poll_signal_raise() APIs. The former can wait on multiple events
concurrently, which can be either directly triggered or triggered by
the availability of some kernel objects (semaphores and fifos).
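To pull k_poll() and k_poll_signal_raise() into an application build, this option is enabled in the project configuration, for example in prj.conf:

CONFIG_POLL=y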


@@ -403,7 +403,7 @@ Z_SYSCALL_HANDLER(k_poll_signal_check, signal, signaled, result)
}
#endif
-int _impl_k_poll_signal(struct k_poll_signal *signal, int result)
+int _impl_k_poll_signal_raise(struct k_poll_signal *signal, int result)
{
unsigned int key = irq_lock();
struct k_poll_event *poll_event;
@@ -424,10 +424,10 @@ int _impl_k_poll_signal(struct k_poll_signal *signal, int result)
}
#ifdef CONFIG_USERSPACE
-Z_SYSCALL_HANDLER(k_poll_signal, signal, result)
+Z_SYSCALL_HANDLER(k_poll_signal_raise, signal, result)
{
Z_OOPS(Z_SYSCALL_OBJ(signal, K_OBJ_POLL_SIGNAL));
-return _impl_k_poll_signal((struct k_poll_signal *)signal, result);
+return _impl_k_poll_signal_raise((struct k_poll_signal *)signal, result);
}
Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_poll_signal_reset, K_OBJ_POLL_SIGNAL,
struct k_poll_signal *);


@@ -36,7 +36,7 @@ int32_t osSignalSet(osThreadId thread_id, int32_t signals)
thread_def->signal_results |= signals;
irq_unlock(key);
-k_poll_signal(thread_def->poll_signal, signals);
+k_poll_signal_raise(thread_def->poll_signal, signals);
return sig;
}


@@ -226,7 +226,7 @@ static void reset(struct net_buf *buf, struct net_buf **evt)
conn_count = 0;
if (buf) {
atomic_set_bit(&hci_state_mask, HCI_STATE_BIT_RESET);
-k_poll_signal(hbuf_signal, 0x0);
+k_poll_signal_raise(hbuf_signal, 0x0);
}
#endif
}
@@ -336,7 +336,7 @@ static void host_num_completed_packets(struct net_buf *buf,
BT_DBG("FC: acked: %d", count);
hci_hbuf_acked += count;
-k_poll_signal(hbuf_signal, 0x0);
+k_poll_signal_raise(hbuf_signal, 0x0);
}
#endif


@@ -228,7 +228,7 @@ static inline struct net_buf *process_hbuf(struct radio_pdu_node_rx *n)
(class == HCI_CLASS_ACL_DATA && hbuf_count)) {
/* node to process later, schedule an iteration */
BT_DBG("FC: signalling");
-k_poll_signal(&hbuf_signal, 0x0);
+k_poll_signal_raise(&hbuf_signal, 0x0);
}
return NULL;
}
@@ -270,7 +270,7 @@ static inline struct net_buf *process_hbuf(struct radio_pdu_node_rx *n)
* iteration
*/
BT_DBG("FC: signalling");
-k_poll_signal(&hbuf_signal, 0x0);
+k_poll_signal_raise(&hbuf_signal, 0x0);
}
}
}


@@ -1507,7 +1507,7 @@ void bt_conn_set_state(struct bt_conn *conn, bt_conn_state_t state)
}
k_fifo_init(&conn->tx_queue);
k_fifo_init(&conn->tx_notify);
-k_poll_signal(&conn_change, 0);
+k_poll_signal_raise(&conn_change, 0);
sys_slist_init(&conn->channels);
@@ -1536,7 +1536,7 @@ void bt_conn_set_state(struct bt_conn *conn, bt_conn_state_t state)
}
atomic_set_bit(conn->flags, BT_CONN_CLEANUP);
-k_poll_signal(&conn_change, 0);
+k_poll_signal_raise(&conn_change, 0);
/* The last ref will be dropped by the tx_thread */
} else if (old_state == BT_CONN_CONNECT) {
/* conn->err will be set in this case */


@@ -1130,7 +1130,7 @@ static void shell_transport_evt_handler(enum shell_transport_evt evt_type,
signal = (evt_type == SHELL_TRANSPORT_EVT_RX_RDY) ?
&shell->ctx->signals[SHELL_SIGNAL_RXRDY] :
&shell->ctx->signals[SHELL_SIGNAL_TXDONE];
-k_poll_signal(signal, 0);
+k_poll_signal_raise(signal, 0);
}
static void shell_current_command_erase(const struct shell *shell)
@@ -1338,7 +1338,7 @@ int shell_uninit(const struct shell *shell)
{
if (IS_ENABLED(CONFIG_MULTITHREADING)) {
/* signal kill message */
-(void)k_poll_signal(&shell->ctx->signals[SHELL_SIGNAL_KILL], 0);
+(void)k_poll_signal_raise(&shell->ctx->signals[SHELL_SIGNAL_KILL], 0);
return 0;
} else {


@@ -114,7 +114,7 @@ static void put(const struct log_backend *const backend, struct log_msg *msg)
if (IS_ENABLED(CONFIG_MULTITHREADING)) {
signal = &shell->ctx->signals[SHELL_SIGNAL_LOG_MSG];
-k_poll_signal(signal, 0);
+k_poll_signal_raise(signal, 0);
}
break;


@@ -38,7 +38,7 @@ static __kernel struct k_poll_signal no_wait_signal;
* @ingroup kernel_poll_tests
*
* @see K_POLL_EVENT_INITIALIZER(), k_poll_signal_init(),
-* k_poll_signal(), k_poll_signal_check()
+* k_poll_signal_raise(), k_poll_signal_check()
*/
void test_poll_no_wait(void)
{
@@ -67,7 +67,7 @@ void test_poll_no_wait(void)
/* test polling events that are already ready */
zassert_false(k_fifo_alloc_put(&no_wait_fifo, &msg), NULL);
-k_poll_signal(&no_wait_signal, SIGNAL_RESULT);
+k_poll_signal_raise(&no_wait_signal, SIGNAL_RESULT);
zassert_equal(k_poll(events, ARRAY_SIZE(events), 0), 0, "");
@@ -147,7 +147,7 @@ static void poll_wait_helper(void *use_fifo, void *p2, void *p3)
k_fifo_alloc_put(&wait_fifo, &wait_msg);
}
-k_poll_signal(&wait_signal, SIGNAL_RESULT);
+k_poll_signal_raise(&wait_signal, SIGNAL_RESULT);
}
/**
@@ -537,7 +537,7 @@ static void threadstate(void *p1, void *p2, void *p3)
/* Update polling thread state explicitly to improve code coverage */
k_thread_suspend(p1);
/* Enable polling thread by signalling */
-k_poll_signal(&signal, SIGNAL_RESULT);
+k_poll_signal_raise(&signal, SIGNAL_RESULT);
k_thread_resume(p1);
}
@@ -551,7 +551,7 @@ static void threadstate(void *p1, void *p2, void *p3)
* @ingroup kernel_poll_tests
*
* @see K_POLL_EVENT_INITIALIZER(), k_poll(), k_poll_signal_init(),
-* k_poll_signal_check(), k_poll_signal()
+* k_poll_signal_check(), k_poll_signal_raise()
*/
void test_poll_threadstate(void)
{