kernel: make tests of a value against zero explicit
Tests of a value against zero should be made explicit, unless the
operand is effectively Boolean. This is based on MISRA rule 14.4.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
parent 0630452890
commit 3f4f3f6c43

6 changed files with 12 additions and 12 deletions
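
For context, the pattern applied throughout comes from MISRA C:2012 rule 14.4:
a controlling expression must be essentially Boolean, so pointer and integer
operands get an explicit comparison against NULL or zero. A minimal standalone
sketch of the non-compliant and compliant forms (the lookup_name() helper is
hypothetical, used only for illustration, not code from this tree):

#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper, used only for this illustration. */
static const char *lookup_name(int id)
{
        return (id == 1) ? "main" : NULL;
}

int main(void)
{
        const char *name = lookup_name(2);
        unsigned int flags = 0x4U;
        bool ready = true;

        /* Non-compliant under rule 14.4: a pointer and an integer used
         * directly as controlling expressions.
         *
         *     if (name) { ... }
         *     if (flags & 0x4U) { ... }
         */

        /* Compliant: the tests against NULL and zero are explicit. */
        if (name == NULL) {
                name = "unknown";
        }
        if ((flags & 0x4U) != 0U) {
                printf("flag set, name=%s\n", name);
        }

        /* A genuinely Boolean operand needs no comparison. */
        if (ready) {
                printf("ready\n");
        }

        return 0;
}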

@@ -48,9 +48,9 @@ __weak void k_sys_fatal_error_handler(unsigned int reason,
 static const char *thread_name_get(struct k_thread *thread)
 {
-        const char *thread_name = thread ? k_thread_name_get(thread) : NULL;
+        const char *thread_name = (thread != NULL) ? k_thread_name_get(thread) : NULL;
 
-        if (thread_name == NULL || thread_name[0] == '\0') {
+        if ((thread_name == NULL) || (thread_name[0] == '\0')) {
                 thread_name = "unknown";
         }
 
@@ -40,7 +40,7 @@ int z_impl_k_futex_wake(struct k_futex *futex, bool wake_all)
         do {
                 thread = z_unpend_first_thread(&futex_data->wait_q);
-                if (thread) {
+                if (thread != NULL) {
                         woken++;
                         arch_thread_return_value_set(thread, 0);
                         z_ready_thread(thread);

@@ -103,7 +103,7 @@ void *z_thread_aligned_alloc(size_t align, size_t size)
                 heap = _current->resource_pool;
         }
 
-        if (heap) {
+        if (heap != NULL) {
                 ret = z_heap_aligned_alloc(heap, align, size);
         } else {
                 ret = NULL;

@@ -387,7 +387,7 @@ static int signal_poll_event(struct k_poll_event *event, uint32_t state)
         struct z_poller *poller = event->poller;
         int retcode = 0;
 
-        if (poller) {
+        if (poller != NULL) {
                 if (poller->mode == MODE_POLL) {
                         retcode = signal_poller(event, state);
                 } else if (poller->mode == MODE_TRIGGERED) {

@@ -268,7 +268,7 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
         * responsible for putting it back in z_swap and ISR return!),
         * which makes this choice simple.
         */
-        return thread ? thread : _current_cpu->idle_thread;
+        return (thread != NULL) ? thread : _current_cpu->idle_thread;
 #else
        /* Under SMP, the "cache" mechanism for selecting the next
         * thread doesn't work, so we have more work to do to test

@@ -781,7 +781,7 @@ void z_thread_priority_set(struct k_thread *thread, int prio)
         }
 }
 
-static inline int resched(uint32_t key)
+static inline bool resched(uint32_t key)
 {
 #ifdef CONFIG_SMP
         _current_cpu->swap_ok = 0;
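
Note that the resched() hunk above takes a different route than the others:
instead of adding a comparison at every call site, it changes the return type
from int to bool, making the predicate itself essentially Boolean so that a
caller's plain "if (resched(key))" already satisfies the rule. A small sketch
of that trade-off, with hypothetical names rather than Zephyr code:

#include <stdbool.h>
#include <stdio.h>

/* With an int return, each caller must add an explicit test to keep
 * its controlling expression essentially Boolean:
 *
 *     if (need_resched_int(key) != 0) { ... }
 */
static inline int need_resched_int(unsigned int key)
{
        return ((key & 1U) != 0U) ? 1 : 0;
}

/* With a bool return, the predicate itself is essentially Boolean,
 * so a plain "if (need_resched(key))" already satisfies rule 14.4.
 */
static inline bool need_resched(unsigned int key)
{
        return (key & 1U) != 0U;
}

int main(void)
{
        unsigned int key = 3U;

        if (need_resched_int(key) != 0) {
                printf("int variant: reschedule\n");
        }
        if (need_resched(key)) {
                printf("bool variant: reschedule\n");
        }
        return 0;
}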

@@ -1481,7 +1481,7 @@ void z_thread_abort(struct k_thread *thread)
 {
         k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
 
-        if (thread->base.thread_state & _THREAD_DEAD) {
+        if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
                 k_spin_unlock(&sched_spinlock, key);
                 return;
         }

@@ -1537,12 +1537,12 @@ int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
         k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
         int ret = 0;
 
-        if (thread->base.thread_state & _THREAD_DEAD) {
+        if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
                 ret = 0;
         } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
                 ret = -EBUSY;
-        } else if (thread == _current ||
-                   thread->base.pended_on == &_current->join_queue) {
+        } else if ((thread == _current) ||
+                   (thread->base.pended_on == &_current->join_queue)) {
                 ret = -EDEADLK;
         } else {
                 __ASSERT(!arch_is_in_isr(), "cannot join in ISR");

@@ -844,7 +844,7 @@ bool z_spin_lock_valid(struct k_spinlock *l)
 {
         uintptr_t thread_cpu = l->thread_cpu;
 
-        if (thread_cpu) {
+        if (thread_cpu != 0U) {
                 if ((thread_cpu & 3U) == _current_cpu->id) {
                         return false;
                 }