kernel: Make If statement have essentially Boolean type
Make if statements that test pointers explicitly compare the value against NULL. The C standard does not guarantee that a null pointer shares its representation with memory address 0, so it is good practice to always compare pointers explicitly against the NULL macro.

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
parent 0ff2ae1fe8
commit 4218d5f8f0
17 changed files with 50 additions and 49 deletions
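As a quick illustration of the pattern applied throughout this change, here is a minimal standalone sketch (the alloc_zeroed helper and its names are hypothetical, not part of the diff below): the controlling expression of each if statement is written as an explicit comparison against NULL so that it has an essentially Boolean type.

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper, used only for this illustration. */
static void *alloc_zeroed(size_t size)
{
	void *buf = malloc(size);

	/* Before: if (buf) { ... }
	 * After:  the pointer is compared against NULL explicitly,
	 *         so the condition is essentially Boolean.
	 */
	if (buf != NULL) {
		(void)memset(buf, 0, size);
	}

	return buf;
}

int main(void)
{
	void *p = alloc_zeroed(16);

	free(p);
	return (p != NULL) ? 0 : 1;
}

The two forms are equivalent at run time; the explicit comparison only makes the intent visible, which is what the hunks below do throughout the tree.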
@@ -192,7 +192,7 @@ void *k_calloc(size_t nmemb, size_t size)
 	}
 
 	ret = k_malloc(bounds);
-	if (ret) {
+	if (ret != NULL) {
 		(void)memset(ret, 0, bounds);
 	}
 	return ret;

@@ -202,7 +202,7 @@ int _impl_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
 
 		/* handle first thread waiting to write (if any) */
 		pending_thread = _unpend_first_thread(&q->wait_q);
-		if (pending_thread) {
+		if (pending_thread != NULL) {
 			/* add thread's message to queue */
 			(void)memcpy(q->write_ptr, pending_thread->base.swap_data,
 			       q->msg_size);

@@ -241,7 +241,7 @@ void _impl_k_mutex_unlock(struct k_mutex *mutex)
 	K_DEBUG("new owner of mutex %p: %p (prio: %d)\n",
 		mutex, new_owner, new_owner ? new_owner->base.prio : -1000);
 
-	if (new_owner) {
+	if (new_owner != NULL) {
 		_ready_thread(new_owner);
 
 		irq_unlock(key);

@@ -499,7 +499,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 	 * Copy any data to the reader that we left on the wait_q.
 	 * It is possible no data will be copied.
 	 */
-	if (reader) {
+	if (reader != NULL) {
 		desc = (struct k_pipe_desc *)reader->base.swap_data;
 		bytes_copied = pipe_xfer(desc->buffer, desc->bytes_to_xfer,
 					 data + num_bytes_written,

@@ -649,7 +649,7 @@ int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
 		thread = (struct k_thread *)sys_dlist_get(&xfer_list);
 	}
 
-	if (writer && (num_bytes_read < bytes_to_read)) {
+	if ((writer != NULL) && (num_bytes_read < bytes_to_read)) {
 		desc = (struct k_pipe_desc *)writer->base.swap_data;
 		bytes_copied = pipe_xfer(data + num_bytes_read,
 					 bytes_to_read - num_bytes_read,

@@ -679,7 +679,7 @@ int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
 		thread = (struct k_thread *)sys_dlist_get(&xfer_list);
 	}
 
-	if (writer) {
+	if (writer != NULL) {
 		desc = (struct k_pipe_desc *)writer->base.swap_data;
 		bytes_copied = pipe_buffer_put(pipe, desc->buffer,
 					       desc->bytes_to_xfer);

@@ -80,8 +80,9 @@ static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
 	struct k_poll_event *pending;
 
 	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
-	if (!pending || _is_t1_higher_prio_than_t2(pending->poller->thread,
-						   poller->thread)) {
+	if ((pending == NULL) ||
+	    _is_t1_higher_prio_than_t2(pending->poller->thread,
+				       poller->thread)) {
 		sys_dlist_append(events, &event->_node);
 		return;
 	}

@@ -358,7 +359,7 @@ void _handle_obj_poll_events(sys_dlist_t *events, u32_t state)
 	struct k_poll_event *poll_event;
 
 	poll_event = (struct k_poll_event *)sys_dlist_get(events);
-	if (poll_event) {
+	if (poll_event != NULL) {
 		(void) signal_poll_event(poll_event, state);
 	}
 }

@@ -409,7 +410,7 @@ int _impl_k_poll_signal(struct k_poll_signal *signal, int result)
 	signal->signaled = 1;
 
 	poll_event = (struct k_poll_event *)sys_dlist_get(&signal->poll_events);
-	if (!poll_event) {
+	if (poll_event == NULL) {
 		irq_unlock(key);
 		return 0;
 	}

@@ -127,7 +127,7 @@ void _impl_k_queue_cancel_wait(struct k_queue *queue)
 
 	first_pending_thread = _unpend_first_thread(&queue->wait_q);
 
-	if (first_pending_thread) {
+	if (first_pending_thread != NULL) {
 		prepare_thread_to_run(first_pending_thread, NULL);
 	}
 #else

@@ -151,7 +151,7 @@ static int queue_insert(struct k_queue *queue, void *prev, void *data,
 
 	first_pending_thread = _unpend_first_thread(&queue->wait_q);
 
-	if (first_pending_thread) {
+	if (first_pending_thread != NULL) {
 		prepare_thread_to_run(first_pending_thread, data);
 		_reschedule(key);
 		return 0;

@@ -163,7 +163,7 @@ static int queue_insert(struct k_queue *queue, void *prev, void *data,
 		struct alloc_node *anode;
 
 		anode = z_thread_malloc(sizeof(*anode));
-		if (!anode) {
+		if (anode == NULL) {
 			return -ENOMEM;
 		}
 		anode->data = data;

@@ -242,7 +242,7 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 		head = *(void **)head;
 	}
 
-	if (head) {
+	if (head != NULL) {
 		sys_sflist_append_list(&queue->data_q, head, tail);
 	}
 

@@ -303,7 +303,7 @@ static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
 		val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
 		irq_unlock(key);
 
-		if (!val && timeout != K_FOREVER) {
+		if ((val == NULL) && (timeout != K_FOREVER)) {
 			elapsed = k_uptime_get_32() - start;
 			done = elapsed > timeout;
 		}

@@ -169,7 +169,7 @@ static struct k_thread *next_up(void)
 
 	/* Choose the best thread that is not current */
 	struct k_thread *th = _priq_run_best(&_kernel.ready_q.runq);
-	if (!th) {
+	if (th == NULL) {
 		th = _current_cpu->idle_thread;
 	}
 

@@ -269,7 +269,7 @@ static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
 		irq_unlock(key);
 	}
 
-	if (wait_q) {
+	if (wait_q != NULL) {
 #ifdef CONFIG_WAITQ_SCALABLE
 		thread->base.pended_on = wait_q;
 #endif

@@ -333,7 +333,7 @@ struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
 {
 	struct k_thread *t = _unpend1_no_timeout(wait_q);
 
-	if (t) {
+	if (t != NULL) {
 		(void)_abort_thread_timeout(t);
 	}
 

@@ -622,7 +622,7 @@ int _is_thread_time_slicing(struct k_thread *thread)
 	LOCKED(&sched_lock) {
 		struct k_thread *next = _priq_run_best(&_kernel.ready_q.runq);
 
-		if (next) {
+		if (next != NULL) {
 			ret = thread->base.prio == next->base.prio;
 		}
 	}

@@ -183,7 +183,7 @@ static inline void handle_timeouts(s32_t ticks)
 		K_DEBUG("head: %p, delta: %d\n",
 			timeout, timeout ? timeout->delta_ticks_from_prev : -2112);
 
-		if (!next) {
+		if (next == NULL) {
 			irq_unlock(key);
 			return;
 		}

@@ -70,7 +70,7 @@ void _timer_expiration_handler(struct _timeout *t)
 
 	thread = _waitq_head(&timer->wait_q);
 
-	if (!thread) {
+	if (thread == NULL) {
 		return;
 	}
 

@@ -163,7 +163,7 @@ void _impl_k_timer_stop(struct k_timer *timer)
 	key = irq_lock();
 	struct k_thread *pending_thread = _unpend1_no_timeout(&timer->wait_q);
 
-	if (pending_thread) {
+	if (pending_thread != NULL) {
 		_ready_thread(pending_thread);
 	}
 

@@ -220,7 +220,7 @@ void *_impl_k_object_alloc(enum k_objects otype)
 		 "bad object type requested");
 
 	dyn_obj = z_thread_malloc(sizeof(*dyn_obj) + obj_size_get(otype));
-	if (!dyn_obj) {
+	if (dyn_obj == NULL) {
 		SYS_LOG_WRN("could not allocate kernel object");
 		return NULL;
 	}

@@ -265,7 +265,7 @@ void k_object_free(void *obj)
 
 	key = irq_lock();
 	dyn_obj = dyn_object_find(obj);
-	if (dyn_obj) {
+	if (dyn_obj != NULL) {
 		rb_remove(&obj_rb_tree, &dyn_obj->node);
 		sys_dlist_remove(&dyn_obj->obj_list);
 

@@ -275,7 +275,7 @@ void k_object_free(void *obj)
 	}
 	irq_unlock(key);
 
-	if (dyn_obj) {
+	if (dyn_obj != NULL) {
 		k_free(dyn_obj);
 	}
 }

@@ -286,11 +286,11 @@ struct _k_object *_k_object_find(void *obj)
 
 	ret = _k_object_gperf_find(obj);
 
-	if (!ret) {
+	if (ret == NULL) {
 		struct dyn_obj *dyn_obj;
 
 		dyn_obj = dyn_object_find(obj);
-		if (dyn_obj) {
+		if (dyn_obj != NULL) {
 			ret = &dyn_obj->kobj;
 		}
 	}

@@ -319,7 +319,7 @@ static int thread_index_get(struct k_thread *t)
 
 	ko = _k_object_find(t);
 
-	if (!ko) {
+	if (ko == NULL) {
 		return -1;
 	}
 

@@ -482,7 +482,7 @@ void _impl_k_object_access_grant(void *object, struct k_thread *thread)
 {
 	struct _k_object *ko = _k_object_find(object);
 
-	if (ko) {
+	if (ko != NULL) {
 		_thread_perms_set(ko, thread);
 	}
 }

@@ -491,7 +491,7 @@ void k_object_access_revoke(void *object, struct k_thread *thread)
 {
 	struct _k_object *ko = _k_object_find(object);
 
-	if (ko) {
+	if (ko != NULL) {
 		_thread_perms_clear(ko, thread);
 	}
 }

@@ -505,7 +505,7 @@ void k_object_access_all_grant(void *object)
 {
 	struct _k_object *ko = _k_object_find(object);
 
-	if (ko) {
+	if (ko != NULL) {
 		ko->flags |= K_OBJ_FLAG_PUBLIC;
 	}
 }

@@ -553,7 +553,7 @@ void _k_object_init(void *object)
 	 */
 
 	ko = _k_object_find(object);
-	if (!ko) {
+	if (ko == NULL) {
 		/* Supervisor threads can ignore rules about kernel objects
 		 * and may declare them on stacks, etc. Such objects will never
 		 * be usable from userspace, but we shouldn't explode.

@@ -569,7 +569,7 @@ void _k_object_recycle(void *object)
 {
 	struct _k_object *ko = _k_object_find(object);
 
-	if (ko) {
+	if (ko != NULL) {
 		(void)memset(ko->perms, 0, sizeof(ko->perms));
 		_thread_perms_set(ko, k_current_get());
 		ko->flags |= K_OBJ_FLAG_INITIALIZED;

@@ -582,7 +582,7 @@ void _k_object_uninit(void *object)
 
 	/* See comments in _k_object_init() */
 	ko = _k_object_find(object);
-	if (!ko) {
+	if (ko == NULL) {
 		return;
 	}
 

@@ -605,7 +605,7 @@ void *z_user_alloc_from_copy(void *src, size_t size)
 	}
 
 	dst = z_thread_malloc(size);
-	if (!dst) {
+	if (dst == NULL) {
 		printk("out of thread resource pool memory (%zu)", size);
 		goto out_err;
 	}

@@ -27,7 +27,7 @@ static void work_q_main(void *work_q_ptr, void *p2, void *p3)
 		k_work_handler_t handler;
 
 		work = k_queue_get(&work_q->queue, K_FOREVER);
-		if (!work) {
+		if (work == NULL) {
 			continue;
 		}
 

@@ -22,7 +22,7 @@ void *malloc(size_t size)
 	void *ret;
 
 	ret = sys_mem_pool_alloc(&z_malloc_mem_pool, size);
-	if (!ret) {
+	if (ret == NULL) {
 		errno = ENOMEM;
 	}
 

@@ -82,7 +82,7 @@ void *calloc(size_t nmemb, size_t size)
 
 	ret = malloc(size);
 
-	if (ret) {
+	if (ret != NULL) {
 		(void)memset(ret, 0, size);
 	}
 

@@ -120,7 +120,7 @@ void *realloc(void *ptr, size_t requested_size)
 	}
 
 	new_ptr = malloc(requested_size);
-	if (!new_ptr) {
+	if (new_ptr == NULL) {
 		return NULL;
 	}
 

@@ -113,7 +113,7 @@ long strtol(const char *nptr, char **endptr, register int base)
 		errno = ERANGE;
 	} else if (neg)
 		acc = -acc;
-	if (endptr != 0)
+	if (endptr != NULL)
 		*endptr = (char *)(any ? s - 1 : nptr);
 	return acc;
 }

@@ -92,7 +92,7 @@ unsigned long strtoul(const char *nptr, char **endptr, register int base)
 		errno = ERANGE;
 	} else if (neg)
 		acc = -acc;
-	if (endptr != 0)
+	if (endptr != NULL)
 		*endptr = (char *)(any ? s - 1 : nptr);
 	return acc;
 }

@@ -147,7 +147,7 @@ static void *block_alloc(struct sys_mem_pool_base *p, int l, size_t lsz)
 	int key = pool_irq_lock(p);
 
 	block = sys_dlist_get(&p->levels[l].free_list);
-	if (block) {
+	if (block != NULL) {
 		clear_free_bit(p, l, block_num(p, block, lsz));
 	}
 	pool_irq_unlock(p, key);

@@ -258,7 +258,7 @@ int _sys_mem_pool_block_alloc(struct sys_mem_pool_base *p, size_t size,
 		/* Iteratively break the smallest enclosing block... */
 		data = block_alloc(p, free_l, lsizes[free_l]);
 
-		if (!data) {
+		if (data == NULL) {
 			/* This can happen if we race with another allocator.
 			 * It's OK, just back out and the timeout code will
 			 * retry. Note mild overloading: -EAGAIN isn't for

@@ -337,7 +337,7 @@ void sys_mem_pool_free(void *ptr)
 	struct sys_mem_pool_block *blk;
 	struct sys_mem_pool *p;
 
-	if (!ptr) {
+	if (ptr == NULL) {
 		return;
 	}
 

@@ -172,7 +172,7 @@ static void fix_extra_red(struct rbnode **stack, int stacksz)
 		int side = get_side(grandparent, parent);
 		struct rbnode *aunt = get_child(grandparent, !side);
 
-		if (aunt && is_red(aunt)) {
+		if ((aunt != NULL) && is_red(aunt)) {
 			set_color(grandparent, RED);
 			set_color(parent, BLACK);
 			set_color(aunt, BLACK);

@@ -431,7 +431,7 @@ void rb_remove(struct rbtree *tree, struct rbnode *node)
 
 	struct rbnode *child = get_child(node, 0);
 
-	if (!child) {
+	if (child == NULL) {
 		child = get_child(node, 1);
 	}
 

@@ -481,7 +481,7 @@ void rb_remove(struct rbtree *tree, struct rbnode *node)
 
 void _rb_walk(struct rbnode *node, rb_visit_t visit_fn, void *cookie)
 {
-	if (node) {
+	if (node != NULL) {
 		_rb_walk(get_child(node, 0), visit_fn, cookie);
 		visit_fn(node, cookie);
 		_rb_walk(get_child(node, 1), visit_fn, cookie);

@@ -557,7 +557,7 @@ struct rbnode *_rb_foreach_next(struct rbtree *tree, struct _rb_foreach *f)
 	 * it's right subtree if it has a right child
 	 */
 	n = get_child(f->stack[f->top], 1);
-	if (n) {
+	if (n != NULL) {
 		return stack_left_limb(n, f);
 	}
 

@@ -85,7 +85,7 @@ void _k_object_gperf_wordlist_foreach(_wordlist_cb_func_t func, void *context)
 	int i;
 
 	for (i = MIN_HASH_VALUE; i <= MAX_HASH_VALUE; i++) {
-		if (wordlist[i].name) {
+		if (wordlist[i].name != NULL) {
 			func(&wordlist[i], context);
 		}
 	}