coding guidelines: comply with MISRA Rule 12.1

Added parentheses to make operator precedence explicit and rule out ambiguities.

Signed-off-by: Hess Nathan <nhess@baumer.com>
parent 2b53c83058
commit 6d417d52c2

12 changed files with 28 additions and 28 deletions
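Every hunk below applies the same mechanical transformation: sub-expressions whose evaluation order would otherwise rely on implicit C operator precedence are wrapped in their own parentheses. A minimal before/after sketch of the style follows; the function and parameter names are illustrative only, not taken from the Zephyr tree.

#include <stdbool.h>
#include <stddef.h>

/* Illustrative only: demonstrates the MISRA Rule 12.1 style of making
 * operator precedence explicit; this helper is made up for the example.
 */
static bool buffer_is_unusable(const char *buf, size_t size)
{
	/* Before: relies on '==' binding tighter than '||'. */
	/* return buf == NULL || size == 0U; */

	/* After: each comparison parenthesized, as done throughout this commit. */
	return (buf == NULL) || (size == 0U);
}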
@@ -17,7 +17,7 @@ static struct z_futex_data *k_futex_find_data(struct k_futex *futex)
 	struct k_object *obj;

 	obj = k_object_find(futex);
-	if (obj == NULL || obj->type != K_OBJ_FUTEX) {
+	if ((obj == NULL) || (obj->type != K_OBJ_FUTEX)) {
 		return NULL;
 	}

@@ -117,7 +117,7 @@ int32_t z_sched_prio_cmp(struct k_thread *thread_1, struct k_thread *thread_2);

 static inline bool _is_valid_prio(int prio, void *entry_point)
 {
-	if (prio == K_IDLE_PRIO && z_is_idle_thread_entry(entry_point)) {
+	if ((prio == K_IDLE_PRIO) && z_is_idle_thread_entry(entry_point)) {
 		return true;
 	}

@@ -123,7 +123,7 @@ void k_heap_free(struct k_heap *heap, void *mem)
 	sys_heap_free(&heap->heap, mem);

 	SYS_PORT_TRACING_OBJ_FUNC(k_heap, free, heap);
-	if (IS_ENABLED(CONFIG_MULTITHREADING) && z_unpend_all(&heap->wait_q) != 0) {
+	if (IS_ENABLED(CONFIG_MULTITHREADING) && (z_unpend_all(&heap->wait_q) != 0)) {
 		z_reschedule(&heap->lock, key);
 	} else {
 		k_spin_unlock(&heap->lock, key);
@@ -225,8 +225,8 @@ int k_mem_domain_remove_partition(struct k_mem_domain *domain,

 	/* find a partition that matches the given start and size */
 	for (p_idx = 0; p_idx < max_partitions; p_idx++) {
-		if (domain->partitions[p_idx].start == part->start &&
-		    domain->partitions[p_idx].size == part->size) {
+		if ((domain->partitions[p_idx].start == part->start) &&
+		    (domain->partitions[p_idx].size == part->size)) {
 			break;
 		}
 	}
@@ -261,7 +261,7 @@ void k_mem_slab_free(struct k_mem_slab *slab, void *mem)
 		 "Invalid memory pointer provided");

 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mem_slab, free, slab);
-	if (slab->free_list == NULL && IS_ENABLED(CONFIG_MULTITHREADING)) {
+	if ((slab->free_list == NULL) && IS_ENABLED(CONFIG_MULTITHREADING)) {
 		struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q);

 		if (pending_thread != NULL) {
@@ -581,7 +581,7 @@ void *k_mem_map_impl(uintptr_t phys, size_t size, uint32_t flags, bool is_anon)
 	/* Need extra for the guard pages (before and after) which we
 	 * won't map.
 	 */
-	total_size = size + CONFIG_MMU_PAGE_SIZE * 2;
+	total_size = size + (CONFIG_MMU_PAGE_SIZE * 2);

 	dst = virt_region_alloc(total_size, CONFIG_MMU_PAGE_SIZE);
 	if (dst == NULL) {
@@ -731,7 +731,7 @@ void k_mem_unmap_impl(void *addr, size_t size, bool is_anon)
 	 * region. So we also need to free them from the bitmap.
 	 */
 	pos = (uint8_t *)addr - CONFIG_MMU_PAGE_SIZE;
-	total_size = size + CONFIG_MMU_PAGE_SIZE * 2;
+	total_size = size + (CONFIG_MMU_PAGE_SIZE * 2);
 	virt_region_free(pos, total_size);

 out:
@@ -162,8 +162,8 @@ int k_pipe_cleanup(struct k_pipe *pipe)

 	k_spinlock_key_t key = k_spin_lock(&pipe->lock);

-	CHECKIF(z_waitq_head(&pipe->wait_q.readers) != NULL ||
-		z_waitq_head(&pipe->wait_q.writers) != NULL) {
+	CHECKIF((z_waitq_head(&pipe->wait_q.readers) != NULL) ||
+		(z_waitq_head(&pipe->wait_q.writers) != NULL)) {
 		k_spin_unlock(&pipe->lock, key);

 		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, cleanup, pipe, -EAGAIN);
@@ -308,7 +308,7 @@ static size_t pipe_buffer_list_populate(sys_dlist_t *list,
 static int pipe_return_code(size_t min_xfer, size_t bytes_remaining,
			     size_t bytes_requested)
 {
-	if (bytes_requested - bytes_remaining >= min_xfer) {
+	if ((bytes_requested - bytes_remaining) >= min_xfer) {
 		/*
 		 * At least the minimum number of requested
 		 * bytes have been transferred.
@@ -394,7 +394,7 @@ int z_impl_k_pipe_put(struct k_pipe *pipe, const void *data,

 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_pipe, put, pipe, timeout);

-	CHECKIF((min_xfer > bytes_to_write) || bytes_written == NULL) {
+	CHECKIF((min_xfer > bytes_to_write) || (bytes_written == NULL)) {
 		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, put, pipe, timeout,
					       -EINVAL);

@@ -704,7 +704,7 @@ int z_impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,

 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_pipe, get, pipe, timeout);

-	CHECKIF((min_xfer > bytes_to_read) || bytes_read == NULL) {
+	CHECKIF((min_xfer > bytes_to_read) || (bytes_read == NULL)) {
 		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, get, pipe,
					       timeout, -EINVAL);

@@ -742,7 +742,7 @@ size_t z_impl_k_pipe_read_avail(struct k_pipe *pipe)
 	k_spinlock_key_t key;

 	/* Buffer and size are fixed. No need to spin. */
-	if (pipe->buffer == NULL || pipe->size == 0U) {
+	if ((pipe->buffer == NULL) || (pipe->size == 0U)) {
 		res = 0;
 		goto out;
 	}
@@ -779,7 +779,7 @@ size_t z_impl_k_pipe_write_avail(struct k_pipe *pipe)
 	k_spinlock_key_t key;

 	/* Buffer and size are fixed. No need to spin. */
-	if (pipe->buffer == NULL || pipe->size == 0U) {
+	if ((pipe->buffer == NULL) || (pipe->size == 0U)) {
 		res = 0;
 		goto out;
 	}
@@ -249,7 +249,7 @@ int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, append_list, queue);

 	/* invalid head or tail of list */
-	CHECKIF(head == NULL || tail == NULL) {
+	CHECKIF((head == NULL) || (tail == NULL)) {
 		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append_list, queue, -EINVAL);

 		return -EINVAL;
@@ -140,7 +140,7 @@ static ALWAYS_INLINE struct k_thread *runq_best(void)
 */
 static inline bool should_queue_thread(struct k_thread *thread)
 {
-	return !IS_ENABLED(CONFIG_SMP) || thread != _current;
+	return !IS_ENABLED(CONFIG_SMP) || (thread != _current);
 }

 static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
@@ -272,7 +272,7 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
 	}

 	/* Put _current back into the queue */
-	if (thread != _current && active &&
+	if ((thread != _current) && active &&
	    !z_is_idle_thread_object(_current) && !queued) {
 		queue_thread(_current);
 	}
@@ -1007,7 +1007,7 @@ void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
 	bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio);

 	flag_ipi();
-	if (need_sched && _current->base.sched_locked == 0U) {
+	if (need_sched && (_current->base.sched_locked == 0U)) {
 		z_reschedule_unlocked();
 	}
 }
@@ -284,15 +284,15 @@ static inline int z_vrfy_k_thread_name_copy(k_tid_t thread,
 	/* Special case: we allow reading the names of initialized threads
 	 * even if we don't have permission on them
 	 */
-	if (thread == NULL || ko->type != K_OBJ_THREAD ||
-	    (ko->flags & K_OBJ_FLAG_INITIALIZED) == 0) {
+	if ((thread == NULL) || (ko->type != K_OBJ_THREAD) ||
+	    ((ko->flags & K_OBJ_FLAG_INITIALIZED) == 0)) {
 		return -EINVAL;
 	}
 	if (K_SYSCALL_MEMORY_WRITE(buf, size) != 0) {
 		return -EFAULT;
 	}
 	len = strlen(thread->name);
-	if (len + 1 > size) {
+	if ((len + 1) > size) {
 		return -ENOSPC;
 	}

@@ -40,14 +40,14 @@ static struct _timeout *first(void)
 {
 	sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

-	return t == NULL ? NULL : CONTAINER_OF(t, struct _timeout, node);
+	return (t == NULL) ? NULL : CONTAINER_OF(t, struct _timeout, node);
 }

 static struct _timeout *next(struct _timeout *t)
 {
 	sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

-	return n == NULL ? NULL : CONTAINER_OF(n, struct _timeout, node);
+	return (n == NULL) ? NULL : CONTAINER_OF(n, struct _timeout, node);
 }

 static void remove_timeout(struct _timeout *t)
@@ -114,7 +114,7 @@ void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
 	struct _timeout *t;

 	if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) &&
-	    Z_TICK_ABS(timeout.ticks) >= 0) {
+	    (Z_TICK_ABS(timeout.ticks) >= 0)) {
 		k_ticks_t ticks = Z_TICK_ABS(timeout.ticks) - curr_tick;

 		to->dticks = MAX(1, ticks);
@@ -391,7 +391,7 @@ static void *z_object_alloc(enum k_objects otype, size_t size)
 	struct k_object *zo;
 	uintptr_t tidx = 0;

-	if (otype <= K_OBJ_ANY || otype >= K_OBJ_LAST) {
+	if ((otype <= K_OBJ_ANY) || (otype >= K_OBJ_LAST)) {
 		LOG_ERR("bad object type %d requested", otype);
 		return NULL;
 	}
@@ -586,7 +586,7 @@ static void wordlist_cb(struct k_object *ko, void *ctx_ptr)
 	struct perm_ctx *ctx = (struct perm_ctx *)ctx_ptr;

 	if (sys_bitfield_test_bit((mem_addr_t)&ko->perms, ctx->parent_id) &&
-	    (struct k_thread *)ko->name != ctx->parent) {
+	    ((struct k_thread *)ko->name != ctx->parent)) {
 		sys_bitfield_set_bit((mem_addr_t)&ko->perms, ctx->child_id);
 	}
 }
@@ -727,7 +727,7 @@ int k_object_validate(struct k_object *ko, enum k_objects otype,
		       enum _obj_init_check init)
 {
 	if (unlikely((ko == NULL) ||
-		     (otype != K_OBJ_ANY && ko->type != otype))) {
+		     ((otype != K_OBJ_ANY) && (ko->type != otype)))) {
 		return -EBADF;
 	}
