coding guidelines: comply with MISRA Rule 12.1

Added parentheses to make operator precedence explicit and remove any
ambiguity.

Signed-off-by: Hess Nathan <nhess@baumer.com>
commit 6d417d52c2
parent 2b53c83058
12 changed files with 28 additions and 28 deletions
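For context, MISRA C:2012 Rule 12.1 (advisory) recommends that the precedence
of operators within an expression be made explicit. A minimal sketch of the
pattern applied throughout this commit follows; the function and parameter
names are illustrative only, not taken from the Zephyr sources:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative only: shows the before/after shape of the Rule 12.1 fixes.
 * Relational operators already bind tighter than logical OR, so behavior
 * is unchanged; the added parentheses merely make the grouping explicit
 * for readers and static analyzers.
 */
static bool obj_is_invalid(const void *obj, int type, int expected)
{
	/* Before: return obj == NULL || type != expected; */
	return (obj == NULL) || (type != expected);
}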
@@ -17,7 +17,7 @@ static struct z_futex_data *k_futex_find_data(struct k_futex *futex)
 	struct k_object *obj;

 	obj = k_object_find(futex);
-	if (obj == NULL || obj->type != K_OBJ_FUTEX) {
+	if ((obj == NULL) || (obj->type != K_OBJ_FUTEX)) {
 		return NULL;
 	}

@@ -117,7 +117,7 @@ int32_t z_sched_prio_cmp(struct k_thread *thread_1, struct k_thread *thread_2);

 static inline bool _is_valid_prio(int prio, void *entry_point)
 {
-	if (prio == K_IDLE_PRIO && z_is_idle_thread_entry(entry_point)) {
+	if ((prio == K_IDLE_PRIO) && z_is_idle_thread_entry(entry_point)) {
 		return true;
 	}

@@ -123,7 +123,7 @@ void k_heap_free(struct k_heap *heap, void *mem)
 	sys_heap_free(&heap->heap, mem);

 	SYS_PORT_TRACING_OBJ_FUNC(k_heap, free, heap);
-	if (IS_ENABLED(CONFIG_MULTITHREADING) && z_unpend_all(&heap->wait_q) != 0) {
+	if (IS_ENABLED(CONFIG_MULTITHREADING) && (z_unpend_all(&heap->wait_q) != 0)) {
 		z_reschedule(&heap->lock, key);
 	} else {
 		k_spin_unlock(&heap->lock, key);
@@ -225,8 +225,8 @@ int k_mem_domain_remove_partition(struct k_mem_domain *domain,

 	/* find a partition that matches the given start and size */
 	for (p_idx = 0; p_idx < max_partitions; p_idx++) {
-		if (domain->partitions[p_idx].start == part->start &&
-		    domain->partitions[p_idx].size == part->size) {
+		if ((domain->partitions[p_idx].start == part->start) &&
+		    (domain->partitions[p_idx].size == part->size)) {
 			break;
 		}
 	}
@@ -261,7 +261,7 @@ void k_mem_slab_free(struct k_mem_slab *slab, void *mem)
 		 "Invalid memory pointer provided");

 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mem_slab, free, slab);
-	if (slab->free_list == NULL && IS_ENABLED(CONFIG_MULTITHREADING)) {
+	if ((slab->free_list == NULL) && IS_ENABLED(CONFIG_MULTITHREADING)) {
 		struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q);

 		if (pending_thread != NULL) {
@@ -581,7 +581,7 @@ void *k_mem_map_impl(uintptr_t phys, size_t size, uint32_t flags, bool is_anon)
 	/* Need extra for the guard pages (before and after) which we
 	 * won't map.
 	 */
-	total_size = size + CONFIG_MMU_PAGE_SIZE * 2;
+	total_size = size + (CONFIG_MMU_PAGE_SIZE * 2);

 	dst = virt_region_alloc(total_size, CONFIG_MMU_PAGE_SIZE);
 	if (dst == NULL) {
@@ -731,7 +731,7 @@ void k_mem_unmap_impl(void *addr, size_t size, bool is_anon)
 	 * region. So we also need to free them from the bitmap.
 	 */
 	pos = (uint8_t *)addr - CONFIG_MMU_PAGE_SIZE;
-	total_size = size + CONFIG_MMU_PAGE_SIZE * 2;
+	total_size = size + (CONFIG_MMU_PAGE_SIZE * 2);
 	virt_region_free(pos, total_size);

 out:
@@ -162,8 +162,8 @@ int k_pipe_cleanup(struct k_pipe *pipe)

 	k_spinlock_key_t key = k_spin_lock(&pipe->lock);

-	CHECKIF(z_waitq_head(&pipe->wait_q.readers) != NULL ||
-		z_waitq_head(&pipe->wait_q.writers) != NULL) {
+	CHECKIF((z_waitq_head(&pipe->wait_q.readers) != NULL) ||
+		(z_waitq_head(&pipe->wait_q.writers) != NULL)) {
 		k_spin_unlock(&pipe->lock, key);

 		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, cleanup, pipe, -EAGAIN);
@@ -308,7 +308,7 @@ static size_t pipe_buffer_list_populate(sys_dlist_t *list,
 static int pipe_return_code(size_t min_xfer, size_t bytes_remaining,
			    size_t bytes_requested)
 {
-	if (bytes_requested - bytes_remaining >= min_xfer) {
+	if ((bytes_requested - bytes_remaining) >= min_xfer) {
 		/*
 		 * At least the minimum number of requested
 		 * bytes have been transferred.
@@ -394,7 +394,7 @@ int z_impl_k_pipe_put(struct k_pipe *pipe, const void *data,

 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_pipe, put, pipe, timeout);

-	CHECKIF((min_xfer > bytes_to_write) || bytes_written == NULL) {
+	CHECKIF((min_xfer > bytes_to_write) || (bytes_written == NULL)) {
 		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, put, pipe, timeout,
					       -EINVAL);

@@ -704,7 +704,7 @@ int z_impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,

 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_pipe, get, pipe, timeout);

-	CHECKIF((min_xfer > bytes_to_read) || bytes_read == NULL) {
+	CHECKIF((min_xfer > bytes_to_read) || (bytes_read == NULL)) {
 		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_pipe, get, pipe,
					       timeout, -EINVAL);

@@ -742,7 +742,7 @@ size_t z_impl_k_pipe_read_avail(struct k_pipe *pipe)
 	k_spinlock_key_t key;

 	/* Buffer and size are fixed. No need to spin. */
-	if (pipe->buffer == NULL || pipe->size == 0U) {
+	if ((pipe->buffer == NULL) || (pipe->size == 0U)) {
 		res = 0;
 		goto out;
 	}
@@ -779,7 +779,7 @@ size_t z_impl_k_pipe_write_avail(struct k_pipe *pipe)
 	k_spinlock_key_t key;

 	/* Buffer and size are fixed. No need to spin. */
-	if (pipe->buffer == NULL || pipe->size == 0U) {
+	if ((pipe->buffer == NULL) || (pipe->size == 0U)) {
 		res = 0;
 		goto out;
 	}
@@ -249,7 +249,7 @@ int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, append_list, queue);

 	/* invalid head or tail of list */
-	CHECKIF(head == NULL || tail == NULL) {
+	CHECKIF((head == NULL) || (tail == NULL)) {
 		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append_list, queue, -EINVAL);

 		return -EINVAL;
@@ -140,7 +140,7 @@ static ALWAYS_INLINE struct k_thread *runq_best(void)
 */
 static inline bool should_queue_thread(struct k_thread *thread)
 {
-	return !IS_ENABLED(CONFIG_SMP) || thread != _current;
+	return !IS_ENABLED(CONFIG_SMP) || (thread != _current);
 }

 static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
@@ -272,7 +272,7 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
 	}

 	/* Put _current back into the queue */
-	if (thread != _current && active &&
+	if ((thread != _current) && active &&
	    !z_is_idle_thread_object(_current) && !queued) {
 		queue_thread(_current);
 	}
@@ -1007,7 +1007,7 @@ void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
 	bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio);

 	flag_ipi();
-	if (need_sched && _current->base.sched_locked == 0U) {
+	if (need_sched && (_current->base.sched_locked == 0U)) {
 		z_reschedule_unlocked();
 	}
 }
@@ -284,15 +284,15 @@ static inline int z_vrfy_k_thread_name_copy(k_tid_t thread,
 	/* Special case: we allow reading the names of initialized threads
 	 * even if we don't have permission on them
 	 */
-	if (thread == NULL || ko->type != K_OBJ_THREAD ||
-	    (ko->flags & K_OBJ_FLAG_INITIALIZED) == 0) {
+	if ((thread == NULL) || (ko->type != K_OBJ_THREAD) ||
+	    ((ko->flags & K_OBJ_FLAG_INITIALIZED) == 0)) {
 		return -EINVAL;
 	}
 	if (K_SYSCALL_MEMORY_WRITE(buf, size) != 0) {
 		return -EFAULT;
 	}
 	len = strlen(thread->name);
-	if (len + 1 > size) {
+	if ((len + 1) > size) {
 		return -ENOSPC;
 	}

@@ -40,14 +40,14 @@ static struct _timeout *first(void)
 {
 	sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

-	return t == NULL ? NULL : CONTAINER_OF(t, struct _timeout, node);
+	return (t == NULL) ? NULL : CONTAINER_OF(t, struct _timeout, node);
 }

 static struct _timeout *next(struct _timeout *t)
 {
 	sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

-	return n == NULL ? NULL : CONTAINER_OF(n, struct _timeout, node);
+	return (n == NULL) ? NULL : CONTAINER_OF(n, struct _timeout, node);
 }

 static void remove_timeout(struct _timeout *t)
@@ -114,7 +114,7 @@ void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
 	struct _timeout *t;

 	if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) &&
-	    Z_TICK_ABS(timeout.ticks) >= 0) {
+	    (Z_TICK_ABS(timeout.ticks) >= 0)) {
 		k_ticks_t ticks = Z_TICK_ABS(timeout.ticks) - curr_tick;

 		to->dticks = MAX(1, ticks);
@@ -391,7 +391,7 @@ static void *z_object_alloc(enum k_objects otype, size_t size)
 	struct k_object *zo;
 	uintptr_t tidx = 0;

-	if (otype <= K_OBJ_ANY || otype >= K_OBJ_LAST) {
+	if ((otype <= K_OBJ_ANY) || (otype >= K_OBJ_LAST)) {
 		LOG_ERR("bad object type %d requested", otype);
 		return NULL;
 	}
@@ -586,7 +586,7 @@ static void wordlist_cb(struct k_object *ko, void *ctx_ptr)
 	struct perm_ctx *ctx = (struct perm_ctx *)ctx_ptr;

 	if (sys_bitfield_test_bit((mem_addr_t)&ko->perms, ctx->parent_id) &&
-	    (struct k_thread *)ko->name != ctx->parent) {
+	    ((struct k_thread *)ko->name != ctx->parent)) {
 		sys_bitfield_set_bit((mem_addr_t)&ko->perms, ctx->child_id);
 	}
 }
@@ -727,7 +727,7 @@ int k_object_validate(struct k_object *ko, enum k_objects otype,
			      enum _obj_init_check init)
 {
 	if (unlikely((ko == NULL) ||
-		     (otype != K_OBJ_ANY && ko->type != otype))) {
+		     ((otype != K_OBJ_ANY) && (ko->type != otype)))) {
 		return -EBADF;
 	}
