kernel: Make both operands of operators of same essential type category

Add a 'U' suffix to integer constants that are computed with or
compared against unsigned variables, along with other related fixes
for the same MISRA rule (10.4).

Signed-off-by: Anas Nashif <anas.nashif@intel.com>

commit bbbc38ba8f
Author: Anas Nashif <anas.nashif@intel.com>
Date:   2021-03-29 10:03:49 -04:00

15 changed files with 47 additions and 47 deletions
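
All of the hunks below apply the same MISRA C:2012 rule 10.4 pattern: both operands of an operator that is subject to the usual arithmetic conversions must share the same essential type category, so a plain integer constant compared with (or masked against) an unsigned value gains a 'U' suffix. A minimal sketch of the before/after, using hypothetical names (EXAMPLE_FLAG_BUSY and example_is_busy are illustrative, not from the Zephyr tree):

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative flag bit, mirroring the pf->flags checks in the hunks below. */
#define EXAMPLE_FLAG_BUSY 0x4U

static bool example_is_busy(uint32_t flags)
{
	/* Non-compliant: '0' is essentially signed while the result of the
	 * '&' is essentially unsigned, so '!=' mixes type categories:
	 *
	 *     return (flags & EXAMPLE_FLAG_BUSY) != 0;
	 */

	/* Compliant: the 'U' suffix puts both operands of '!=' in the
	 * unsigned category.
	 */
	return (flags & EXAMPLE_FLAG_BUSY) != 0U;
}
```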

@@ -112,7 +112,7 @@ const struct device *z_impl_device_get_binding(const char *name)
 	/* A null string identifies no device. So does an empty
 	 * string.
 	 */
-	if ((name == NULL) || (*name == 0)) {
+	if ((name == NULL) || (*name == 0U)) {
 		return NULL;
 	}
@@ -167,7 +167,7 @@ size_t z_device_get_all_static(struct device const **devices)
 
 bool z_device_ready(const struct device *dev)
 {
-	return dev->state->initialized && (dev->state->init_res == 0);
+	return dev->state->initialized && (dev->state->init_res == 0U);
 }
 
 int device_required_foreach(const struct device *dev,

@@ -250,7 +250,7 @@ static inline void z_sched_lock(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!arch_is_in_isr(), "");
-	__ASSERT(_current->base.sched_locked != 1, "");
+	__ASSERT(_current->base.sched_locked != 1U, "");
 
 	--_current->base.sched_locked;
@@ -263,7 +263,7 @@ static ALWAYS_INLINE void z_sched_unlock_no_reschedule(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!arch_is_in_isr(), "");
-	__ASSERT(_current->base.sched_locked != 0, "");
+	__ASSERT(_current->base.sched_locked != 0U, "");
 
 	compiler_barrier();

@@ -28,7 +28,7 @@
 #define Z_PHYS_RAM_START	((uintptr_t)CONFIG_SRAM_BASE_ADDRESS)
 #define Z_PHYS_RAM_SIZE		((size_t)KB(CONFIG_SRAM_SIZE))
 #define Z_PHYS_RAM_END		(Z_PHYS_RAM_START + Z_PHYS_RAM_SIZE)
-#define Z_NUM_PAGE_FRAMES	(Z_PHYS_RAM_SIZE / CONFIG_MMU_PAGE_SIZE)
+#define Z_NUM_PAGE_FRAMES	(Z_PHYS_RAM_SIZE / (size_t)CONFIG_MMU_PAGE_SIZE)
 
 /** End virtual address of virtual address space */
 #define Z_VIRT_RAM_START	((uint8_t *)CONFIG_KERNEL_VM_BASE)
@@ -118,27 +118,27 @@ struct z_page_frame {
 
 static inline bool z_page_frame_is_pinned(struct z_page_frame *pf)
 {
-	return (pf->flags & Z_PAGE_FRAME_PINNED) != 0;
+	return (pf->flags & Z_PAGE_FRAME_PINNED) != 0U;
 }
 
 static inline bool z_page_frame_is_reserved(struct z_page_frame *pf)
 {
-	return (pf->flags & Z_PAGE_FRAME_RESERVED) != 0;
+	return (pf->flags & Z_PAGE_FRAME_RESERVED) != 0U;
 }
 
 static inline bool z_page_frame_is_mapped(struct z_page_frame *pf)
 {
-	return (pf->flags & Z_PAGE_FRAME_MAPPED) != 0;
+	return (pf->flags & Z_PAGE_FRAME_MAPPED) != 0U;
 }
 
 static inline bool z_page_frame_is_busy(struct z_page_frame *pf)
 {
-	return (pf->flags & Z_PAGE_FRAME_BUSY) != 0;
+	return (pf->flags & Z_PAGE_FRAME_BUSY) != 0U;
 }
 
 static inline bool z_page_frame_is_backed(struct z_page_frame *pf)
 {
-	return (pf->flags & Z_PAGE_FRAME_BACKED) != 0;
+	return (pf->flags & Z_PAGE_FRAME_BACKED) != 0U;
 }
 
 static inline bool z_page_frame_is_evictable(struct z_page_frame *pf)
@@ -152,12 +152,12 @@ static inline bool z_page_frame_is_evictable(struct z_page_frame *pf)
  */
 static inline bool z_page_frame_is_available(struct z_page_frame *page)
 {
-	return page->flags == 0;
+	return page->flags == 0U;
 }
 
 static inline void z_assert_phys_aligned(uintptr_t phys)
 {
-	__ASSERT(phys % CONFIG_MMU_PAGE_SIZE == 0,
+	__ASSERT(phys % CONFIG_MMU_PAGE_SIZE == 0U,
 		 "physical address 0x%lx is not page-aligned", phys);
 	(void)phys;
 }
@@ -193,9 +193,9 @@ static inline struct z_page_frame *z_phys_to_page_frame(uintptr_t phys)
 
 static inline void z_mem_assert_virtual_region(uint8_t *addr, size_t size)
 {
-	__ASSERT((uintptr_t)addr % CONFIG_MMU_PAGE_SIZE == 0,
+	__ASSERT((uintptr_t)addr % CONFIG_MMU_PAGE_SIZE == 0U,
 		 "unaligned addr %p", addr);
-	__ASSERT(size % CONFIG_MMU_PAGE_SIZE == 0,
+	__ASSERT(size % CONFIG_MMU_PAGE_SIZE == 0U,
 		 "unaligned size %zu", size);
 	__ASSERT(addr + size > addr,
 		 "region %p size %zu zero or wraps around", addr, size);

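The Z_NUM_PAGE_FRAMES hunk above and the k_mem_free_get() hunk further down take a slightly different route for the same rule: CONFIG_MMU_PAGE_SIZE is emitted by the Kconfig-generated header as a plain, unsuffixed integer literal, so the operand is cast to size_t at the point of use instead of suffixed. A rough sketch of that pattern, with illustrative values (the 4096 page size and 8 MB RAM size are assumptions for the example, not taken from the tree):

```c
#include <stddef.h>

/* Kconfig macros land in the generated header as plain literals with no
 * suffix; the value here is an assumption for the sketch.
 */
#define CONFIG_MMU_PAGE_SIZE 4096

/* Stand-in for the real Z_PHYS_RAM_SIZE, which is already a size_t. */
#define EXAMPLE_PHYS_RAM_SIZE ((size_t)(8 * 1024 * 1024))

/* Non-compliant under rule 10.4: an unsigned size_t divided by a plain
 * (essentially signed) literal mixes essential type categories:
 *
 *     #define EXAMPLE_NUM_PAGE_FRAMES \
 *             (EXAMPLE_PHYS_RAM_SIZE / CONFIG_MMU_PAGE_SIZE)
 */

/* Compliant: the cast puts both operands in the unsigned category, which is
 * also why a 'U' suffix cannot be attached here -- the literal is generated.
 */
#define EXAMPLE_NUM_PAGE_FRAMES \
	(EXAMPLE_PHYS_RAM_SIZE / (size_t)CONFIG_MMU_PAGE_SIZE)
```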

@@ -332,7 +332,7 @@ sys_rand_fallback:
 	 * those devices without a HWRNG entropy driver.
 	 */
 
-	while (length > 0) {
+	while (length > 0U) {
 		uint32_t rndbits;
 		uint8_t *p_rndbits = (uint8_t *)&rndbits;

@@ -343,7 +343,7 @@ void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
 	}
 
 	/* copy message data to buffer, then dispose of message */
-	if ((rx_msg->tx_data != NULL) && (rx_msg->size > 0)) {
+	if ((rx_msg->tx_data != NULL) && (rx_msg->size > 0U)) {
 		(void)memcpy(buffer, rx_msg->tx_data, rx_msg->size);
 	}
 	mbox_message_dispose(rx_msg);
@@ -370,7 +370,7 @@ static int mbox_message_data_check(struct k_mbox_msg *rx_msg, void *buffer)
 	if (buffer != NULL) {
 		/* retrieve data now, then dispose of message */
 		k_mbox_data_get(rx_msg, buffer);
-	} else if (rx_msg->size == 0) {
+	} else if (rx_msg->size == 0U) {
 		/* there is no data to get, so just dispose of message */
 		mbox_message_dispose(rx_msg);
 	} else {

@@ -45,7 +45,7 @@ static bool check_add_partition(struct k_mem_domain *domain,
 	}
 #endif
 
-	if (part->size == 0) {
+	if (part->size == 0U) {
 		LOG_ERR("zero sized partition at %p with base 0x%lx",
 			part, part->start);
 		return false;
@@ -66,7 +66,7 @@ static bool check_add_partition(struct k_mem_domain *domain,
 	for (i = 0; i < domain->num_partitions; i++) {
 		struct k_mem_partition *dpart = &domain->partitions[i];
 
-		if (dpart->size == 0) {
+		if (dpart->size == 0U) {
 			/* Unused slot */
 			continue;
 		}

@@ -36,7 +36,7 @@ static int create_free_list(struct k_mem_slab *slab)
 
 	/* blocks must be word aligned */
 	CHECKIF(((slab->block_size | (uintptr_t)slab->buffer) &
-		 (sizeof(void *) - 1)) != 0) {
+		 (sizeof(void *) - 1)) != 0U) {
 		return -EINVAL;
 	}

@@ -293,8 +293,8 @@ static int map_anon_page(void *addr, uint32_t flags)
 {
 	struct z_page_frame *pf;
 	uintptr_t phys;
-	bool lock = (flags & K_MEM_MAP_LOCK) != 0;
-	bool uninit = (flags & K_MEM_MAP_UNINIT) != 0;
+	bool lock = (flags & K_MEM_MAP_LOCK) != 0U;
+	bool uninit = (flags & K_MEM_MAP_UNINIT) != 0U;
 
 	pf = free_page_frame_list_get();
 	if (pf == NULL) {
@@ -346,17 +346,17 @@ void *k_mem_map(size_t size, uint32_t flags)
 	size_t total_size = size;
 	int ret;
 	k_spinlock_key_t key;
-	bool guard = (flags & K_MEM_MAP_GUARD) != 0;
+	bool guard = (flags & K_MEM_MAP_GUARD) != 0U;
 	uint8_t *pos;
 
-	__ASSERT(!(((flags & K_MEM_PERM_USER) != 0) &&
-		   ((flags & K_MEM_MAP_UNINIT) != 0)),
+	__ASSERT(!(((flags & K_MEM_PERM_USER) != 0U) &&
+		   ((flags & K_MEM_MAP_UNINIT) != 0U)),
 		 "user access to anonymous uninitialized pages is forbidden");
-	__ASSERT(size % CONFIG_MMU_PAGE_SIZE == 0,
+	__ASSERT(size % CONFIG_MMU_PAGE_SIZE == 0U,
 		 "unaligned size %zu passed to %s", size, __func__);
 	__ASSERT(size != 0, "zero sized memory mapping");
 	__ASSERT(page_frames_initialized, "%s called too early", __func__);
-	__ASSERT((flags & K_MEM_CACHE_MASK) == 0,
+	__ASSERT((flags & K_MEM_CACHE_MASK) == 0U,
 		 "%s does not support explicit cache settings", __func__);
 
 	key = k_spin_lock(&z_mm_lock);
@@ -406,7 +406,7 @@ size_t k_mem_free_get(void)
 	ret = z_free_page_count;
 	k_spin_unlock(&z_mm_lock, key);
 
-	return ret * CONFIG_MMU_PAGE_SIZE;
+	return ret * (size_t)CONFIG_MMU_PAGE_SIZE;
 }
 
 /* This may be called from arch early boot code before z_cstart() is invoked.
@@ -423,7 +423,7 @@ void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32_t flags)
 	addr_offset = k_mem_region_align(&aligned_phys, &aligned_size,
 					 phys, size,
 					 CONFIG_MMU_PAGE_SIZE);
-	__ASSERT(aligned_size != 0, "0-length mapping at 0x%lx", aligned_phys);
+	__ASSERT(aligned_size != 0U, "0-length mapping at 0x%lx", aligned_phys);
 	__ASSERT(aligned_phys < (aligned_phys + (aligned_size - 1)),
 		 "wraparound for physical address 0x%lx (size %zu)",
 		 aligned_phys, aligned_size);
@@ -497,7 +497,7 @@ void z_kernel_map_fixup(void)
 			     CONFIG_MMU_PAGE_SIZE);
 	size_t kobject_size = (size_t)(Z_KERNEL_VIRT_END - kobject_page_begin);
 
-	if (kobject_size != 0) {
+	if (kobject_size != 0U) {
 		arch_mem_map(kobject_page_begin,
 			     Z_BOOT_VIRT_TO_PHYS(kobject_page_begin),
 			     kobject_size, K_MEM_PERM_RW | K_MEM_CACHE_WB);

@@ -105,7 +105,7 @@ int k_msgq_cleanup(struct k_msgq *msgq)
 		return -EBUSY;
 	}
 
-	if ((msgq->flags & K_MSGQ_FLAG_ALLOC) != 0) {
+	if ((msgq->flags & K_MSGQ_FLAG_ALLOC) != 0U) {
 		k_free(msgq->buffer_start);
 		msgq->flags &= ~K_MSGQ_FLAG_ALLOC;
 	}
@@ -199,7 +199,7 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout)
 
 	key = k_spin_lock(&msgq->lock);
 
-	if (msgq->used_msgs > 0) {
+	if (msgq->used_msgs > 0U) {
 		/* take first available message from queue */
 		(void)memcpy(data, msgq->read_ptr, msgq->msg_size);
 		msgq->read_ptr += msgq->msg_size;
@@ -260,7 +260,7 @@ int z_impl_k_msgq_peek(struct k_msgq *msgq, void *data)
 
 	key = k_spin_lock(&msgq->lock);
 
-	if (msgq->used_msgs > 0) {
+	if (msgq->used_msgs > 0U) {
 		/* take first available message from queue */
 		(void)memcpy(data, msgq->read_ptr, msgq->msg_size);
 		result = 0;

@@ -116,7 +116,7 @@ int z_impl_k_pipe_alloc_init(struct k_pipe *pipe, size_t size)
 	void *buffer;
 	int ret;
 
-	if (size != 0) {
+	if (size != 0U) {
 		buffer = z_thread_malloc(size);
 		if (buffer != NULL) {
 			k_pipe_init(pipe, buffer, size);
@@ -150,7 +150,7 @@ int k_pipe_cleanup(struct k_pipe *pipe)
 		return -EAGAIN;
 	}
 
-	if ((pipe->flags & K_PIPE_FLAG_ALLOC) != 0) {
+	if ((pipe->flags & K_PIPE_FLAG_ALLOC) != 0U) {
 		k_free(pipe->buffer);
 		pipe->buffer = NULL;
 		pipe->flags &= ~K_PIPE_FLAG_ALLOC;
@@ -493,7 +493,7 @@ int z_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 	if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)
 	    && num_bytes_written >= min_xfer
-	    && min_xfer > 0) {
+	    && min_xfer > 0U) {
 		*bytes_written = num_bytes_written;
 		k_sched_unlock();
 		return 0;
@@ -647,7 +647,7 @@ int z_impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
 	if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)
 	    && num_bytes_read >= min_xfer
-	    && min_xfer > 0) {
+	    && min_xfer > 0U) {
 		k_sched_unlock();
 
 		*bytes_read = num_bytes_read;
@@ -725,7 +725,7 @@ size_t z_impl_k_pipe_read_avail(struct k_pipe *pipe)
 	k_spinlock_key_t key;
 
 	/* Buffer and size are fixed. No need to spin. */
-	if (pipe->buffer == NULL || pipe->size == 0) {
+	if (pipe->buffer == NULL || pipe->size == 0U) {
 		res = 0;
 		goto out;
 	}
@@ -762,7 +762,7 @@ size_t z_impl_k_pipe_write_avail(struct k_pipe *pipe)
 	k_spinlock_key_t key;
 
 	/* Buffer and size are fixed. No need to spin. */
-	if (pipe->buffer == NULL || pipe->size == 0) {
+	if (pipe->buffer == NULL || pipe->size == 0U) {
 		res = 0;
 		goto out;
 	}

@@ -316,7 +316,7 @@ static inline int z_vrfy_k_poll(struct k_poll_event *events,
 	/* Validate the events buffer and make a copy of it in an
 	 * allocated kernel-side buffer.
 	 */
-	if (Z_SYSCALL_VERIFY(num_events >= 0)) {
+	if (Z_SYSCALL_VERIFY(num_events >= 0U)) {
 		ret = -EINVAL;
 		goto out;
 	}

@@ -235,7 +235,7 @@ void z_requeue_current(struct k_thread *curr)
 
 static inline bool is_aborting(struct k_thread *thread)
 {
-	return (thread->base.thread_state & _THREAD_ABORTING) != 0;
+	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
 }
 
 static ALWAYS_INLINE struct k_thread *next_up(void)
@@ -776,7 +776,7 @@ void z_thread_priority_set(struct k_thread *thread, int prio)
 		arch_sched_ipi();
 #endif
 
-	if (need_sched && _current->base.sched_locked == 0) {
+	if (need_sched && _current->base.sched_locked == 0U) {
 		z_reschedule_unlocked();
 	}
 }
@@ -837,7 +837,7 @@ void k_sched_unlock(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
 	LOCKED(&sched_spinlock) {
-		__ASSERT(_current->base.sched_locked != 0, "");
+		__ASSERT(_current->base.sched_locked != 0U, "");
 		__ASSERT(!arch_is_in_isr(), "");
 
 		++_current->base.sched_locked;
@@ -1452,7 +1452,7 @@ static void end_thread(struct k_thread *thread)
 	/* We hold the lock, and the thread is known not to be running
 	 * anywhere.
 	 */
-	if ((thread->base.thread_state & _THREAD_DEAD) == 0) {
+	if ((thread->base.thread_state & _THREAD_DEAD) == 0U) {
 		thread->base.thread_state |= _THREAD_DEAD;
 		thread->base.thread_state &= ~_THREAD_ABORTING;
 		if (z_is_thread_queued(thread)) {

@@ -123,7 +123,7 @@ bool z_is_thread_essential(void)
 
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 void z_impl_k_busy_wait(uint32_t usec_to_wait)
 {
-	if (usec_to_wait == 0) {
+	if (usec_to_wait == 0U) {
 		return;
 	}

@@ -806,7 +806,7 @@ char *z_user_string_alloc_copy(const char *src, size_t maxlen)
 	 * properly.
 	 */
 	if (ret != NULL) {
-		ret[actual_len - 1] = '\0';
+		ret[actual_len - 1U] = '\0';
 	}
 out:
 	return ret;

@@ -395,7 +395,7 @@ static bool work_flush_locked(struct k_work *work,
 			      struct z_work_flusher *flusher)
 {
 	bool need_flush = (flags_get(&work->flags)
-			   & (K_WORK_QUEUED | K_WORK_RUNNING)) != 0;
+			   & (K_WORK_QUEUED | K_WORK_RUNNING)) != 0U;
 
 	if (need_flush) {
 		struct k_work_q *queue = work->queue;