arch: x86: Make statements evaluate boolean expressions

MISRA-C requires that the controlling expression of an if statement has essentially Boolean type.

MISRA-C rule 14.4
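
For illustration, a minimal sketch of the pattern this rule targets (the flag name and function below are hypothetical, not taken from this change): a controlling expression of plain integer type is replaced by an explicit comparison, which is essentially Boolean.

#include <stdint.h>

#define FLAG_ENABLED 0x01u /* hypothetical option bit, for illustration only */

void configure(uint32_t options)
{
	/* Non-compliant: the controlling expression has unsigned integer type */
	if (options & FLAG_ENABLED) {
		/* ... */
	}

	/* Compliant with rule 14.4: the comparison is essentially Boolean */
	if ((options & FLAG_ENABLED) != 0U) {
		/* ... */
	}
}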

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
Authored by Flavio Ceolin on 2018-12-16 19:26:27 -08:00, committed by Anas Nashif
commit 6d50df212f
5 changed files with 13 additions and 13 deletions

View file

@@ -95,7 +95,7 @@ void k_cpu_atomic_idle(unsigned int key)
"hlt\n\t");
/* restore interrupt lockout state before returning to caller */
-if (!(key & 0x200)) {
+if ((key & 0x200) == 0) {
_int_latency_start();
__asm__ volatile("cli");
}

View file

@@ -40,7 +40,7 @@ static void unwind_stack(u32_t base_ptr)
struct stack_frame *frame;
int i;
-if (!base_ptr) {
+if (base_ptr == 0) {
printk("NULL base ptr\n");
return;
}
@@ -52,7 +52,7 @@ static void unwind_stack(u32_t base_ptr)
}
frame = (struct stack_frame *)base_ptr;
-if (!frame || !frame->ret_addr) {
+if ((frame == NULL) || (frame->ret_addr == 0)) {
break;
}
#ifdef CONFIG_X86_IAMCU
@@ -231,7 +231,7 @@ static FUNC_NORETURN void generic_exc_handle(unsigned int vector,
printk("CPU exception %d\n", vector);
break;
}
-if (BIT(vector) & _EXC_ERROR_CODE_FAULTS) {
+if ((BIT(vector) & _EXC_ERROR_CODE_FAULTS) != 0) {
printk("***** Exception code: 0x%x\n", pEsf->errorCode);
}
_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, pEsf);
@@ -422,7 +422,7 @@ static FUNC_NORETURN __used void _df_handler_bottom(void)
* wouldn't be decremented
*/
_x86_mmu_get_flags((u8_t *)_df_esf.esp - 1, &pde_flags, &pte_flags);
-if (pte_flags & MMU_ENTRY_PRESENT) {
+if ((pte_flags & MMU_ENTRY_PRESENT) != 0) {
printk("***** Double Fault *****\n");
reason = _NANO_ERR_CPU_EXCEPTION;
} else {

View file

@@ -60,7 +60,7 @@ extern u32_t _sse_mxcsr_default_value;
static void _FpCtxSave(struct k_thread *thread)
{
#ifdef CONFIG_SSE
-if (thread->base.user_options & K_SSE_REGS) {
+if ((thread->base.user_options & K_SSE_REGS) != 0) {
_do_fp_and_sse_regs_save(&thread->arch.preempFloatReg);
return;
}
@@ -78,7 +78,7 @@ static inline void _FpCtxInit(struct k_thread *thread)
{
_do_fp_regs_init();
#ifdef CONFIG_SSE
-if (thread->base.user_options & K_SSE_REGS) {
+if ((thread->base.user_options & K_SSE_REGS) != 0) {
_do_sse_regs_init();
}
#endif
@@ -121,8 +121,8 @@ void k_float_enable(struct k_thread *thread, unsigned int options)
*/
fp_owner = _kernel.current_fp;
-if (fp_owner) {
-if (fp_owner->base.thread_state & _INT_OR_EXC_MASK) {
+if (fp_owner != NULL) {
+if ((fp_owner->base.thread_state & _INT_OR_EXC_MASK) != 0) {
_FpCtxSave(fp_owner);
}
}

View file

@@ -72,7 +72,7 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
_new_thread_init(thread, stack_buf, stack_size, priority, options);
#if CONFIG_X86_USERSPACE
-if (!(options & K_USER)) {
+if ((options & K_USER) == 0) {
/* Running in kernel mode, kernel stack region is also a guard
* page */
_x86_mmu_set_flags((void *)(stack_buf - MMU_PAGE_SIZE),
@@ -99,7 +99,7 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
/* initial EFLAGS; only modify IF and IOPL bits */
initial_frame->eflags = (EflagsGet() & ~EFLAGS_MASK) | EFLAGS_INITIAL;
#ifdef CONFIG_X86_USERSPACE
-if (options & K_USER) {
+if ((options & K_USER) != 0) {
#ifdef _THREAD_WRAPPER_REQUIRED
initial_frame->edi = (u32_t)_arch_user_mode_enter;
initial_frame->thread_entry = _x86_thread_entry_wrapper;

View file

@@ -46,7 +46,7 @@ void _x86_mmu_get_flags(void *addr,
*pde_flags = (x86_page_entry_data_t)(X86_MMU_GET_PDE(addr)->value &
~(x86_page_entry_data_t)MMU_PDE_PAGE_TABLE_MASK);
-if (*pde_flags & MMU_ENTRY_PRESENT) {
+if ((*pde_flags & MMU_ENTRY_PRESENT) != 0) {
*pte_flags = (x86_page_entry_data_t)
(X86_MMU_GET_PTE(addr)->value &
~(x86_page_entry_data_t)MMU_PTE_PAGE_MASK);
@@ -191,7 +191,7 @@ void _x86_mmu_set_flags(void *ptr,
__ASSERT(!(addr & MMU_PAGE_MASK), "unaligned address provided");
__ASSERT(!(size & MMU_PAGE_MASK), "unaligned size provided");
-while (size) {
+while (size != 0) {
#ifdef CONFIG_X86_PAE_MODE
/* TODO we're not generating 2MB entries at the moment */