arch: x86: Use proper essential types in operands

MISRA defines a series of essential types (Boolean, signed/unsigned
integer, floating point, ...), and operations must respect these
essential types.

MISRA-C rule 10.1

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
commit 39a50f6392
Authored by Flavio Ceolin on 2019-03-10 21:48:48 -07:00, committed by Anas Nashif
10 changed files with 91 additions and 91 deletions
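
The same two patterns account for nearly every hunk below: integer
constants gain a U/ULL suffix so their essential type matches the operand
they are combined with, and Boolean operators (!, &&) are no longer applied
to essentially unsigned values, which get explicit == 0 / != 0 comparisons
instead. As a rough sketch of both patterns (the names here are
illustrative only, not Zephyr APIs):

#include <stdint.h>
#include <stdbool.h>

#define IF_FLAG 0x200U  /* 'U' gives the constant an essentially unsigned type */

static bool interrupts_were_enabled(uint32_t key)
{
        /* Non-compliant with MISRA-C:2012 rule 10.1: 'return key & 0x200;'
         * uses the essentially unsigned result of '&' as a Boolean, and the
         * bare constant 0x200 is essentially signed. Compliant form:
         */
        return (key & IF_FLAG) != 0U;
}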

@@ -95,7 +95,7 @@ void k_cpu_atomic_idle(unsigned int key)
 		"hlt\n\t");
 
 	/* restore interrupt lockout state before returning to caller */
-	if ((key & 0x200) == 0) {
+	if ((key & 0x200U) == 0) {
 		z_int_latency_start();
 		__asm__ volatile("cli");
 	}

@@ -47,7 +47,7 @@ static bool check_stack_bounds(u32_t addr, size_t size, u16_t cs)
 		/* We were servicing an interrupt */
 		start = (u32_t)Z_ARCH_THREAD_STACK_BUFFER(_interrupt_stack);
 		end = start + CONFIG_ISR_STACK_SIZE;
-	} else if ((cs & 0x3) != 0 ||
+	} else if ((cs & 0x3U) != 0 ||
 		   (_current->base.user_options & K_USER) == 0) {
 		/* Thread was in user mode, or is not a user mode thread.
 		 * The normal stack buffer is what we will check.
@@ -195,7 +195,7 @@ FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
 		  k_current_get(),
 		  pEsf->eax, pEsf->ebx, pEsf->ecx, pEsf->edx,
 		  pEsf->esi, pEsf->edi, pEsf->ebp, pEsf->esp,
-		  pEsf->eflags, pEsf->cs & 0xFFFF, pEsf->eip);
+		  pEsf->eflags, pEsf->cs & 0xFFFFU, pEsf->eip);
 #ifdef CONFIG_EXCEPTION_STACK_TRACE
 	unwind_stack(pEsf->ebp, pEsf->cs);
 #endif
@@ -372,9 +372,9 @@ static void dump_page_fault(NANO_ESF *esf)
 	printk("***** CPU Page Fault (error code 0x%08x)\n", err);
 	printk("%s thread %s address 0x%08x\n",
-	       err & US ? "User" : "Supervisor",
-	       err & ID ? "executed" : (err & WR ? "wrote" : "read"),
-	       cr2);
+	       (err & US) != 0 ? "User" : "Supervisor",
+	       (err & ID) != 0 ? "executed" : ((err & WR) != 0 ? "wrote" :
+					       "read"), cr2);
 
 #ifdef CONFIG_X86_MMU
 #ifdef CONFIG_X86_KPTI

@@ -83,7 +83,7 @@ void z_arch_isr_direct_footer(int swap)
 	 * 2) We are not in a nested interrupt
 	 * 3) Next thread to run in the ready queue is not this thread
 	 */
-	if (swap && !_kernel.nested &&
+	if (swap != 0 && _kernel.nested == 0 &&
 	    _kernel.ready_q.cache != _current) {
 		unsigned int flags;
 
@@ -163,7 +163,7 @@ static unsigned int priority_to_free_vector(unsigned int requested_priority)
 	unsigned int vector_block;
 	unsigned int vector;
 
-	static unsigned int mask[2] = {0x0000ffff, 0xffff0000};
+	static unsigned int mask[2] = {0x0000ffffU, 0xffff0000U};
 
 	vector_block = requested_priority + 2;

@@ -44,7 +44,7 @@ static int spec_ctrl_init(struct device *dev)
 {
 	ARG_UNUSED(dev);
 
-	u32_t enable_bits = 0;
+	u32_t enable_bits = 0U;
 	u32_t cpuid7 = cpuid_extended_features();
 
 #ifdef CONFIG_DISABLE_SSBD

@@ -109,9 +109,9 @@ int z_arch_buffer_validate(void *addr, size_t size, int write)
 		union x86_mmu_pde_pt pde_value =
 			pd_address->entry[pde].pt;
 
-		if (!pde_value.p ||
-		    !pde_value.us ||
-		    (write && !pde_value.rw)) {
+		if ((pde_value.p) == 0 ||
+		    (pde_value.us) == 0 ||
+		    ((write != 0) && (pde_value.rw == 0))) {
 			goto out;
 		}
@@ -140,7 +140,7 @@ int z_arch_buffer_validate(void *addr, size_t size, int write)
 			starting_pte_num = 0U;
 		}
 
-		pte_value.value = 0xFFFFFFFF;
+		pte_value.value = 0xFFFFFFFFU;
 
 		/* Bitwise AND all the pte values.
 		 * An optimization done to make sure a compare is
@@ -153,9 +153,9 @@ int z_arch_buffer_validate(void *addr, size_t size, int write)
 				pte_address->entry[pte].value;
 		}
 
-		if (!pte_value.p ||
-		    !pte_value.us ||
-		    (write && !pte_value.rw)) {
+		if ((pte_value.p) == 0 ||
+		    (pte_value.us) == 0 ||
+		    ((write != 0) && (pte_value.rw == 0))) {
 			goto out;
 		}
 	}
@@ -189,8 +189,8 @@ void z_x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
 	u32_t addr = (u32_t)ptr;
 
-	__ASSERT(!(addr & MMU_PAGE_MASK), "unaligned address provided");
-	__ASSERT(!(size & MMU_PAGE_MASK), "unaligned size provided");
+	__ASSERT((addr & MMU_PAGE_MASK) == 0U, "unaligned address provided");
+	__ASSERT((size & MMU_PAGE_MASK) == 0U, "unaligned size provided");
 
 	/* L1TF mitigation: non-present PTEs will have address fields
 	 * zeroed. Expand the mask to include address bits if we are changing
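
For context: fields like pde_value.p above are single-bit bit-fields,
which MISRA treats as essentially unsigned, so negating them with '!'
mixes a Boolean operator with an unsigned operand. A self-contained
sketch of the compliant form, using a simplified stand-in for Zephyr's
real x86 MMU structures (layout and names here are illustrative only):

#include <stdint.h>

union pte {
        uint32_t value;
        struct {
                uint32_t p  : 1;        /* present */
                uint32_t rw : 1;        /* read/write */
                uint32_t us : 1;        /* user/supervisor */
        } bits;
};

static int pte_allows_access(union pte e, int write)
{
        /* Instead of '!e.bits.p' (Boolean '!' on an essentially unsigned
         * bit-field), compare each field explicitly against 0.
         */
        if ((e.bits.p == 0) || (e.bits.us == 0) ||
            ((write != 0) && (e.bits.rw == 0))) {
                return 0;
        }
        return 1;
}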

@@ -383,8 +383,8 @@
  * All other "flags" = Don't change state
  */
-#define EFLAGS_INITIAL 0x00000200
-#define EFLAGS_MASK 0x00003200
+#define EFLAGS_INITIAL 0x00000200LLU
+#define EFLAGS_MASK 0x00003200ULL
 
 /* Enable paging and write protection */
 #define CR0_PG_WP_ENABLE 0x80010000

@@ -21,43 +21,43 @@
  * x86_mmu_pde_pt structure.
  */
-#define MMU_PDE_P_MASK 0x00000001
-#define MMU_PDE_RW_MASK 0x00000002
-#define MMU_PDE_US_MASK 0x00000004
-#define MMU_PDE_PWT_MASK 0x00000008
-#define MMU_PDE_PCD_MASK 0x00000010
-#define MMU_PDE_A_MASK 0x00000020
-#define MMU_PDE_PS_MASK 0x00000080
-#define MMU_PDE_IGNORED_MASK 0x00000F40
+#define MMU_PDE_P_MASK 0x00000001ULL
+#define MMU_PDE_RW_MASK 0x00000002ULL
+#define MMU_PDE_US_MASK 0x00000004ULL
+#define MMU_PDE_PWT_MASK 0x00000008ULL
+#define MMU_PDE_PCD_MASK 0x00000010ULL
+#define MMU_PDE_A_MASK 0x00000020ULL
+#define MMU_PDE_PS_MASK 0x00000080ULL
+#define MMU_PDE_IGNORED_MASK 0x00000F40ULL
 
 #define MMU_PDE_XD_MASK 0x8000000000000000ULL
 #define MMU_PDE_PAGE_TABLE_MASK 0x00000000fffff000ULL
 
-#define MMU_PDE_NUM_SHIFT 21
-#define MMU_PDE_NUM(v) (((u32_t)(v) >> MMU_PDE_NUM_SHIFT) & 0x1ff)
-#define MMU_ENTRIES_PER_PGT 512
-#define MMU_PDPTE_NUM_SHIFT 30
-#define MMU_PDPTE_NUM(v) (((u32_t)(v) >> MMU_PDPTE_NUM_SHIFT) & 0x3)
+#define MMU_PDE_NUM_SHIFT 21U
+#define MMU_PDE_NUM(v) (((u32_t)(v) >> MMU_PDE_NUM_SHIFT) & 0x1ffU)
+#define MMU_ENTRIES_PER_PGT 512U
+#define MMU_PDPTE_NUM_SHIFT 30U
+#define MMU_PDPTE_NUM(v) (((u32_t)(v) >> MMU_PDPTE_NUM_SHIFT) & 0x3U)
 
 /*
  * The following bitmasks correspond to the bit-fields in the
  * x86_mmu_pde_2mb structure.
  */
-#define MMU_2MB_PDE_P_MASK 0x00000001
-#define MMU_2MB_PDE_RW_MASK 0x00000002
-#define MMU_2MB_PDE_US_MASK 0x00000004
-#define MMU_2MB_PDE_PWT_MASK 0x00000008
-#define MMU_2MB_PDE_PCD_MASK 0x00000010
-#define MMU_2MB_PDE_A_MASK 0x00000020
-#define MMU_2MB_PDE_D_MASK 0x00000040
-#define MMU_2MB_PDE_PS_MASK 0x00000080
-#define MMU_2MB_PDE_G_MASK 0x00000100
-#define MMU_2MB_PDE_IGNORED_MASK 0x00380e00
-#define MMU_2MB_PDE_PAT_MASK 0x00001000
-#define MMU_2MB_PDE_PAGE_TABLE_MASK 0x0007e000
-#define MMU_2MB_PDE_PAGE_MASK 0xffc00000
-#define MMU_2MB_PDE_CLEAR_PS 0x00000000
-#define MMU_2MB_PDE_SET_PS 0x00000080
+#define MMU_2MB_PDE_P_MASK 0x00000001ULL
+#define MMU_2MB_PDE_RW_MASK 0x00000002ULL
+#define MMU_2MB_PDE_US_MASK 0x00000004ULL
+#define MMU_2MB_PDE_PWT_MASK 0x00000008ULL
+#define MMU_2MB_PDE_PCD_MASK 0x00000010ULL
+#define MMU_2MB_PDE_A_MASK 0x00000020ULL
+#define MMU_2MB_PDE_D_MASK 0x00000040ULL
+#define MMU_2MB_PDE_PS_MASK 0x00000080ULL
+#define MMU_2MB_PDE_G_MASK 0x00000100ULL
+#define MMU_2MB_PDE_IGNORED_MASK 0x00380e00ULL
+#define MMU_2MB_PDE_PAT_MASK 0x00001000ULL
+#define MMU_2MB_PDE_PAGE_TABLE_MASK 0x0007e000ULL
+#define MMU_2MB_PDE_PAGE_MASK 0xffc00000ULL
+#define MMU_2MB_PDE_CLEAR_PS 0x00000000ULL
+#define MMU_2MB_PDE_SET_PS 0x00000080ULL
 
 /*
@@ -65,21 +65,21 @@
  * x86_mmu_pte structure.
  */
-#define MMU_PTE_P_MASK 0x00000001
-#define MMU_PTE_RW_MASK 0x00000002
-#define MMU_PTE_US_MASK 0x00000004
-#define MMU_PTE_PWT_MASK 0x00000008
-#define MMU_PTE_PCD_MASK 0x00000010
-#define MMU_PTE_A_MASK 0x00000020
-#define MMU_PTE_D_MASK 0x00000040
-#define MMU_PTE_PAT_MASK 0x00000080
-#define MMU_PTE_G_MASK 0x00000100
-#define MMU_PTE_ALLOC_MASK 0x00000200
-#define MMU_PTE_CUSTOM_MASK 0x00000c00
+#define MMU_PTE_P_MASK 0x00000001ULL
+#define MMU_PTE_RW_MASK 0x00000002ULL
+#define MMU_PTE_US_MASK 0x00000004ULL
+#define MMU_PTE_PWT_MASK 0x00000008ULL
+#define MMU_PTE_PCD_MASK 0x00000010ULL
+#define MMU_PTE_A_MASK 0x00000020ULL
+#define MMU_PTE_D_MASK 0x00000040ULL
+#define MMU_PTE_PAT_MASK 0x00000080ULL
+#define MMU_PTE_G_MASK 0x00000100ULL
+#define MMU_PTE_ALLOC_MASK 0x00000200ULL
+#define MMU_PTE_CUSTOM_MASK 0x00000c00ULL
 
 #define MMU_PTE_XD_MASK 0x8000000000000000ULL
 #define MMU_PTE_PAGE_MASK 0x00000000fffff000ULL
 #define MMU_PTE_MASK_ALL 0xffffffffffffffffULL
 
-#define MMU_PAGE_NUM(v) (((u32_t)(v) >> MMU_PAGE_NUM_SHIFT) & 0x1ff)
+#define MMU_PAGE_NUM(v) (((u32_t)(v) >> MMU_PAGE_NUM_SHIFT) & 0x1ffU)
 #define MMU_PAGE_NUM_SHIFT 12
 
 /*
@@ -87,32 +87,32 @@
  * unuse of various options in a PTE or PDE as appropriate.
  */
-#define MMU_ENTRY_NOT_PRESENT 0x00000000
-#define MMU_ENTRY_PRESENT 0x00000001
-#define MMU_ENTRY_READ 0x00000000
-#define MMU_ENTRY_WRITE 0x00000002
-#define MMU_ENTRY_SUPERVISOR 0x00000000
-#define MMU_ENTRY_USER 0x00000004
-#define MMU_ENTRY_WRITE_BACK 0x00000000
-#define MMU_ENTRY_WRITE_THROUGH 0x00000008
-#define MMU_ENTRY_CACHING_ENABLE 0x00000000
-#define MMU_ENTRY_CACHING_DISABLE 0x00000010
-#define MMU_ENTRY_NOT_ACCESSED 0x00000000
-#define MMU_ENTRY_ACCESSED 0x00000020
-#define MMU_ENTRY_NOT_DIRTY 0x00000000
-#define MMU_ENTRY_DIRTY 0x00000040
-#define MMU_ENTRY_NOT_GLOBAL 0x00000000
-#define MMU_ENTRY_GLOBAL 0x00000100
-#define MMU_ENTRY_NOT_ALLOC 0x00000000
-#define MMU_ENTRY_ALLOC 0x00000200
+#define MMU_ENTRY_NOT_PRESENT 0x00000000ULL
+#define MMU_ENTRY_PRESENT 0x00000001ULL
+#define MMU_ENTRY_READ 0x00000000ULL
+#define MMU_ENTRY_WRITE 0x00000002ULL
+#define MMU_ENTRY_SUPERVISOR 0x00000000ULL
+#define MMU_ENTRY_USER 0x00000004ULL
+#define MMU_ENTRY_WRITE_BACK 0x00000000ULL
+#define MMU_ENTRY_WRITE_THROUGH 0x00000008ULL
+#define MMU_ENTRY_CACHING_ENABLE 0x00000000ULL
+#define MMU_ENTRY_CACHING_DISABLE 0x00000010ULL
+#define MMU_ENTRY_NOT_ACCESSED 0x00000000ULL
+#define MMU_ENTRY_ACCESSED 0x00000020ULL
+#define MMU_ENTRY_NOT_DIRTY 0x00000000ULL
+#define MMU_ENTRY_DIRTY 0x00000040ULL
+#define MMU_ENTRY_NOT_GLOBAL 0x00000000ULL
+#define MMU_ENTRY_GLOBAL 0x00000100ULL
+#define MMU_ENTRY_NOT_ALLOC 0x00000000ULL
+#define MMU_ENTRY_ALLOC 0x00000200ULL
 
 #define MMU_ENTRY_EXECUTE_DISABLE 0x8000000000000000ULL
@@ -126,7 +126,7 @@
 * to the region will be set, even if the boot configuration has no user pages
 * in it.
 */
-#define MMU_ENTRY_RUNTIME_USER 0x10000000
+#define MMU_ENTRY_RUNTIME_USER 0x10000000ULL
 
 /* Indicates that pages within this region may have their read/write
 * permissions adjusted at runtime. Unnecessary if MMU_ENTRY_WRITE is already
@@ -136,7 +136,7 @@
 * referring to the region will be set, even if the boot configuration has no
 * writable pages in it.
 */
-#define MMU_ENTRY_RUNTIME_WRITE 0x20000000
+#define MMU_ENTRY_RUNTIME_WRITE 0x20000000ULL
 
 /* Helper macros to ease the usage of the MMU page table structures.
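
Beyond rule 10.1 bookkeeping, the ULL suffixes on these mask macros matter
because PAE page-table entries are 64 bits wide: a bare 32-bit constant
that gets complemented or shifted is evaluated at 32 bits first. A small
demonstration with made-up values (not taken from the commit):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* 64-bit PTE with the XD bit (bit 63) and P bit (bit 0) set */
        uint64_t pte = 0x8000000000000001ULL;

        /* ~0x10U is computed in 32 bits and zero-extended to 64, so the
         * AND silently clears the whole upper word, XD bit included.
         */
        uint64_t wrong = pte & ~0x10U;   /* == 0x0000000000000001 */
        uint64_t right = pte & ~0x10ULL; /* == 0x8000000000000001 */

        printf("wrong: %016llx\nright: %016llx\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}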

@@ -200,7 +200,7 @@ void cstart(unsigned int magic, unsigned int arg)
 	if (magic == BOOT_MAGIC_STUB16) {
 		cpu_id = _shared.num_active_cpus++;
 		init_stack = _shared.smpinit_stack;
-		_shared.smpinit_stack = 0;
+		_shared.smpinit_stack = 0U;
 		__asm__ volatile("movl $0, (%0)" : : "m"(_shared.smpinit_lock));
 	}

@@ -199,7 +199,7 @@ long _isr_c_top(unsigned long vecret, unsigned long rsp,
 	/* Signal EOI if it's an APIC-managed interrupt */
 	if (vector > 0x1f) {
-		_apic.EOI = 0;
+		_apic.EOI = 0U;
 	}
 
 	/* Subtle: for the "interrupted context pointer", we pass in
@@ -257,9 +257,9 @@ void xuk_set_isr(int interrupt, int priority,
 		red.regvals[0] = ioapic_read(regidx);
 		red.regvals[1] = ioapic_read(regidx + 1);
 		red.vector = v;
-		red.logical = 0;
-		red.destination = 0xff;
-		red.masked = 1;
+		red.logical = 0U;
+		red.destination = 0xffU;
+		red.masked = 1U;
 		ioapic_write(regidx, red.regvals[0]);
 		ioapic_write(regidx + 1, red.regvals[1]);
 	}

@@ -32,7 +32,7 @@ extern "C" {
 * 14 #PF Page Fault
 * 17 #AC Alignment Check
 */
-#define _EXC_ERROR_CODE_FAULTS 0x27d00
+#define _EXC_ERROR_CODE_FAULTS 0x27d00U
 
 /* NOTE: We currently do not have definitions for 16-bit segment, currently