arch: x86: Use proper essential types in operands
MISRA defines a series of essential types (boolean, signed/unsigned integers, float, ...), and operations must respect these essential types.

MISRA-C rule 10.1

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
parent 95eb2b4fed
commit 39a50f6392

10 changed files with 91 additions and 91 deletions
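The pattern applied throughout the diff, sketched as a standalone example (the function and variable names here are hypothetical, not taken from the tree): an unsuffixed integer constant such as 0x200 has essentially signed type, so using it as a bitwise operand against an unsigned value mixes essential type categories; the U suffix makes the constant essentially unsigned.

#include <stdint.h>

void irq_unlock_example(uint32_t key)
{
	/* Non-compliant: 0x200 is an (essentially signed) int constant,
	 * so the bitwise AND mixes essential type categories.
	 */
	if ((key & 0x200) == 0) {
		/* ... */
	}

	/* Compliant: the U suffix gives the constant an essentially
	 * unsigned type, matching 'key'.
	 */
	if ((key & 0x200U) == 0U) {
		/* ... */
	}
}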
@@ -95,7 +95,7 @@ void k_cpu_atomic_idle(unsigned int key)
 		"hlt\n\t");

 	/* restore interrupt lockout state before returning to caller */
-	if ((key & 0x200) == 0) {
+	if ((key & 0x200U) == 0) {
 		z_int_latency_start();
 		__asm__ volatile("cli");
 	}

@@ -47,7 +47,7 @@ static bool check_stack_bounds(u32_t addr, size_t size, u16_t cs)
 		/* We were servicing an interrupt */
 		start = (u32_t)Z_ARCH_THREAD_STACK_BUFFER(_interrupt_stack);
 		end = start + CONFIG_ISR_STACK_SIZE;
-	} else if ((cs & 0x3) != 0 ||
+	} else if ((cs & 0x3U) != 0 ||
 		   (_current->base.user_options & K_USER) == 0) {
 		/* Thread was in user mode, or is not a user mode thread.
 		 * The normal stack buffer is what we will check.

@@ -195,7 +195,7 @@ FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
 		k_current_get(),
 		pEsf->eax, pEsf->ebx, pEsf->ecx, pEsf->edx,
 		pEsf->esi, pEsf->edi, pEsf->ebp, pEsf->esp,
-		pEsf->eflags, pEsf->cs & 0xFFFF, pEsf->eip);
+		pEsf->eflags, pEsf->cs & 0xFFFFU, pEsf->eip);
 #ifdef CONFIG_EXCEPTION_STACK_TRACE
 	unwind_stack(pEsf->ebp, pEsf->cs);
 #endif

@@ -372,9 +372,9 @@ static void dump_page_fault(NANO_ESF *esf)
 	printk("***** CPU Page Fault (error code 0x%08x)\n", err);

 	printk("%s thread %s address 0x%08x\n",
-	       err & US ? "User" : "Supervisor",
-	       err & ID ? "executed" : (err & WR ? "wrote" : "read"),
-	       cr2);
+	       (err & US) != 0 ? "User" : "Supervisor",
+	       (err & ID) != 0 ? "executed" : ((err & WR) != 0 ? "wrote" :
+	       "read"), cr2);

 #ifdef CONFIG_X86_MMU
 #ifdef CONFIG_X86_KPTI

@@ -83,7 +83,7 @@ void z_arch_isr_direct_footer(int swap)
 	 * 2) We are not in a nested interrupt
 	 * 3) Next thread to run in the ready queue is not this thread
 	 */
-	if (swap && !_kernel.nested &&
+	if (swap != 0 && _kernel.nested == 0 &&
 	    _kernel.ready_q.cache != _current) {
 		unsigned int flags;

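The z_arch_isr_direct_footer change above addresses the essentially-boolean side of the rule set: integer values were being used directly as controlling expressions. A minimal sketch of the rewrite, with hypothetical stand-ins for the kernel fields:

#include <stdbool.h>

/* 'swap' and 'nested' are hypothetical stand-ins for the values the
 * kernel checks before rescheduling.
 */
static bool should_swap(int swap, unsigned int nested)
{
	/* Non-compliant form: 'swap && !nested' applies && and ! to
	 * essentially signed/unsigned operands.
	 */

	/* Compliant: explicit comparisons yield essentially boolean
	 * operands for && and for the controlling expression.
	 */
	return (swap != 0) && (nested == 0U);
}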
@@ -163,7 +163,7 @@ static unsigned int priority_to_free_vector(unsigned int requested_priority)
 	unsigned int vector_block;
 	unsigned int vector;

-	static unsigned int mask[2] = {0x0000ffff, 0xffff0000};
+	static unsigned int mask[2] = {0x0000ffffU, 0xffff0000U};

 	vector_block = requested_priority + 2;

@@ -44,7 +44,7 @@ static int spec_ctrl_init(struct device *dev)
 {
 	ARG_UNUSED(dev);

-	u32_t enable_bits = 0;
+	u32_t enable_bits = 0U;
 	u32_t cpuid7 = cpuid_extended_features();

 #ifdef CONFIG_DISABLE_SSBD

@@ -109,9 +109,9 @@ int z_arch_buffer_validate(void *addr, size_t size, int write)
 		union x86_mmu_pde_pt pde_value =
 			pd_address->entry[pde].pt;

-		if (!pde_value.p ||
-		    !pde_value.us ||
-		    (write && !pde_value.rw)) {
+		if ((pde_value.p) == 0 ||
+		    (pde_value.us) == 0 ||
+		    ((write != 0) && (pde_value.rw == 0))) {
 			goto out;
 		}

@@ -140,7 +140,7 @@ int z_arch_buffer_validate(void *addr, size_t size, int write)
 			starting_pte_num = 0U;
 		}

-		pte_value.value = 0xFFFFFFFF;
+		pte_value.value = 0xFFFFFFFFU;

 		/* Bitwise AND all the pte values.
 		 * An optimization done to make sure a compare is

@@ -153,9 +153,9 @@ int z_arch_buffer_validate(void *addr, size_t size, int write)
 				pte_address->entry[pte].value;
 		}

-		if (!pte_value.p ||
-		    !pte_value.us ||
-		    (write && !pte_value.rw)) {
+		if ((pte_value.p) == 0 ||
+		    (pte_value.us) == 0 ||
+		    ((write != 0) && (pte_value.rw == 0))) {
 			goto out;
 		}
 	}

@@ -189,8 +189,8 @@ void z_x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,

 	u32_t addr = (u32_t)ptr;

-	__ASSERT(!(addr & MMU_PAGE_MASK), "unaligned address provided");
-	__ASSERT(!(size & MMU_PAGE_MASK), "unaligned size provided");
+	__ASSERT((addr & MMU_PAGE_MASK) == 0U, "unaligned address provided");
+	__ASSERT((size & MMU_PAGE_MASK) == 0U, "unaligned size provided");

 	/* L1TF mitigation: non-present PTEs will have address fields
 	 * zeroed. Expand the mask to include address bits if we are changing

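The __ASSERT rewrite follows the same logic: the ! operator expects an essentially boolean operand, while (addr & MMU_PAGE_MASK) is essentially unsigned, so the check becomes an explicit comparison against 0U. A sketch, using a hypothetical PAGE_MASK in place of MMU_PAGE_MASK:

#include <stdint.h>

/* Hypothetical stand-in for MMU_PAGE_MASK (low 12 bits of an address). */
#define PAGE_MASK 0xFFFU

static int page_aligned(uint32_t addr)
{
	/* Non-compliant: '!' applied to an essentially unsigned value. */
	/* return !(addr & PAGE_MASK); */

	/* Compliant: the comparison itself is essentially boolean. */
	return ((addr & PAGE_MASK) == 0U) ? 1 : 0;
}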
@@ -383,8 +383,8 @@
  * All other "flags" = Don't change state
  */

-#define EFLAGS_INITIAL 0x00000200
-#define EFLAGS_MASK    0x00003200
+#define EFLAGS_INITIAL 0x00000200LLU
+#define EFLAGS_MASK    0x00003200ULL

 /* Enable paging and write protection */
 #define CR0_PG_WP_ENABLE 0x80010000

@@ -21,43 +21,43 @@
  * x86_mmu_pde_pt structure.
  */

-#define MMU_PDE_P_MASK          0x00000001
-#define MMU_PDE_RW_MASK         0x00000002
-#define MMU_PDE_US_MASK         0x00000004
-#define MMU_PDE_PWT_MASK        0x00000008
-#define MMU_PDE_PCD_MASK        0x00000010
-#define MMU_PDE_A_MASK          0x00000020
-#define MMU_PDE_PS_MASK         0x00000080
-#define MMU_PDE_IGNORED_MASK    0x00000F40
+#define MMU_PDE_P_MASK          0x00000001ULL
+#define MMU_PDE_RW_MASK         0x00000002ULL
+#define MMU_PDE_US_MASK         0x00000004ULL
+#define MMU_PDE_PWT_MASK        0x00000008ULL
+#define MMU_PDE_PCD_MASK        0x00000010ULL
+#define MMU_PDE_A_MASK          0x00000020ULL
+#define MMU_PDE_PS_MASK         0x00000080ULL
+#define MMU_PDE_IGNORED_MASK    0x00000F40ULL

-#define MMU_PDE_XD_MASK         0x8000000000000000ULL
-#define MMU_PDE_PAGE_TABLE_MASK 0x00000000fffff000ULL
-#define MMU_PDE_NUM_SHIFT       21
-#define MMU_PDE_NUM(v)          (((u32_t)(v) >> MMU_PDE_NUM_SHIFT) & 0x1ff)
-#define MMU_ENTRIES_PER_PGT     512
-#define MMU_PDPTE_NUM_SHIFT     30
-#define MMU_PDPTE_NUM(v)        (((u32_t)(v) >> MMU_PDPTE_NUM_SHIFT) & 0x3)
+#define MMU_PDE_XD_MASK         0x8000000000000000ULL
+#define MMU_PDE_PAGE_TABLE_MASK 0x00000000fffff000ULL
+#define MMU_PDE_NUM_SHIFT       21U
+#define MMU_PDE_NUM(v)          (((u32_t)(v) >> MMU_PDE_NUM_SHIFT) & 0x1ffU)
+#define MMU_ENTRIES_PER_PGT     512U
+#define MMU_PDPTE_NUM_SHIFT     30U
+#define MMU_PDPTE_NUM(v)        (((u32_t)(v) >> MMU_PDPTE_NUM_SHIFT) & 0x3U)

 /*
  * The following bitmasks correspond to the bit-fields in the
  * x86_mmu_pde_2mb structure.
  */

-#define MMU_2MB_PDE_P_MASK          0x00000001
-#define MMU_2MB_PDE_RW_MASK         0x00000002
-#define MMU_2MB_PDE_US_MASK         0x00000004
-#define MMU_2MB_PDE_PWT_MASK        0x00000008
-#define MMU_2MB_PDE_PCD_MASK        0x00000010
-#define MMU_2MB_PDE_A_MASK          0x00000020
-#define MMU_2MB_PDE_D_MASK          0x00000040
-#define MMU_2MB_PDE_PS_MASK         0x00000080
-#define MMU_2MB_PDE_G_MASK          0x00000100
-#define MMU_2MB_PDE_IGNORED_MASK    0x00380e00
-#define MMU_2MB_PDE_PAT_MASK        0x00001000
-#define MMU_2MB_PDE_PAGE_TABLE_MASK 0x0007e000
-#define MMU_2MB_PDE_PAGE_MASK       0xffc00000
-#define MMU_2MB_PDE_CLEAR_PS        0x00000000
-#define MMU_2MB_PDE_SET_PS          0x00000080
+#define MMU_2MB_PDE_P_MASK          0x00000001ULL
+#define MMU_2MB_PDE_RW_MASK         0x00000002ULL
+#define MMU_2MB_PDE_US_MASK         0x00000004ULL
+#define MMU_2MB_PDE_PWT_MASK        0x00000008ULL
+#define MMU_2MB_PDE_PCD_MASK        0x00000010ULL
+#define MMU_2MB_PDE_A_MASK          0x00000020ULL
+#define MMU_2MB_PDE_D_MASK          0x00000040ULL
+#define MMU_2MB_PDE_PS_MASK         0x00000080ULL
+#define MMU_2MB_PDE_G_MASK          0x00000100ULL
+#define MMU_2MB_PDE_IGNORED_MASK    0x00380e00ULL
+#define MMU_2MB_PDE_PAT_MASK        0x00001000ULL
+#define MMU_2MB_PDE_PAGE_TABLE_MASK 0x0007e000ULL
+#define MMU_2MB_PDE_PAGE_MASK       0xffc00000ULL
+#define MMU_2MB_PDE_CLEAR_PS        0x00000000ULL
+#define MMU_2MB_PDE_SET_PS          0x00000080ULL


 /*

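The ULL suffixes on these masks matter because the constants are combined with 64-bit PAE page-table entries: an unsuffixed 0x00000001 is an essentially signed int, so masking a 64-bit entry with it mixes essential type categories. A sketch of the distinction, with hypothetical macro names:

#include <stdint.h>

/* Hypothetical mask pair; only the suffix differs. */
#define PDE_P_MASK_PLAIN 0x00000001    /* essentially signed int */
#define PDE_P_MASK_ULL   0x00000001ULL /* essentially unsigned, 64-bit */

static int pde_present(uint64_t entry)
{
	/* Non-compliant: the signed constant is converted through a
	 * signed type before the 64-bit bitwise AND.
	 */
	/* return (entry & PDE_P_MASK_PLAIN) != 0; */

	/* Compliant: both operands are essentially unsigned. */
	return ((entry & PDE_P_MASK_ULL) != 0ULL) ? 1 : 0;
}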
@@ -65,21 +65,21 @@
  * x86_mmu_pte structure.
  */

-#define MMU_PTE_P_MASK       0x00000001
-#define MMU_PTE_RW_MASK      0x00000002
-#define MMU_PTE_US_MASK      0x00000004
-#define MMU_PTE_PWT_MASK     0x00000008
-#define MMU_PTE_PCD_MASK     0x00000010
-#define MMU_PTE_A_MASK       0x00000020
-#define MMU_PTE_D_MASK       0x00000040
-#define MMU_PTE_PAT_MASK     0x00000080
-#define MMU_PTE_G_MASK       0x00000100
-#define MMU_PTE_ALLOC_MASK   0x00000200
-#define MMU_PTE_CUSTOM_MASK  0x00000c00
-#define MMU_PTE_XD_MASK      0x8000000000000000ULL
-#define MMU_PTE_PAGE_MASK    0x00000000fffff000ULL
-#define MMU_PTE_MASK_ALL     0xffffffffffffffffULL
-#define MMU_PAGE_NUM(v)      (((u32_t)(v) >> MMU_PAGE_NUM_SHIFT) & 0x1ff)
+#define MMU_PTE_P_MASK       0x00000001ULL
+#define MMU_PTE_RW_MASK      0x00000002ULL
+#define MMU_PTE_US_MASK      0x00000004ULL
+#define MMU_PTE_PWT_MASK     0x00000008ULL
+#define MMU_PTE_PCD_MASK     0x00000010ULL
+#define MMU_PTE_A_MASK       0x00000020ULL
+#define MMU_PTE_D_MASK       0x00000040ULL
+#define MMU_PTE_PAT_MASK     0x00000080ULL
+#define MMU_PTE_G_MASK       0x00000100ULL
+#define MMU_PTE_ALLOC_MASK   0x00000200ULL
+#define MMU_PTE_CUSTOM_MASK  0x00000c00ULL
+#define MMU_PTE_XD_MASK      0x8000000000000000ULL
+#define MMU_PTE_PAGE_MASK    0x00000000fffff000ULL
+#define MMU_PTE_MASK_ALL     0xffffffffffffffffULL
+#define MMU_PAGE_NUM(v)      (((u32_t)(v) >> MMU_PAGE_NUM_SHIFT) & 0x1ffU)
 #define MMU_PAGE_NUM_SHIFT 12

 /*

@@ -87,32 +87,32 @@
  * unuse of various options in a PTE or PDE as appropriate.
  */

-#define MMU_ENTRY_NOT_PRESENT     0x00000000
-#define MMU_ENTRY_PRESENT         0x00000001
+#define MMU_ENTRY_NOT_PRESENT     0x00000000ULL
+#define MMU_ENTRY_PRESENT         0x00000001ULL

-#define MMU_ENTRY_READ            0x00000000
-#define MMU_ENTRY_WRITE           0x00000002
+#define MMU_ENTRY_READ            0x00000000ULL
+#define MMU_ENTRY_WRITE           0x00000002ULL

-#define MMU_ENTRY_SUPERVISOR      0x00000000
-#define MMU_ENTRY_USER            0x00000004
+#define MMU_ENTRY_SUPERVISOR      0x00000000ULL
+#define MMU_ENTRY_USER            0x00000004ULL

-#define MMU_ENTRY_WRITE_BACK      0x00000000
-#define MMU_ENTRY_WRITE_THROUGH   0x00000008
+#define MMU_ENTRY_WRITE_BACK      0x00000000ULL
+#define MMU_ENTRY_WRITE_THROUGH   0x00000008ULL

-#define MMU_ENTRY_CACHING_ENABLE  0x00000000
-#define MMU_ENTRY_CACHING_DISABLE 0x00000010
+#define MMU_ENTRY_CACHING_ENABLE  0x00000000ULL
+#define MMU_ENTRY_CACHING_DISABLE 0x00000010ULL

-#define MMU_ENTRY_NOT_ACCESSED    0x00000000
-#define MMU_ENTRY_ACCESSED        0x00000020
+#define MMU_ENTRY_NOT_ACCESSED    0x00000000ULL
+#define MMU_ENTRY_ACCESSED        0x00000020ULL

-#define MMU_ENTRY_NOT_DIRTY       0x00000000
-#define MMU_ENTRY_DIRTY           0x00000040
+#define MMU_ENTRY_NOT_DIRTY       0x00000000ULL
+#define MMU_ENTRY_DIRTY           0x00000040ULL

-#define MMU_ENTRY_NOT_GLOBAL      0x00000000
-#define MMU_ENTRY_GLOBAL          0x00000100
+#define MMU_ENTRY_NOT_GLOBAL      0x00000000ULL
+#define MMU_ENTRY_GLOBAL          0x00000100ULL

-#define MMU_ENTRY_NOT_ALLOC       0x00000000
-#define MMU_ENTRY_ALLOC           0x00000200
+#define MMU_ENTRY_NOT_ALLOC       0x00000000ULL
+#define MMU_ENTRY_ALLOC           0x00000200ULL

 #define MMU_ENTRY_EXECUTE_DISABLE 0x8000000000000000ULL

@@ -126,7 +126,7 @@
  * to the region will be set, even if the boot configuration has no user pages
  * in it.
  */
-#define MMU_ENTRY_RUNTIME_USER 0x10000000
+#define MMU_ENTRY_RUNTIME_USER 0x10000000ULL

 /* Indicates that pages within this region may have their read/write
  * permissions adjusted at runtime. Unnecessary if MMU_ENTRY_WRITE is already

@@ -136,7 +136,7 @@
  * referring to the region will be set, even if the boot configuration has no
  * writable pages in it.
  */
-#define MMU_ENTRY_RUNTIME_WRITE 0x20000000
+#define MMU_ENTRY_RUNTIME_WRITE 0x20000000ULL


 /* Helper macros to ease the usage of the MMU page table structures.

@@ -200,7 +200,7 @@ void cstart(unsigned int magic, unsigned int arg)
 	if (magic == BOOT_MAGIC_STUB16) {
 		cpu_id = _shared.num_active_cpus++;
 		init_stack = _shared.smpinit_stack;
-		_shared.smpinit_stack = 0;
+		_shared.smpinit_stack = 0U;
 		__asm__ volatile("movl $0, (%0)" : : "m"(_shared.smpinit_lock));
 	}

@@ -199,7 +199,7 @@ long _isr_c_top(unsigned long vecret, unsigned long rsp,

 	/* Signal EOI if it's an APIC-managed interrupt */
 	if (vector > 0x1f) {
-		_apic.EOI = 0;
+		_apic.EOI = 0U;
 	}

 	/* Subtle: for the "interrupted context pointer", we pass in

@@ -257,9 +257,9 @@ void xuk_set_isr(int interrupt, int priority,
 		red.regvals[0] = ioapic_read(regidx);
 		red.regvals[1] = ioapic_read(regidx + 1);
 		red.vector = v;
-		red.logical = 0;
-		red.destination = 0xff;
-		red.masked = 1;
+		red.logical = 0U;
+		red.destination = 0xffU;
+		red.masked = 1U;
 		ioapic_write(regidx, red.regvals[0]);
 		ioapic_write(regidx + 1, red.regvals[1]);
 	}

@@ -32,7 +32,7 @@ extern "C" {
  * 14 #PF Page Fault
  * 17 #AC Alignment Check
  */
-#define _EXC_ERROR_CODE_FAULTS 0x27d00
+#define _EXC_ERROR_CODE_FAULTS 0x27d00U


 /* NOTE: We currently do not have definitions for 16-bit segment, currently