x86: consolidate x86_64 architecture, SoC and boards

There are two sets of code supporting x86_64: x86_64 using the x32
ABI, and x86 in long mode. This consolidates both into one x86_64
architecture and SoC supporting true 64-bit mode.

() Removes the x86_64:x32 architecture and SoC, and replaces
   them with the existing x86 long mode arch and SoC.
() Replaces the qemu_x86_64 board with qemu_x86_long, which is
   renamed to qemu_x86_64.
() Updates samples and tests to remove references to
   qemu_x86_long.
() Renames CONFIG_X86_LONGMODE to CONFIG_X86_64.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
commit b7eb04b300
Author: Daniel Leung, 2019-10-24 12:57:57 -07:00
Committed by: Anas Nashif
103 changed files with 105 additions and 3102 deletions
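
For downstream code, the last bullet is a pure symbol rename: anything
that keyed off CONFIG_X86_LONGMODE now keys off CONFIG_X86_64. A
minimal sketch of the guard pattern after this commit (the comments
are illustrative, not code from the tree):

#ifdef CONFIG_X86_64
/* true 64-bit (Intel 64 / long mode) path */
#else
/* 32-bit (IA32) path */
#endif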

View file

@@ -13,8 +13,6 @@
 #if defined(CONFIG_X86)
 #include <arch/x86/arch.h>
-#elif defined(CONFIG_X86_64)
-#include <arch/x86_64/arch.h>
 #elif defined(CONFIG_ARM)
 #include <arch/arm/arch.h>
 #elif defined(CONFIG_ARC)

View file

@@ -9,7 +9,7 @@
 #ifndef ZEPHYR_INCLUDE_ARCH_SYSCALL_H_
 #define ZEPHYR_INCLUDE_ARCH_SYSCALL_H_
-#if defined(CONFIG_X86) && !defined(CONFIG_X86_LONGMODE)
+#if defined(CONFIG_X86) && !defined(CONFIG_X86_64)
 #include <arch/x86/ia32/syscall.h>
 #elif defined(CONFIG_ARM)
 #include <arch/arm/syscall.h>

View file

@@ -198,7 +198,7 @@ extern unsigned char _irq_to_interrupt_vector[];
 #endif /* _ASMLANGUAGE */
-#ifdef CONFIG_X86_LONGMODE
+#ifdef CONFIG_X86_64
 #include <arch/x86/intel64/arch.h>
 #else
 #include <arch/x86/ia32/arch.h>

View file

@@ -38,7 +38,7 @@
 #define Z_X86_MMU_G BIT64(8) /** Global */
 #define Z_X86_MMU_XD BIT64(63) /** Execute Disable */
-#ifdef CONFIG_X86_LONGMODE
+#ifdef CONFIG_X86_64
 #define Z_X86_MMU_PROT_KEY_MASK 0x7800000000000000ULL
 #endif
@@ -54,11 +54,11 @@
  * reserved or ignored regions immediately above it, into a single area.
  * This will work as expected if valid memory addresses are written.
  */
-#ifdef CONFIG_X86_LONGMODE
+#ifdef CONFIG_X86_64
 #define Z_X86_MMU_PML4E_PDPT_MASK 0x7FFFFFFFFFFFF000ULL
 #endif
 #define Z_X86_MMU_PDPTE_PD_MASK 0x7FFFFFFFFFFFF000ULL
-#ifdef CONFIG_X86_LONGMODE
+#ifdef CONFIG_X86_64
 #define Z_X86_MMU_PDPTE_1G_MASK 0x07FFFFFFC0000000ULL
 #endif
 #define Z_X86_MMU_PDE_PT_MASK 0x7FFFFFFFFFFFF000ULL
@@ -163,7 +163,7 @@ struct mmu_region {
 #define MMU_BOOT_REGION(addr, region_size, permission_flags) \
 	Z_MMU_BOOT_REGION(__COUNTER__, addr, region_size, permission_flags)
-#ifdef CONFIG_X86_LONGMODE
+#ifdef CONFIG_X86_64
 #define Z_X86_NUM_PML4_ENTRIES 512U
 #define Z_X86_NUM_PDPT_ENTRIES 512U
 #else
@@ -179,7 +179,7 @@ struct mmu_region {
 typedef u64_t k_mem_partition_attr_t;
-#ifdef CONFIG_X86_LONGMODE
+#ifdef CONFIG_X86_64
 struct x86_mmu_pml4 {
 	u64_t entry[Z_X86_NUM_PML4_ENTRIES];
 };
@@ -198,7 +198,7 @@ struct x86_mmu_pt {
 };
 struct x86_page_tables {
-#ifdef CONFIG_X86_LONGMODE
+#ifdef CONFIG_X86_64
 	struct x86_mmu_pml4 pml4;
 #else
 	struct x86_mmu_pdpt pdpt;
@@ -208,7 +208,7 @@ struct x86_page_tables {
 /*
  * Inline functions for getting the next linked structure
  */
-#ifdef CONFIG_X86_LONGMODE
+#ifdef CONFIG_X86_64
 static inline u64_t *z_x86_pml4_get_pml4e(struct x86_mmu_pml4 *pml4,
 					  uintptr_t addr)
 {
@@ -237,7 +237,7 @@ static inline struct x86_mmu_pd *z_x86_pdpte_get_pd(u64_t pdpte)
 {
 	uintptr_t addr = pdpte & Z_X86_MMU_PDPTE_PD_MASK;
-#ifdef CONFIG_X86_LONGMODE
+#ifdef CONFIG_X86_64
 	__ASSERT((pdpte & Z_X86_MMU_PS) == 0, "PDPT is for 1GB page");
 #endif
 	return (struct x86_mmu_pd *)addr;
@@ -270,7 +270,7 @@ static inline u64_t *z_x86_pt_get_pte(struct x86_mmu_pt *pt, uintptr_t addr)
  * Inline functions for obtaining page table structures from the top-level
  */
-#ifdef CONFIG_X86_LONGMODE
+#ifdef CONFIG_X86_64
 static inline struct x86_mmu_pml4 *
 z_x86_get_pml4(struct x86_page_tables *ptables)
 {
@@ -296,7 +296,7 @@ z_x86_get_pdpt(struct x86_page_tables *ptables, uintptr_t addr)
 	return &ptables->pdpt;
 }
-#endif /* CONFIG_X86_LONGMODE */
+#endif /* CONFIG_X86_64 */
 static inline u64_t *z_x86_get_pdpte(struct x86_page_tables *ptables,
 				     uintptr_t addr)
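
The 512-entry PML4 and PDPT sizing above follows from x86-64 4-level
paging, where each level consumes 9 bits of the virtual address
(512 == 1 << 9). A sketch of that split using hypothetical index
macros (the header's real accessors mask the address inline instead):

#define PML4_IDX(va) (((u64_t)(va) >> 39) & 0x1FFULL) /* bits 47:39 */
#define PDPT_IDX(va) (((u64_t)(va) >> 30) & 0x1FFULL) /* bits 38:30 */
#define PD_IDX(va)   (((u64_t)(va) >> 21) & 0x1FFULL) /* bits 29:21 */
#define PT_IDX(va)   (((u64_t)(va) >> 12) & 0x1FFULL) /* bits 20:12 */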

View file

@@ -46,7 +46,7 @@ static inline void z_x86_msr_write(unsigned int msr, u64_t data)
 	__asm__ volatile ("wrmsr" : : "c"(msr), "a"(low), "d"(high));
 }
-#ifdef CONFIG_X86_LONGMODE
+#ifdef CONFIG_X86_64
 static inline u64_t z_x86_msr_read(unsigned int msr)
 {
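
The diff view truncates this function at its opening brace; a typical
rdmsr-based body, mirroring the wrmsr write above, would look like the
following (a sketch, not necessarily the exact code in the header):

	u32_t low, high;

	/* rdmsr returns the 64-bit MSR value in EDX:EAX */
	__asm__ volatile ("rdmsr" : "=a"(low), "=d"(high) : "c"(msr));

	return ((u64_t)high << 32) | low;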

View file

@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2018 Intel Corporation
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-#ifndef _X86_64_ARCH_H
-#define _X86_64_ARCH_H
-#include <arch/common/sys_io.h>
-#include <arch/common/ffs.h>
-#define STACK_ALIGN 8
-#define DT_INST_0_INTEL_HPET_BASE_ADDRESS 0xFED00000U
-#define DT_INST_0_INTEL_HPET_IRQ_0 2
-#define DT_INST_0_INTEL_HPET_IRQ_0_PRIORITY 4
-typedef struct z_arch_esf_t z_arch_esf_t;
-static inline u32_t z_arch_k_cycle_get_32(void)
-{
-#ifdef CONFIG_HPET_TIMER
-	extern u32_t z_timer_cycle_get_32(void);
-	return z_timer_cycle_get_32();
-#else
-	return (u32_t)z_arch_k_cycle_get_64();
-#endif
-}
-/* Not a standard Zephyr function, but probably will be */
-static inline unsigned long long z_arch_k_cycle_get_64(void)
-{
-	unsigned int hi, lo;
-	__asm__ volatile("rdtsc" : "=d"(hi), "=a"(lo));
-	return (((unsigned long long)hi) << 32) | lo;
-}
-static inline unsigned int z_arch_irq_lock(void)
-{
-	unsigned long long key;
-	__asm__ volatile("pushfq; cli; popq %0" : "=r"(key));
-	return (int)key;
-}
-static inline void z_arch_irq_unlock(unsigned int key)
-{
-	if (key & 0x200) {
-		__asm__ volatile("sti");
-	}
-}
-/**
- * Returns true if interrupts were unlocked prior to the
- * z_arch_irq_lock() call that produced the key argument.
- */
-static inline bool z_arch_irq_unlocked(unsigned int key)
-{
-	return (key & 0x200) != 0;
-}
-void z_arch_irq_enable(unsigned int irq);
-void z_arch_irq_disable(unsigned int irq);
-#define Z_ARCH_IRQ_CONNECT(irq, pri, isr, arg, flags) \
-	z_arch_irq_connect_dynamic(irq, pri, isr, arg, flags)
-extern int x86_64_except_reason;
-/* Vector 5 is the "bounds" exception which is otherwise vestigial
- * (BOUND is an illegal instruction in long mode)
- */
-#define Z_ARCH_EXCEPT(reason) do { \
-		x86_64_except_reason = reason; \
-		__asm__ volatile("int $5"); \
-	} while (false)
-#endif /* _X86_64_ARCH_H */
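
The key returned by the deleted z_arch_irq_lock() is just the saved
RFLAGS value; bit 9 (IF, mask 0x200) records whether interrupts were
enabled. A sketch of the critical-section pattern these helpers
implemented:

	unsigned int key = z_arch_irq_lock(); /* pushfq; cli */
	/* ... code that must not be interrupted ... */
	z_arch_irq_unlock(key); /* sti only if IF was set in key */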