arch/x86: Add support for PCI MMIO configuration access

The traditional IO Port configuration mechanism was technically
deprecated about 15 years ago when PCI Express started shipping.
While frankly the MMIO support is significantly more complicated and
no more performant in practice, Zephyr should have support for current
standards.  And (particularly complicated) devices do exist in the
wild whose extended capability pointers spill beyond the 256-byte area
allowed by the legacy mechanism.  Zephyr will want drivers for those
some day.

Also, Windows and Linux use MMIO access, which means that's what
system vendors validate.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Authored by Andy Ross, 2020-06-13 06:34:46 -07:00; committed by Carles Cufí
commit 7c6d8aa58e
3 changed files with 100 additions and 10 deletions
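For readers comparing the two mechanisms the commit message contrasts: both turn a bus/device/function/register tuple into an access, but the legacy mechanism funnels everything through two I/O ports while ECAM computes a plain memory address, which is what makes the full 4 KiB extended configuration space reachable. A minimal illustrative sketch of the two address encodings (standard PCI/PCIe layouts, not code from this commit; the helper names are invented):

#include <stdint.h>

/* Legacy "Configuration Mechanism #1": encode the target into one
 * 32-bit word, write it to the address port (0xCF8), then move data
 * through the data port (0xCFC). Only reg < 256 is encodable.
 */
static uint32_t legacy_cap_word(uint32_t bus, uint32_t dev,
				uint32_t func, uint32_t reg)
{
	return 0x80000000u | (bus << 16) | (dev << 11) |
	       (func << 8) | (reg & 0xFCu);
}

/* ECAM: each function owns a 4 KiB window in one flat MMIO region,
 * so all 4096 bytes of extended config space are directly addressable.
 */
static uintptr_t ecam_addr(uintptr_t base, uint32_t bus, uint32_t dev,
			   uint32_t func, uint32_t reg)
{
	return base + (bus << 20) + (dev << 15) + (func << 12) + reg;
}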

arch/x86/Kconfig

@@ -100,6 +100,14 @@ config ACPI
 	help
 	  Allow retrieval of platform configuration at runtime.
 
+config PCIE_MMIO_CFG
+	bool "Use MMIO PCI configuration space access"
+	select ACPI
+	help
+	  Selects the use of the memory-mapped PCI Express Extended
+	  Configuration Space instead of the traditional 0xCF8/0xCFC
+	  IO Port registers.
+
 config X86_MEMMAP_ENTRIES
 	int "Number of memory map entries"
 	range 1 256
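The option is opt-in; on a platform whose firmware publishes an ACPI MCFG table, an application would turn it on with a single line in prj.conf (hypothetical snippet; the select clause pulls in ACPI automatically, since the code discovers the ECAM base from the MCFG table):

CONFIG_PCIE_MMIO_CFG=y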

arch/x86/core/pcie.c

@@ -7,18 +7,82 @@
 
 #include <kernel.h>
 #include <drivers/pcie/pcie.h>
 
+#ifdef CONFIG_ACPI
+#include <arch/x86/acpi.h>
+#endif
+
 #ifdef CONFIG_PCIE_MSI
 #include <drivers/pcie/msi.h>
 #endif
 
-/*
- * The Configuration Mechanism (previously, Configuration Mechanism #1)
- * uses two 32-bit ports in the I/O space, here called CAP and CDP.
- *
- * N.B.: this code relies on the fact that the PCIE_BDF() format (as
- * defined in dt-bindings/pcie/pcie.h) and the CAP agree on the bus/dev/func
- * bitfield positions and sizes.
- */
+/* PCI Express Extended Configuration Mechanism (MMIO) */
+
+#define MAX_PCI_BUS_SEGMENTS 4
+
+static struct {
+	uint32_t start_bus;
+	uint32_t n_buses;
+	uint8_t *mmio;
+} bus_segs[MAX_PCI_BUS_SEGMENTS];
+
+static void pcie_mm_init(void)
+{
+#ifdef CONFIG_ACPI
+	struct acpi_mcfg *m = z_acpi_find_table(ACPI_MCFG_SIGNATURE);
+
+	if (m != NULL) {
+		int n = (m->sdt.len - sizeof(*m)) / sizeof(m->pci_segs[0]);
+
+		for (int i = 0; i < n && i < MAX_PCI_BUS_SEGMENTS; i++) {
+			bus_segs[i].start_bus = m->pci_segs[i].start_bus;
+			bus_segs[i].n_buses = 1 + m->pci_segs[i].end_bus
+				- m->pci_segs[i].start_bus;
+			bus_segs[i].mmio =
+				(void *)(long)m->pci_segs[i].base_addr;
+		}
+	}
+#endif
+}
+
+static inline void pcie_mm_conf(pcie_bdf_t bdf, unsigned int reg,
+				bool write, uint32_t *data)
+{
+	if (bus_segs[0].mmio == NULL) {
+		pcie_mm_init();
+	}
+
+	for (int i = 0; i < ARRAY_SIZE(bus_segs); i++) {
+		int off = PCIE_BDF_TO_BUS(bdf) - bus_segs[i].start_bus;
+
+		if (off >= 0 && off < bus_segs[i].n_buses) {
+			bdf = PCIE_BDF(off,
+				       PCIE_BDF_TO_DEV(bdf),
+				       PCIE_BDF_TO_FUNC(bdf));
+
+			volatile uint32_t *regs
+				= (void *)&bus_segs[i].mmio[bdf << 4];
+
+			if (write) {
+				regs[reg] = *data;
+			} else {
+				*data = regs[reg];
+			}
+		}
+	}
+}
+
+void z_pcie_add_mmu_regions(void)
+{
+	for (int i = 0; i < ARRAY_SIZE(bus_segs); i++) {
+		/* 32 devices & 8 functions per bus, 4k per device */
+		uintptr_t sz = bus_segs[i].n_buses * (32 * 8 * 4096);
+
+		z_x86_add_mmu_region((uintptr_t) bus_segs[i].mmio,
+				     sz, MMU_ENTRY_READ | MMU_ENTRY_WRITE);
+	}
+}
+
+/* Traditional Configuration Mechanism */
+
 #define PCIE_X86_CAP	0xCF8U	/* Configuration Address Port */
 #define PCIE_X86_CAP_BDF_MASK	0x00FFFF00U  /* b/d/f bits */
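A note on the bdf << 4 offset in pcie_mm_conf() above: Zephyr's PCIE_BDF() encoding places the function number at bit 8, the device at bit 11, and the bus at bit 16, while ECAM places the same fields at bits 12, 15, and 20 over a 4 KiB per-function window, so shifting the whole BDF left by four bits yields the ECAM byte offset directly. A minimal self-check sketch (PCIE_BDF simplified from dt-bindings/pcie/pcie.h, field masks omitted):

#include <assert.h>
#include <stdint.h>

/* Simplified BDF encoding: bus at bit 16, device at bit 11,
 * function at bit 8.
 */
#define PCIE_BDF(b, d, f) (((b) << 16) | ((d) << 11) | ((f) << 8))

int main(void)
{
	uint32_t bdf = PCIE_BDF(2u, 3u, 1u);

	/* ECAM offset = bus << 20 | dev << 15 | func << 12 */
	assert(((uintptr_t)bdf << 4) ==
	       ((2u << 20) | (3u << 15) | (1u << 12)));
	return 0;
}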
@@ -32,7 +96,8 @@
  * Helper function for exported configuration functions. Configuration access
  * ain't atomic, so spinlock to keep drivers from clobbering each other.
  */
-static void pcie_conf(pcie_bdf_t bdf, unsigned int reg, bool write, uint32_t *data)
+static inline void pcie_io_conf(pcie_bdf_t bdf, unsigned int reg,
+				bool write, uint32_t *data)
 {
 	static struct k_spinlock lock;
 	k_spinlock_key_t k;
@@ -54,11 +119,22 @@ static void pcie_conf(pcie_bdf_t bdf, unsigned int reg, bool write, uint32_t *da
 	k_spin_unlock(&lock, k);
 }
 
+static inline void pcie_conf(pcie_bdf_t bdf, unsigned int reg,
+			     bool write, uint32_t *data)
+{
+#ifdef CONFIG_PCIE_MMIO_CFG
+	pcie_mm_conf(bdf, reg, write, data);
+#else
+	pcie_io_conf(bdf, reg, write, data);
+#endif
+}
+
 /* these functions are explained in include/drivers/pcie/pcie.h */
 
 uint32_t pcie_conf_read(pcie_bdf_t bdf, unsigned int reg)
 {
-	uint32_t data;
+	uint32_t data = 0;
 
 	pcie_conf(bdf, reg, false, &data);
 	return data;
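With the dispatch in place, callers are unchanged: drivers keep using pcie_conf_read() and get whichever mechanism the kernel was configured with. An illustrative usage sketch (not from this commit; PCIE_BDF() per dt-bindings/pcie/pcie.h, and register 0 is the standard vendor/device ID word, where an all-ones vendor ID means nothing decodes at that address):

#include <drivers/pcie/pcie.h>

/* Illustrative probe: read the vendor/device ID dword of bus 0,
 * device 0, function 0 and check that a device responds there.
 */
static bool host_bridge_present(void)
{
	uint32_t id = pcie_conf_read(PCIE_BDF(0, 0, 0), 0);

	return (id & 0xFFFFu) != 0xFFFFu;
}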

arch/x86/core/x86_mmu.c

@@ -15,6 +15,8 @@
 #include <logging/log.h>
 LOG_MODULE_DECLARE(os);
 
+void z_pcie_add_mmu_regions(void);
+
 #define PHYS_RAM_ADDR	DT_REG_ADDR(DT_CHOSEN(zephyr_sram))
 #define PHYS_RAM_SIZE	DT_REG_SIZE(DT_CHOSEN(zephyr_sram))
@@ -850,6 +852,10 @@ void z_x86_paging_init(void)
 	z_x86_soc_add_mmu_regions();
 
+#ifdef CONFIG_PCIE_MMIO_CFG
+	z_pcie_add_mmu_regions();
+#endif
+
 	pages_free = (page_pos - page_pool) / MMU_PAGE_SIZE;
 
 	if (pages_free != 0) {
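For scale, the mapping added by z_pcie_add_mmu_regions() costs 32 devices × 8 functions × 4 KiB = 1 MiB of read/write virtual address space per decoded bus, so a segment spanning 64 buses claims a 64 MiB region of page-table coverage, all set up once at paging init before the first configuration access.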