From d11f2f184d9a1228cb42113943f5c8c44ef93fbb Mon Sep 17 00:00:00 2001 From: Tomasz Bursztyka Date: Fri, 23 Oct 2020 12:01:18 +0200 Subject: [PATCH] drivers/virtualization: Adding ivshmem driver This is placed into drivers/virtualization as it does not belong to any existing subsystem. This is only the ivshmem-plain variant. This device is provided by qemu or ACRN, and can be used to share memory either between the host and the VM or between VMs. Here, if Zephyr is used as a VM, it will be able to take advantage of such a feature. Signed-off-by: Tomasz Bursztyka --- drivers/CMakeLists.txt | 1 + drivers/Kconfig | 2 + drivers/virtualization/CMakeLists.txt | 5 + drivers/virtualization/Kconfig | 35 ++++++ drivers/virtualization/virt_ivshmem.c | 133 +++++++++++++++++++++++ drivers/virtualization/virt_ivshmem.h | 27 +++++ include/drivers/virtualization/ivshmem.h | 46 ++++++++ 7 files changed, 249 insertions(+) create mode 100644 drivers/virtualization/CMakeLists.txt create mode 100644 drivers/virtualization/Kconfig create mode 100644 drivers/virtualization/virt_ivshmem.c create mode 100644 drivers/virtualization/virt_ivshmem.h create mode 100644 include/drivers/virtualization/ivshmem.h diff --git a/drivers/CMakeLists.txt b/drivers/CMakeLists.txt index c7e70b24ce3..1e09fe0f749 100644 --- a/drivers/CMakeLists.txt +++ b/drivers/CMakeLists.txt @@ -41,6 +41,7 @@ add_subdirectory_ifdef(CONFIG_LORA lora) add_subdirectory_ifdef(CONFIG_PECI peci) add_subdirectory_ifdef(CONFIG_REGULATOR regulator) add_subdirectory_ifdef(CONFIG_MEMC memc) +add_subdirectory_ifdef(CONFIG_VIRTUALIZATION virtualization) add_subdirectory_ifdef(CONFIG_FLASH_HAS_DRIVER_ENABLED flash) add_subdirectory_ifdef(CONFIG_SERIAL_HAS_DRIVER serial) diff --git a/drivers/Kconfig b/drivers/Kconfig index 528f94b1dd0..e49f06a9217 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -103,4 +103,6 @@ source "drivers/regulator/Kconfig" source "drivers/memc/Kconfig" +source "drivers/virtualization/Kconfig" + endmenu diff
--git a/drivers/virtualization/CMakeLists.txt b/drivers/virtualization/CMakeLists.txt new file mode 100644 index 00000000000..78bfc3d3948 --- /dev/null +++ b/drivers/virtualization/CMakeLists.txt @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: Apache-2.0 + +zephyr_library() + +zephyr_library_sources_ifdef(CONFIG_IVSHMEM virt_ivshmem.c) diff --git a/drivers/virtualization/Kconfig b/drivers/virtualization/Kconfig new file mode 100644 index 00000000000..c22b9073d7e --- /dev/null +++ b/drivers/virtualization/Kconfig @@ -0,0 +1,35 @@ +# Virtualization drivers configuration options + +# Copyright (c) 2015-2020 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +menuconfig VIRTUALIZATION + bool "Virtualization guest drivers" + help + This contains various drivers meant to support and expose features + when Zephyr is running as a guest in a virtualized or emulated + environment. + +if VIRTUALIZATION + +config IVSHMEM + bool "Inter-VM shared memory device (ivshmem)" + depends on PCIE + help + This will enable support of qemu's ivshmem device, which is also + present in the ACRN hypervisor, and lets VMs share memory with each + other. + +if IVSHMEM + +config IVSHMEM_DEV_NAME + string + default "IVSHMEM" + +module = IVSHMEM +module-str = ivshmem +source "subsys/logging/Kconfig.template.log_config" + +endif # IVSHMEM + +endif # VIRTUALIZATION diff --git a/drivers/virtualization/virt_ivshmem.c b/drivers/virtualization/virt_ivshmem.c new file mode 100644 index 00000000000..a5ee8e50c87 --- /dev/null +++ b/drivers/virtualization/virt_ivshmem.c @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2020 Intel Corporation.
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#define LOG_LEVEL CONFIG_IVSHMEM_LOG_LEVEL +#include +LOG_MODULE_REGISTER(ivshmem); + +#include + +#include +#include + +#include +#include +#include +#include + +#include +#include "virt_ivshmem.h" + +static bool ivshmem_check_on_bdf(pcie_bdf_t bdf) +{ + uint32_t data; + + data = pcie_conf_read(bdf, PCIE_CONF_ID); + if ((data != PCIE_ID_NONE) && + (PCIE_ID_TO_VEND(data) == IVSHMEM_VENDOR_ID) && + (PCIE_ID_TO_DEV(data) == IVSHMEM_DEVICE_ID)) { + return true; + } + + return false; +} + +/* Ivshmem's BDF is not a static value that we could get from DTS, + * since the same image could run on qemu or ACRN which could set + * a different one. So instead, let's find it at runtime. + */ +static pcie_bdf_t ivshmem_bdf_lookup(void) +{ + int bus, dev, func; + + for (bus = 0; bus <= MAX_BUS; bus++) { + for (dev = 0; dev <= MAX_DEV; ++dev) { + for (func = 0; func <= MAX_FUNC; ++func) { + pcie_bdf_t bdf = PCIE_BDF(bus, dev, func); + + if (ivshmem_check_on_bdf(bdf)) { + return bdf; + } + } + } + } + + return 0; +} + +static bool ivshmem_configure(const struct device *dev) +{ + struct ivshmem *data = dev->data; + struct pcie_mbar mbar_regs, mbar_mem; + + if (!pcie_get_mbar(data->bdf, IVSHMEM_PCIE_REG_BAR_IDX, &mbar_regs)) { + LOG_ERR("ivshmem regs bar not found"); + return false; + } + + pcie_set_cmd(data->bdf, PCIE_CONF_CMDSTAT_MEM, true); + + device_map(DEVICE_MMIO_RAM_PTR(dev), mbar_regs.phys_addr, + mbar_regs.size, K_MEM_CACHE_NONE); + + if (!pcie_get_mbar(data->bdf, IVSHMEM_PCIE_SHMEM_BAR_IDX, &mbar_mem)) { + LOG_ERR("ivshmem mem bar not found"); + return false; + } + + data->size = mbar_mem.size; + + z_phys_map((uint8_t **)&data->shmem, + mbar_mem.phys_addr, data->size, + K_MEM_CACHE_WB | K_MEM_PERM_RW); + + LOG_DBG("ivshmem configured:"); + LOG_DBG("- Registers at 0x%lx (mapped to 0x%lx)", + mbar_regs.phys_addr, DEVICE_MMIO_GET(dev)); + LOG_DBG("- Shared memory of %lu bytes at 0x%lx (mapped to 0x%lx)", + data->size, 
mbar_mem.phys_addr, data->shmem); + + return true; +} + +static size_t ivshmem_api_get_mem(const struct device *dev, + uintptr_t *memmap) +{ + struct ivshmem *data = dev->data; + + *memmap = data->shmem; + + return data->size; +} + +static const struct ivshmem_driver_api ivshmem_api = { + .get_mem = ivshmem_api_get_mem, +}; + +static int ivshmem_init(const struct device *dev) +{ + struct ivshmem *data = dev->data; + + data->bdf = ivshmem_bdf_lookup(); + if (data->bdf == 0) { + LOG_WRN("ivshmem device not found"); + return -ENOTSUP; + } + + LOG_DBG("ivshmem found at bdf 0x%x", data->bdf); + + if (!ivshmem_configure(dev)) { + return -EIO; + } + + return 0; +} + +static struct ivshmem ivshmem_data; + +DEVICE_DEFINE(ivshmem, CONFIG_IVSHMEM_DEV_NAME, + ivshmem_init, device_pm_control_nop, &ivshmem_data, NULL, + POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &ivshmem_api); diff --git a/drivers/virtualization/virt_ivshmem.h b/drivers/virtualization/virt_ivshmem.h new file mode 100644 index 00000000000..7d70ae345d7 --- /dev/null +++ b/drivers/virtualization/virt_ivshmem.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2020 Intel Corporation. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_DRIVERS_VIRTUALIZATION_VIRT_IVSHMEM_H_ +#define ZEPHYR_DRIVERS_VIRTUALIZATION_VIRT_IVSHMEM_H_ + +#define IVSHMEM_VENDOR_ID 0x1AF4 +#define IVSHMEM_DEVICE_ID 0x1110 + +#define IVSHMEM_PCIE_REG_BAR_IDX 0 +#define IVSHMEM_PCIE_SHMEM_BAR_IDX 1 + +#define MAX_BUS (0xFFFFFFFF & PCIE_BDF_BUS_MASK) +#define MAX_DEV (0xFFFFFFFF & PCIE_BDF_DEV_MASK) +#define MAX_FUNC (0xFFFFFFFF & PCIE_BDF_FUNC_MASK) + +struct ivshmem { + DEVICE_MMIO_RAM; + pcie_bdf_t bdf; + uintptr_t shmem; + size_t size; +}; + +#endif /* ZEPHYR_DRIVERS_VIRTUALIZATION_VIRT_IVSHMEM_H_ */ diff --git a/include/drivers/virtualization/ivshmem.h b/include/drivers/virtualization/ivshmem.h new file mode 100644 index 00000000000..f4e6beadc3a --- /dev/null +++ b/include/drivers/virtualization/ivshmem.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2020 Intel Corporation + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_DRIVERS_VIRTUALIZATION_IVSHMEM_H_ +#define ZEPHYR_INCLUDE_DRIVERS_VIRTUALIZATION_IVSHMEM_H_ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef size_t (*ivshmem_get_mem_f)(const struct device *dev, + uintptr_t *memmap); + +struct ivshmem_driver_api { + ivshmem_get_mem_f get_mem; +}; + +/** + * @brief Get the inter-VM shared memory + * + * @param dev Pointer to the device structure for the driver instance + * @param memmap A pointer to fill in with the memory address + * + * @return the size of the memory mapped, or 0 + */ +static inline size_t ivshmem_get_mem(const struct device *dev, + uintptr_t *memmap) +{ + const struct ivshmem_driver_api *api = + (const struct ivshmem_driver_api *)dev->api; + + return api->get_mem(dev, memmap); +} + +#ifdef __cplusplus +} +#endif + +#endif /* ZEPHYR_INCLUDE_DRIVERS_VIRTUALIZATION_IVSHMEM_H_ */