drivers/disk: Add NVMe controller support

Based on FreeBSD's implementation made by James Harris, Intel Copyright
2012-2016.

Since Zephyr does not provide the advanced interfaces that FreeBSD does (bus
abstractions, memory and DMA abstractions, and many more), this comes with
a much simplified and Zephyr-ish way to instantiate, initialize and use an
NVMe controller.

ToDo: IO Queues cannot be more than 1. Macros will need to be improved to
manage the case of 2+ IO queues.

Signed-off-by: Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>
This commit is contained in:
Tomasz Bursztyka 2022-11-09 11:51:18 +01:00 committed by Carles Cufí
commit b7d4d74e03
8 changed files with 1278 additions and 0 deletions

View file

@ -12,4 +12,6 @@ zephyr_library_sources_ifdef(CONFIG_SDMMC_STM32 sdmmc_stm32.c)
zephyr_library_sources_ifdef(CONFIG_SDMMC_SUBSYS sdmmc_subsys.c)
zephyr_library_sources_ifdef(CONFIG_MMC_SUBSYS mmc_subsys.c)
add_subdirectory_ifdef(CONFIG_NVME nvme)
endif()

View file

@ -13,4 +13,6 @@ source "drivers/disk/Kconfig.flash"
source "drivers/disk/Kconfig.sdmmc"
source "drivers/disk/Kconfig.mmc"
rsource "nvme/Kconfig"
endif # DISK_DRIVERS

View file

@ -0,0 +1,4 @@
# Copyright (c) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
zephyr_library_sources(nvme_controller.c)

50
drivers/disk/nvme/Kconfig Normal file
View file

@ -0,0 +1,50 @@
# Copyright (c) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
menuconfig NVME
bool "NVMe disk"
depends on PCIE
select PCIE_MSI_X
select PCIE_MSI_MULTI_VECTOR
help
NVMe disk(s) might be present on the system through PCIe, enable this
driver to support these. It will enable MSI-X and MSI multi-vector
support
if NVME
config NVME_ADMIN_ENTRIES
int "Number of admin queue entries"
range 2 4096
default 256
help
This sets the amount of allocated admin queue entries.
Do not touch this unless you know what you are doing.
config NVME_IO_QUEUES
int "Number of IO queues"
range 1 65536
default 1
help
This sets the amount of allocated I/O queues.
Do not touch this unless you know what you are doing.
config NVME_IO_ENTRIES
int "Number of IO queue entries"
range 2 65536
default 256
help
This sets the amount of allocated IO queue entries.
Do not touch this unless you know what you are doing.
config NVME_INT_PRIORITY
int "Interrupt priority"
default 2
help
Interrupt priority used for the MSI-X generated interrupts.
module = NVME
module-str = nvme
source "subsys/logging/Kconfig.template.log_config"
endif # NVME

446
drivers/disk/nvme/nvme.h Normal file
View file

@ -0,0 +1,446 @@
/*
* Copyright (c) 2022 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_DRIVERS_DISK_NVME_NVME_H_
#define ZEPHYR_DRIVERS_DISK_NVME_NVME_H_

#include "nvme_cmd.h"
/*
 * NVMe controller register map as seen through BAR0 MMIO.
 * Layout and reserved-field sizes follow the NVMe specification register
 * offsets (noted in the comments as byte offsets from the BAR base).
 */
struct nvme_registers {
	uint32_t cap_lo; /* controller capabilities */
	uint32_t cap_hi;
	uint32_t vs; /* version */
	uint32_t intms; /* interrupt mask set */
	uint32_t intmc; /* interrupt mask clear */
	uint32_t cc; /* controller configuration */
	uint32_t reserved1;
	uint32_t csts; /* controller status */
	uint32_t nssr; /* NVM Subsystem Reset */
	uint32_t aqa; /* admin queue attributes */
	uint64_t asq; /* admin submission queue base addr */
	uint64_t acq; /* admin completion queue base addr */
	uint32_t cmbloc; /* Controller Memory Buffer Location */
	uint32_t cmbsz; /* Controller Memory Buffer Size */
	uint32_t bpinfo; /* Boot Partition Information */
	uint32_t bprsel; /* Boot Partition Read Select */
	uint64_t bpmbl; /* Boot Partition Memory Buffer Location */
	uint64_t cmbmsc; /* Controller Memory Buffer Memory Space Control */
	uint32_t cmbsts; /* Controller Memory Buffer Status */
	uint8_t reserved3[3492]; /* 5Ch - DFFh */
	uint32_t pmrcap; /* Persistent Memory Capabilities */
	uint32_t pmrctl; /* Persistent Memory Region Control */
	uint32_t pmrsts; /* Persistent Memory Region Status */
	uint32_t pmrebs; /* Persistent Memory Region Elasticity Buffer Size */
	uint32_t pmrswtp; /* Persistent Memory Region Sustained Write Throughput */
	uint32_t pmrmsc_lo; /* Persistent Memory Region Controller Memory Space Control */
	uint32_t pmrmsc_hi;
	uint8_t reserved4[484]; /* E1Ch - FFFh */
	/* Doorbell registers start at offset 0x1000; the actual stride
	 * between queue pairs is given by CAP.DSTRD (see dstrd field of
	 * struct nvme_controller), so this array is only a base marker.
	 */
	struct {
		uint32_t sq_tdbl; /* submission queue tail doorbell */
		uint32_t cq_hdbl; /* completion queue head doorbell */
	} doorbell[1];
};
/*
 * Power state descriptor; up to 32 of these appear in the
 * power_state[] array of struct nvme_controller_data below.
 */
struct nvme_power_state {
	/** Maximum Power */
	uint16_t mp;
	uint8_t ps_rsvd1;
	/** Max Power Scale, Non-Operational State */
	uint8_t mps_nops;
	/** Entry Latency */
	uint32_t enlat;
	/** Exit Latency */
	uint32_t exlat;
	/** Relative Read Throughput */
	uint8_t rrt;
	/** Relative Read Latency */
	uint8_t rrl;
	/** Relative Write Throughput */
	uint8_t rwt;
	/** Relative Write Latency */
	uint8_t rwl;
	/** Idle Power */
	uint16_t idlp;
	/** Idle Power Scale */
	uint8_t ips;
	uint8_t ps_rsvd8;
	/** Active Power */
	uint16_t actp;
	/** Active Power Workload, Active Power Scale */
	uint8_t apw_aps;
	uint8_t ps_rsvd10[9];
} __packed;
#define NVME_SERIAL_NUMBER_LENGTH 20
#define NVME_MODEL_NUMBER_LENGTH 40
#define NVME_FIRMWARE_REVISION_LENGTH 8
/*
 * Controller data structure, matching the NVMe Identify Controller
 * data layout (4096 bytes). Multi-byte fields are little-endian on the
 * device side; convert in place with nvme_controller_data_swapbytes()
 * after reading it from the controller.
 */
struct nvme_controller_data {
	/* bytes 0-255: controller capabilities and features */
	/** pci vendor id */
	uint16_t vid;
	/** pci subsystem vendor id */
	uint16_t ssvid;
	/** serial number */
	uint8_t sn[NVME_SERIAL_NUMBER_LENGTH];
	/** model number */
	uint8_t mn[NVME_MODEL_NUMBER_LENGTH];
	/** firmware revision */
	uint8_t fr[NVME_FIRMWARE_REVISION_LENGTH];
	/** recommended arbitration burst */
	uint8_t rab;
	/** ieee oui identifier */
	uint8_t ieee[3];
	/** multi-interface capabilities */
	uint8_t mic;
	/** maximum data transfer size */
	uint8_t mdts;
	/** Controller ID */
	uint16_t ctrlr_id;
	/** Version */
	uint32_t ver;
	/** RTD3 Resume Latency */
	uint32_t rtd3r;
	/** RTD3 Enter Latency */
	uint32_t rtd3e;
	/** Optional Asynchronous Events Supported */
	uint32_t oaes; /* bitfield really */
	/** Controller Attributes */
	uint32_t ctratt; /* bitfield really */
	/** Read Recovery Levels Supported */
	uint16_t rrls;
	uint8_t reserved1[9];
	/** Controller Type */
	uint8_t cntrltype;
	/** FRU Globally Unique Identifier */
	uint8_t fguid[16];
	/** Command Retry Delay Time 1 */
	uint16_t crdt1;
	/** Command Retry Delay Time 2 */
	uint16_t crdt2;
	/** Command Retry Delay Time 3 */
	uint16_t crdt3;
	uint8_t reserved2[122];
	/* bytes 256-511: admin command set attributes */
	/** optional admin command support */
	uint16_t oacs;
	/** abort command limit */
	uint8_t acl;
	/** asynchronous event request limit */
	uint8_t aerl;
	/** firmware updates */
	uint8_t frmw;
	/** log page attributes */
	uint8_t lpa;
	/** error log page entries */
	uint8_t elpe;
	/** number of power states supported */
	uint8_t npss;
	/** admin vendor specific command configuration */
	uint8_t avscc;
	/** Autonomous Power State Transition Attributes */
	uint8_t apsta;
	/** Warning Composite Temperature Threshold */
	uint16_t wctemp;
	/** Critical Composite Temperature Threshold */
	uint16_t cctemp;
	/** Maximum Time for Firmware Activation */
	uint16_t mtfa;
	/** Host Memory Buffer Preferred Size */
	uint32_t hmpre;
	/** Host Memory Buffer Minimum Size */
	uint32_t hmmin;
	/** Name space capabilities */
	struct {
		/* if nsmgmt, report tnvmcap and unvmcap */
		uint8_t tnvmcap[16];
		uint8_t unvmcap[16];
	} __packed untncap;
	/** Replay Protected Memory Block Support */
	uint32_t rpmbs; /* Really a bitfield */
	/** Extended Device Self-test Time */
	uint16_t edstt;
	/** Device Self-test Options */
	uint8_t dsto; /* Really a bitfield */
	/** Firmware Update Granularity */
	uint8_t fwug;
	/** Keep Alive Support */
	uint16_t kas;
	/** Host Controlled Thermal Management Attributes */
	uint16_t hctma; /* Really a bitfield */
	/** Minimum Thermal Management Temperature */
	uint16_t mntmt;
	/** Maximum Thermal Management Temperature */
	uint16_t mxtmt;
	/** Sanitize Capabilities */
	uint32_t sanicap; /* Really a bitfield */
	/** Host Memory Buffer Minimum Descriptor Entry Size */
	uint32_t hmminds;
	/** Host Memory Maximum Descriptors Entries */
	uint16_t hmmaxd;
	/** NVM Set Identifier Maximum */
	uint16_t nsetidmax;
	/** Endurance Group Identifier Maximum */
	uint16_t endgidmax;
	/** ANA Transition Time */
	uint8_t anatt;
	/** Asymmetric Namespace Access Capabilities */
	uint8_t anacap;
	/** ANA Group Identifier Maximum */
	uint32_t anagrpmax;
	/** Number of ANA Group Identifiers */
	uint32_t nanagrpid;
	/** Persistent Event Log Size */
	uint32_t pels;
	uint8_t reserved3[156];
	/* bytes 512-703: nvm command set attributes */
	/** submission queue entry size */
	uint8_t sqes;
	/** completion queue entry size */
	uint8_t cqes;
	/** Maximum Outstanding Commands */
	uint16_t maxcmd;
	/** number of namespaces */
	uint32_t nn;
	/** optional nvm command support */
	uint16_t oncs;
	/** fused operation support */
	uint16_t fuses;
	/** format nvm attributes */
	uint8_t fna;
	/** volatile write cache */
	uint8_t vwc;
	/** Atomic Write Unit Normal */
	uint16_t awun;
	/** Atomic Write Unit Power Fail */
	uint16_t awupf;
	/** NVM Vendor Specific Command Configuration */
	uint8_t nvscc;
	/** Namespace Write Protection Capabilities */
	uint8_t nwpc;
	/** Atomic Compare & Write Unit */
	uint16_t acwu;
	uint16_t reserved6;
	/** SGL Support */
	uint32_t sgls;
	/** Maximum Number of Allowed Namespaces */
	uint32_t mnan;
	/* bytes 540-767: Reserved */
	uint8_t reserved7[224];
	/** NVM Subsystem NVMe Qualified Name */
	uint8_t subnqn[256];
	/* bytes 1024-1791: Reserved */
	uint8_t reserved8[768];
	/* bytes 1792-2047: NVMe over Fabrics specification */
	uint8_t reserved9[256];
	/* bytes 2048-3071: power state descriptors */
	struct nvme_power_state power_state[32];
	/* bytes 3072-4095: vendor specific */
	uint8_t vs[1024];
} __packed __aligned(4);
/*
 * Convert all multi-byte fields of an identify-controller structure from
 * the device's little-endian layout to CPU endianness, in place.
 *
 * On little-endian targets this compiles to a no-op.
 * NOTE(review): the check relies on _BYTE_ORDER/_LITTLE_ENDIAN being
 * provided by the toolchain headers; if neither macro is defined the
 * preprocessor treats both as 0 and the swap code is compiled out even on
 * big-endian targets — confirm these macros exist for all supported
 * toolchains.
 */
static inline
void nvme_controller_data_swapbytes(struct nvme_controller_data *s)
{
#if _BYTE_ORDER != _LITTLE_ENDIAN
	s->vid = sys_le16_to_cpu(s->vid);
	s->ssvid = sys_le16_to_cpu(s->ssvid);
	s->ctrlr_id = sys_le16_to_cpu(s->ctrlr_id);
	s->ver = sys_le32_to_cpu(s->ver);
	s->rtd3r = sys_le32_to_cpu(s->rtd3r);
	s->rtd3e = sys_le32_to_cpu(s->rtd3e);
	s->oaes = sys_le32_to_cpu(s->oaes);
	s->ctratt = sys_le32_to_cpu(s->ctratt);
	s->rrls = sys_le16_to_cpu(s->rrls);
	s->crdt1 = sys_le16_to_cpu(s->crdt1);
	s->crdt2 = sys_le16_to_cpu(s->crdt2);
	s->crdt3 = sys_le16_to_cpu(s->crdt3);
	s->oacs = sys_le16_to_cpu(s->oacs);
	s->wctemp = sys_le16_to_cpu(s->wctemp);
	s->cctemp = sys_le16_to_cpu(s->cctemp);
	s->mtfa = sys_le16_to_cpu(s->mtfa);
	s->hmpre = sys_le32_to_cpu(s->hmpre);
	s->hmmin = sys_le32_to_cpu(s->hmmin);
	s->rpmbs = sys_le32_to_cpu(s->rpmbs);
	s->edstt = sys_le16_to_cpu(s->edstt);
	s->kas = sys_le16_to_cpu(s->kas);
	s->hctma = sys_le16_to_cpu(s->hctma);
	s->mntmt = sys_le16_to_cpu(s->mntmt);
	s->mxtmt = sys_le16_to_cpu(s->mxtmt);
	s->sanicap = sys_le32_to_cpu(s->sanicap);
	s->hmminds = sys_le32_to_cpu(s->hmminds);
	s->hmmaxd = sys_le16_to_cpu(s->hmmaxd);
	s->nsetidmax = sys_le16_to_cpu(s->nsetidmax);
	s->endgidmax = sys_le16_to_cpu(s->endgidmax);
	s->anagrpmax = sys_le32_to_cpu(s->anagrpmax);
	s->nanagrpid = sys_le32_to_cpu(s->nanagrpid);
	s->pels = sys_le32_to_cpu(s->pels);
	s->maxcmd = sys_le16_to_cpu(s->maxcmd);
	s->nn = sys_le32_to_cpu(s->nn);
	s->oncs = sys_le16_to_cpu(s->oncs);
	s->fuses = sys_le16_to_cpu(s->fuses);
	s->awun = sys_le16_to_cpu(s->awun);
	s->awupf = sys_le16_to_cpu(s->awupf);
	s->acwu = sys_le16_to_cpu(s->acwu);
	s->sgls = sys_le32_to_cpu(s->sgls);
	s->mnan = sys_le32_to_cpu(s->mnan);
#else
	ARG_UNUSED(s);
#endif
}
#include <zephyr/device.h>
#include <zephyr/drivers/pcie/pcie.h>
#include <zephyr/drivers/pcie/msi.h>
/* NVMe registers are mapped through BAR0 */
#define NVME_PCIE_BAR_IDX 0

#define NVME_REQUEST_AMOUNT (CONFIG_NVME_ADMIN_ENTRIES + \
			     CONFIG_NVME_IO_ENTRIES)

/* admin queue + io queue(s); parenthesized so the expansion stays a
 * single value in any expression context.
 */
#define NVME_PCIE_MSIX_VECTORS (1 + CONFIG_NVME_IO_QUEUES)
#define NVME_QUEUE_ALLOCATE(name, n_entries) \
static struct nvme_command cmd_##name[n_entries] __aligned(0x1000); \
static struct nvme_completion cpl_##name[n_entries] __aligned(0x1000); \
\
static struct nvme_cmd_qpair name = { \
.num_entries = n_entries, \
.cmd = cmd_##name, \
.cpl = cpl_##name, \
}
#define NVME_ADMINQ_ALLOCATE(n, n_entries) \
NVME_QUEUE_ALLOCATE(admin_##n, n_entries)
#define NVME_IOQ_ALLOCATE(n, n_entries) \
NVME_QUEUE_ALLOCATE(io_##n, n_entries)
/* Per-instance constant configuration (from devicetree/PCIe). */
struct nvme_controller_config {
	struct pcie_dev *pcie; /* PCIe device handle (BDF, vendor/device id) */
};
/* Per-instance runtime state of one NVMe controller. */
struct nvme_controller {
	DEVICE_MMIO_RAM;
	/* Back-pointer to the owning device, set at init time */
	const struct device *dev;

	/* Devicetree instance number */
	uint32_t id;

	/* MSI-X vectors: one for the admin queue + one per IO queue */
	msi_vector_t vectors[NVME_PCIE_MSIX_VECTORS];

	/* Admin and IO command queue pairs */
	struct nvme_cmd_qpair *adminq;
	struct nvme_cmd_qpair *ioq;

	/* Derived from CAP.TO (units of 500ms), see gather_info */
	uint32_t ready_timeout_in_ms;

	/** LO and HI capacity mask */
	uint32_t cap_lo;
	uint32_t cap_hi;

	/** Page size and log2(page_size) - 12 that we're currently using */
	uint32_t page_size;
	uint32_t mps;

	/** doorbell stride */
	uint32_t dstrd;

	/** maximum i/o size in bytes */
	uint32_t max_xfer_size;
};
#endif /* ZEPHYR_DRIVERS_DISK_NVME_NVME_H_ */

View file

@ -0,0 +1,73 @@
/*
* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2022 Intel Corp.
*/
#ifndef ZEPHYR_DRIVERS_DISK_NVME_NVME_COMMAND_H_
#define ZEPHYR_DRIVERS_DISK_NVME_NVME_COMMAND_H_
#include <zephyr/sys/slist.h>
/*
 * NVMe submission queue entry (64 bytes, 16 dwords).
 * NOTE(review): the cdw0 bitfield relies on implementation-defined bit
 * ordering; assumed little-endian, LSB-first allocation — confirm for
 * any big-endian target.
 */
struct nvme_command {
	/* dword 0 */
	struct _cdw0 {
		uint8_t opc;		/* opcode */
		uint8_t fuse : 2;	/* fused operation */
		uint8_t rsvd : 4;	/* reserved */
		uint8_t psdt : 2;	/* PRP or SGL for Data Transfer */
		uint16_t cid;		/* command identifier */
	} cdw0;

	/* dword 1 */
	uint32_t nsid;		/* namespace identifier */

	/* dword 2-3 */
	uint32_t cdw2;
	uint32_t cdw3;

	/* dword 4-5 */
	uint64_t mptr;		/* metadata pointer */

	/* dword 6-7 and 8-9 */
	struct _dptr {
		uint64_t prp1;	/* prp entry 1 */
		uint64_t prp2;	/* prp entry 2 */
	} dptr;			/* data pointer */

	/* dword 10 */
	union {
		uint32_t cdw10;	/* command-specific */
		uint32_t ndt;	/* Number of Dwords in Data transfer */
	};

	/* dword 11 */
	union {
		uint32_t cdw11;	/* command-specific */
		uint32_t ndm;	/* Number of Dwords in Metadata transfer */
	};

	/* dword 12-15 */
	uint32_t cdw12;		/* command-specific */
	uint32_t cdw13;		/* command-specific */
	uint32_t cdw14;		/* command-specific */
	uint32_t cdw15;		/* command-specific */
};
/*
 * NVMe completion queue entry (16 bytes, 4 dwords).
 * NOTE(review): p/status bitfield ordering is implementation-defined;
 * assumed little-endian, LSB-first allocation — confirm on big-endian.
 */
struct nvme_completion {
	/* dword 0 */
	uint32_t cdw0;		/* command-specific */

	/* dword 1 */
	uint32_t rsvd;

	/* dword 2 */
	uint16_t sqhd;		/* submission queue head pointer */
	uint16_t sqid;		/* submission queue identifier */

	/* dword 3 */
	uint16_t cid;		/* command identifier */
	uint16_t p : 1;		/* phase tag */
	uint16_t status : 15;
} __aligned(8);
#endif /* ZEPHYR_DRIVERS_DISK_NVME_NVME_COMMAND_H_ */

View file

@ -0,0 +1,311 @@
/*
* Copyright (c) 2022 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#define DT_DRV_COMPAT nvme_controller
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(nvme, CONFIG_NVME_LOG_LEVEL);
#include <errno.h>
#include <zephyr/kernel.h>
#include <soc.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include "nvme_helpers.h"
#include "nvme.h"
static int nvme_controller_wait_for_ready(const struct device *dev,
const int desired_val)
{
struct nvme_controller *nvme_ctrlr = dev->data;
mm_reg_t regs = DEVICE_MMIO_GET(dev);
int timeout = sys_clock_tick_get_32() +
k_ms_to_ticks_ceil32(nvme_ctrlr->ready_timeout_in_ms);
uint32_t delta_t = USEC_PER_MSEC;
uint32_t csts;
while (1) {
csts = nvme_mmio_read_4(regs, csts);
if (csts == NVME_GONE) {
LOG_ERR("Controller is unreachable");
return -EIO;
}
if (((csts >> NVME_CSTS_REG_RDY_SHIFT) &
NVME_CSTS_REG_RDY_MASK) == desired_val) {
break;
}
if ((int64_t)timeout - sys_clock_tick_get_32() < 0) {
LOG_ERR("Timeout error");
return -EIO;
}
k_busy_wait(delta_t);
delta_t = MIN((MSEC_PER_SEC * USEC_PER_MSEC), delta_t * 3 / 2);
}
return 0;
}
/*
 * Disable the controller: clear CC.EN and wait for CSTS.RDY == 0.
 *
 * Follows the EN/RDY state machine: if a previous enable is still in
 * flight (EN == 1, RDY == 0), first wait for RDY == 1 before clearing
 * EN — the statement order here is mandated by the hardware protocol.
 *
 * Returns 0 on success, negative errno on timeout or unreachable device.
 */
static int nvme_controller_disable(const struct device *dev)
{
	mm_reg_t regs = DEVICE_MMIO_GET(dev);
	uint32_t cc, csts;
	uint8_t enabled, ready;
	int err;

	cc = nvme_mmio_read_4(regs, cc);
	csts = nvme_mmio_read_4(regs, csts);

	ready = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;

	enabled = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
	if (enabled == 0) {
		/* Wait for RDY == 0 or timeout & fail */
		if (ready == 0) {
			return 0;
		}

		return nvme_controller_wait_for_ready(dev, 0);
	}

	if (ready == 0) {
		/* EN == 1, wait for RDY == 1 or timeout & fail */
		err = nvme_controller_wait_for_ready(dev, 1);
		if (err != 0) {
			return err;
		}
	}

	/* EN is bit 0, so this clears only the enable bit */
	cc &= ~NVME_CC_REG_EN_MASK;
	nvme_mmio_write_4(regs, cc, cc);

	return nvme_controller_wait_for_ready(dev, 0);
}
/*
 * Enable the controller: program the admin queue registers (ASQ/ACQ/AQA)
 * and the controller configuration (CC), then wait for CSTS.RDY == 1.
 *
 * Follows the EN/RDY state machine: if EN is already set only wait for
 * RDY; if a previous disable is still in flight (EN == 0, RDY == 1),
 * wait for RDY == 0 before re-enabling.
 *
 * Fix: removed the dead store `aqa = 0;` that was immediately overwritten
 * by the following assignment.
 *
 * Returns 0 on success, negative errno on timeout.
 */
static int nvme_controller_enable(const struct device *dev)
{
	struct nvme_controller *nvme_ctrlr = dev->data;
	mm_reg_t regs = DEVICE_MMIO_GET(dev);
	uint32_t cc, csts, aqa, qsize;
	uint8_t enabled, ready;
	int err;

	cc = nvme_mmio_read_4(regs, cc);
	csts = nvme_mmio_read_4(regs, csts);

	ready = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
	enabled = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;

	if (enabled == 1) {
		if (ready == 1) {
			LOG_DBG("Already enabled");
			return 0;
		}

		return nvme_controller_wait_for_ready(dev, 1);
	}

	/* EN == 0 already wait for RDY == 0 or timeout & fail */
	err = nvme_controller_wait_for_ready(dev, 0);
	if (err != 0) {
		return err;
	}

	nvme_mmio_write_8(regs, asq, nvme_ctrlr->adminq->cmd_bus_addr);
	nvme_mmio_write_8(regs, acq, nvme_ctrlr->adminq->cpl_bus_addr);

	/* acqs and asqs are 0-based. */
	qsize = CONFIG_NVME_ADMIN_ENTRIES - 1;

	aqa = (qsize & NVME_AQA_REG_ACQS_MASK) << NVME_AQA_REG_ACQS_SHIFT;
	aqa |= (qsize & NVME_AQA_REG_ASQS_MASK) << NVME_AQA_REG_ASQS_SHIFT;
	nvme_mmio_write_4(regs, aqa, aqa);

	/* Initialization values for CC */
	cc = 0;
	cc |= 1 << NVME_CC_REG_EN_SHIFT;
	cc |= 0 << NVME_CC_REG_CSS_SHIFT;
	cc |= 0 << NVME_CC_REG_AMS_SHIFT;
	cc |= 0 << NVME_CC_REG_SHN_SHIFT;
	cc |= 6 << NVME_CC_REG_IOSQES_SHIFT; /* SQ entry size == 64 == 2^6 */
	cc |= 4 << NVME_CC_REG_IOCQES_SHIFT; /* CQ entry size == 16 == 2^4 */
	cc |= nvme_ctrlr->mps << NVME_CC_REG_MPS_SHIFT;

	nvme_mmio_write_4(regs, cc, cc);

	return nvme_controller_wait_for_ready(dev, 1);
}
/*
 * Read and log CAP, VS (and PMRCAP when persistent memory regions are
 * supported), then derive the parameters the rest of the driver uses:
 * doorbell stride, memory page size (MPSMIN is chosen), ready timeout
 * (CAP.TO is expressed in 500ms units) and the maximum transfer size
 * addressable through one page-sized PRP list.
 */
static void nvme_controller_gather_info(const struct device *dev)
{
	struct nvme_controller *nvme_ctrlr = dev->data;
	mm_reg_t regs = DEVICE_MMIO_GET(dev);
	uint32_t cap_lo, cap_hi, to, vs, pmrcap;

	nvme_ctrlr->cap_lo = cap_lo = nvme_mmio_read_4(regs, cap_lo);
	LOG_DBG("CapLo: 0x%08x: MQES %u%s%s%s%s, TO %u",
		cap_lo, NVME_CAP_LO_MQES(cap_lo),
		NVME_CAP_LO_CQR(cap_lo) ? ", CQR" : "",
		NVME_CAP_LO_AMS(cap_lo) ? ", AMS" : "",
		(NVME_CAP_LO_AMS(cap_lo) & 0x1) ? " WRRwUPC" : "",
		(NVME_CAP_LO_AMS(cap_lo) & 0x2) ? " VS" : "",
		NVME_CAP_LO_TO(cap_lo));

	nvme_ctrlr->cap_hi = cap_hi = nvme_mmio_read_4(regs, cap_hi);
	LOG_DBG("CapHi: 0x%08x: DSTRD %u%s, CSS %x%s, "
		"MPSMIN %u, MPSMAX %u%s%s", cap_hi,
		NVME_CAP_HI_DSTRD(cap_hi),
		NVME_CAP_HI_NSSRS(cap_hi) ? ", NSSRS" : "",
		NVME_CAP_HI_CSS(cap_hi),
		NVME_CAP_HI_BPS(cap_hi) ? ", BPS" : "",
		NVME_CAP_HI_MPSMIN(cap_hi),
		NVME_CAP_HI_MPSMAX(cap_hi),
		NVME_CAP_HI_PMRS(cap_hi) ? ", PMRS" : "",
		NVME_CAP_HI_CMBS(cap_hi) ? ", CMBS" : "");

	vs = nvme_mmio_read_4(regs, vs);
	LOG_DBG("Version: 0x%08x: %d.%d", vs,
		NVME_MAJOR(vs), NVME_MINOR(vs));

	if (NVME_CAP_HI_PMRS(cap_hi)) {
		pmrcap = nvme_mmio_read_4(regs, pmrcap);
		LOG_DBG("PMRCap: 0x%08x: BIR %u%s%s, PMRTU %u, "
			"PMRWBM %x, PMRTO %u%s", pmrcap,
			NVME_PMRCAP_BIR(pmrcap),
			NVME_PMRCAP_RDS(pmrcap) ? ", RDS" : "",
			NVME_PMRCAP_WDS(pmrcap) ? ", WDS" : "",
			NVME_PMRCAP_PMRTU(pmrcap),
			NVME_PMRCAP_PMRWBM(pmrcap),
			NVME_PMRCAP_PMRTO(pmrcap),
			NVME_PMRCAP_CMSS(pmrcap) ? ", CMSS" : "");
	}

	/* Doorbell stride in bytes is 4 << CAP.DSTRD, hence the +2 shift */
	nvme_ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2;

	nvme_ctrlr->mps = NVME_CAP_HI_MPSMIN(cap_hi);
	nvme_ctrlr->page_size = 1 << (NVME_MPS_SHIFT + nvme_ctrlr->mps);

	LOG_DBG("MPS: %u - Page Size: %u bytes",
		nvme_ctrlr->mps, nvme_ctrlr->page_size);

	/* Get ready timeout value from controller, in units of 500ms. */
	to = NVME_CAP_LO_TO(cap_lo) + 1;
	nvme_ctrlr->ready_timeout_in_ms = to * 500;

	/* Cap transfers by the maximum addressable by
	 * page-sized PRP (4KB pages -> 2MB).
	 * ToDo: it could be less -> take the minimum.
	 */
	nvme_ctrlr->max_xfer_size = nvme_ctrlr->page_size /
		8 * nvme_ctrlr->page_size;
	LOG_DBG("Max transfer size: %u bytes", nvme_ctrlr->max_xfer_size);
}
/*
 * Map the controller registers (BAR0) and set up MSI-X interrupts:
 * allocate one vector for the admin queue plus one per IO queue, then
 * enable them on the device.
 *
 * Returns 0 on success, -ENODEV if the PCIe device was not found,
 * -EIO on BAR mapping or MSI-X setup failure.
 */
static int nvme_controller_pcie_configure(const struct device *dev)
{
	const struct nvme_controller_config *nvme_ctrlr_cfg = dev->config;
	struct nvme_controller *nvme_ctrlr = dev->data;
	struct pcie_bar mbar_regs;
	uint8_t n_vectors;

	if (nvme_ctrlr_cfg->pcie->bdf == PCIE_BDF_NONE) {
		LOG_ERR("Controller not found");
		return -ENODEV;
	}

	LOG_DBG("Configuring NVME controller ID %x:%x at %d:%x.%d",
		PCIE_ID_TO_VEND(nvme_ctrlr_cfg->pcie->id),
		PCIE_ID_TO_DEV(nvme_ctrlr_cfg->pcie->id),
		PCIE_BDF_TO_BUS(nvme_ctrlr_cfg->pcie->bdf),
		PCIE_BDF_TO_DEV(nvme_ctrlr_cfg->pcie->bdf),
		PCIE_BDF_TO_FUNC(nvme_ctrlr_cfg->pcie->bdf));

	if (!pcie_get_mbar(nvme_ctrlr_cfg->pcie->bdf,
			   NVME_PCIE_BAR_IDX, &mbar_regs)) {
		LOG_ERR("Could not get NVME registers");
		return -EIO;
	}

	device_map(DEVICE_MMIO_RAM_PTR(dev), mbar_regs.phys_addr,
		   mbar_regs.size, K_MEM_CACHE_NONE);

	/* Allocating vectors */
	n_vectors = pcie_msi_vectors_allocate(nvme_ctrlr_cfg->pcie->bdf,
					      CONFIG_NVME_INT_PRIORITY,
					      nvme_ctrlr->vectors,
					      NVME_PCIE_MSIX_VECTORS);
	if (n_vectors == 0) {
		LOG_ERR("Could not allocate %u MSI-X vectors",
			NVME_PCIE_MSIX_VECTORS);
		return -EIO;
	}

	/* Enabling MSI-X and the vectors */
	if (!pcie_msi_enable(nvme_ctrlr_cfg->pcie->bdf,
			     nvme_ctrlr->vectors, n_vectors, 0)) {
		LOG_ERR("Could not enable MSI-X");
		return -EIO;
	}

	return 0;
}
/*
 * Device init hook: configure PCIe/MSI-X, read the controller
 * capabilities, then run the disable/enable reset sequence.
 *
 * Returns 0 on success, a negative errno from the first failing step
 * otherwise.
 */
static int nvme_controller_init(const struct device *dev)
{
	struct nvme_controller *nvme_ctrlr = dev->data;
	int err;

	nvme_ctrlr->dev = dev;

	err = nvme_controller_pcie_configure(dev);
	if (err != 0) {
		return err;
	}

	nvme_controller_gather_info(dev);

	err = nvme_controller_disable(dev);
	if (err != 0) {
		LOG_ERR("Controller cannot be disabled");
		return err;
	}

	err = nvme_controller_enable(dev);
	if (err != 0) {
		LOG_ERR("Controller cannot be enabled");
	}

	return err;
}
/*
 * Instantiate one NVMe controller device per enabled devicetree node:
 * declares the PCIe instance, the runtime data and config structs, and
 * registers nvme_controller_init() to run at POST_KERNEL.
 */
#define NVME_CONTROLLER_DEVICE_INIT(n)					\
	DEVICE_PCIE_INST_DECLARE(n);					\
									\
	static struct nvme_controller nvme_ctrlr_data_##n = {		\
		.id = n,						\
	};								\
									\
	static struct nvme_controller_config nvme_ctrlr_cfg_##n =	\
	{								\
		DEVICE_PCIE_INST_INIT(n, pcie),				\
	};								\
									\
	DEVICE_DT_INST_DEFINE(n, &nvme_controller_init,		\
			      NULL, &nvme_ctrlr_data_##n,		\
			      &nvme_ctrlr_cfg_##n, POST_KERNEL,		\
			      CONFIG_KERNEL_INIT_PRIORITY_DEVICE, NULL);

DT_INST_FOREACH_STATUS_OKAY(NVME_CONTROLLER_DEVICE_INIT)

View file

@ -0,0 +1,390 @@
/*
* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2022 Intel Corp.
*/
#ifndef ZEPHYR_DRIVERS_DISK_NVME_NVME_HELPERS_H_
#define ZEPHYR_DRIVERS_DISK_NVME_NVME_HELPERS_H_
/* All-ones value read back from MMIO when the device has dropped off
 * the bus (see nvme_controller_wait_for_ready()).
 */
#define NVME_GONE 0xfffffffful

/*
 * Macros to deal with NVME revisions, as defined in the VS register
 */
#define NVME_REV(x, y)			(((x) << 16) | ((y) << 8))
#define NVME_MAJOR(r)			(((r) >> 16) & 0xffff)
#define NVME_MINOR(r)			(((r) >> 8) & 0xff)

/* Many items are expressed in terms of power of two times MPS */
#define NVME_MPS_SHIFT			12
/* Register field definitions */
#define NVME_CAP_LO_REG_MQES_SHIFT (0)
#define NVME_CAP_LO_REG_MQES_MASK (0xFFFF)
#define NVME_CAP_LO_REG_CQR_SHIFT (16)
#define NVME_CAP_LO_REG_CQR_MASK (0x1)
#define NVME_CAP_LO_REG_AMS_SHIFT (17)
#define NVME_CAP_LO_REG_AMS_MASK (0x3)
#define NVME_CAP_LO_REG_TO_SHIFT (24)
#define NVME_CAP_LO_REG_TO_MASK (0xFF)
#define NVME_CAP_LO_MQES(x) \
(((x) >> NVME_CAP_LO_REG_MQES_SHIFT) & NVME_CAP_LO_REG_MQES_MASK)
#define NVME_CAP_LO_CQR(x) \
(((x) >> NVME_CAP_LO_REG_CQR_SHIFT) & NVME_CAP_LO_REG_CQR_MASK)
#define NVME_CAP_LO_AMS(x) \
(((x) >> NVME_CAP_LO_REG_AMS_SHIFT) & NVME_CAP_LO_REG_AMS_MASK)
#define NVME_CAP_LO_TO(x) \
(((x) >> NVME_CAP_LO_REG_TO_SHIFT) & NVME_CAP_LO_REG_TO_MASK)
#define NVME_CAP_HI_REG_DSTRD_SHIFT (0)
#define NVME_CAP_HI_REG_DSTRD_MASK (0xF)
#define NVME_CAP_HI_REG_NSSRS_SHIFT (4)
#define NVME_CAP_HI_REG_NSSRS_MASK (0x1)
#define NVME_CAP_HI_REG_CSS_SHIFT (5)
#define NVME_CAP_HI_REG_CSS_MASK (0xff)
#define NVME_CAP_HI_REG_CSS_NVM_SHIFT (5)
#define NVME_CAP_HI_REG_CSS_NVM_MASK (0x1)
#define NVME_CAP_HI_REG_BPS_SHIFT (13)
#define NVME_CAP_HI_REG_BPS_MASK (0x1)
#define NVME_CAP_HI_REG_MPSMIN_SHIFT (16)
#define NVME_CAP_HI_REG_MPSMIN_MASK (0xF)
#define NVME_CAP_HI_REG_MPSMAX_SHIFT (20)
#define NVME_CAP_HI_REG_MPSMAX_MASK (0xF)
#define NVME_CAP_HI_REG_PMRS_SHIFT (24)
#define NVME_CAP_HI_REG_PMRS_MASK (0x1)
#define NVME_CAP_HI_REG_CMBS_SHIFT (25)
#define NVME_CAP_HI_REG_CMBS_MASK (0x1)
#define NVME_CAP_HI_DSTRD(x) \
(((x) >> NVME_CAP_HI_REG_DSTRD_SHIFT) & NVME_CAP_HI_REG_DSTRD_MASK)
#define NVME_CAP_HI_NSSRS(x) \
(((x) >> NVME_CAP_HI_REG_NSSRS_SHIFT) & NVME_CAP_HI_REG_NSSRS_MASK)
#define NVME_CAP_HI_CSS(x) \
(((x) >> NVME_CAP_HI_REG_CSS_SHIFT) & NVME_CAP_HI_REG_CSS_MASK)
#define NVME_CAP_HI_CSS_NVM(x) \
(((x) >> NVME_CAP_HI_REG_CSS_NVM_SHIFT) & NVME_CAP_HI_REG_CSS_NVM_MASK)
#define NVME_CAP_HI_BPS(x) \
(((x) >> NVME_CAP_HI_REG_BPS_SHIFT) & NVME_CAP_HI_REG_BPS_MASK)
#define NVME_CAP_HI_MPSMIN(x) \
(((x) >> NVME_CAP_HI_REG_MPSMIN_SHIFT) & NVME_CAP_HI_REG_MPSMIN_MASK)
#define NVME_CAP_HI_MPSMAX(x) \
(((x) >> NVME_CAP_HI_REG_MPSMAX_SHIFT) & NVME_CAP_HI_REG_MPSMAX_MASK)
#define NVME_CAP_HI_PMRS(x) \
(((x) >> NVME_CAP_HI_REG_PMRS_SHIFT) & NVME_CAP_HI_REG_PMRS_MASK)
#define NVME_CAP_HI_CMBS(x) \
(((x) >> NVME_CAP_HI_REG_CMBS_SHIFT) & NVME_CAP_HI_REG_CMBS_MASK)
#define NVME_CC_REG_EN_SHIFT (0)
#define NVME_CC_REG_EN_MASK (0x1)
#define NVME_CC_REG_CSS_SHIFT (4)
#define NVME_CC_REG_CSS_MASK (0x7)
#define NVME_CC_REG_MPS_SHIFT (7)
#define NVME_CC_REG_MPS_MASK (0xF)
#define NVME_CC_REG_AMS_SHIFT (11)
#define NVME_CC_REG_AMS_MASK (0x7)
#define NVME_CC_REG_SHN_SHIFT (14)
#define NVME_CC_REG_SHN_MASK (0x3)
#define NVME_CC_REG_IOSQES_SHIFT (16)
#define NVME_CC_REG_IOSQES_MASK (0xF)
#define NVME_CC_REG_IOCQES_SHIFT (20)
#define NVME_CC_REG_IOCQES_MASK (0xF)
#define NVME_CSTS_REG_RDY_SHIFT (0)
#define NVME_CSTS_REG_RDY_MASK (0x1)
#define NVME_CSTS_REG_CFS_SHIFT (1)
#define NVME_CSTS_REG_CFS_MASK (0x1)
#define NVME_CSTS_REG_SHST_SHIFT (2)
#define NVME_CSTS_REG_SHST_MASK (0x3)
#define NVME_CSTS_REG_NVSRO_SHIFT (4)
#define NVME_CSTS_REG_NVSRO_MASK (0x1)
#define NVME_CSTS_REG_PP_SHIFT (5)
#define NVME_CSTS_REG_PP_MASK (0x1)
#define NVME_CSTS_GET_SHST(csts) \
(((csts) >> NVME_CSTS_REG_SHST_SHIFT) & NVME_CSTS_REG_SHST_MASK)
#define NVME_AQA_REG_ASQS_SHIFT (0)
#define NVME_AQA_REG_ASQS_MASK (0xFFF)
#define NVME_AQA_REG_ACQS_SHIFT (16)
#define NVME_AQA_REG_ACQS_MASK (0xFFF)
#define NVME_PMRCAP_REG_RDS_SHIFT (3)
#define NVME_PMRCAP_REG_RDS_MASK (0x1)
#define NVME_PMRCAP_REG_WDS_SHIFT (4)
#define NVME_PMRCAP_REG_WDS_MASK (0x1)
#define NVME_PMRCAP_REG_BIR_SHIFT (5)
#define NVME_PMRCAP_REG_BIR_MASK (0x7)
#define NVME_PMRCAP_REG_PMRTU_SHIFT (8)
#define NVME_PMRCAP_REG_PMRTU_MASK (0x3)
#define NVME_PMRCAP_REG_PMRWBM_SHIFT (10)
#define NVME_PMRCAP_REG_PMRWBM_MASK (0xf)
#define NVME_PMRCAP_REG_PMRTO_SHIFT (16)
#define NVME_PMRCAP_REG_PMRTO_MASK (0xff)
#define NVME_PMRCAP_REG_CMSS_SHIFT (24)
#define NVME_PMRCAP_REG_CMSS_MASK (0x1)
#define NVME_PMRCAP_RDS(x) \
(((x) >> NVME_PMRCAP_REG_RDS_SHIFT) & NVME_PMRCAP_REG_RDS_MASK)
#define NVME_PMRCAP_WDS(x) \
(((x) >> NVME_PMRCAP_REG_WDS_SHIFT) & NVME_PMRCAP_REG_WDS_MASK)
#define NVME_PMRCAP_BIR(x) \
(((x) >> NVME_PMRCAP_REG_BIR_SHIFT) & NVME_PMRCAP_REG_BIR_MASK)
#define NVME_PMRCAP_PMRTU(x) \
(((x) >> NVME_PMRCAP_REG_PMRTU_SHIFT) & NVME_PMRCAP_REG_PMRTU_MASK)
#define NVME_PMRCAP_PMRWBM(x) \
(((x) >> NVME_PMRCAP_REG_PMRWBM_SHIFT) & NVME_PMRCAP_REG_PMRWBM_MASK)
#define NVME_PMRCAP_PMRTO(x) \
(((x) >> NVME_PMRCAP_REG_PMRTO_SHIFT) & NVME_PMRCAP_REG_PMRTO_MASK)
#define NVME_PMRCAP_CMSS(x) \
(((x) >> NVME_PMRCAP_REG_CMSS_SHIFT) & NVME_PMRCAP_REG_CMSS_MASK)
/* Command field definitions */
#define NVME_CMD_FUSE_SHIFT (8)
#define NVME_CMD_FUSE_MASK (0x3)
#define NVME_STATUS_P_SHIFT (0)
#define NVME_STATUS_P_MASK (0x1)
#define NVME_STATUS_SC_SHIFT (1)
#define NVME_STATUS_SC_MASK (0xFF)
#define NVME_STATUS_SCT_SHIFT (9)
#define NVME_STATUS_SCT_MASK (0x7)
#define NVME_STATUS_CRD_SHIFT (12)
#define NVME_STATUS_CRD_MASK (0x3)
#define NVME_STATUS_M_SHIFT (14)
#define NVME_STATUS_M_MASK (0x1)
#define NVME_STATUS_DNR_SHIFT (15)
#define NVME_STATUS_DNR_MASK (0x1)
#define NVME_STATUS_GET_P(st) \
(((st) >> NVME_STATUS_P_SHIFT) & NVME_STATUS_P_MASK)
#define NVME_STATUS_GET_SC(st) \
(((st) >> NVME_STATUS_SC_SHIFT) & NVME_STATUS_SC_MASK)
#define NVME_STATUS_GET_SCT(st) \
(((st) >> NVME_STATUS_SCT_SHIFT) & NVME_STATUS_SCT_MASK)
#define NVME_STATUS_GET_CRD(st) \
(((st) >> NVME_STATUS_CRD_SHIFT) & NVME_STATUS_CRD_MASK)
#define NVME_STATUS_GET_M(st) \
(((st) >> NVME_STATUS_M_SHIFT) & NVME_STATUS_M_MASK)
#define NVME_STATUS_GET_DNR(st) \
(((st) >> NVME_STATUS_DNR_SHIFT) & NVME_STATUS_DNR_MASK)
/** Controller Multi-path I/O and Namespace Sharing Capabilities */
/* More than one port */
#define NVME_CTRLR_DATA_MIC_MPORTS_SHIFT (0)
#define NVME_CTRLR_DATA_MIC_MPORTS_MASK (0x1)
/* More than one controller */
#define NVME_CTRLR_DATA_MIC_MCTRLRS_SHIFT (1)
#define NVME_CTRLR_DATA_MIC_MCTRLRS_MASK (0x1)
/* SR-IOV Virtual Function */
#define NVME_CTRLR_DATA_MIC_SRIOVVF_SHIFT (2)
#define NVME_CTRLR_DATA_MIC_SRIOVVF_MASK (0x1)
/* Asymmetric Namespace Access Reporting */
#define NVME_CTRLR_DATA_MIC_ANAR_SHIFT (3)
#define NVME_CTRLR_DATA_MIC_ANAR_MASK (0x1)
/** OAES - Optional Asynchronous Events Supported */
/* supports Namespace Attribute Notices event */
#define NVME_CTRLR_DATA_OAES_NS_ATTR_SHIFT (8)
#define NVME_CTRLR_DATA_OAES_NS_ATTR_MASK (0x1)
/* supports Firmware Activation Notices event */
#define NVME_CTRLR_DATA_OAES_FW_ACTIVATE_SHIFT (9)
#define NVME_CTRLR_DATA_OAES_FW_ACTIVATE_MASK (0x1)
/* supports Asymmetric Namespace Access Change Notices event */
#define NVME_CTRLR_DATA_OAES_ASYM_NS_CHANGE_SHIFT (11)
#define NVME_CTRLR_DATA_OAES_ASYM_NS_CHANGE_MASK (0x1)
/* supports Predictable Latency Event Aggregate Log Change Notices event */
#define NVME_CTRLR_DATA_OAES_PREDICT_LATENCY_SHIFT (12)
#define NVME_CTRLR_DATA_OAES_PREDICT_LATENCY_MASK (0x1)
/* supports LBA Status Information Notices event */
#define NVME_CTRLR_DATA_OAES_LBA_STATUS_SHIFT (13)
#define NVME_CTRLR_DATA_OAES_LBA_STATUS_MASK (0x1)
/* supports Endurance Group Event Aggregate Log Page Changes Notices event */
#define NVME_CTRLR_DATA_OAES_ENDURANCE_GROUP_SHIFT (14)
#define NVME_CTRLR_DATA_OAES_ENDURANCE_GROUP_MASK (0x1)
/* supports Normal NVM Subsystem Shutdown event */
#define NVME_CTRLR_DATA_OAES_NORMAL_SHUTDOWN_SHIFT (15)
#define NVME_CTRLR_DATA_OAES_NORMAL_SHUTDOWN_MASK (0x1)
/* supports Zone Descriptor Changed Notices event */
/* supports Zone Descriptor Changed Notices event */
#define NVME_CTRLR_DATA_OAES_ZONE_DESC_CHANGE_SHIFT (27)
#define NVME_CTRLR_DATA_OAES_ZONE_DESC_CHANGE_MASK (0x1)
/* supports Discovery Log Page Change Notification event */
#define NVME_CTRLR_DATA_OAES_LOG_PAGE_CHANGE_SHIFT (31)
#define NVME_CTRLR_DATA_OAES_LOG_PAGE_CHANGE_MASK (0x1)
/** OACS - optional admin command support */
/* supports security send/receive commands */
#define NVME_CTRLR_DATA_OACS_SECURITY_SHIFT (0)
#define NVME_CTRLR_DATA_OACS_SECURITY_MASK (0x1)
/* supports format nvm command */
#define NVME_CTRLR_DATA_OACS_FORMAT_SHIFT (1)
#define NVME_CTRLR_DATA_OACS_FORMAT_MASK (0x1)
/* supports firmware activate/download commands */
#define NVME_CTRLR_DATA_OACS_FIRMWARE_SHIFT (2)
#define NVME_CTRLR_DATA_OACS_FIRMWARE_MASK (0x1)
/* supports namespace management commands */
#define NVME_CTRLR_DATA_OACS_NSMGMT_SHIFT (3)
#define NVME_CTRLR_DATA_OACS_NSMGMT_MASK (0x1)
/* supports Device Self-test command */
#define NVME_CTRLR_DATA_OACS_SELFTEST_SHIFT (4)
#define NVME_CTRLR_DATA_OACS_SELFTEST_MASK (0x1)
/* supports Directives */
#define NVME_CTRLR_DATA_OACS_DIRECTIVES_SHIFT (5)
#define NVME_CTRLR_DATA_OACS_DIRECTIVES_MASK (0x1)
/* supports NVMe-MI Send/Receive */
#define NVME_CTRLR_DATA_OACS_NVMEMI_SHIFT (6)
#define NVME_CTRLR_DATA_OACS_NVMEMI_MASK (0x1)
/* supports Virtualization Management */
#define NVME_CTRLR_DATA_OACS_VM_SHIFT (7)
#define NVME_CTRLR_DATA_OACS_VM_MASK (0x1)
/* supports Doorbell Buffer Config */
#define NVME_CTRLR_DATA_OACS_DBBUFFER_SHIFT (8)
#define NVME_CTRLR_DATA_OACS_DBBUFFER_MASK (0x1)
/* supports Get LBA Status */
#define NVME_CTRLR_DATA_OACS_GETLBA_SHIFT (9)
#define NVME_CTRLR_DATA_OACS_GETLBA_MASK (0x1)
/** FRMW - firmware updates */
/* first slot is read-only */
#define NVME_CTRLR_DATA_FRMW_SLOT1_RO_SHIFT (0)
#define NVME_CTRLR_DATA_FRMW_SLOT1_RO_MASK (0x1)
/* number of firmware slots */
#define NVME_CTRLR_DATA_FRMW_NUM_SLOTS_SHIFT (1)
#define NVME_CTRLR_DATA_FRMW_NUM_SLOTS_MASK (0x7)
/* firmware activation without reset */
#define NVME_CTRLR_DATA_FRMW_ACT_WO_RESET_SHIFT (4)
#define NVME_CTRLR_DATA_FRMW_ACT_WO_RESET_MASK (0x1)
/** LPA - log page attributes */
/* per namespace smart/health log page */
#define NVME_CTRLR_DATA_LPA_NS_SMART_SHIFT (0)
#define NVME_CTRLR_DATA_LPA_NS_SMART_MASK (0x1)
/** AVSCC - admin vendor specific command configuration */
/* admin vendor specific commands use spec format */
#define NVME_CTRLR_DATA_AVSCC_SPEC_FORMAT_SHIFT (0)
#define NVME_CTRLR_DATA_AVSCC_SPEC_FORMAT_MASK (0x1)
/** APSTA - Autonomous Power State Transition Attributes */
/* Autonomous Power State Transitions supported */
#define NVME_CTRLR_DATA_APSTA_APST_SUPP_SHIFT (0)
#define NVME_CTRLR_DATA_APSTA_APST_SUPP_MASK (0x1)
/** SANICAP - Sanitize Capabilities */
/* Crypto Erase Support */
#define NVME_CTRLR_DATA_SANICAP_CES_SHIFT (0)
#define NVME_CTRLR_DATA_SANICAP_CES_MASK (0x1)
/* Block Erase Support */
#define NVME_CTRLR_DATA_SANICAP_BES_SHIFT (1)
#define NVME_CTRLR_DATA_SANICAP_BES_MASK (0x1)
/* Overwrite Support */
#define NVME_CTRLR_DATA_SANICAP_OWS_SHIFT (2)
#define NVME_CTRLR_DATA_SANICAP_OWS_MASK (0x1)
/* No-Deallocate Inhibited */
#define NVME_CTRLR_DATA_SANICAP_NDI_SHIFT (29)
#define NVME_CTRLR_DATA_SANICAP_NDI_MASK (0x1)
/* No-Deallocate Modifies Media After Sanitize (two-bit field) */
#define NVME_CTRLR_DATA_SANICAP_NODMMAS_SHIFT (30)
#define NVME_CTRLR_DATA_SANICAP_NODMMAS_MASK (0x3)
#define NVME_CTRLR_DATA_SANICAP_NODMMAS_UNDEF (0) /* not defined/reported */
#define NVME_CTRLR_DATA_SANICAP_NODMMAS_NO (1) /* media not modified */
#define NVME_CTRLR_DATA_SANICAP_NODMMAS_YES (2) /* media modified */
/** SQES - submission queue entry size */
/* minimum (required) SQ entry size */
#define NVME_CTRLR_DATA_SQES_MIN_SHIFT (0)
#define NVME_CTRLR_DATA_SQES_MIN_MASK (0xF)
/* maximum (supported) SQ entry size */
#define NVME_CTRLR_DATA_SQES_MAX_SHIFT (4)
#define NVME_CTRLR_DATA_SQES_MAX_MASK (0xF)
/** CQES - completion queue entry size */
/* minimum (required) CQ entry size */
#define NVME_CTRLR_DATA_CQES_MIN_SHIFT (0)
#define NVME_CTRLR_DATA_CQES_MIN_MASK (0xF)
/* maximum (supported) CQ entry size */
#define NVME_CTRLR_DATA_CQES_MAX_SHIFT (4)
#define NVME_CTRLR_DATA_CQES_MAX_MASK (0xF)
/** ONCS - optional nvm command support */
/* supports Compare command */
#define NVME_CTRLR_DATA_ONCS_COMPARE_SHIFT (0)
#define NVME_CTRLR_DATA_ONCS_COMPARE_MASK (0x1)
/* supports Write Uncorrectable command */
#define NVME_CTRLR_DATA_ONCS_WRITE_UNC_SHIFT (1)
#define NVME_CTRLR_DATA_ONCS_WRITE_UNC_MASK (0x1)
/* supports Dataset Management command */
#define NVME_CTRLR_DATA_ONCS_DSM_SHIFT (2)
#define NVME_CTRLR_DATA_ONCS_DSM_MASK (0x1)
/* supports Write Zeroes command */
#define NVME_CTRLR_DATA_ONCS_WRZERO_SHIFT (3)
#define NVME_CTRLR_DATA_ONCS_WRZERO_MASK (0x1)
/* supports the Save/Select fields of Set/Get Features */
#define NVME_CTRLR_DATA_ONCS_SAVEFEAT_SHIFT (4)
#define NVME_CTRLR_DATA_ONCS_SAVEFEAT_MASK (0x1)
/* supports reservations */
#define NVME_CTRLR_DATA_ONCS_RESERV_SHIFT (5)
#define NVME_CTRLR_DATA_ONCS_RESERV_MASK (0x1)
/* supports the Timestamp feature */
#define NVME_CTRLR_DATA_ONCS_TIMESTAMP_SHIFT (6)
#define NVME_CTRLR_DATA_ONCS_TIMESTAMP_MASK (0x1)
/* supports Verify command */
#define NVME_CTRLR_DATA_ONCS_VERIFY_SHIFT (7)
#define NVME_CTRLR_DATA_ONCS_VERIFY_MASK (0x1)
/** FUSES - Fused Operation Support */
/* supports the Compare and Write fused operation */
#define NVME_CTRLR_DATA_FUSES_CNW_SHIFT (0)
#define NVME_CTRLR_DATA_FUSES_CNW_MASK (0x1)
/** FNA - Format NVM Attributes */
/* format applies to all namespaces */
#define NVME_CTRLR_DATA_FNA_FORMAT_ALL_SHIFT (0)
#define NVME_CTRLR_DATA_FNA_FORMAT_ALL_MASK (0x1)
/* secure erase applies to all namespaces */
#define NVME_CTRLR_DATA_FNA_ERASE_ALL_SHIFT (1)
#define NVME_CTRLR_DATA_FNA_ERASE_ALL_MASK (0x1)
/* cryptographic erase supported */
#define NVME_CTRLR_DATA_FNA_CRYPTO_ERASE_SHIFT (2)
#define NVME_CTRLR_DATA_FNA_CRYPTO_ERASE_MASK (0x1)
/** VWC - volatile write cache */
/* volatile write cache present */
#define NVME_CTRLR_DATA_VWC_PRESENT_SHIFT (0)
#define NVME_CTRLR_DATA_VWC_PRESENT_MASK (0x1)
/* flush all namespaces supported (two-bit field) */
#define NVME_CTRLR_DATA_VWC_ALL_SHIFT (1)
#define NVME_CTRLR_DATA_VWC_ALL_MASK (0x3)
#define NVME_CTRLR_DATA_VWC_ALL_UNKNOWN (0) /* support not indicated */
#define NVME_CTRLR_DATA_VWC_ALL_NO (2) /* not supported */
#define NVME_CTRLR_DATA_VWC_ALL_YES (3) /* supported */
/*
 * Critical warning bit flags, as reported in the SMART / health
 * information log page and in async event notifications.
 */
enum nvme_critical_warning_state {
	NVME_CRIT_WARN_ST_AVAILABLE_SPARE = 0x1, /* spare capacity below threshold */
	NVME_CRIT_WARN_ST_TEMPERATURE = 0x2, /* temperature outside threshold */
	NVME_CRIT_WARN_ST_DEVICE_RELIABILITY = 0x4, /* reliability degraded */
	NVME_CRIT_WARN_ST_READ_ONLY = 0x8, /* media placed in read-only mode */
	NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP = 0x10, /* backup device failed */
};
/* bits 5-7 of the critical warning byte are reserved */
#define NVME_CRIT_WARN_ST_RESERVED_MASK (0xE0)
/* async event: namespace attribute changed */
#define NVME_ASYNC_EVENT_NS_ATTRIBUTE (0x100)
/* async event: firmware activation starting */
#define NVME_ASYNC_EVENT_FW_ACTIVATE (0x200)
/*
 * Helper macro to combine *_MASK and *_SHIFT defines into the in-place
 * bit pattern of a field, e.g. NVMEB(NVME_CTRLR_DATA_OACS_FORMAT).
 * The pasted tokens are parenthesized defensively so the expansion is
 * correct even if a MASK/SHIFT define is a non-primary expression.
 */
#define NVMEB(name) ((name##_MASK) << (name##_SHIFT))
/* CC register SHN field values (shutdown notification written by host) */
enum shn_value {
	NVME_SHN_NORMAL = 0x1, /* normal shutdown notification */
	NVME_SHN_ABRUPT = 0x2, /* abrupt shutdown notification */
};
/* CSTS register SHST field values (shutdown status reported by controller) */
enum shst_value {
	NVME_SHST_NORMAL = 0x0, /* normal operation, no shutdown in progress */
	NVME_SHST_OCCURRING = 0x1, /* shutdown processing occurring */
	NVME_SHST_COMPLETE = 0x2, /* shutdown processing complete */
};
/*
 * Byte offset of register 'reg' within struct nvme_registers.
 * Note: 'reg' is a member designator for offsetof() and therefore
 * must not be parenthesized.
 */
#define nvme_mmio_offsetof(reg) \
	offsetof(struct nvme_registers, reg)
/*
 * Read the 32-bit controller register 'reg' from MMIO base address 'b_a'.
 * 'b_a' is parenthesized so the (mm_reg_t) cast binds to the whole
 * argument even when a non-primary expression is passed.
 */
#define nvme_mmio_read_4(b_a, reg) \
	sys_read32((mm_reg_t)(b_a) + nvme_mmio_offsetof(reg))
/*
 * Write 32-bit value 'val' to controller register 'reg' at MMIO base
 * address 'b_a'. Arguments are parenthesized so casts and operators in
 * the expansion bind to the whole argument expressions.
 */
#define nvme_mmio_write_4(b_a, reg, val) \
	sys_write32((val), (mm_reg_t)(b_a) + nvme_mmio_offsetof(reg))
/*
 * Write 64-bit value 'val' to controller register 'reg' at MMIO base
 * address 'b_a', as two 32-bit accesses (low dword first).
 * Arguments are parenthesized so casts and the & operator bind to the
 * whole argument expressions.
 * Note: 'b_a' and 'val' are evaluated twice; avoid side effects.
 */
#define nvme_mmio_write_8(b_a, reg, val) \
	do { \
		sys_write32((val) & 0xFFFFFFFF, \
			(mm_reg_t)(b_a) + nvme_mmio_offsetof(reg)); \
		sys_write32(((val) & 0xFFFFFFFF00000000ULL) >> 32, \
			(mm_reg_t)(b_a) + nvme_mmio_offsetof(reg) + 4); \
	} while (0)
#endif /* ZEPHYR_DRIVERS_DISK_NVME_NHME_HELPERS_H_ */