diff --git a/drivers/ethernet/CMakeLists.txt b/drivers/ethernet/CMakeLists.txt
index fb623f5ce46..3314a1d6fa9 100644
--- a/drivers/ethernet/CMakeLists.txt
+++ b/drivers/ethernet/CMakeLists.txt
@@ -31,6 +31,7 @@ zephyr_library_sources_ifdef(CONFIG_ETH_SAM_GMAC eth_sam_gmac.c)
 zephyr_library_sources_ifdef(CONFIG_ETH_CYCLONEV eth_cyclonev.c)
 zephyr_library_sources_ifdef(CONFIG_SLIP_TAP eth_slip_tap.c)
 zephyr_library_sources_ifdef(CONFIG_ETH_SMSC91X eth_smsc91x.c)
+zephyr_library_sources_ifdef(CONFIG_ETH_IVSHMEM eth_ivshmem.c eth_ivshmem_queue.c)
 
 if(CONFIG_ETH_NXP_S32_NETC)
   zephyr_library_sources(eth_nxp_s32_netc.c)
diff --git a/drivers/ethernet/Kconfig b/drivers/ethernet/Kconfig
index e472fea8e6f..33a85e4a513 100644
--- a/drivers/ethernet/Kconfig
+++ b/drivers/ethernet/Kconfig
@@ -58,6 +58,7 @@ source "drivers/ethernet/Kconfig.xlnx_gem"
 source "drivers/ethernet/Kconfig.cyclonev"
 source "drivers/ethernet/Kconfig.nxp_s32"
 source "drivers/ethernet/Kconfig.smsc91x"
+source "drivers/ethernet/Kconfig.ivshmem"
 
 source "drivers/ethernet/phy/Kconfig"
diff --git a/drivers/ethernet/Kconfig.ivshmem b/drivers/ethernet/Kconfig.ivshmem
new file mode 100644
index 00000000000..ea7dad4d3b0
--- /dev/null
+++ b/drivers/ethernet/Kconfig.ivshmem
@@ -0,0 +1,27 @@
+# IVSHMEM Ethernet driver configuration options
+
+# Copyright (c) 2023 Enphase Energy
+# SPDX-License-Identifier: Apache-2.0
+
+menuconfig ETH_IVSHMEM
+	bool "Inter-VM shared memory Ethernet driver"
+	select PCIE
+	select VIRTUALIZATION
+	select IVSHMEM_V2
+	select IVSHMEM_DOORBELL
+	select OPENAMP
+	help
+	  Enable the Inter-VM Shared Memory Ethernet driver.
+	  Used for Ethernet communication between "cells" in the Jailhouse hypervisor.
+
+if ETH_IVSHMEM
+
+config ETH_IVSHMEM_THREAD_STACK_SIZE
+	int "IVSHMEM Ethernet thread stack size"
+	default 4096
+
+config ETH_IVSHMEM_THREAD_PRIORITY
+	int "IVSHMEM Ethernet thread priority"
+	default 2
+
+endif # ETH_IVSHMEM
diff --git a/drivers/ethernet/eth_ivshmem.c b/drivers/ethernet/eth_ivshmem.c
new file mode 100644
index 00000000000..eb869261cbc
--- /dev/null
+++ b/drivers/ethernet/eth_ivshmem.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2023 Enphase Energy
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#define DT_DRV_COMPAT siemens_ivshmem_eth
+
+#include <zephyr/drivers/virtualization/ivshmem.h>
+#include <zephyr/logging/log.h>
+#include <zephyr/net/ethernet.h>
+#include <zephyr/random/rand32.h>
+
+#include "eth.h"
+#include "eth_ivshmem_priv.h"
+
+LOG_MODULE_REGISTER(eth_ivshmem, CONFIG_ETHERNET_LOG_LEVEL);
+
+#define ETH_IVSHMEM_STATE_RESET		0
+#define ETH_IVSHMEM_STATE_INIT		1
+#define ETH_IVSHMEM_STATE_READY		2
+#define ETH_IVSHMEM_STATE_RUN		3
+
+static const char * const eth_ivshmem_state_names[] = {
+	[ETH_IVSHMEM_STATE_RESET] = "RESET",
+	[ETH_IVSHMEM_STATE_INIT] = "INIT",
+	[ETH_IVSHMEM_STATE_READY] = "READY",
+	[ETH_IVSHMEM_STATE_RUN] = "RUN"
+};
+
+struct eth_ivshmem_dev_data {
+	struct net_if *iface;
+
+	uint32_t tx_rx_vector;
+	uint32_t peer_id;
+	uint8_t mac_addr[6];
+	struct k_poll_signal poll_signal;
+	struct eth_ivshmem_queue ivshmem_queue;
+
+	K_KERNEL_STACK_MEMBER(thread_stack, CONFIG_ETH_IVSHMEM_THREAD_STACK_SIZE);
+	struct k_thread thread;
+	bool enabled;
+	uint32_t state;
+#if defined(CONFIG_NET_STATISTICS_ETHERNET)
+	struct net_stats_eth stats;
+#endif
+};
+
+struct eth_ivshmem_cfg_data {
+	const struct device *ivshmem;
+	const char *name;
+	void (*generate_mac_addr)(uint8_t mac_addr[6]);
+};
+
+#if defined(CONFIG_NET_STATISTICS_ETHERNET)
+static struct net_stats_eth *eth_ivshmem_get_stats(const struct device *dev)
+{
+	struct eth_ivshmem_dev_data *dev_data = dev->data;
+
+	return &dev_data->stats;
+}
+#endif
+
+static int eth_ivshmem_start(const struct device *dev)
+{
+	struct eth_ivshmem_dev_data *dev_data = dev->data;
+
+	dev_data->enabled = true;
+
+	/* Wake up thread to check/update state */
+	k_poll_signal_raise(&dev_data->poll_signal, 0);
+
+	return 0;
+}
+
+static int eth_ivshmem_stop(const struct device *dev)
+{
+	struct eth_ivshmem_dev_data *dev_data = dev->data;
+
+	dev_data->enabled = false;
+
+	/* Wake up thread to check/update state */
+	k_poll_signal_raise(&dev_data->poll_signal, 0);
+
+	return 0;
+}
+
+static enum ethernet_hw_caps eth_ivshmem_caps(const struct device *dev)
+{
+	ARG_UNUSED(dev);
+	return ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T | ETHERNET_LINK_1000BASE_T;
+}
+
+static int eth_ivshmem_send(const struct device *dev, struct net_pkt *pkt)
+{
+	struct eth_ivshmem_dev_data *dev_data = dev->data;
+	const struct eth_ivshmem_cfg_data *cfg_data = dev->config;
+	size_t len = net_pkt_get_len(pkt);
+
+	void *data;
+	int res = eth_ivshmem_queue_tx_get_buff(&dev_data->ivshmem_queue, &data, len);
+
+	if (res != 0) {
+		LOG_ERR("Failed to allocate tx buffer");
+		eth_stats_update_errors_tx(dev_data->iface);
+		return res;
+	}
+
+	if (net_pkt_read(pkt, data, len)) {
+		LOG_ERR("Failed to read tx packet");
+		eth_stats_update_errors_tx(dev_data->iface);
+		return -EIO;
+	}
+
+	res = eth_ivshmem_queue_tx_commit_buff(&dev_data->ivshmem_queue);
+	if (res == 0) {
+		/* Notify peer */
+		ivshmem_int_peer(cfg_data->ivshmem, dev_data->peer_id, dev_data->tx_rx_vector);
+	}
+
+	return res;
+}
+
+static struct net_pkt *eth_ivshmem_rx(const struct device *dev)
+{
+	struct eth_ivshmem_dev_data *dev_data = dev->data;
+	const struct eth_ivshmem_cfg_data *cfg_data = dev->config;
+	const void *rx_data;
+	size_t rx_len;
+
+	int res = eth_ivshmem_queue_rx(&dev_data->ivshmem_queue, &rx_data, &rx_len);
+
+	if (res != 0) {
+		if (res != -EWOULDBLOCK) {
+			LOG_ERR("Queue RX failed");
+			eth_stats_update_errors_rx(dev_data->iface);
+		}
+		return NULL;
+	}
+
+	struct net_pkt *pkt = net_pkt_rx_alloc_with_buffer(
+		dev_data->iface, rx_len, AF_UNSPEC, 0, K_MSEC(100));
+
+	if (pkt == NULL) {
+		LOG_ERR("Failed to allocate rx buffer");
+		eth_stats_update_errors_rx(dev_data->iface);
+		goto dequeue;
+	}
+
+	if (net_pkt_write(pkt, rx_data, rx_len) != 0) {
+		LOG_ERR("Failed to write rx packet");
+		eth_stats_update_errors_rx(dev_data->iface);
+		net_pkt_unref(pkt);
+		pkt = NULL;
+	}
+
+dequeue:
+	if (eth_ivshmem_queue_rx_complete(&dev_data->ivshmem_queue) == 0) {
+		/* Notify peer */
+		ivshmem_int_peer(cfg_data->ivshmem, dev_data->peer_id, dev_data->tx_rx_vector);
+	}
+
+	return pkt;
+}
+
+static void eth_ivshmem_set_state(const struct device *dev, uint32_t state)
+{
+	struct eth_ivshmem_dev_data *dev_data = dev->data;
+	const struct eth_ivshmem_cfg_data *cfg_data = dev->config;
+
+	LOG_DBG("State update: %s -> %s",
+		eth_ivshmem_state_names[dev_data->state],
+		eth_ivshmem_state_names[state]);
+
+	dev_data->state = state;
+	ivshmem_set_state(cfg_data->ivshmem, state);
+}
+
+static void eth_ivshmem_state_update(const struct device *dev)
+{
+	struct eth_ivshmem_dev_data *dev_data = dev->data;
+	const struct eth_ivshmem_cfg_data *cfg_data = dev->config;
+
+	uint32_t peer_state = ivshmem_get_state(cfg_data->ivshmem, dev_data->peer_id);
+
+	switch (dev_data->state) {
+	case ETH_IVSHMEM_STATE_RESET:
+		switch (peer_state) {
+		case ETH_IVSHMEM_STATE_RESET:
+		case ETH_IVSHMEM_STATE_INIT:
+			eth_ivshmem_set_state(dev, ETH_IVSHMEM_STATE_INIT);
+			break;
+		default:
+			/* Wait for peer to reset */
+			break;
+		}
+		break;
+	case ETH_IVSHMEM_STATE_INIT:
+		if (dev_data->iface == NULL || peer_state == ETH_IVSHMEM_STATE_RESET) {
+			/* Peer is not ready for init */
+			break;
+		}
+		eth_ivshmem_queue_reset(&dev_data->ivshmem_queue);
+		eth_ivshmem_set_state(dev, ETH_IVSHMEM_STATE_READY);
+		break;
+	case ETH_IVSHMEM_STATE_READY:
+	case ETH_IVSHMEM_STATE_RUN:
+		switch (peer_state) {
+		case ETH_IVSHMEM_STATE_RESET:
+			net_eth_carrier_off(dev_data->iface);
+			eth_ivshmem_set_state(dev, ETH_IVSHMEM_STATE_RESET);
+			break;
+		case ETH_IVSHMEM_STATE_READY:
+		case ETH_IVSHMEM_STATE_RUN:
+			if (dev_data->enabled && dev_data->state == ETH_IVSHMEM_STATE_READY) {
+				eth_ivshmem_set_state(dev, ETH_IVSHMEM_STATE_RUN);
+				net_eth_carrier_on(dev_data->iface);
+			} else if (!dev_data->enabled && dev_data->state == ETH_IVSHMEM_STATE_RUN) {
+				net_eth_carrier_off(dev_data->iface);
+				eth_ivshmem_set_state(dev, ETH_IVSHMEM_STATE_RESET);
+			}
+			break;
+		}
+		break;
+	}
+}
+
+FUNC_NORETURN static void eth_ivshmem_thread(void *arg1, void *arg2, void *arg3)
+{
+	const struct device *dev = arg1;
+	struct eth_ivshmem_dev_data *dev_data = dev->data;
+	struct k_poll_event poll_event;
+
+	ARG_UNUSED(arg2);
+	ARG_UNUSED(arg3);
+
+	k_poll_event_init(&poll_event,
+			  K_POLL_TYPE_SIGNAL,
+			  K_POLL_MODE_NOTIFY_ONLY,
+			  &dev_data->poll_signal);
+
+	while (true) {
+		k_poll(&poll_event, 1, K_FOREVER);
+		poll_event.signal->signaled = 0;
+		poll_event.state = K_POLL_STATE_NOT_READY;
+
+		eth_ivshmem_state_update(dev);
+		if (dev_data->state != ETH_IVSHMEM_STATE_RUN) {
+			continue;
+		}
+
+		while (true) {
+			struct net_pkt *pkt = eth_ivshmem_rx(dev);
+
+			if (pkt == NULL) {
+				break;
+			}
+
+			if (net_recv_data(dev_data->iface, pkt) < 0) {
+				/* Upper layers are not ready to receive packets */
+				net_pkt_unref(pkt);
+			}
+
+			k_yield();
+		}
+	}
+}
+
+int eth_ivshmem_initialize(const struct device *dev)
+{
+	struct eth_ivshmem_dev_data *dev_data = dev->data;
+	const struct eth_ivshmem_cfg_data *cfg_data = dev->config;
+	int res;
+
+	k_poll_signal_init(&dev_data->poll_signal);
+
+	if (!device_is_ready(cfg_data->ivshmem)) {
+		LOG_ERR("ivshmem device not ready");
+		return -ENODEV;
+	}
+
+	uint16_t protocol = ivshmem_get_protocol(cfg_data->ivshmem);
+
+	if (protocol != IVSHMEM_V2_PROTO_NET) {
+		LOG_ERR("Invalid ivshmem protocol %hu", protocol);
+		return -EINVAL;
+	}
+
+	uint32_t id = ivshmem_get_id(cfg_data->ivshmem);
+	uint32_t max_peers = ivshmem_get_max_peers(cfg_data->ivshmem);
+
+	LOG_INF("ivshmem: id %u, max_peers %u", id, max_peers);
+	if (id > 1) {
+		LOG_ERR("Invalid ivshmem ID %u", id);
+		return -EINVAL;
+	}
+	if (max_peers != 2) {
+		LOG_ERR("Invalid ivshmem max peers %u", max_peers);
+		return -EINVAL;
+	}
+	dev_data->peer_id = (id == 0) ? 1 : 0;
+
+	bool tx_buffer_first = id == 0;
+	uintptr_t output_section_addr;
+	size_t output_section_size = ivshmem_get_output_mem_section(
+		cfg_data->ivshmem, 0, &output_section_addr);
+
+	res = eth_ivshmem_queue_init(
+		&dev_data->ivshmem_queue, output_section_addr,
+		output_section_size, tx_buffer_first);
+	if (res != 0) {
+		LOG_ERR("Failed to init ivshmem queue");
+		return res;
+	}
+	LOG_INF("shmem queue: desc len 0x%hX, header size 0x%X, data size 0x%X",
+		dev_data->ivshmem_queue.desc_max_len,
+		dev_data->ivshmem_queue.vring_header_size,
+		dev_data->ivshmem_queue.vring_data_max_len);
+
+	uint16_t n_vectors = ivshmem_get_vectors(cfg_data->ivshmem);
+
+	/* For simplicity, state and TX/RX vectors do the same thing */
+	ivshmem_register_handler(cfg_data->ivshmem, &dev_data->poll_signal, 0);
+	dev_data->tx_rx_vector = 0;
+	if (n_vectors == 0) {
+		LOG_ERR("Error no ivshmem ISR vectors");
+		return -EINVAL;
+	} else if (n_vectors > 1) {
+		ivshmem_register_handler(cfg_data->ivshmem, &dev_data->poll_signal, 1);
+		dev_data->tx_rx_vector = 1;
+	}
+
+	ivshmem_set_state(cfg_data->ivshmem, ETH_IVSHMEM_STATE_RESET);
+
+	cfg_data->generate_mac_addr(dev_data->mac_addr);
+	LOG_INF("MAC Address %02X:%02X:%02X:%02X:%02X:%02X",
+		dev_data->mac_addr[0], dev_data->mac_addr[1],
+		dev_data->mac_addr[2], dev_data->mac_addr[3],
+		dev_data->mac_addr[4], dev_data->mac_addr[5]);
+
+	k_tid_t tid = k_thread_create(
+		&dev_data->thread, dev_data->thread_stack,
+		K_KERNEL_STACK_SIZEOF(dev_data->thread_stack),
+		eth_ivshmem_thread,
+		(void *) dev, NULL, NULL,
+		CONFIG_ETH_IVSHMEM_THREAD_PRIORITY,
+		K_ESSENTIAL, K_NO_WAIT);
+	k_thread_name_set(tid, cfg_data->name);
+
+	ivshmem_enable_interrupts(cfg_data->ivshmem, true);
+
+	/* Wake up thread to check/update state */
+	k_poll_signal_raise(&dev_data->poll_signal, 0);
+
+	return 0;
+}
+
+static void eth_ivshmem_iface_init(struct net_if *iface)
+{
+	const struct device *dev = net_if_get_device(iface);
+	struct eth_ivshmem_dev_data *dev_data = dev->data;
+
+	if (dev_data->iface == NULL) {
+		dev_data->iface = iface;
+	}
+
+	net_if_set_link_addr(
+		iface, dev_data->mac_addr,
+		sizeof(dev_data->mac_addr),
+		NET_LINK_ETHERNET);
+
+	ethernet_init(iface);
+
+	/* Do not start the interface until PHY link is up */
+	net_if_carrier_off(iface);
+
+	/* Wake up thread to check/update state */
+	k_poll_signal_raise(&dev_data->poll_signal, 0);
+}
+
+static const struct ethernet_api eth_ivshmem_api = {
+	.iface_api.init = eth_ivshmem_iface_init,
+#if defined(CONFIG_NET_STATISTICS_ETHERNET)
+	.get_stats = eth_ivshmem_get_stats,
+#endif
+	.start = eth_ivshmem_start,
+	.stop = eth_ivshmem_stop,
+	.get_capabilities = eth_ivshmem_caps,
+	.send = eth_ivshmem_send,
+};
+
+#define ETH_IVSHMEM_RANDOM_MAC_ADDR(inst)					\
+	static void generate_mac_addr_##inst(uint8_t mac_addr[6])		\
+	{									\
+		uint32_t entropy = sys_rand32_get();				\
+		mac_addr[0] = (entropy >> 16) & 0xff;				\
+		mac_addr[1] = (entropy >> 8) & 0xff;				\
+		mac_addr[2] = (entropy >> 0) & 0xff;				\
+		/* Clear multicast bit */					\
+		mac_addr[0] &= 0xFE;						\
+		gen_random_mac(mac_addr, mac_addr[0], mac_addr[1], mac_addr[2]); \
+	}
+
+#define ETH_IVSHMEM_LOCAL_MAC_ADDR(inst)					\
+	static void generate_mac_addr_##inst(uint8_t mac_addr[6])		\
+	{									\
+		const uint8_t addr[6] = DT_INST_PROP(inst, local_mac_address);	\
+		memcpy(mac_addr, addr, sizeof(addr));				\
+	}
mac address"); \ + COND_CODE_1(DT_INST_PROP(inst, zephyr_random_mac_address), \ + (ETH_IVSHMEM_RANDOM_MAC_ADDR(inst)), \ + (ETH_IVSHMEM_LOCAL_MAC_ADDR(inst))) + +#define ETH_IVSHMEM_INIT(inst) \ + ETH_IVSHMEM_GENERATE_MAC_ADDR(inst); \ + static struct eth_ivshmem_dev_data eth_ivshmem_dev_##inst = {}; \ + static const struct eth_ivshmem_cfg_data eth_ivshmem_cfg_##inst = { \ + .ivshmem = DEVICE_DT_GET(DT_INST_PHANDLE(inst, ivshmem_v2)), \ + .name = "ivshmem_eth" STRINGIFY(inst), \ + .generate_mac_addr = generate_mac_addr_##inst, \ + }; \ + ETH_NET_DEVICE_DT_INST_DEFINE(inst, \ + eth_ivshmem_initialize, \ + NULL, \ + ð_ivshmem_dev_##inst, \ + ð_ivshmem_cfg_##inst, \ + CONFIG_ETH_INIT_PRIORITY, \ + ð_ivshmem_api, \ + NET_ETH_MTU); + +DT_INST_FOREACH_STATUS_OKAY(ETH_IVSHMEM_INIT); diff --git a/drivers/ethernet/eth_ivshmem_priv.h b/drivers/ethernet/eth_ivshmem_priv.h new file mode 100644 index 00000000000..b5769a31f03 --- /dev/null +++ b/drivers/ethernet/eth_ivshmem_priv.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2023 Enphase Energy + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ETH_IVSHMEM_PRIV_H +#define ETH_IVSHMEM_PRIV_H + +#include +#include +#include + +#include + +struct eth_ivshmem_queue { + struct { + struct vring vring; + void *shmem; + uint16_t desc_head; + uint16_t desc_len; + uint32_t data_head; + uint32_t data_tail; + uint32_t data_len; + uint16_t avail_idx; + uint16_t used_idx; + + uint32_t pending_data_head; + uint32_t pending_data_len; + } tx; + struct { + struct vring vring; + void *shmem; + uint16_t avail_idx; + uint16_t used_idx; + } rx; + uint16_t desc_max_len; + uint32_t vring_header_size; + uint32_t vring_data_max_len; +}; + +int eth_ivshmem_queue_init( + struct eth_ivshmem_queue *q, uintptr_t shmem, + size_t shmem_section_size, bool tx_buffer_first); +void eth_ivshmem_queue_reset(struct eth_ivshmem_queue *q); +int eth_ivshmem_queue_tx_get_buff(struct eth_ivshmem_queue *q, void **data, size_t len); +int eth_ivshmem_queue_tx_commit_buff(struct eth_ivshmem_queue *q); +int eth_ivshmem_queue_rx(struct eth_ivshmem_queue *q, const void **data, size_t *len); +int eth_ivshmem_queue_rx_complete(struct eth_ivshmem_queue *q); + +#endif /* ETH_IVSHMEM_PRIV_H */ diff --git a/drivers/ethernet/eth_ivshmem_queue.c b/drivers/ethernet/eth_ivshmem_queue.c new file mode 100644 index 00000000000..d5b5b29a735 --- /dev/null +++ b/drivers/ethernet/eth_ivshmem_queue.c @@ -0,0 +1,330 @@ +/* + * Copyright (c) 2023 Enphase Energy + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "eth_ivshmem_priv.h" + +#include +#include +#include + +#include +#include + +/* These defines must match on the peer */ +#define ETH_IVSHMEM_VRING_ALIGNMENT 64 +#define ETH_IVSHMEM_FRAME_SIZE(len) ROUND_UP(18 + (len), L1_CACHE_BYTES) + +#define VRING_FLUSH(x) sys_cache_data_flush_range(&(x), sizeof(x)) +#define VRING_INVALIDATE(x) sys_cache_data_invd_range(&(x), sizeof(x)) + +static int calc_vring_size( + size_t section_size, uint16_t *vring_desc_len, + uint32_t *vring_header_size); +static uint32_t tx_buffer_advance(uint32_t max_len, uint32_t *position, uint32_t *len); +static int tx_clean_used(struct eth_ivshmem_queue *q); +static int get_rx_avail_desc_idx(struct eth_ivshmem_queue *q, uint16_t *avail_desc_idx); + +int eth_ivshmem_queue_init( + struct eth_ivshmem_queue *q, uintptr_t shmem, + size_t shmem_section_size, bool tx_buffer_first) +{ + memset(q, 0, sizeof(*q)); + + uint16_t vring_desc_len; + uint32_t vring_header_size; + int res = calc_vring_size(shmem_section_size, &vring_desc_len, 
&vring_header_size); + + if (res != 0) { + return res; + } + + q->desc_max_len = vring_desc_len; + q->vring_data_max_len = shmem_section_size - vring_header_size; + q->vring_header_size = vring_header_size; + + if (tx_buffer_first) { + q->tx.shmem = (void *)shmem; + q->rx.shmem = (void *)(shmem + shmem_section_size); + } else { + q->rx.shmem = (void *)shmem; + q->tx.shmem = (void *)(shmem + shmem_section_size); + } + + /* Init vrings */ + vring_init(&q->tx.vring, vring_desc_len, q->tx.shmem, ETH_IVSHMEM_VRING_ALIGNMENT); + vring_init(&q->rx.vring, vring_desc_len, q->rx.shmem, ETH_IVSHMEM_VRING_ALIGNMENT); + + /* Swap "used" pointers. + * This is done so that each peer only ever writes to its output section, + * while maintaining vring code consistency elsewhere in this file. + */ + struct vring_used *tmp_used = q->tx.vring.used; + + q->tx.vring.used = q->rx.vring.used; + q->rx.vring.used = tmp_used; + + eth_ivshmem_queue_reset(q); + + return 0; +} + +void eth_ivshmem_queue_reset(struct eth_ivshmem_queue *q) +{ + q->tx.desc_head = 0; + q->tx.desc_len = 0; + q->tx.data_head = 0; + q->tx.data_tail = 0; + q->tx.data_len = 0; + q->tx.avail_idx = 0; + q->tx.used_idx = 0; + q->tx.pending_data_head = 0; + q->tx.pending_data_len = 0; + q->rx.avail_idx = 0; + q->rx.used_idx = 0; + + memset(q->tx.shmem, 0, q->vring_header_size); + + /* Init TX ring descriptors */ + for (unsigned int i = 0; i < q->tx.vring.num - 1; i++) + q->tx.vring.desc[i].next = i + 1; + q->tx.vring.desc[q->tx.vring.num - 1].next = 0; +} + +int eth_ivshmem_queue_tx_get_buff(struct eth_ivshmem_queue *q, void **data, size_t len) +{ + /* Clean used TX buffers */ + int res = tx_clean_used(q); + + if (res != 0) { + return res; + } + + if (q->tx.desc_len >= q->desc_max_len) { + return -ENOBUFS; + } + + uint32_t head = q->tx.data_head; + uint32_t consumed_len = len; + uint32_t new_head = tx_buffer_advance(q->vring_data_max_len, &head, &consumed_len); + + if (q->vring_data_max_len - q->tx.data_len < consumed_len) { + return -ENOBUFS; + } + + struct vring_desc *tx_desc = &q->tx.vring.desc[q->tx.desc_head]; + + tx_desc->addr = q->vring_header_size + head; + tx_desc->len = len; + tx_desc->flags = 0; + VRING_FLUSH(*tx_desc); + + *data = (uint8_t *)q->tx.shmem + q->vring_header_size + head; + + q->tx.pending_data_head = new_head; + q->tx.pending_data_len = q->tx.data_len + consumed_len; + + return 0; +} + +int eth_ivshmem_queue_tx_commit_buff(struct eth_ivshmem_queue *q) +{ + /* Ensure that a TX buffer is pending */ + if (q->tx.pending_data_len == 0) { + return -EINVAL; + } + + uint16_t desc_head = q->tx.desc_head; + + q->tx.desc_len++; + q->tx.desc_head = (q->tx.desc_head + 1) % q->desc_max_len; + + q->tx.data_head = q->tx.pending_data_head; + q->tx.data_len = q->tx.pending_data_len; + + q->tx.vring.avail->ring[q->tx.avail_idx % q->desc_max_len] = desc_head; + + VRING_FLUSH(q->tx.vring.avail->ring[q->tx.avail_idx % q->desc_max_len]); + atomic_thread_fence(memory_order_seq_cst); + + q->tx.avail_idx++; + q->tx.vring.avail->idx = q->tx.avail_idx; + + VRING_FLUSH(q->tx.vring.avail->idx); + + q->tx.pending_data_len = 0; + + return 0; +} + +int eth_ivshmem_queue_rx(struct eth_ivshmem_queue *q, const void **data, size_t *len) +{ + *data = NULL; + *len = 0; + + uint16_t avail_desc_idx; + int res = get_rx_avail_desc_idx(q, &avail_desc_idx); + + if (res != 0) { + return res; + } + + struct vring_desc *desc = &q->rx.vring.desc[avail_desc_idx]; + + VRING_INVALIDATE(*desc); + + uint64_t offset = desc->addr - q->vring_header_size; + uint32_t rx_len = 
+
+int eth_ivshmem_queue_rx(struct eth_ivshmem_queue *q, const void **data, size_t *len)
+{
+	*data = NULL;
+	*len = 0;
+
+	uint16_t avail_desc_idx;
+	int res = get_rx_avail_desc_idx(q, &avail_desc_idx);
+
+	if (res != 0) {
+		return res;
+	}
+
+	struct vring_desc *desc = &q->rx.vring.desc[avail_desc_idx];
+
+	VRING_INVALIDATE(*desc);
+
+	uint64_t offset = desc->addr - q->vring_header_size;
+	uint32_t rx_len = desc->len;
+
+	if (offset > q->vring_data_max_len ||
+	    rx_len > q->vring_data_max_len ||
+	    offset > q->vring_data_max_len - rx_len) {
+		return -EINVAL;
+	}
+
+	*data = (uint8_t *)q->rx.shmem + q->vring_header_size + offset;
+	*len = desc->len;
+
+	return 0;
+}
+
+int eth_ivshmem_queue_rx_complete(struct eth_ivshmem_queue *q)
+{
+	uint16_t avail_desc_idx;
+	int res = get_rx_avail_desc_idx(q, &avail_desc_idx);
+
+	if (res != 0) {
+		return res;
+	}
+
+	uint16_t used_idx = q->rx.used_idx % q->desc_max_len;
+
+	q->rx.used_idx++;
+	q->rx.vring.used->ring[used_idx].id = avail_desc_idx;
+	q->rx.vring.used->ring[used_idx].len = 1;
+	VRING_FLUSH(q->rx.vring.used->ring[used_idx]);
+	atomic_thread_fence(memory_order_seq_cst);
+
+	q->rx.vring.used->idx = q->rx.used_idx;
+	VRING_FLUSH(q->rx.vring.used->idx);
+	atomic_thread_fence(memory_order_seq_cst);
+
+	q->rx.avail_idx++;
+	vring_avail_event(&q->rx.vring) = q->rx.avail_idx;
+	VRING_FLUSH(vring_avail_event(&q->rx.vring));
+
+	return 0;
+}
+
+/**
+ * Calculates the vring descriptor length and header size.
+ * This must match what is calculated by the peer.
+ */
+static int calc_vring_size(
+	size_t section_size, uint16_t *vring_desc_len,
+	uint32_t *vring_header_size)
+{
+	static const int eth_min_mtu = 68;
+	uint32_t header_size;
+	int16_t desc_len;
+
+	for (desc_len = 4096; desc_len > 32; desc_len >>= 1) {
+		header_size = vring_size(desc_len, ETH_IVSHMEM_VRING_ALIGNMENT);
+		header_size = ROUND_UP(header_size, ETH_IVSHMEM_VRING_ALIGNMENT);
+		if (header_size < section_size / 8) {
+			break;
+		}
+	}
+
+	if (header_size > section_size) {
+		return -EINVAL;
+	}
+
+	uint32_t vring_data_size = section_size - header_size;
+
+	if (vring_data_size < 4 * eth_min_mtu) {
+		return -EINVAL;
+	}
+
+	*vring_desc_len = desc_len;
+	*vring_header_size = header_size;
+
+	return 0;
+}
+
+static uint32_t tx_buffer_advance(uint32_t max_len, uint32_t *position, uint32_t *len)
+{
+	uint32_t aligned_len = ETH_IVSHMEM_FRAME_SIZE(*len);
+	uint32_t contiguous_len = max_len - *position;
+
+	*len = aligned_len;
+	if (aligned_len > contiguous_len) {
+		/* Wrap back to zero */
+		*position = 0;
+		*len += contiguous_len;
+	}
+
+	return *position + aligned_len;
+}
+
+static int tx_clean_used(struct eth_ivshmem_queue *q)
+{
+	while (true) {
+		VRING_INVALIDATE(q->tx.vring.used->idx);
+		if (q->tx.used_idx == q->tx.vring.used->idx) {
+			break;
+		}
+
+		struct vring_used_elem *used = &q->tx.vring.used->ring[
+				q->tx.used_idx % q->desc_max_len];
+
+		atomic_thread_fence(memory_order_seq_cst);
+		VRING_INVALIDATE(*used);
+
+		if (used->id >= q->desc_max_len || used->len != 1) {
+			return -EINVAL;
+		}
+
+		struct vring_desc *desc = &q->tx.vring.desc[used->id];
+
+		uint64_t offset = desc->addr - q->vring_header_size;
+		uint32_t len = desc->len;
+
+		uint32_t tail = q->tx.data_tail;
+		uint32_t consumed_len = len;
+		uint32_t new_tail = tx_buffer_advance(q->vring_data_max_len, &tail, &consumed_len);
+
+		if (consumed_len > q->tx.data_len ||
+		    offset != tail) {
+			return -EINVAL;
+		}
+
+		q->tx.data_tail = new_tail;
+		q->tx.data_len -= consumed_len;
+		q->tx.desc_len--;
+		q->tx.used_idx++;
+	}
+	return 0;
+}
+
+static int get_rx_avail_desc_idx(struct eth_ivshmem_queue *q, uint16_t *avail_desc_idx)
+{
+	atomic_thread_fence(memory_order_seq_cst);
+	VRING_INVALIDATE(q->rx.vring.avail->idx);
+
+	uint16_t avail_idx = q->rx.vring.avail->idx;
+
+	if (avail_idx == q->rx.avail_idx) {
+		return -EWOULDBLOCK;
+	}
+
+	VRING_INVALIDATE(q->rx.vring.avail->ring[q->rx.avail_idx % q->desc_max_len]);
+	*avail_desc_idx = q->rx.vring.avail->ring[q->rx.avail_idx % q->desc_max_len];
+	if (*avail_desc_idx >= q->desc_max_len) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
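To make the ring arithmetic above concrete, the small standalone sketch below mirrors ETH_IVSHMEM_FRAME_SIZE() and the wrap-around handling in tx_buffer_advance(), assuming a 64-byte cache line; the section size and offset are made-up example values, not anything mandated by the driver:

    #include <stdint.h>
    #include <stdio.h>

    /* Local copies of the driver's sizing macros, assuming L1_CACHE_BYTES == 64 */
    #define EXAMPLE_CACHE_LINE      64u
    #define EXAMPLE_ROUND_UP(x, a)  ((((x) + (a) - 1u) / (a)) * (a))
    #define EXAMPLE_FRAME_SIZE(len) EXAMPLE_ROUND_UP(18u + (len), EXAMPLE_CACHE_LINE)

    int main(void)
    {
        /* 18 bytes of header/FCS allowance + 1500-byte payload -> one 1536-byte slot */
        uint32_t slot = EXAMPLE_FRAME_SIZE(1500u);

        printf("slot size for a 1500-byte payload: %u bytes\n", slot);

        /* Near the end of the data area the slot cannot be split, so the
         * producer wraps to offset 0 and also charges the skipped tail bytes,
         * exactly like tx_buffer_advance() does.
         */
        uint32_t data_area = 61440u;  /* example vring_data_max_len */
        uint32_t position = 60416u;   /* example data_head */
        uint32_t contiguous = data_area - position;

        if (slot > contiguous) {
            printf("wrap: skip %u tail bytes, store frame at 0, next head %u\n",
                   contiguous, slot);
        }
        return 0;
    }

Running this prints a 1536-byte slot and a wrap that consumes 1024 + 1536 bytes of the data area, which is the same accounting tx_clean_used() later undoes on the consumer side.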
diff --git a/dts/bindings/ethernet/siemens,ivshmem-eth.yaml b/dts/bindings/ethernet/siemens,ivshmem-eth.yaml
new file mode 100644
index 00000000000..2c16b40c403
--- /dev/null
+++ b/dts/bindings/ethernet/siemens,ivshmem-eth.yaml
@@ -0,0 +1,15 @@
+# Copyright (c) 2023 Enphase Energy
+# SPDX-License-Identifier: Apache-2.0
+
+description: IVSHMEM Ethernet
+
+compatible: "siemens,ivshmem-eth"
+
+include: ethernet.yaml
+
+properties:
+
+  ivshmem-v2:
+    type: phandle
+    required: true
+    description: ivshmem-v2 driver node
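With the binding above, a board devicetree or overlay points a siemens,ivshmem-eth node at an ivshmem-v2 PCI device, and the resulting interface behaves like any other Zephyr Ethernet interface. A minimal application-side check is sketched below (it uses the standard net_if API; the helper name and the use of the default interface are illustrative assumptions, not part of this patch):

    #include <errno.h>

    #include <zephyr/kernel.h>
    #include <zephyr/net/net_if.h>

    int wait_for_ivshmem_link(void)
    {
        struct net_if *iface = net_if_get_default();

        if (iface == NULL) {
            return -ENODEV;
        }

        /* The driver keeps the carrier off until both cells reach the RUN
         * state, so wait until the interface is actually operational.
         */
        while (!net_if_is_up(iface)) {
            k_sleep(K_MSEC(100));
        }

        return 0;
    }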