diff --git a/drivers/ethernet/CMakeLists.txt b/drivers/ethernet/CMakeLists.txt index dd5200cb52c..62443b03aa8 100644 --- a/drivers/ethernet/CMakeLists.txt +++ b/drivers/ethernet/CMakeLists.txt @@ -12,6 +12,11 @@ zephyr_sources_ifdef(CONFIG_ETH_GECKO phy_gecko.c ) +zephyr_sources_ifdef(CONFIG_ETH_XLNX_GEM + eth_xlnx_gem.c + phy_xlnx_gem.c + ) + zephyr_sources_ifdef(CONFIG_ETH_STELLARIS eth_stellaris.c) zephyr_sources_ifdef(CONFIG_ETH_E1000 eth_e1000.c) zephyr_sources_ifdef(CONFIG_ETH_ENC28J60 eth_enc28j60.c) diff --git a/drivers/ethernet/Kconfig b/drivers/ethernet/Kconfig index 94c41c94328..05478d644f9 100644 --- a/drivers/ethernet/Kconfig +++ b/drivers/ethernet/Kconfig @@ -57,5 +57,6 @@ source "drivers/ethernet/Kconfig.liteeth" source "drivers/ethernet/Kconfig.gecko" source "drivers/ethernet/Kconfig.w5500" source "drivers/ethernet/Kconfig.dsa" +source "drivers/ethernet/Kconfig.xlnx_gem" endmenu # "Ethernet Drivers" diff --git a/drivers/ethernet/Kconfig.xlnx_gem b/drivers/ethernet/Kconfig.xlnx_gem new file mode 100644 index 00000000000..9657c5a905d --- /dev/null +++ b/drivers/ethernet/Kconfig.xlnx_gem @@ -0,0 +1,25 @@ +# +# Xilinx Processor System Gigabit Ethernet controller (GEM) driver +# configuration options +# +# Copyright (c) 2021, Weidmueller Interface GmbH & Co. KG +# SPDX-License-Identifier: Apache-2.0 +# + +DT_COMPAT_XLNX_GEM := xlnx,gem + +menuconfig ETH_XLNX_GEM + bool "Xilinx GEM Ethernet driver" + default $(dt_compat_enabled,$(DT_COMPAT_XLNX_GEM)) + depends on SOC_XILINX_ZYNQMP_RPU + help + Enable Xilinx GEM Ethernet driver. + +config ETH_NIC_MODEL + string + default "cadence_gem" + depends on ETH_XLNX_GEM + help + Value of the -nic parameter to be used when starting QEMU. + This parameter specifies which type of Ethernet controller + shall be simulated by QEMU. diff --git a/drivers/ethernet/eth_xlnx_gem.c b/drivers/ethernet/eth_xlnx_gem.c new file mode 100644 index 00000000000..0701a958555 --- /dev/null +++ b/drivers/ethernet/eth_xlnx_gem.c @@ -0,0 +1,1607 @@ +/* + * Xilinx Processor System Gigabit Ethernet controller (GEM) driver + * + * Copyright (c) 2021, Weidmueller Interface GmbH & Co. KG + * SPDX-License-Identifier: Apache-2.0 + * + * Known current limitations / TODOs: + * - Only supports 32-bit addresses in buffer descriptors, therefore + * the ZynqMP APU (Cortex-A53 cores) may not be fully supported. + * - Hardware timestamps not considered. + * - VLAN tags not considered. + * - Wake-on-LAN interrupt not supported. + * - Send function is not SMP-capable (due to single TX done semaphore). + * - Interrupt-driven PHY management not supported - polling only. + * - No explicit placement of the DMA memory area(s) in either a + * specific memory section or at a fixed memory location yet. This + * is not an issue as long as the controller is used in conjunction + * with the Cortex-R5 QEMU target or an actual R5 running without the + * MPU enabled. + * - No detailed error handling when evaluating the Interrupt Status, + * RX Status and TX Status registers. 
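+ * (Regarding the first limitation listed above: the Cortex-A53 cores of
+ * the ZynqMP APU use a physical address space wider than 32 bits, so a
+ * DMA buffer located above the 4 GB boundary could not be expressed in
+ * the 32-bit address words of the buffer descriptors used here.)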
+ */ + +#include +#include +#include +#include + +#include +#include +#include + +#include "eth_xlnx_gem_priv.h" + +#define LOG_MODULE_NAME eth_xlnx_gem +#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL +#include +LOG_MODULE_REGISTER(LOG_MODULE_NAME); + +static int eth_xlnx_gem_dev_init(const struct device *dev); +static void eth_xlnx_gem_iface_init(struct net_if *iface); +static void eth_xlnx_gem_isr(const struct device *dev); +static int eth_xlnx_gem_send(const struct device *dev, struct net_pkt *pkt); +static int eth_xlnx_gem_start_device(const struct device *dev); +static int eth_xlnx_gem_stop_device(const struct device *dev); +static enum ethernet_hw_caps + eth_xlnx_gem_get_capabilities(const struct device *dev); +#if defined(CONFIG_NET_STATISTICS_ETHERNET) +static struct net_stats_eth *eth_xlnx_gem_stats(const struct device *dev); +#endif + +static void eth_xlnx_gem_reset_hw(const struct device *dev); +static void eth_xlnx_gem_configure_clocks(const struct device *dev); +static void eth_xlnx_gem_set_initial_nwcfg(const struct device *dev); +static void eth_xlnx_gem_set_nwcfg_link_speed(const struct device *dev); +static void eth_xlnx_gem_set_mac_address(const struct device *dev); +static void eth_xlnx_gem_set_initial_dmacr(const struct device *dev); +static void eth_xlnx_gem_init_phy(const struct device *dev); +static void eth_xlnx_gem_poll_phy(struct k_work *item); +static void eth_xlnx_gem_configure_buffers(const struct device *dev); +static void eth_xlnx_gem_rx_pending_work(struct k_work *item); +static void eth_xlnx_gem_handle_rx_pending(const struct device *dev); +static void eth_xlnx_gem_tx_done_work(struct k_work *item); +static void eth_xlnx_gem_handle_tx_done(const struct device *dev); + +static const struct ethernet_api eth_xlnx_gem_apis = { + .iface_api.init = eth_xlnx_gem_iface_init, + .get_capabilities = eth_xlnx_gem_get_capabilities, + .send = eth_xlnx_gem_send, + .start = eth_xlnx_gem_start_device, + .stop = eth_xlnx_gem_stop_device, +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + .get_stats = eth_xlnx_gem_stats, +#endif +}; + +/* + * Insert the configuration & run-time data for all GEM instances which + * are enabled in the device tree of the current target board. + */ +DT_INST_FOREACH_STATUS_OKAY(ETH_XLNX_GEM_INITIALIZE) + +/** + * @brief GEM device initialization function + * Initializes the GEM itself, the DMA memory area used by the GEM and, + * if enabled, an associated PHY attached to the GEM's MDIO interface. + * + * @param dev Pointer to the device data + * @retval 0 if the device initialization completed successfully + */ +static int eth_xlnx_gem_dev_init(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + uint32_t reg_val; + + /* Precondition checks using assertions */ + + /* Valid PHY address and polling interval, if PHY is to be managed */ + if (dev_conf->init_phy) { + __ASSERT((dev_conf->phy_mdio_addr_fix >= 0 && + dev_conf->phy_mdio_addr_fix <= 32), + "%s invalid PHY address %u, must be in range " + "1 to 32, or 0 for auto-detection", + dev->name, dev_conf->phy_mdio_addr_fix); + __ASSERT(dev_conf->phy_poll_interval > 0, + "%s has an invalid zero PHY status polling " + "interval", dev->name); + } + + /* Valid max. 
/ nominal link speed value */ + __ASSERT((dev_conf->max_link_speed == LINK_10MBIT || + dev_conf->max_link_speed == LINK_100MBIT || + dev_conf->max_link_speed == LINK_1GBIT), + "%s invalid max./nominal link speed value %u", + dev->name, (uint32_t)dev_conf->max_link_speed); + + /* MDC clock divider validity check */ + __ASSERT(dev_conf->mdc_divider <= MDC_DIVIDER_48, + "%s invalid MDC clock divider value %u, must be in " + "range 0 to %u", dev->name, dev_conf->mdc_divider, + (uint32_t)MDC_DIVIDER_48); + + /* AMBA AHB configuration options */ + __ASSERT((dev_conf->amba_dbus_width == AMBA_AHB_DBUS_WIDTH_32BIT || + dev_conf->amba_dbus_width == AMBA_AHB_DBUS_WIDTH_64BIT || + dev_conf->amba_dbus_width == AMBA_AHB_DBUS_WIDTH_128BIT), + "%s AMBA AHB bus width configuration is invalid", + dev->name); + __ASSERT((dev_conf->ahb_burst_length == AHB_BURST_SINGLE || + dev_conf->ahb_burst_length == AHB_BURST_INCR4 || + dev_conf->ahb_burst_length == AHB_BURST_INCR8 || + dev_conf->ahb_burst_length == AHB_BURST_INCR16), + "%s AMBA AHB burst length configuration is invalid", + dev->name); + + /* HW RX buffer size */ + __ASSERT((dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_8KB || + dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_4KB || + dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_2KB || + dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_1KB), + "%s hardware RX buffer size configuration is invalid", + dev->name); + + /* HW RX buffer offset */ + __ASSERT(dev_conf->hw_rx_buffer_offset <= 3, + "%s hardware RX buffer offset %u is invalid, must be in " + "range 0 to 3", dev->name, dev_conf->hw_rx_buffer_offset); + + /* + * RX & TX buffer sizes + * RX Buffer size must be a multiple of 64, as the size of the + * corresponding DMA receive buffer in AHB system memory is + * expressed as n * 64 bytes in the DMA configuration register. + */ + __ASSERT(dev_conf->rx_buffer_size % 64 == 0, + "%s RX buffer size %u is not a multiple of 64 bytes", + dev->name, dev_conf->rx_buffer_size); + __ASSERT((dev_conf->rx_buffer_size != 0 && + dev_conf->rx_buffer_size <= 16320), + "%s RX buffer size %u is invalid, should be >64, " + "must be 16320 bytes maximum.", dev->name, + dev_conf->rx_buffer_size); + __ASSERT((dev_conf->tx_buffer_size != 0 && + dev_conf->tx_buffer_size <= 16380), + "%s TX buffer size %u is invalid, should be >64, " + "must be 16380 bytes maximum.", dev->name, + dev_conf->tx_buffer_size); + + /* Checksum offloading limitations of the QEMU GEM implementation */ +#ifdef CONFIG_QEMU_TARGET + __ASSERT(!dev_conf->enable_rx_chksum_offload, + "TCP/UDP/IP hardware checksum offloading is not " + "supported by the QEMU GEM implementation"); + __ASSERT(!dev_conf->enable_tx_chksum_offload, + "TCP/UDP/IP hardware checksum offloading is not " + "supported by the QEMU GEM implementation"); +#endif + + /* + * Initialization procedure as described in the Zynq-7000 TRM, + * chapter 16.3.x. 
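+ * The steps are: controller reset, static configuration of the MAC and
+ * DMA, TX clock setup, optional PHY initialization and DMA buffer setup.
+ * Each helper call below is annotated with the TRM chapter it implements.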
+ */ + eth_xlnx_gem_reset_hw(dev); /* Chapter 16.3.1 */ + eth_xlnx_gem_set_initial_nwcfg(dev); /* Chapter 16.3.2 */ + eth_xlnx_gem_set_mac_address(dev); /* Chapter 16.3.2 */ + eth_xlnx_gem_set_initial_dmacr(dev); /* Chapter 16.3.2 */ + + /* Enable MDIO -> set gem.net_ctrl[mgmt_port_en] */ + if (dev_conf->init_phy) { + reg_val = sys_read32(dev_conf->base_addr + + ETH_XLNX_GEM_NWCTRL_OFFSET); + reg_val |= ETH_XLNX_GEM_NWCTRL_MDEN_BIT; + sys_write32(reg_val, dev_conf->base_addr + + ETH_XLNX_GEM_NWCTRL_OFFSET); + } + + eth_xlnx_gem_configure_clocks(dev); /* Chapter 16.3.3 */ + if (dev_conf->init_phy) { + eth_xlnx_gem_init_phy(dev); /* Chapter 16.3.4 */ + } + eth_xlnx_gem_configure_buffers(dev); /* Chapter 16.3.5 */ + + return 0; +} + +/** + * @brief GEM associated interface initialization function + * Initializes the interface associated with a GEM device. + * + * @param iface Pointer to the associated interface data struct + */ +static void eth_xlnx_gem_iface_init(struct net_if *iface) +{ + const struct device *dev = net_if_get_device(iface); + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + + /* Set the initial contents of the current instance's run-time data */ + dev_data->iface = iface; + net_if_set_link_addr(iface, dev_data->mac_addr, 6, NET_LINK_ETHERNET); + ethernet_init(iface); + net_if_flag_set(iface, NET_IF_NO_AUTO_START); + + /* + * Initialize the (delayed) work items for RX pending, TX done + * and PHY status polling handlers + */ + k_work_init(&dev_data->tx_done_work, eth_xlnx_gem_tx_done_work); + k_work_init(&dev_data->rx_pend_work, eth_xlnx_gem_rx_pending_work); + k_work_init_delayable(&dev_data->phy_poll_delayed_work, + eth_xlnx_gem_poll_phy); + + /* Initialize TX completion semaphore */ + k_sem_init(&dev_data->tx_done_sem, 0, 1); + + /* + * Initialize semaphores in the RX/TX BD rings which have not + * yet been initialized + */ + k_sem_init(&dev_data->txbd_ring.ring_sem, 1, 1); + /* RX BD ring semaphore is not required at the time being */ + + /* Initialize the device's interrupt */ + dev_conf->config_func(dev); + + /* Submit initial PHY status polling delayed work */ + k_work_reschedule(&dev_data->phy_poll_delayed_work, K_NO_WAIT); +} + +/** + * @brief GEM interrupt service routine + * GEM interrupt service routine. Checks for indications of errors + * and either immediately handles RX pending / TX complete notifications + * or defers them to the system work queue. + * + * @param dev Pointer to the device data + */ +static void eth_xlnx_gem_isr(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + uint32_t reg_val; + + /* Read the interrupt status register */ + reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET); + + /* + * TODO: handling if one or more error flag(s) are set in the + * interrupt status register. -> For now, just log them + */ + if (reg_val & ETH_XLNX_GEM_IXR_ERRORS_MASK) { + LOG_ERR("%s error bit(s) set in Interrupt Status Reg.: 0x%08X", + dev->name, reg_val); + } + + /* + * Check for the following indications by the controller: + * reg_val & 0x00000080 -> gem.intr_status bit [7] = Frame TX complete + * reg_val & 0x00000002 -> gem.intr_status bit [1] = Frame received + * comp. Zynq-7000 TRM, Chapter B.18, p. 1289/1290. 
+ * If the respective condition's handling is configured to be deferred + * to the work queue thread, submit the corresponding job to the work + * queue, otherwise, handle the condition immediately. + */ + if ((reg_val & ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT) != 0) { + sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT, + dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET); + sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT, + dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET); + if (dev_conf->defer_txd_to_queue) { + k_work_submit(&dev_data->tx_done_work); + } else { + eth_xlnx_gem_handle_tx_done(dev); + } + } + if ((reg_val & ETH_XLNX_GEM_IXR_FRAME_RX_BIT) != 0) { + sys_write32(ETH_XLNX_GEM_IXR_FRAME_RX_BIT, + dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET); + sys_write32(ETH_XLNX_GEM_IXR_FRAME_RX_BIT, + dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET); + if (dev_conf->defer_rxp_to_queue) { + k_work_submit(&dev_data->rx_pend_work); + } else { + eth_xlnx_gem_handle_rx_pending(dev); + } + } + + /* + * Clear all interrupt status bits so that the interrupt is de-asserted + * by the GEM. -> TXSR/RXSR are read/cleared by either eth_xlnx_gem_- + * handle_tx_done or eth_xlnx_gem_handle_rx_pending if those actions + * are not deferred to the system's work queue for the current inter- + * face. If the latter is the case, those registers will be read/ + * cleared whenever the corresponding work item submitted from within + * this ISR is being processed. + */ + sys_write32((0xFFFFFFFF & ~(ETH_XLNX_GEM_IXR_FRAME_RX_BIT | + ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT)), + dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET); +} + +/** + * @brief GEM data send function + * GEM data send function. Blocks until a TX complete notification has been + * received & processed. + * + * @param dev Pointer to the device data + * @param pkt Pointer to the data packet to be sent + * @retval -EINVAL in case of invalid parameters, e.g. zero data length + * @retval -EIO in case of: + * (1) the attempt to TX data while the device is stopped, + * the interface is down or the link is down, + * (2) the attempt to TX data while no free buffers are available + * in the DMA memory area, + * (3) the transmission completion notification timing out + * @retval 0 if the packet was transmitted successfully + */ +static int eth_xlnx_gem_send(const struct device *dev, struct net_pkt *pkt) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + + uint16_t tx_data_length; + uint16_t tx_data_remaining; + void *tx_buffer_offs; + + uint8_t bds_reqd; + uint8_t curr_bd_idx; + uint8_t first_bd_idx; + + uint32_t reg_ctrl; + uint32_t reg_val; + int sem_status; + + if (!dev_data->started || dev_data->eff_link_speed == LINK_DOWN || + (!net_if_flag_is_set(dev_data->iface, NET_IF_UP))) { +#ifdef CONFIG_NET_STATISTICS_ETHERNET + dev_data->stats.tx_dropped++; +#endif + return -EIO; + } + + tx_data_length = tx_data_remaining = net_pkt_get_len(pkt); + if (tx_data_length == 0) { + LOG_ERR("%s cannot TX, zero packet length", dev->name); +#ifdef CONFIG_NET_STATISTICS_ETHERNET + dev_data->stats.errors.tx++; +#endif + return -EINVAL; + } + + /* + * Check if enough buffer descriptors are available for the amount + * of data to be transmitted, update the free BD count if this is + * the case. Update the 'next to use' BD index in the TX BD ring if + * sufficient space is available. 
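+ * Purely as an illustration (the actual TX buffer size is taken from the
+ * device tree): with 512-byte TX buffers, a 1514-byte frame requires
+ * bds_reqd = (1514 + (512 - 1)) / 512 = 3 buffer descriptors.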
If TX done handling, where the BD + * ring's data is accessed as well, is performed via the system work + * queue, protect against interruptions during the update of the BD + * ring's data by taking the ring's semaphore. If TX done handling + * is performed within the ISR, protect against interruptions by + * disabling the TX done interrupt source. + */ + bds_reqd = (uint8_t)((tx_data_length + (dev_conf->tx_buffer_size - 1)) / + dev_conf->tx_buffer_size); + + if (dev_conf->defer_txd_to_queue) { + k_sem_take(&(dev_data->txbd_ring.ring_sem), K_FOREVER); + } else { + sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT, + dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET); + } + + if (bds_reqd > dev_data->txbd_ring.free_bds) { + LOG_ERR("%s cannot TX, packet length %hu requires " + "%hhu BDs, current free count = %hhu", + dev->name, tx_data_length, bds_reqd, + dev_data->txbd_ring.free_bds); + + if (dev_conf->defer_txd_to_queue) { + k_sem_give(&(dev_data->txbd_ring.ring_sem)); + } else { + sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT, + dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET); + } +#ifdef CONFIG_NET_STATISTICS_ETHERNET + dev_data->stats.tx_dropped++; +#endif + return -EIO; + } + + curr_bd_idx = first_bd_idx = dev_data->txbd_ring.next_to_use; + reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl); + + dev_data->txbd_ring.next_to_use = (first_bd_idx + bds_reqd) % + dev_conf->txbd_count; + dev_data->txbd_ring.free_bds -= bds_reqd; + + if (dev_conf->defer_txd_to_queue) { + k_sem_give(&(dev_data->txbd_ring.ring_sem)); + } else { + sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT, + dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET); + } + + /* + * Scatter the contents of the network packet's buffer to + * one or more DMA buffers. + */ + net_pkt_cursor_init(pkt); + do { + /* Calculate the base pointer of the target TX buffer */ + tx_buffer_offs = (void *)(dev_data->first_tx_buffer + + (dev_conf->tx_buffer_size * curr_bd_idx)); + + /* Copy packet data to DMA buffer */ + net_pkt_read(pkt, (void *)tx_buffer_offs, + (tx_data_remaining < dev_conf->tx_buffer_size) ? + tx_data_remaining : dev_conf->tx_buffer_size); + + /* Update current BD's control word */ + reg_val = sys_read32(reg_ctrl) & (ETH_XLNX_GEM_TXBD_WRAP_BIT | + ETH_XLNX_GEM_TXBD_USED_BIT); + reg_val |= (tx_data_remaining < dev_conf->tx_buffer_size) ? + tx_data_remaining : dev_conf->tx_buffer_size; + sys_write32(reg_val, reg_ctrl); + + if (tx_data_remaining > dev_conf->tx_buffer_size) { + /* Switch to next BD */ + curr_bd_idx = (curr_bd_idx + 1) % dev_conf->txbd_count; + reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl); + } + + tx_data_remaining -= (tx_data_remaining < dev_conf->tx_buffer_size) ? + tx_data_remaining : dev_conf->tx_buffer_size; + } while (tx_data_remaining > 0); + + /* Set the 'last' bit in the current BD's control word */ + reg_val |= ETH_XLNX_GEM_TXBD_LAST_BIT; + + /* + * Clear the 'used' bits of all BDs involved in the current + * transmission. In accordance with chapter 16.3.8 of the + * Zynq-7000 TRM, the 'used' bits shall be cleared in reverse + * order, so that the 'used' bit of the first BD is cleared + * last just before the transmission is started. + */ + reg_val &= ~ETH_XLNX_GEM_TXBD_USED_BIT; + sys_write32(reg_val, reg_ctrl); + + while (curr_bd_idx != first_bd_idx) { + curr_bd_idx = (curr_bd_idx != 0) ? 
(curr_bd_idx - 1) : + (dev_conf->txbd_count - 1); + reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl); + reg_val = sys_read32(reg_ctrl); + reg_val &= ~ETH_XLNX_GEM_TXBD_USED_BIT; + sys_write32(reg_val, reg_ctrl); + } + + /* Set the start TX bit in the gem.net_ctrl register */ + reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET); + reg_val |= ETH_XLNX_GEM_NWCTRL_STARTTX_BIT; + sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET); + +#ifdef CONFIG_NET_STATISTICS_ETHERNET + dev_data->stats.bytes.sent += tx_data_length; + dev_data->stats.pkts.tx++; +#endif + + /* Block until TX has completed */ + sem_status = k_sem_take(&dev_data->tx_done_sem, K_MSEC(100)); + if (sem_status < 0) { + LOG_ERR("%s TX confirmation timed out", dev->name); +#ifdef CONFIG_NET_STATISTICS_ETHERNET + dev_data->stats.tx_timeout_count++; +#endif + return -EIO; + } + + return 0; +} + +/** + * @brief GEM device start function + * GEM device start function. Clears all status registers and any + * pending interrupts, enables RX and TX, enables interrupts. If + * no PHY is managed by the current driver instance, this function + * also declares the physical link up at the configured nominal + * link speed. + * + * @param dev Pointer to the device data + * @retval 0 upon successful completion + */ +static int eth_xlnx_gem_start_device(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + uint32_t reg_val; + + if (dev_data->started) { + return 0; + } + dev_data->started = true; + + /* Disable & clear all the MAC interrupts */ + sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK, + dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET); + sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK, + dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET); + + /* Clear RX & TX status registers */ + sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET); + sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET); + + /* RX and TX enable */ + reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET); + reg_val |= (ETH_XLNX_GEM_NWCTRL_RXEN_BIT | ETH_XLNX_GEM_NWCTRL_TXEN_BIT); + sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET); + + /* Enable all the MAC interrupts */ + sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK, + dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET); + + /* Submit the delayed work for polling the link state */ + if (k_work_delayable_remaining_get(&dev_data->phy_poll_delayed_work) == 0) { + k_work_reschedule(&dev_data->phy_poll_delayed_work, K_NO_WAIT); + } + + LOG_DBG("%s started", dev->name); + return 0; +} + +/** + * @brief GEM device stop function + * GEM device stop function. Disables all interrupts, disables + * RX and TX, clears all status registers. If no PHY is managed + * by the current driver instance, this function also declares + * the physical link down. 
+ * + * @param dev Pointer to the device data + * @retval 0 upon successful completion + */ +static int eth_xlnx_gem_stop_device(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + uint32_t reg_val; + + if (!dev_data->started) { + return 0; + } + dev_data->started = false; + + /* Cancel the delayed work that polls the link state */ + if (k_work_delayable_remaining_get(&dev_data->phy_poll_delayed_work) != 0) { + k_work_cancel_delayable(&dev_data->phy_poll_delayed_work); + } + + /* RX and TX disable */ + reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET); + reg_val &= (~(ETH_XLNX_GEM_NWCTRL_RXEN_BIT | ETH_XLNX_GEM_NWCTRL_TXEN_BIT)); + sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET); + + /* Disable & clear all the MAC interrupts */ + sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK, + dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET); + sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK, + dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET); + + /* Clear RX & TX status registers */ + sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET); + sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET); + + LOG_DBG("%s stopped", dev->name); + return 0; +} + +/** + * @brief GEM capability request function + * Returns the capabilities of the GEM controller as an enumeration. + * All of the data returned is derived from the device configuration + * of the current GEM device instance. + * + * @param dev Pointer to the device data + * @return Enumeration containing the current GEM device's capabilities + */ +static enum ethernet_hw_caps eth_xlnx_gem_get_capabilities( + const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + enum ethernet_hw_caps caps = (enum ethernet_hw_caps)0; + + if (dev_conf->max_link_speed == LINK_1GBIT) { + if (dev_conf->phy_advertise_lower) { + caps |= (ETHERNET_LINK_1000BASE_T | + ETHERNET_LINK_100BASE_T | + ETHERNET_LINK_10BASE_T); + } else { + caps |= ETHERNET_LINK_1000BASE_T; + } + } else if (dev_conf->max_link_speed == LINK_100MBIT) { + if (dev_conf->phy_advertise_lower) { + caps |= (ETHERNET_LINK_100BASE_T | + ETHERNET_LINK_10BASE_T); + } else { + caps |= ETHERNET_LINK_100BASE_T; + } + } else { + caps |= ETHERNET_LINK_10BASE_T; + } + + if (dev_conf->enable_rx_chksum_offload) { + caps |= ETHERNET_HW_RX_CHKSUM_OFFLOAD; + } + + if (dev_conf->enable_tx_chksum_offload) { + caps |= ETHERNET_HW_TX_CHKSUM_OFFLOAD; + } + + if (dev_conf->enable_fdx) { + caps |= ETHERNET_DUPLEX_SET; + } + + if (dev_conf->copy_all_frames) { + caps |= ETHERNET_PROMISC_MODE; + } + + return caps; +} + +#ifdef CONFIG_NET_STATISTICS_ETHERNET +/** + * @brief GEM statistics data request function + * Returns a pointer to the statistics data of the current GEM controller. + * + * @param dev Pointer to the device data + * @return Pointer to the current GEM device's statistics data + */ +static struct net_stats_eth *eth_xlnx_gem_stats(const struct device *dev) +{ + return &(DEV_DATA(dev)->stats); +} +#endif + +/** + * @brief GEM Hardware reset function + * Resets the current GEM device. Called from within the device + * initialization function. + * + * @param dev Pointer to the device data + */ +static void eth_xlnx_gem_reset_hw(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + + /* + * Controller reset sequence as described in the Zynq-7000 TRM, + * chapter 16.3.1. 
+ */ + + /* Clear the NWCTRL register */ + sys_write32(0x00000000, + dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET); + + /* Clear the statistics counters */ + sys_write32(ETH_XLNX_GEM_STATCLR_MASK, + dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET); + + /* Clear the RX/TX status registers */ + sys_write32(ETH_XLNX_GEM_TXSRCLR_MASK, + dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET); + sys_write32(ETH_XLNX_GEM_RXSRCLR_MASK, + dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET); + + /* Disable all interrupts */ + sys_write32(ETH_XLNX_GEM_IDRCLR_MASK, + dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET); + + /* Clear the buffer queues */ + sys_write32(0x00000000, + dev_conf->base_addr + ETH_XLNX_GEM_RXQBASE_OFFSET); + sys_write32(0x00000000, + dev_conf->base_addr + ETH_XLNX_GEM_TXQBASE_OFFSET); +} + +/** + * @brief GEM clock configuration function + * Calculates the pre-scalers for the TX clock to match the current + * (if an associated PHY is managed) or nominal link speed. Called + * from within the device initialization function. + * + * @param dev Pointer to the device data + */ +static void eth_xlnx_gem_configure_clocks(const struct device *dev) +{ + /* + * Clock source configuration for the respective GEM as described + * in the Zynq-7000 TRM, chapter 16.3.3, is not tackled here. This + * is performed by the PS7Init code. Only the DIVISOR and DIVISOR1 + * values for the respective GEM's TX clock are calculated here. + */ + + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + + uint32_t div0; + uint32_t div1; + uint32_t target = 2500000; /* default prevents 'may be uninitialized' warning */ + uint32_t tmp; + uint32_t clk_ctrl_reg; + + if ((!dev_conf->init_phy) || dev_data->eff_link_speed == LINK_DOWN) { + /* + * Run-time data indicates 'link down' or PHY management + * is disabled for the current device -> this indicates the + * initial device initialization. Once the PHY status polling + * delayed work handler has picked up the result of the auto- + * negotiation (if enabled), this if-statement will evaluate + * to false. + */ + if (dev_conf->max_link_speed == LINK_10MBIT) { + target = 2500000; /* Target frequency: 2.5 MHz */ + } else if (dev_conf->max_link_speed == LINK_100MBIT) { + target = 25000000; /* Target frequency: 25 MHz */ + } else if (dev_conf->max_link_speed == LINK_1GBIT) { + target = 125000000; /* Target frequency: 125 MHz */ + } + } else if (dev_data->eff_link_speed != LINK_DOWN) { + /* + * Use the effective link speed instead of the maximum/nominal + * link speed for clock configuration. + */ + if (dev_data->eff_link_speed == LINK_10MBIT) { + target = 2500000; /* Target frequency: 2.5 MHz */ + } else if (dev_data->eff_link_speed == LINK_100MBIT) { + target = 25000000; /* Target frequency: 25 MHz */ + } else if (dev_data->eff_link_speed == LINK_1GBIT) { + target = 125000000; /* Target frequency: 125 MHz */ + } + } + + /* + * Caclculate the divisors for the target frequency. + * The frequency of the PLL to which the divisors shall be applied are + * provided in the respective GEM's device tree data. 
+ */ + for (div0 = 1; div0 < 64; div0++) { + for (div1 = 1; div1 < 64; div1++) { + tmp = ((dev_conf->pll_clock_frequency / div0) / div1); + if (tmp >= (target - 10) && tmp <= (target + 10)) { + break; + } + } + if (tmp >= (target - 10) && tmp <= (target + 10)) { + break; + } + } + + /* + * ZynqMP register crl_apb.GEMx_REF_CTRL: + * RX_CLKACT bit [26] + * CLKACT bit [25] + * div0 bits [13..8], div1 bits [21..16] + * Unlock CRL_APB write access if the write protect bit + * is currently set, restore it afterwards. + */ + clk_ctrl_reg = sys_read32(dev_conf->clk_ctrl_reg_address); + clk_ctrl_reg &= ~((ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK << + ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR0_SHIFT) | + (ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK << + ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR1_SHIFT)); + clk_ctrl_reg |= ((div0 & ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK) << + ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR0_SHIFT) | + ((div1 & ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK) << + ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR1_SHIFT); + clk_ctrl_reg |= ETH_XLNX_CRL_APB_GEMX_REF_CTRL_RX_CLKACT_BIT | + ETH_XLNX_CRL_APB_GEMX_REF_CTRL_CLKACT_BIT; + + /* + * Unlock CRL_APB write access if the write protect bit + * is currently set, restore it afterwards. + */ + tmp = sys_read32(ETH_XLNX_CRL_APB_WPROT_REGISTER_ADDRESS); + if ((tmp & ETH_XLNX_CRL_APB_WPROT_BIT) > 0) { + sys_write32((tmp & ~ETH_XLNX_CRL_APB_WPROT_BIT), + ETH_XLNX_CRL_APB_WPROT_REGISTER_ADDRESS); + } + sys_write32(clk_ctrl_reg, dev_conf->clk_ctrl_reg_address); + if ((tmp & ETH_XLNX_CRL_APB_WPROT_BIT) > 0) { + sys_write32(tmp, ETH_XLNX_CRL_APB_WPROT_REGISTER_ADDRESS); + } + + LOG_DBG("%s set clock dividers div0/1 %u/%u for target " + "frequency %u Hz", dev->name, div0, div1, target); +} + +/** + * @brief GEM initial Network Configuration Register setup function + * Writes the contents of the current GEM device's Network Configuration + * Register (NWCFG / gem.net_cfg). Called from within the device + * initialization function. Implementation differs depending on whether + * the current target is a Zynq-7000 or a ZynqMP. 
+ * + * @param dev Pointer to the device data + */ +static void eth_xlnx_gem_set_initial_nwcfg(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + uint32_t reg_val = 0; + + if (dev_conf->ignore_ipg_rxer) { + /* [30] ignore IPG rx_er */ + reg_val |= ETH_XLNX_GEM_NWCFG_IGNIPGRXERR_BIT; + } + if (dev_conf->disable_reject_nsp) { + /* [29] disable rejection of non-standard preamble */ + reg_val |= ETH_XLNX_GEM_NWCFG_BADPREAMBEN_BIT; + } + if (dev_conf->enable_ipg_stretch) { + /* [28] enable IPG stretch */ + reg_val |= ETH_XLNX_GEM_NWCFG_IPG_STRETCH_BIT; + } + if (dev_conf->enable_sgmii_mode) { + /* [27] SGMII mode enable */ + reg_val |= ETH_XLNX_GEM_NWCFG_SGMIIEN_BIT; + } + if (dev_conf->disable_reject_fcs_crc_errors) { + /* [26] disable rejection of FCS/CRC errors */ + reg_val |= ETH_XLNX_GEM_NWCFG_FCSIGNORE_BIT; + } + if (dev_conf->enable_rx_halfdup_while_tx) { + /* [25] RX half duplex while TX enable */ + reg_val |= ETH_XLNX_GEM_NWCFG_HDRXEN_BIT; + } + if (dev_conf->enable_rx_chksum_offload) { + /* [24] enable RX IP/TCP/UDP checksum offload */ + reg_val |= ETH_XLNX_GEM_NWCFG_RXCHKSUMEN_BIT; + } + if (dev_conf->disable_pause_copy) { + /* [23] Do not copy pause Frames to memory */ + reg_val |= ETH_XLNX_GEM_NWCFG_PAUSECOPYDI_BIT; + } + /* [22..21] Data bus width */ + reg_val |= (((uint32_t)(dev_conf->amba_dbus_width) & + ETH_XLNX_GEM_NWCFG_DBUSW_MASK) << + ETH_XLNX_GEM_NWCFG_DBUSW_SHIFT); + /* [20..18] MDC clock divider */ + reg_val |= (((uint32_t)dev_conf->mdc_divider & + ETH_XLNX_GEM_NWCFG_MDC_MASK) << + ETH_XLNX_GEM_NWCFG_MDC_SHIFT); + if (dev_conf->discard_rx_fcs) { + /* [17] Discard FCS from received frames */ + reg_val |= ETH_XLNX_GEM_NWCFG_FCSREM_BIT; + } + if (dev_conf->discard_rx_length_errors) { + /* [16] RX length error discard */ + reg_val |= ETH_XLNX_GEM_NWCFG_LENGTHERRDSCRD_BIT; + } + /* [15..14] RX buffer offset */ + reg_val |= (((uint32_t)dev_conf->hw_rx_buffer_offset & + ETH_XLNX_GEM_NWCFG_RXOFFS_MASK) << + ETH_XLNX_GEM_NWCFG_RXOFFS_SHIFT); + if (dev_conf->enable_pause) { + /* [13] Enable pause TX */ + reg_val |= ETH_XLNX_GEM_NWCFG_PAUSEEN_BIT; + } + if (dev_conf->enable_tbi) { + /* [11] enable TBI instead of GMII/MII */ + reg_val |= ETH_XLNX_GEM_NWCFG_TBIINSTEAD_BIT; + } + if (dev_conf->ext_addr_match) { + /* [09] External address match enable */ + reg_val |= ETH_XLNX_GEM_NWCFG_EXTADDRMATCHEN_BIT; + } + if (dev_conf->enable_1536_frames) { + /* [08] Enable 1536 byte frames reception */ + reg_val |= ETH_XLNX_GEM_NWCFG_1536RXEN_BIT; + } + if (dev_conf->enable_ucast_hash) { + /* [07] Receive unicast hash frames */ + reg_val |= ETH_XLNX_GEM_NWCFG_UCASTHASHEN_BIT; + } + if (dev_conf->enable_mcast_hash) { + /* [06] Receive multicast hash frames */ + reg_val |= ETH_XLNX_GEM_NWCFG_MCASTHASHEN_BIT; + } + if (dev_conf->disable_bcast) { + /* [05] Do not receive broadcast frames */ + reg_val |= ETH_XLNX_GEM_NWCFG_BCASTDIS_BIT; + } + if (dev_conf->copy_all_frames) { + /* [04] Copy all frames */ + reg_val |= ETH_XLNX_GEM_NWCFG_COPYALLEN_BIT; + } + if (dev_conf->discard_non_vlan) { + /* [02] Receive only VLAN frames */ + reg_val |= ETH_XLNX_GEM_NWCFG_NVLANDISC_BIT; + } + if (dev_conf->enable_fdx) { + /* [01] enable Full duplex */ + reg_val |= ETH_XLNX_GEM_NWCFG_FDEN_BIT; + } + if (dev_conf->max_link_speed == LINK_100MBIT) { + /* [00] 10 or 100 Mbps */ + reg_val |= ETH_XLNX_GEM_NWCFG_100_BIT; + } else if (dev_conf->max_link_speed == LINK_1GBIT) { + /* [10] Gigabit mode enable */ + reg_val |= ETH_XLNX_GEM_NWCFG_1000_BIT; + } + /* + * No else-branch for 
10Mbit/s mode: + * in 10 Mbit/s mode, both bits [00] and [10] remain 0 + */ + + /* Write the assembled register contents to gem.net_cfg */ + sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCFG_OFFSET); +} + +/** + * @brief GEM Network Configuration Register link speed update function + * Updates only the link speed-related bits of the Network Configuration + * register. This is called from within #eth_xlnx_gem_poll_phy. + * + * @param dev Pointer to the device data + */ +static void eth_xlnx_gem_set_nwcfg_link_speed(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + uint32_t reg_val; + + /* + * Read the current gem.net_cfg register contents and mask out + * the link speed-related bits + */ + reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCFG_OFFSET); + reg_val &= ~(ETH_XLNX_GEM_NWCFG_1000_BIT | ETH_XLNX_GEM_NWCFG_100_BIT); + + /* No bits to set for 10 Mbps. 100 Mbps and 1 Gbps set one bit each. */ + if (dev_data->eff_link_speed == LINK_100MBIT) { + reg_val |= ETH_XLNX_GEM_NWCFG_100_BIT; + } else if (dev_data->eff_link_speed == LINK_1GBIT) { + reg_val |= ETH_XLNX_GEM_NWCFG_1000_BIT; + } + + /* Write the assembled register contents to gem.net_cfg */ + sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCFG_OFFSET); +} + +/** + * @brief GEM MAC address setup function + * Acquires the MAC address to be assigned to the current GEM device + * from the device configuration data which in turn acquires it from + * the device tree data, then writes it to the gem.spec_addr1_bot/LADDR1L + * and gem.spec_addr1_top/LADDR1H registers. Called from within the device + * initialization function. + * + * @param dev Pointer to the device data + */ +static void eth_xlnx_gem_set_mac_address(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + uint32_t regval_top; + uint32_t regval_bot; + + regval_bot = (dev_data->mac_addr[0] & 0xFF); + regval_bot |= (dev_data->mac_addr[1] & 0xFF) << 8; + regval_bot |= (dev_data->mac_addr[2] & 0xFF) << 16; + regval_bot |= (dev_data->mac_addr[3] & 0xFF) << 24; + + regval_top = (dev_data->mac_addr[4] & 0xFF); + regval_top |= (dev_data->mac_addr[5] & 0xFF) << 8; + + sys_write32(regval_bot, dev_conf->base_addr + ETH_XLNX_GEM_LADDR1L_OFFSET); + sys_write32(regval_top, dev_conf->base_addr + ETH_XLNX_GEM_LADDR1H_OFFSET); + + LOG_DBG("%s MAC %02X:%02X:%02X:%02X:%02X:%02X", + dev->name, + dev_data->mac_addr[0], + dev_data->mac_addr[1], + dev_data->mac_addr[2], + dev_data->mac_addr[3], + dev_data->mac_addr[4], + dev_data->mac_addr[5]); +} + +/** + * @brief GEM initial DMA Control Register setup function + * Writes the contents of the current GEM device's DMA Control Register + * (DMACR / gem.dma_cfg). Called from within the device initialization + * function. + * + * @param dev Pointer to the device data + */ +static void eth_xlnx_gem_set_initial_dmacr(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + uint32_t reg_val = 0; + + /* + * gem.dma_cfg register bit (field) definitions: + * comp. Zynq-7000 TRM, p. 1278 ff. 
+ */ + + if (dev_conf->disc_rx_ahb_unavail) { + /* [24] Discard RX packet when AHB unavailable */ + reg_val |= ETH_XLNX_GEM_DMACR_DISCNOAHB_BIT; + } + /* + * [23..16] DMA RX buffer size in AHB system memory + * e.g.: 0x02 = 128, 0x18 = 1536, 0xA0 = 10240 + */ + reg_val |= (((dev_conf->rx_buffer_size / 64) & + ETH_XLNX_GEM_DMACR_RX_BUF_MASK) << + ETH_XLNX_GEM_DMACR_RX_BUF_SHIFT); + if (dev_conf->enable_tx_chksum_offload) { + /* [11] TX TCP/UDP/IP checksum offload to GEM */ + reg_val |= ETH_XLNX_GEM_DMACR_TCP_CHKSUM_BIT; + } + if (dev_conf->tx_buffer_size_full) { + /* [10] TX buffer memory size select */ + reg_val |= ETH_XLNX_GEM_DMACR_TX_SIZE_BIT; + } + /* + * [09..08] RX packet buffer memory size select + * 0 = 1kB, 1 = 2kB, 2 = 4kB, 3 = 8kB + */ + reg_val |= (((uint32_t)dev_conf->hw_rx_buffer_size << + ETH_XLNX_GEM_DMACR_RX_SIZE_SHIFT) & + ETH_XLNX_GEM_DMACR_RX_SIZE_MASK); + if (dev_conf->enable_ahb_packet_endian_swap) { + /* [07] AHB packet data endian swap enable */ + reg_val |= ETH_XLNX_GEM_DMACR_ENDIAN_BIT; + } + if (dev_conf->enable_ahb_md_endian_swap) { + /* [06] AHB mgmt descriptor endian swap enable */ + reg_val |= ETH_XLNX_GEM_DMACR_DESCR_ENDIAN_BIT; + } + /* + * [04..00] AHB fixed burst length for DMA ops. + * 00001 = single AHB bursts, + * 001xx = attempt to use INCR4 bursts, + * 01xxx = attempt to use INCR8 bursts, + * 1xxxx = attempt to use INCR16 bursts + */ + reg_val |= ((uint32_t)dev_conf->ahb_burst_length & + ETH_XLNX_GEM_DMACR_AHB_BURST_LENGTH_MASK); + + /* Write the assembled register contents */ + sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_DMACR_OFFSET); +} + +/** + * @brief GEM associated PHY detection and setup function + * If the current GEM device shall manage an associated PHY, its detection + * and configuration is performed from within this function. Called from + * within the device initialization function. This function refers to + * functionality implemented in the phy_xlnx_gem module. + * + * @param dev Pointer to the device data + */ +static void eth_xlnx_gem_init_phy(const struct device *dev) +{ + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + int detect_rc; + + LOG_DBG("%s attempting to initialize associated PHY", dev->name); + + /* + * The phy_xlnx_gem_detect function checks if a valid PHY + * ID is returned when reading the corresponding high / low + * ID registers for all valid MDIO addresses. If a compatible + * PHY is detected, the function writes a pointer to the + * vendor-specific implementations of the PHY management + * functions to the run-time device data struct, along with + * the ID and the MDIO address of the detected PHY (dev_data-> + * phy_id, dev_data->phy_addr, dev_data->phy_access_api). + */ + detect_rc = phy_xlnx_gem_detect(dev); + + if (detect_rc == 0 && dev_data->phy_id != 0x00000000 && + dev_data->phy_id != 0xFFFFFFFF && + dev_data->phy_access_api != NULL) { + /* A compatible PHY was detected -> reset & configure it */ + dev_data->phy_access_api->phy_reset_func(dev); + dev_data->phy_access_api->phy_configure_func(dev); + } else { + LOG_WRN("%s no compatible PHY detected", dev->name); + } +} + +/** + * @brief GEM associated PHY status polling function + * This handler of a delayed work item is called from the context of + * the system work queue. It is always scheduled at least once during the + * interface initialization. If the current driver instance manages a + * PHY, the delayed work item will be re-scheduled in order to continuously + * monitor the link state and speed while the device is active. 
Link state + * and link speed changes are polled, which may result in the link state + * change being propagated (carrier on/off) and / or the TX clock being + * reconfigured to match the current link speed. If PHY management is dis- + * abled for the current driver instance or no compatible PHY was detected, + * the work item will not be re-scheduled and default link speed and link + * state values are applied. This function refers to functionality imple- + * mented in the phy_xlnx_gem module. + * + * @param work Pointer to the delayed work item which facilitates + * access to the current device's configuration data + */ +static void eth_xlnx_gem_poll_phy(struct k_work *work) +{ + struct k_work_delayable *dwork = k_work_delayable_from_work(work); + struct eth_xlnx_gem_dev_data *dev_data = CONTAINER_OF(dwork, + struct eth_xlnx_gem_dev_data, phy_poll_delayed_work); + const struct device *dev = net_if_get_device(dev_data->iface); + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + + uint16_t phy_status; + uint8_t link_status; + + if (dev_data->phy_access_api != NULL) { + /* A supported PHY is managed by the driver */ + phy_status = dev_data->phy_access_api->phy_poll_status_change_func(dev); + + if ((phy_status & ( + PHY_XLNX_GEM_EVENT_LINK_SPEED_CHANGED | + PHY_XLNX_GEM_EVENT_LINK_STATE_CHANGED | + PHY_XLNX_GEM_EVENT_AUTONEG_COMPLETE)) != 0) { + + /* + * Get the PHY's link status. Handling a 'link down' + * event the simplest possible case. + */ + link_status = dev_data->phy_access_api->phy_poll_link_status_func(dev); + + if (link_status == 0) { + /* + * Link is down -> propagate to the Ethernet + * layer that the link has gone down. + */ + dev_data->eff_link_speed = LINK_DOWN; + net_eth_carrier_off(dev_data->iface); + + LOG_WRN("%s link down", dev->name); + } else { + /* + * A link has been detected, which, depending + * on the driver's configuration, might have + * a different speed than the previous link. + * Therefore, the clock dividers must be ad- + * justed accordingly. + */ + dev_data->eff_link_speed = + dev_data->phy_access_api->phy_poll_link_speed_func(dev); + + eth_xlnx_gem_configure_clocks(dev); + eth_xlnx_gem_set_nwcfg_link_speed(dev); + net_eth_carrier_on(dev_data->iface); + + LOG_INF("%s link up, %s", dev->name, + (dev_data->eff_link_speed == LINK_1GBIT) + ? "1 GBit/s" + : (dev_data->eff_link_speed == LINK_100MBIT) + ? "100 MBit/s" + : (dev_data->eff_link_speed == LINK_10MBIT) + ? "10 MBit/s" : "undefined / link down"); + } + } + + /* + * Re-submit the delayed work using the interval from the device + * configuration data. + */ + k_work_reschedule(&dev_data->phy_poll_delayed_work, + K_MSEC(dev_conf->phy_poll_interval)); + } else { + /* + * The current driver instance doesn't manage a PHY or no + * supported PHY was detected -> pretend the configured max. + * link speed is the effective link speed and that the link + * is up. The delayed work item won't be re-scheduled, as + * there isn't anything to poll for. + */ + dev_data->eff_link_speed = dev_conf->max_link_speed; + + eth_xlnx_gem_configure_clocks(dev); + eth_xlnx_gem_set_nwcfg_link_speed(dev); + net_eth_carrier_on(dev_data->iface); + + LOG_WRN("%s PHY not managed by the driver or no compatible " + "PHY detected, assuming link up at %s", dev->name, + (dev_conf->max_link_speed == LINK_1GBIT) + ? "1 GBit/s" + : (dev_conf->max_link_speed == LINK_100MBIT) + ? "100 MBit/s" + : (dev_conf->max_link_speed == LINK_10MBIT) + ? 
"10 MBit/s" : "undefined"); + } +} + +/** + * @brief GEM DMA memory area setup function + * Sets up the DMA memory area to be used by the current GEM device. + * Called from within the device initialization function or from within + * the context of the PHY status polling delayed work handler. + * + * @param dev Pointer to the device data + */ +static void eth_xlnx_gem_configure_buffers(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + struct eth_xlnx_gem_bd *bdptr; + uint32_t buf_iter; + + /* Initial configuration of the RX/TX BD rings */ + DT_INST_FOREACH_STATUS_OKAY(ETH_XLNX_GEM_INIT_BD_RING) + + /* + * Set initial RX BD data -> comp. Zynq-7000 TRM, Chapter 16.3.5, + * "Receive Buffer Descriptor List". The BD ring data other than + * the base RX/TX buffer pointers will be set in eth_xlnx_gem_- + * iface_init() + */ + bdptr = dev_data->rxbd_ring.first_bd; + + for (buf_iter = 0; buf_iter < (dev_conf->rxbd_count - 1); buf_iter++) { + /* Clear 'used' bit -> BD is owned by the controller */ + bdptr->ctrl = 0; + bdptr->addr = (uint32_t)dev_data->first_rx_buffer + + (buf_iter * (uint32_t)dev_conf->rx_buffer_size); + ++bdptr; + } + + /* + * For the last BD, bit [1] must be OR'ed in the buffer memory + * address -> this is the 'wrap' bit indicating that this is the + * last BD in the ring. This location is used as bits [1..0] can't + * be part of the buffer address due to alignment requirements + * anyways. Watch out: TX BDs handle this differently, their wrap + * bit is located in the BD's control word! + */ + bdptr->ctrl = 0; /* BD is owned by the controller */ + bdptr->addr = ((uint32_t)dev_data->first_rx_buffer + + (buf_iter * (uint32_t)dev_conf->rx_buffer_size)) | + ETH_XLNX_GEM_RXBD_WRAP_BIT; + + /* + * Set initial TX BD data -> comp. Zynq-7000 TRM, Chapter 16.3.5, + * "Transmit Buffer Descriptor List". TX BD ring data has already + * been set up in eth_xlnx_gem_iface_init() + */ + bdptr = dev_data->txbd_ring.first_bd; + + for (buf_iter = 0; buf_iter < (dev_conf->txbd_count - 1); buf_iter++) { + /* Set up the control word -> 'used' flag must be set. */ + bdptr->ctrl = ETH_XLNX_GEM_TXBD_USED_BIT; + bdptr->addr = (uint32_t)dev_data->first_tx_buffer + + (buf_iter * (uint32_t)dev_conf->tx_buffer_size); + ++bdptr; + } + + /* + * For the last BD, set the 'wrap' bit indicating to the controller + * that this BD is the last one in the ring. 
-> For TX BDs, the 'wrap' + * bit isn't located in the address word, but in the control word + * instead + */ + bdptr->ctrl = (ETH_XLNX_GEM_TXBD_WRAP_BIT | ETH_XLNX_GEM_TXBD_USED_BIT); + bdptr->addr = (uint32_t)dev_data->first_tx_buffer + + (buf_iter * (uint32_t)dev_conf->tx_buffer_size); + + /* Set free count/current index in the RX/TX BD ring data */ + dev_data->rxbd_ring.next_to_process = 0; + dev_data->rxbd_ring.next_to_use = 0; + dev_data->rxbd_ring.free_bds = dev_conf->rxbd_count; + dev_data->txbd_ring.next_to_process = 0; + dev_data->txbd_ring.next_to_use = 0; + dev_data->txbd_ring.free_bds = dev_conf->txbd_count; + + /* Write pointers to the first RX/TX BD to the controller */ + sys_write32((uint32_t)dev_data->rxbd_ring.first_bd, + dev_conf->base_addr + ETH_XLNX_GEM_RXQBASE_OFFSET); + sys_write32((uint32_t)dev_data->txbd_ring.first_bd, + dev_conf->base_addr + ETH_XLNX_GEM_TXQBASE_OFFSET); +} + +/** + * @brief GEM RX data pending handler wrapper for the work queue + * Wraps the RX data pending handler, eth_xlnx_gem_handle_rx_pending, + * for the scenario in which the current GEM device is configured + * to defer RX pending / TX done indication handling to the system + * work queue. In this case, the work item received by this wrapper + * function will be enqueued from within the ISR if the corresponding + * bit is set within the controller's interrupt status register + * (gem.intr_status). + * + * @param item Pointer to the work item enqueued by the ISR which + * facilitates access to the current device's data + */ +static void eth_xlnx_gem_rx_pending_work(struct k_work *item) +{ + struct eth_xlnx_gem_dev_data *dev_data = CONTAINER_OF(item, + struct eth_xlnx_gem_dev_data, rx_pend_work); + const struct device *dev = net_if_get_device(dev_data->iface); + + eth_xlnx_gem_handle_rx_pending(dev); +} + +/** + * @brief GEM RX data pending handler + * This handler is called either from within the ISR or from the + * context of the system work queue whenever the RX data pending bit + * is set in the controller's interrupt status register (gem.intr_status). + * No further RX data pending interrupts will be triggered until this + * handler has been executed, which eventually clears the corresponding + * interrupt status bit. This function acquires the incoming packet + * data from the DMA memory area via the RX buffer descriptors and copies + * the data to a packet which will then be handed over to the network + * stack. + * + * @param dev Pointer to the device data + */ +static void eth_xlnx_gem_handle_rx_pending(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + uint32_t reg_addr; + uint32_t reg_ctrl; + uint32_t reg_val; + uint32_t reg_val_rxsr; + uint8_t first_bd_idx; + uint8_t last_bd_idx; + uint8_t curr_bd_idx; + uint32_t rx_data_length; + uint32_t rx_data_remaining; + struct net_pkt *pkt; + + /* Read the RX status register */ + reg_val_rxsr = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET); + + /* + * TODO Evaluate error flags from RX status register word + * here for proper error handling. 
+ */ + + while (1) { + curr_bd_idx = dev_data->rxbd_ring.next_to_process; + first_bd_idx = last_bd_idx = curr_bd_idx; + reg_addr = (uint32_t)(&dev_data->rxbd_ring.first_bd[first_bd_idx].addr); + reg_ctrl = (uint32_t)(&dev_data->rxbd_ring.first_bd[first_bd_idx].ctrl); + + /* + * Basic precondition checks for the current BD's + * address and control words + */ + reg_val = sys_read32(reg_addr); + if ((reg_val & ETH_XLNX_GEM_RXBD_USED_BIT) == 0) { + /* + * No new data contained in the current BD + * -> break out of the RX loop + */ + break; + } + reg_val = sys_read32(reg_ctrl); + if ((reg_val & ETH_XLNX_GEM_RXBD_START_OF_FRAME_BIT) == 0) { + /* + * Although the current BD is marked as 'used', it + * doesn't contain the SOF bit. + */ + LOG_ERR("%s unexpected missing SOF bit in RX BD [%u]", + dev->name, first_bd_idx); + break; + } + + /* + * As long as the current BD doesn't have the EOF bit set, + * iterate forwards until the EOF bit is encountered. Only + * the BD containing the EOF bit also contains the length + * of the received packet which spans multiple buffers. + */ + do { + reg_ctrl = (uint32_t)(&dev_data->rxbd_ring.first_bd[last_bd_idx].ctrl); + reg_val = sys_read32(reg_ctrl); + rx_data_length = rx_data_remaining = + (reg_val & ETH_XLNX_GEM_RXBD_FRAME_LENGTH_MASK); + if ((reg_val & ETH_XLNX_GEM_RXBD_END_OF_FRAME_BIT) == 0) { + last_bd_idx = (last_bd_idx + 1) % dev_conf->rxbd_count; + } + } while ((reg_val & ETH_XLNX_GEM_RXBD_END_OF_FRAME_BIT) == 0); + + /* + * Store the position of the first BD behind the end of the + * frame currently being processed as 'next to process' + */ + dev_data->rxbd_ring.next_to_process = (last_bd_idx + 1) % + dev_conf->rxbd_count; + + /* + * Allocate a destination packet from the network stack + * now that the total frame length is known. + */ + pkt = net_pkt_rx_alloc_with_buffer(dev_data->iface, rx_data_length, + AF_UNSPEC, 0, K_NO_WAIT); + if (pkt == NULL) { + LOG_ERR("RX packet buffer alloc failed: %u bytes", + rx_data_length); +#ifdef CONFIG_NET_STATISTICS_ETHERNET + dev_data->stats.errors.rx++; + dev_data->stats.error_details.rx_no_buffer_count++; +#endif + } + + /* + * Copy data from all involved RX buffers into the allocated + * packet's data buffer. If we don't have a packet buffer be- + * cause none are available, we still have to iterate over all + * involved BDs in order to properly release them for re-use + * by the controller. + */ + do { + if (pkt != NULL) { + net_pkt_write(pkt, (const void *) + (dev_data->rxbd_ring.first_bd[curr_bd_idx].addr & + ETH_XLNX_GEM_RXBD_BUFFER_ADDR_MASK), + (rx_data_remaining < dev_conf->rx_buffer_size) ? + rx_data_remaining : dev_conf->rx_buffer_size); + } + rx_data_remaining -= (rx_data_remaining < dev_conf->rx_buffer_size) ? + rx_data_remaining : dev_conf->rx_buffer_size; + + /* + * The entire packet data of the current BD has been + * processed, on to the next BD -> preserve the RX BD's + * 'wrap' bit & address, but clear the 'used' bit. 
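+ * Clearing the 'used' bit hands ownership of the associated buffer back
+ * to the controller so that it can be re-used for reception.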
+ */ + reg_addr = (uint32_t)(&dev_data->rxbd_ring.first_bd[curr_bd_idx].addr); + reg_val = sys_read32(reg_addr); + reg_val &= ~ETH_XLNX_GEM_RXBD_USED_BIT; + sys_write32(reg_val, reg_addr); + + curr_bd_idx = (curr_bd_idx + 1) % dev_conf->rxbd_count; + } while (curr_bd_idx != ((last_bd_idx + 1) % dev_conf->rxbd_count)); + + /* Propagate the received packet to the network stack */ + if (pkt != NULL) { + if (net_recv_data(dev_data->iface, pkt) < 0) { + LOG_ERR("%s RX packet hand-over to IP stack failed", + dev->name); + net_pkt_unref(pkt); + } +#ifdef CONFIG_NET_STATISTICS_ETHERNET + else { + dev_data->stats.bytes.received += rx_data_length; + dev_data->stats.pkts.rx++; + } +#endif + } + } + + /* Clear the RX status register */ + sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET); + /* Re-enable the frame received interrupt source */ + sys_write32(ETH_XLNX_GEM_IXR_FRAME_RX_BIT, + dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET); +} + +/** + * @brief GEM TX done handler wrapper for the work queue + * Wraps the TX done handler, eth_xlnx_gem_handle_tx_done, + * for the scenario in which the current GEM device is configured + * to defer RX pending / TX done indication handling to the system + * work queue. In this case, the work item received by this wrapper + * function will be enqueued from within the ISR if the corresponding + * bit is set within the controller's interrupt status register + * (gem.intr_status). + * + * @param item Pointer to the work item enqueued by the ISR which + * facilitates access to the current device's data + */ +static void eth_xlnx_gem_tx_done_work(struct k_work *item) +{ + struct eth_xlnx_gem_dev_data *dev_data = CONTAINER_OF(item, + struct eth_xlnx_gem_dev_data, tx_done_work); + const struct device *dev = net_if_get_device(dev_data->iface); + + eth_xlnx_gem_handle_tx_done(dev); +} + +/** + * @brief GEM TX done handler + * This handler is called either from within the ISR or from the + * context of the system work queue whenever the TX done bit is set + * in the controller's interrupt status register (gem.intr_status). + * No further TX done interrupts will be triggered until this handler + * has been executed, which eventually clears the corresponding + * interrupt status bit. Once this handler reaches the end of its + * execution, the eth_xlnx_gem_send call which effectively triggered + * it is unblocked by posting to the current GEM's TX done semaphore + * on which the send function is blocking. 
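+ * Note that since there is only a single tx_done_sem per device, only
+ * one transmission can be in flight at any time; this is the reason for
+ * the 'send function is not SMP-capable' limitation noted at the top of
+ * this file.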
+ * + * @param dev Pointer to the device data + */ +static void eth_xlnx_gem_handle_tx_done(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + uint32_t reg_ctrl; + uint32_t reg_val; + uint32_t reg_val_txsr; + uint8_t curr_bd_idx; + uint8_t first_bd_idx; + uint8_t bds_processed = 0; + uint8_t bd_is_last; + + /* Read the TX status register */ + reg_val_txsr = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET); + + /* + * TODO Evaluate error flags from TX status register word + * here for proper error handling + */ + + if (dev_conf->defer_txd_to_queue) { + k_sem_take(&(dev_data->txbd_ring.ring_sem), K_FOREVER); + } + + curr_bd_idx = first_bd_idx = dev_data->txbd_ring.next_to_process; + reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl); + reg_val = sys_read32(reg_ctrl); + + do { + ++bds_processed; + + /* + * TODO Evaluate error flags from current BD control word + * here for proper error handling + */ + + /* + * Check if the BD we're currently looking at is the last BD + * of the current transmission + */ + bd_is_last = ((reg_val & ETH_XLNX_GEM_TXBD_LAST_BIT) != 0) ? 1 : 0; + + /* + * Reset control word of the current BD, clear everything but + * the 'wrap' bit, then set the 'used' bit + */ + reg_val &= ETH_XLNX_GEM_TXBD_WRAP_BIT; + reg_val |= ETH_XLNX_GEM_TXBD_USED_BIT; + sys_write32(reg_val, reg_ctrl); + + /* Move on to the next BD or break out of the loop */ + if (bd_is_last == 1) { + break; + } + curr_bd_idx = (curr_bd_idx + 1) % dev_conf->txbd_count; + reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl); + reg_val = sys_read32(reg_ctrl); + } while (bd_is_last == 0 && curr_bd_idx != first_bd_idx); + + if (curr_bd_idx == first_bd_idx && bd_is_last == 0) { + LOG_WRN("%s TX done handling wrapped around", dev->name); + } + + dev_data->txbd_ring.next_to_process = + (dev_data->txbd_ring.next_to_process + bds_processed) % + dev_conf->txbd_count; + dev_data->txbd_ring.free_bds += bds_processed; + + if (dev_conf->defer_txd_to_queue) { + k_sem_give(&(dev_data->txbd_ring.ring_sem)); + } + + /* Clear the TX status register */ + sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET); + + /* Re-enable the TX complete interrupt source */ + sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT, + dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET); + + /* Indicate completion to a blocking eth_xlnx_gem_send() call */ + k_sem_give(&dev_data->tx_done_sem); +} + +/* EOF */ diff --git a/drivers/ethernet/eth_xlnx_gem_priv.h b/drivers/ethernet/eth_xlnx_gem_priv.h new file mode 100644 index 00000000000..ef0b7361c88 --- /dev/null +++ b/drivers/ethernet/eth_xlnx_gem_priv.h @@ -0,0 +1,754 @@ +/* + * Xilinx Processor System Gigabit Ethernet controller (GEM) driver + * + * Driver private data declarations + * + * Copyright (c) 2021, Weidmueller Interface GmbH & Co. KG + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _ZEPHYR_DRIVERS_ETHERNET_ETH_XLNX_GEM_PRIV_H_ +#define _ZEPHYR_DRIVERS_ETHERNET_ETH_XLNX_GEM_PRIV_H_ + +#define DT_DRV_COMPAT xlnx_gem + +#include +#include +#include + +#include "phy_xlnx_gem.h" + +#define ETH_XLNX_BUFFER_ALIGNMENT 4 /* RX/TX buffer alignment (in bytes) */ + +/* Buffer descriptor (BD) related defines */ + +/* Receive Buffer Descriptor bits & masks: comp. Zynq-7000 TRM, Table 16-2. */ + +/* + * Receive Buffer Descriptor address word: + * [31 .. 
02] Mask for effective buffer address -> excludes [1..0] + * [01] Wrap bit, last BD in RX BD ring + * [00] BD used bit + */ +#define ETH_XLNX_GEM_RXBD_WRAP_BIT 0x00000002 +#define ETH_XLNX_GEM_RXBD_USED_BIT 0x00000001 +#define ETH_XLNX_GEM_RXBD_BUFFER_ADDR_MASK 0xFFFFFFFC + +/* + * Receive Buffer Descriptor control word: + * [31] Broadcast detected + * [30] Multicast hash match detected + * [29] Unicast hash match detected + * [27] Specific address match detected + * [26 .. 25] Bits indicating which specific address register was matched + * [24] this bit has different semantics depending on whether RX checksum + * offloading is enabled or not + * [23 .. 22] These bits have different semantics depending on whether RX check- + * sum offloading is enabled or not + * [21] VLAN tag (type ID 0x8100) detected + * [20] Priority tag: VLAN tag (type ID 0x8100) and null VLAN identifier + * detected + * [19 .. 17] VLAN priority + * [16] Canonical format indicator bit + * [15] End-of-frame bit + * [14] Start-of-frame bit + * [13] FCS status bit for FCS ignore mode + * [12 .. 00] Data length of received frame + */ +#define ETH_XLNX_GEM_RXBD_BCAST_BIT 0x80000000 +#define ETH_XLNX_GEM_RXBD_MCAST_HASH_MATCH_BIT 0x40000000 +#define ETH_XLNX_GEM_RXBD_UCAST_HASH_MATCH_BIT 0x20000000 +#define ETH_XLNX_GEM_RXBD_SPEC_ADDR_MATCH_BIT 0x08000000 +#define ETH_XLNX_GEM_RXBD_SPEC_ADDR_MASK 0x00000003 +#define ETH_XLNX_GEM_RXBD_SPEC_ADDR_SHIFT 25 +#define ETH_XLNX_GEM_RXBD_BIT24 0x01000000 +#define ETH_XLNX_GEM_RXBD_BITS23_22_MASK 0x00000003 +#define ETH_XLNX_GEM_RXBD_BITS23_22_SHIFT 22 +#define ETH_XLNX_GEM_RXBD_VLAN_TAG_DETECTED_BIT 0x00200000 +#define ETH_XLNX_GEM_RXBD_PRIO_TAG_DETECTED_BIT 0x00100000 +#define ETH_XLNX_GEM_RXBD_VLAN_PRIORITY_MASK 0x00000007 +#define ETH_XLNX_GEM_RXBD_VLAN_PRIORITY_SHIFT 17 +#define ETH_XLNX_GEM_RXBD_CFI_BIT 0x00010000 +#define ETH_XLNX_GEM_RXBD_END_OF_FRAME_BIT 0x00008000 +#define ETH_XLNX_GEM_RXBD_START_OF_FRAME_BIT 0x00004000 +#define ETH_XLNX_GEM_RXBD_FCS_STATUS_BIT 0x00002000 +#define ETH_XLNX_GEM_RXBD_FRAME_LENGTH_MASK 0x00001FFF + +/* Transmit Buffer Descriptor bits & masks: comp. Zynq-7000 TRM, Table 16-3. */ + +/* + * Transmit Buffer Descriptor control word: + * [31] BD used marker + * [30] Wrap bit, last BD in TX BD ring + * [29] Retry limit exceeded + * [27] TX frame corruption due to AHB/AXI error, HRESP errors or buffers + * exhausted mid-frame + * [26] Late collision, TX error detected + * [22 .. 20] Transmit IP/TCP/UDP checksum generation offload error bits + * [16] No CRC appended by MAC + * [15] Last buffer bit, indicates end of current TX frame + * [13 .. 
00] Data length in the BD's associated buffer + */ +#define ETH_XLNX_GEM_TXBD_USED_BIT 0x80000000 +#define ETH_XLNX_GEM_TXBD_WRAP_BIT 0x40000000 +#define ETH_XLNX_GEM_TXBD_RETRY_BIT 0x20000000 +#define ETH_XLNX_GEM_TXBD_TX_FRAME_CORRUPT_BIT 0x08000000 +#define ETH_XLNX_GEM_TXBD_LATE_COLLISION_BIT 0x04000000 +#define ETH_XLNX_GEM_TXBD_CKSUM_OFFLOAD_ERROR_MASK 0x00000007 +#define ETH_XLNX_GEM_TXBD_CKSUM_OFFLOAD_ERROR_SHIFT 20 +#define ETH_XLNX_GEM_TXBD_NO_CRC_BIT 0x00010000 +#define ETH_XLNX_GEM_TXBD_LAST_BIT 0x00008000 +#define ETH_XLNX_GEM_TXBD_LEN_MASK 0x00003FFF +#define ETH_XLNX_GEM_TXBD_ERR_MASK 0x3C000000 + +#define ETH_XLNX_GEM_CKSUM_NO_ERROR 0x00000000 +#define ETH_XLNX_GEM_CKSUM_VLAN_HDR_ERROR 0x00000001 +#define ETH_XLNX_GEM_CKSUM_SNAP_HDR_ERROR 0x00000002 +#define ETH_XLNX_GEM_CKSUM_IP_TYPE_OR_LEN_ERROR 0x00000003 +#define ETH_XLNX_GEM_CKSUM_NOT_VLAN_SNAP_IP_ERROR 0x00000004 +#define ETH_XLNX_GEM_CKSUM_UNSUPP_PKT_FRAG_ERROR 0x00000005 +#define ETH_XLNX_GEM_CKSUM_NOT_TCP_OR_UDP_ERROR 0x00000006 +#define ETH_XLNX_GEM_CKSUM_PREMATURE_END_ERROR 0x00000007 + +/* + * TX clock configuration: comp. + * https://www.xilinx.com/html_docs/registers/ug1087/ug1087-zynq-ultrascale-registers.html + * + * CRL_WPROT (CRL_APB) register: + * [00] CRL APB register space write protection bit + * + * GEMx_REF_CTRL (CRL_APB) registers: + * [30] RX channel clock active bit + * [29] Clock active bit + * [21 .. 16] Reference clock divisor 1 + * [13 .. 08] Reference clock divisor 0 + */ +#define ETH_XLNX_CRL_APB_WPROT_REGISTER_ADDRESS 0xFF5E001C +#define ETH_XLNX_CRL_APB_WPROT_BIT 0x00000001 +#define ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK 0x0000003F +#define ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR1_SHIFT 16 +#define ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR0_SHIFT 8 +#define ETH_XLNX_CRL_APB_GEMX_REF_CTRL_RX_CLKACT_BIT 0x04000000 +#define ETH_XLNX_CRL_APB_GEMX_REF_CTRL_CLKACT_BIT 0x02000000 + +/* + * Register offsets within the respective GEM's address space: + * NWCTRL = gem.net_ctrl Network Control register + * NWCFG = gem.net_cfg Network Configuration register + * NWSR = gem.net_status Network Status register + * DMACR = gem.dma_cfg DMA Control register + * TXSR = gem.tx_status TX Status register + * RXQBASE = gem.rx_qbar RXQ base address register + * TXQBASE = gem.tx_qbar TXQ base address register + * RXSR = gem.rx_status RX Status register + * ISR = gem.intr_status Interrupt status register + * IER = gem.intr_en Interrupt enable register + * IDR = gem.intr_dis Interrupt disable register + * IMR = gem.intr_mask Interrupt mask register + * PHYMNTNC = gem.phy_maint PHY maintenance register + * LADDR1L = gem.spec_addr1_bot Specific address 1 bottom register + * LADDR1H = gem.spec_addr1_top Specific address 1 top register + * LADDR2L = gem.spec_addr2_bot Specific address 2 bottom register + * LADDR2H = gem.spec_addr2_top Specific address 2 top register + * LADDR3L = gem.spec_addr3_bot Specific address 3 bottom register + * LADDR3H = gem.spec_addr3_top Specific address 3 top register + * LADDR4L = gem.spec_addr4_bot Specific address 4 bottom register + * LADDR4H = gem.spec_addr4_top Specific address 4 top register + */ +#define ETH_XLNX_GEM_NWCTRL_OFFSET 0x00000000 +#define ETH_XLNX_GEM_NWCFG_OFFSET 0x00000004 +#define ETH_XLNX_GEM_NWSR_OFFSET 0x00000008 +#define ETH_XLNX_GEM_DMACR_OFFSET 0x00000010 +#define ETH_XLNX_GEM_TXSR_OFFSET 0x00000014 +#define ETH_XLNX_GEM_RXQBASE_OFFSET 0x00000018 +#define ETH_XLNX_GEM_TXQBASE_OFFSET 0x0000001C +#define ETH_XLNX_GEM_RXSR_OFFSET 0x00000020 +#define 
ETH_XLNX_GEM_ISR_OFFSET 0x00000024 +#define ETH_XLNX_GEM_IER_OFFSET 0x00000028 +#define ETH_XLNX_GEM_IDR_OFFSET 0x0000002C +#define ETH_XLNX_GEM_IMR_OFFSET 0x00000030 +#define ETH_XLNX_GEM_PHY_MAINTENANCE_OFFSET 0x00000034 +#define ETH_XLNX_GEM_LADDR1L_OFFSET 0x00000088 +#define ETH_XLNX_GEM_LADDR1H_OFFSET 0x0000008C +#define ETH_XLNX_GEM_LADDR2L_OFFSET 0x00000090 +#define ETH_XLNX_GEM_LADDR2H_OFFSET 0x00000094 +#define ETH_XLNX_GEM_LADDR3L_OFFSET 0x00000098 +#define ETH_XLNX_GEM_LADDR3H_OFFSET 0x0000009C +#define ETH_XLNX_GEM_LADDR4L_OFFSET 0x000000A0 +#define ETH_XLNX_GEM_LADDR4H_OFFSET 0x000000A4 + +/* + * Masks for clearing registers during initialization: + * gem.net_ctrl [clear_stat_regs] + * gem.tx_status [7..0] + * gem.rx_status [3..0] + * gem.intr_dis [26..0] + */ +#define ETH_XLNX_GEM_STATCLR_MASK 0x00000020 +#define ETH_XLNX_GEM_TXSRCLR_MASK 0x000000FF +#define ETH_XLNX_GEM_RXSRCLR_MASK 0x0000000F +#define ETH_XLNX_GEM_IDRCLR_MASK 0x07FFFFFF + +/* (Shift) masks for individual registers' bits / bitfields */ + +/* + * gem.net_ctrl: + * [15] Store 1588 receive timestamp in CRC field + * [12] Transmit zero quantum pause frame + * [11] Transmit pause frame + * [10] Halt transmission after current frame + * [09] Start transmission (tx_go) + * [07] Enable writing to statistics counters + * [06] Increment statistics registers - for testing purposes only + * [05] Clear statistics registers + * [04] Enable MDIO port + * [03] Enable transmit + * [02] Enable receive + * [01] Local loopback mode + */ +#define ETH_XLNX_GEM_NWCTRL_RXTSTAMP_BIT 0x00008000 +#define ETH_XLNX_GEM_NWCTRL_ZEROPAUSETX_BIT 0x00001000 +#define ETH_XLNX_GEM_NWCTRL_PAUSETX_BIT 0x00000800 +#define ETH_XLNX_GEM_NWCTRL_HALTTX_BIT 0x00000400 +#define ETH_XLNX_GEM_NWCTRL_STARTTX_BIT 0x00000200 +#define ETH_XLNX_GEM_NWCTRL_STATWEN_BIT 0x00000080 +#define ETH_XLNX_GEM_NWCTRL_STATINC_BIT 0x00000040 +#define ETH_XLNX_GEM_NWCTRL_STATCLR_BIT 0x00000020 +#define ETH_XLNX_GEM_NWCTRL_MDEN_BIT 0x00000010 +#define ETH_XLNX_GEM_NWCTRL_TXEN_BIT 0x00000008 +#define ETH_XLNX_GEM_NWCTRL_RXEN_BIT 0x00000004 +#define ETH_XLNX_GEM_NWCTRL_LOOPEN_BIT 0x00000002 + +/* + * gem.net_cfg: + * [30] Ignore IPG RX Error + * [29] Disable rejection of non-standard preamble + * [28] Enable IPG stretch + * [27] Enable SGMII mode + * [26] Disable rejection of frames with FCS errors + * [25] Enable frames to be received in HDX mode while transmitting + * [24] Enable RX checksum offload to hardware + * [23] Do not copy pause frames to memory + * [22 .. 21] Data bus width + * [20 .. 18] MDC clock division setting + * [17] Discard FCS from received frames + * [16] RX length field error frame discard enable + * [15 .. 
14] Receive buffer offset, # of bytes + * [13] Enable pause TX upon 802.3 pause frame reception + * [12] Retry test - for testing purposes only + * [11] Use TBI instead of the GMII/MII interface + * [10] Gigabit mode enable + * [09] External address match enable + * [08] Enable 1536 byte frames reception + * [07] Receive unicast hash frames enable + * [06] Receive multicast hash frames enable + * [05] Disable broadcast frame reception + * [04] Copy all frames = promiscuous mode + * [02] Discard non-VLAN frames enable + * [01] Full duplex enable + * [00] Speed selection: 1 = 100Mbit/s, 0 = 10 Mbit/s, GBE mode is + * set separately in bit [10] + */ +#define ETH_XLNX_GEM_NWCFG_IGNIPGRXERR_BIT 0x40000000 +#define ETH_XLNX_GEM_NWCFG_BADPREAMBEN_BIT 0x20000000 +#define ETH_XLNX_GEM_NWCFG_IPG_STRETCH_BIT 0x10000000 +#define ETH_XLNX_GEM_NWCFG_SGMIIEN_BIT 0x08000000 +#define ETH_XLNX_GEM_NWCFG_FCSIGNORE_BIT 0x04000000 +#define ETH_XLNX_GEM_NWCFG_HDRXEN_BIT 0x02000000 +#define ETH_XLNX_GEM_NWCFG_RXCHKSUMEN_BIT 0x01000000 +#define ETH_XLNX_GEM_NWCFG_PAUSECOPYDI_BIT 0x00800000 +#define ETH_XLNX_GEM_NWCFG_DBUSW_MASK 0x3 +#define ETH_XLNX_GEM_NWCFG_DBUSW_SHIFT 21 +#define ETH_XLNX_GEM_NWCFG_MDC_MASK 0x7 +#define ETH_XLNX_GEM_NWCFG_MDC_SHIFT 18 +#define ETH_XLNX_GEM_NWCFG_MDCCLKDIV_MASK 0x001C0000 +#define ETH_XLNX_GEM_NWCFG_FCSREM_BIT 0x00020000 +#define ETH_XLNX_GEM_NWCFG_LENGTHERRDSCRD_BIT 0x00010000 +#define ETH_XLNX_GEM_NWCFG_RXOFFS_MASK 0x00000003 +#define ETH_XLNX_GEM_NWCFG_RXOFFS_SHIFT 14 +#define ETH_XLNX_GEM_NWCFG_PAUSEEN_BIT 0x00002000 +#define ETH_XLNX_GEM_NWCFG_RETRYTESTEN_BIT 0x00001000 +#define ETH_XLNX_GEM_NWCFG_TBIINSTEAD_BIT 0x00000800 +#define ETH_XLNX_GEM_NWCFG_1000_BIT 0x00000400 +#define ETH_XLNX_GEM_NWCFG_EXTADDRMATCHEN_BIT 0x00000200 +#define ETH_XLNX_GEM_NWCFG_1536RXEN_BIT 0x00000100 +#define ETH_XLNX_GEM_NWCFG_UCASTHASHEN_BIT 0x00000080 +#define ETH_XLNX_GEM_NWCFG_MCASTHASHEN_BIT 0x00000040 +#define ETH_XLNX_GEM_NWCFG_BCASTDIS_BIT 0x00000020 +#define ETH_XLNX_GEM_NWCFG_COPYALLEN_BIT 0x00000010 +#define ETH_XLNX_GEM_NWCFG_NVLANDISC_BIT 0x00000004 +#define ETH_XLNX_GEM_NWCFG_FDEN_BIT 0x00000002 +#define ETH_XLNX_GEM_NWCFG_100_BIT 0x00000001 + +/* + * gem.dma_cfg: + * [24] Discard packets when AHB resource is unavailable + * [23 .. 16] RX buffer size, n * 64 bytes + * [11] Enable/disable TCP|UDP/IP TX checksum offload + * [10] TX buffer half/full memory size + * [09 .. 08] Receiver packet buffer memory size select + * [07] Endianness configuration + * [06] Descriptor access endianness configuration + * [04 .. 
00] AHB fixed burst length for DMA data operations + */ +#define ETH_XLNX_GEM_DMACR_DISCNOAHB_BIT 0x01000000 +#define ETH_XLNX_GEM_DMACR_RX_BUF_MASK 0x000000FF +#define ETH_XLNX_GEM_DMACR_RX_BUF_SHIFT 16 +#define ETH_XLNX_GEM_DMACR_TCP_CHKSUM_BIT 0x00000800 +#define ETH_XLNX_GEM_DMACR_TX_SIZE_BIT 0x00000400 +#define ETH_XLNX_GEM_DMACR_RX_SIZE_MASK 0x00000300 +#define ETH_XLNX_GEM_DMACR_RX_SIZE_SHIFT 8 +#define ETH_XLNX_GEM_DMACR_ENDIAN_BIT 0x00000080 +#define ETH_XLNX_GEM_DMACR_DESCR_ENDIAN_BIT 0x00000040 +#define ETH_XLNX_GEM_DMACR_AHB_BURST_LENGTH_MASK 0x0000001F + +/* + * gem.intr_* interrupt status/enable/disable bits: + * [25] PTP pdelay_resp frame transmitted + * [24] PTP pdelay_req frame transmitted + * [23] PTP pdelay_resp frame received + * [22] PTP delay_req frame received + * [21] PTP sync frame transmitted + * [20] PTP delay_req frame transmitted + * [19] PTP sync frame received + * [18] PTP delay_req frame received + * [17] PCS link partner page mask + * [16] Auto-negotiation completed + * [15] External interrupt + * [14] Pause frame transmitted + * [13] Pause time has reached zero + * [12] Pause frame received with non-zero pause quantum + * [11] hresp not OK + * [10] Receive overrun + * [09] Link change + * [07] Transmit complete + * [06] Transmit frame corruption due to AHB/AXI error + * [05] Retry limit exceeded or late collision + * [04] Transmit buffer underrun + * [03] Set 'used' bit in TX BD encountered + * [02] Set 'used' bit in RX BD encountered + * [01] Frame received + * [00] PHY management done + */ +#define ETH_XLNX_GEM_IXR_PTPPSTX_BIT 0x02000000 +#define ETH_XLNX_GEM_IXR_PTPPDRTX_BIT 0x01000000 +#define ETH_XLNX_GEM_IXR_PTPSTX_BIT 0x00800000 +#define ETH_XLNX_GEM_IXR_PTPDRTX_BIT 0x00400000 +#define ETH_XLNX_GEM_IXR_PTPPSRX_BIT 0x00200000 +#define ETH_XLNX_GEM_IXR_PTPPDRRX_BIT 0x00100000 +#define ETH_XLNX_GEM_IXR_PTPSRX_BIT 0x00080000 +#define ETH_XLNX_GEM_IXR_PTPDRRX_BIT 0x00040000 +#define ETH_XLNX_GEM_IXR_PARTNER_PGRX_BIT 0x00020000 +#define ETH_XLNX_GEM_IXR_AUTONEG_COMPLETE_BIT 0x00010000 +#define ETH_XLNX_GEM_IXR_EXTERNAL_INT_BIT 0x00008000 +#define ETH_XLNX_GEM_IXR_PAUSE_TX_BIT 0x00004000 +#define ETH_XLNX_GEM_IXR_PAUSE_ZERO_BIT 0x00002000 +#define ETH_XLNX_GEM_IXR_PAUSE_NONZERO_BIT 0x00001000 +#define ETH_XLNX_GEM_IXR_HRESP_NOT_OK_BIT 0x00000800 +#define ETH_XLNX_GEM_IXR_RX_OVERRUN_BIT 0x00000400 +#define ETH_XLNX_GEM_IXR_LINK_CHANGE 0x00000200 +#define ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT 0x00000080 +#define ETH_XLNX_GEM_IXR_TX_CORRUPT_BIT 0x00000040 +#define ETH_XLNX_GEM_IXR_RETRY_LIMIT_OR_LATE_COLL_BIT 0x00000020 +#define ETH_XLNX_GEM_IXR_TX_UNDERRUN_BIT 0x00000010 +#define ETH_XLNX_GEM_IXR_TX_USED_BIT 0x00000008 +#define ETH_XLNX_GEM_IXR_RX_USED_BIT 0x00000004 +#define ETH_XLNX_GEM_IXR_FRAME_RX_BIT 0x00000002 +#define ETH_XLNX_GEM_IXR_PHY_MGMNT_BIT 0x00000001 +#define ETH_XLNX_GEM_IXR_ALL_MASK 0x03FC7FFE +#define ETH_XLNX_GEM_IXR_ERRORS_MASK 0x00000C60 + +/* Bits / bit masks relating to the GEM's MDIO interface */ + +/* + * gem.net_status: + * [02] PHY management idle bit + * [01] MDIO input status + */ +#define ETH_XLNX_GEM_MDIO_IDLE_BIT 0x00000004 +#define ETH_XLNX_GEM_MDIO_IN_STATUS_BIT 0x00000002 + +/* + * gem.phy_maint: + * [31 .. 30] constant values + * [17 .. 16] constant values + * [29] Read operation control bit + * [28] Write operation control bit + * [27 .. 23] PHY address + * [22 .. 18] Register address + * [15 .. 
00] 16-bit data word
+ */
+#define ETH_XLNX_GEM_PHY_MAINT_CONST_BITS 0x40020000
+#define ETH_XLNX_GEM_PHY_MAINT_READ_OP_BIT 0x20000000
+#define ETH_XLNX_GEM_PHY_MAINT_WRITE_OP_BIT 0x10000000
+#define ETH_XLNX_GEM_PHY_MAINT_PHY_ADDRESS_MASK 0x0000001F
+#define ETH_XLNX_GEM_PHY_MAINT_PHY_ADDRESS_SHIFT 23
+#define ETH_XLNX_GEM_PHY_MAINT_REGISTER_ID_MASK 0x0000001F
+#define ETH_XLNX_GEM_PHY_MAINT_REGISTER_ID_SHIFT 18
+#define ETH_XLNX_GEM_PHY_MAINT_DATA_MASK 0x0000FFFF
+
+/* Device configuration / run-time data resolver macros */
+#define DEV_CFG(dev) \
+ ((const struct eth_xlnx_gem_dev_cfg * const)(dev->config))
+#define DEV_DATA(dev) \
+ ((struct eth_xlnx_gem_dev_data *)(dev->data))
+
+/* Device initialization macro */
+#define ETH_XLNX_GEM_NET_DEV_INIT(port) \
+ETH_NET_DEVICE_DT_INST_DEFINE(port,\
+ eth_xlnx_gem_dev_init,\
+ NULL,\
+ &eth_xlnx_gem##port##_dev_data,\
+ &eth_xlnx_gem##port##_dev_cfg,\
+ CONFIG_ETH_INIT_PRIORITY,\
+ &eth_xlnx_gem_apis,\
+ NET_ETH_MTU);
+
+/* Device configuration data declaration macro */
+#define ETH_XLNX_GEM_DEV_CONFIG(port) \
+static const struct eth_xlnx_gem_dev_cfg eth_xlnx_gem##port##_dev_cfg = {\
+ .base_addr = DT_REG_ADDR_BY_IDX(DT_INST(port, xlnx_gem), 0),\
+ .config_func = eth_xlnx_gem##port##_irq_config,\
+ .pll_clock_frequency = DT_INST_PROP(port, clock_frequency),\
+ .clk_ctrl_reg_address = DT_REG_ADDR_BY_IDX(DT_INST(port, xlnx_gem), 1),\
+ .mdc_divider = (enum eth_xlnx_mdc_clock_divider)\
+ (DT_INST_PROP(port, mdc_divider)),\
+ .max_link_speed = (enum eth_xlnx_link_speed)\
+ (DT_INST_PROP(port, link_speed)),\
+ .init_phy = DT_INST_PROP(port, init_mdio_phy),\
+ .phy_mdio_addr_fix = DT_INST_PROP(port, mdio_phy_address),\
+ .phy_advertise_lower = DT_INST_PROP(port, advertise_lower_link_speeds),\
+ .phy_poll_interval = DT_INST_PROP(port, phy_poll_interval),\
+ .defer_rxp_to_queue = !DT_INST_PROP(port, handle_rx_in_isr),\
+ .defer_txd_to_queue = DT_INST_PROP(port, handle_tx_in_workq),\
+ .amba_dbus_width = (enum eth_xlnx_amba_dbus_width)\
+ (DT_INST_PROP(port, amba_ahb_dbus_width)),\
+ .ahb_burst_length = (enum eth_xlnx_ahb_burst_length)\
+ (DT_INST_PROP(port, amba_ahb_burst_length)),\
+ .hw_rx_buffer_size = (enum eth_xlnx_hwrx_buffer_size)\
+ (DT_INST_PROP(port, hw_rx_buffer_size)),\
+ .hw_rx_buffer_offset = (uint8_t)\
+ (DT_INST_PROP(port, hw_rx_buffer_offset)),\
+ .rxbd_count = (uint8_t)\
+ (DT_INST_PROP(port, rx_buffer_descriptors)),\
+ .txbd_count = (uint8_t)\
+ (DT_INST_PROP(port, tx_buffer_descriptors)),\
+ .rx_buffer_size = (((uint16_t)(DT_INST_PROP(port, rx_buffer_size)) +\
+ (ETH_XLNX_BUFFER_ALIGNMENT-1)) & ~(ETH_XLNX_BUFFER_ALIGNMENT-1)),\
+ .tx_buffer_size = (((uint16_t)(DT_INST_PROP(port, tx_buffer_size)) +\
+ (ETH_XLNX_BUFFER_ALIGNMENT-1)) & ~(ETH_XLNX_BUFFER_ALIGNMENT-1)),\
+ .ignore_ipg_rxer = DT_INST_PROP(port, ignore_ipg_rxer),\
+ .disable_reject_nsp = DT_INST_PROP(port, disable_reject_nsp),\
+ .enable_ipg_stretch = DT_INST_PROP(port, ipg_stretch),\
+ .enable_sgmii_mode = DT_INST_PROP(port, sgmii_mode),\
+ .disable_reject_fcs_crc_errors = DT_INST_PROP(port, disable_reject_fcs_crc_errors),\
+ .enable_rx_halfdup_while_tx = DT_INST_PROP(port, rx_halfdup_while_tx),\
+ .enable_rx_chksum_offload = DT_INST_PROP(port, rx_checksum_offload),\
+ .disable_pause_copy = DT_INST_PROP(port, disable_pause_copy),\
+ .discard_rx_fcs = DT_INST_PROP(port, discard_rx_fcs),\
+ .discard_rx_length_errors = DT_INST_PROP(port, discard_rx_length_errors),\
+ .enable_pause = DT_INST_PROP(port, pause_frame),\
+ .enable_tbi = DT_INST_PROP(port, tbi),\
+ .ext_addr_match = 
DT_INST_PROP(port, ext_address_match),\ + .enable_1536_frames = DT_INST_PROP(port, long_frame_rx_support),\ + .enable_ucast_hash = DT_INST_PROP(port, unicast_hash),\ + .enable_mcast_hash = DT_INST_PROP(port, multicast_hash),\ + .disable_bcast = DT_INST_PROP(port, reject_broadcast),\ + .copy_all_frames = DT_INST_PROP(port, promiscuous_mode),\ + .discard_non_vlan = DT_INST_PROP(port, discard_non_vlan),\ + .enable_fdx = DT_INST_PROP(port, full_duplex),\ + .disc_rx_ahb_unavail = DT_INST_PROP(port, discard_rx_frame_ahb_unavail),\ + .enable_tx_chksum_offload = DT_INST_PROP(port, tx_checksum_offload),\ + .tx_buffer_size_full = DT_INST_PROP(port, hw_tx_buffer_size_full),\ + .enable_ahb_packet_endian_swap = DT_INST_PROP(port, ahb_packet_endian_swap),\ + .enable_ahb_md_endian_swap = DT_INST_PROP(port, ahb_md_endian_swap)\ +}; + +/* Device run-time data declaration macro */ +#define ETH_XLNX_GEM_DEV_DATA(port) \ +static struct eth_xlnx_gem_dev_data eth_xlnx_gem##port##_dev_data = {\ + .mac_addr = DT_INST_PROP(port, local_mac_address),\ + .started = 0,\ + .eff_link_speed = LINK_DOWN,\ + .phy_addr = 0,\ + .phy_id = 0,\ + .phy_access_api = NULL,\ + .first_rx_buffer = NULL,\ + .first_tx_buffer = NULL\ +}; + +/* DMA memory area declaration macro */ +#define ETH_XLNX_GEM_DMA_AREA_DECL(port) \ +struct eth_xlnx_dma_area_gem##port {\ + struct eth_xlnx_gem_bd rx_bd[DT_INST_PROP(port, rx_buffer_descriptors)];\ + struct eth_xlnx_gem_bd tx_bd[DT_INST_PROP(port, tx_buffer_descriptors)];\ + uint8_t rx_buffer\ + [DT_INST_PROP(port, rx_buffer_descriptors)]\ + [((DT_INST_PROP(port, rx_buffer_size)\ + + (ETH_XLNX_BUFFER_ALIGNMENT - 1))\ + & ~(ETH_XLNX_BUFFER_ALIGNMENT - 1))];\ + uint8_t tx_buffer\ + [DT_INST_PROP(port, tx_buffer_descriptors)]\ + [((DT_INST_PROP(port, tx_buffer_size)\ + + (ETH_XLNX_BUFFER_ALIGNMENT - 1))\ + & ~(ETH_XLNX_BUFFER_ALIGNMENT - 1))];\ +}; + +/* DMA memory area instantiation macro */ +#define ETH_XLNX_GEM_DMA_AREA_INST(port) \ +static struct eth_xlnx_dma_area_gem##port eth_xlnx_gem##port##_dma_area \ + __aligned(4096); + +/* Interrupt configuration function macro */ +#define ETH_XLNX_GEM_CONFIG_IRQ_FUNC(port) \ +static void eth_xlnx_gem##port##_irq_config(const struct device *dev)\ +{\ + ARG_UNUSED(dev);\ + IRQ_CONNECT(DT_INST_IRQN(port), DT_INST_IRQ(port, priority),\ + eth_xlnx_gem_isr, DEVICE_DT_INST_GET(port), 0);\ + irq_enable(DT_INST_IRQN(port));\ +} + +/* RX/TX BD Ring initialization macro */ +#define ETH_XLNX_GEM_INIT_BD_RING(port) \ +if (dev_conf->base_addr == DT_REG_ADDR_BY_IDX(DT_INST(port, xlnx_gem), 0)) {\ + dev_data->rxbd_ring.first_bd = &(eth_xlnx_gem##port##_dma_area.rx_bd[0]);\ + dev_data->txbd_ring.first_bd = &(eth_xlnx_gem##port##_dma_area.tx_bd[0]);\ + dev_data->first_rx_buffer = (uint8_t *)eth_xlnx_gem##port##_dma_area.rx_buffer;\ + dev_data->first_tx_buffer = (uint8_t *)eth_xlnx_gem##port##_dma_area.tx_buffer;\ +} + +/* Top-level device initialization macro - bundles all of the above */ +#define ETH_XLNX_GEM_INITIALIZE(port) \ +ETH_XLNX_GEM_CONFIG_IRQ_FUNC(port);\ +ETH_XLNX_GEM_DEV_CONFIG(port);\ +ETH_XLNX_GEM_DEV_DATA(port);\ +ETH_XLNX_GEM_DMA_AREA_DECL(port);\ +ETH_XLNX_GEM_DMA_AREA_INST(port);\ +ETH_XLNX_GEM_NET_DEV_INIT(port);\ + +/* IRQ handler function type */ +typedef void (*eth_xlnx_gem_config_irq_t)(const struct device *dev); + +/* Enums for bitfields representing configuration settings */ + +/** + * @brief Link speed configuration enumeration type. 
+ * + * Enumeration type for link speed indication, contains 'link down' + * plus all link speeds supported by the controller (10/100/1000). + */ +enum eth_xlnx_link_speed { + /* The values of this enum are consecutively numbered */ + LINK_DOWN = 0, + LINK_10MBIT, + LINK_100MBIT, + LINK_1GBIT +}; + +/** + * @brief AMBA AHB data bus width configuration enumeration type. + * + * Enumeration type containing the supported width options for the + * AMBA AHB data bus. This is a configuration item in the controller's + * net_cfg register. + */ +enum eth_xlnx_amba_dbus_width { + /* The values of this enum are consecutively numbered */ + AMBA_AHB_DBUS_WIDTH_32BIT = 0, + AMBA_AHB_DBUS_WIDTH_64BIT, + AMBA_AHB_DBUS_WIDTH_128BIT +}; + +/** + * @brief MDC clock divider configuration enumeration type. + * + * Enumeration type containing the supported clock divider values + * used to generate the MDIO interface clock (MDC) from the ZynqMP's + * LPD LSBUS clock. This is a configuration item in the controller's + * net_cfg register. + */ +enum eth_xlnx_mdc_clock_divider { + /* The values of this enum are consecutively numbered */ + MDC_DIVIDER_8 = 0, + MDC_DIVIDER_16, + MDC_DIVIDER_32, + MDC_DIVIDER_48 +}; + +/** + * @brief DMA RX buffer size configuration enumeration type. + * + * Enumeration type containing the supported size options for the + * DMA receive buffer size in AHB system memory. This is a configuration + * item in the controller's dma_cfg register. + */ +enum eth_xlnx_hwrx_buffer_size { + /* The values of this enum are consecutively numbered */ + HWRX_BUFFER_SIZE_1KB = 0, + HWRX_BUFFER_SIZE_2KB, + HWRX_BUFFER_SIZE_4KB, + HWRX_BUFFER_SIZE_8KB +}; + +/** + * @brief AHB burst length configuration enumeration type. + * + * Enumeration type containing the supported burst length options + * for the AHB fixed burst length for DMA data operations. This is a + * configuration item in the controller's dma_cfg register. + */ +enum eth_xlnx_ahb_burst_length { + /* The values of this enum are one-hot encoded */ + AHB_BURST_SINGLE = 1, + /* 2 = also AHB_BURST_SINGLE */ + AHB_BURST_INCR4 = 4, + AHB_BURST_INCR8 = 8, + AHB_BURST_INCR16 = 16 +}; + +/** + * @brief DMA memory area buffer descriptor. + * + * An array of these descriptors for each RX and TX is used to + * describe the respective DMA memory area. Each address word + * points to the start of a RX or TX buffer within the DMA memory + * area, while the control word is used for buffer status exchange + * with the controller. + */ +struct eth_xlnx_gem_bd { + /* TODO for Cortex-A53: 64-bit addressing */ + /* TODO: timestamping support */ + /* Buffer physical address (absolute address) */ + uint32_t addr; + /* Buffer control word (different contents for RX and TX) */ + uint32_t ctrl; +}; + +/** + * @brief DMA memory area buffer descriptor ring management structure. + * + * The DMA memory area buffer descriptor ring management structure + * is used to manage either the RX or TX buffer descriptor array + * (while the buffer descriptors are just an array from the software + * point of view, the controller treats them as a ring, in which the + * last descriptor's control word has a special last-in-ring bit set). + * It contains a pointer to the start of the descriptor array, a + * semaphore as a means of preventing concurrent access, a free entry + * counter as well as indices used to determine which BD shall be used + * or evaluated for the next RX/TX operation. 
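+ *
+ * As an illustration of how these fields are intended to interact
+ * (simplified sketch, not the literal driver code), claiming a single
+ * BD for transmission would look roughly like:
+ *
+ *   k_sem_take(&ring->ring_sem, K_FOREVER);
+ *   if (ring->free_bds > 0) {
+ *     bd = &ring->first_bd[ring->next_to_use];
+ *     ring->next_to_use = (ring->next_to_use + 1) % bd_count;
+ *     ring->free_bds--;
+ *   }
+ *   k_sem_give(&ring->ring_sem);
+ *
+ * with bd_count being the configured number of BDs in the ring and the
+ * TX done handler later advancing next_to_process and incrementing
+ * free_bds again by the number of BDs it has processed.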
+ */ +struct eth_xlnx_gem_bdring { + /* Concurrent modification protection */ + struct k_sem ring_sem; + /* Pointer to the first BD in the list */ + struct eth_xlnx_gem_bd *first_bd; + /* Index of the next BD to be used for TX */ + uint8_t next_to_use; + /* Index of the next BD to be processed (both RX/TX) */ + uint8_t next_to_process; + /* Number of currently available BDs in this ring */ + uint8_t free_bds; +}; + +/** + * @brief Constant device configuration data structure. + * + * This struct contains all device configuration data for a GEM + * controller instance which is constant. The data herein is + * either acquired from the generated header file based on the + * data from Kconfig, or from header file based on the device tree + * data. Some of the data contained, in particular data relating + * to clock sources, is specific to either the Zynq-7000 or the + * UltraScale SoCs, which both contain the GEM. + */ +struct eth_xlnx_gem_dev_cfg { + uint32_t base_addr; + eth_xlnx_gem_config_irq_t config_func; + + uint32_t pll_clock_frequency; + uint32_t clk_ctrl_reg_address; + enum eth_xlnx_mdc_clock_divider mdc_divider; + + enum eth_xlnx_link_speed max_link_speed; + bool init_phy; + uint8_t phy_mdio_addr_fix; + uint8_t phy_advertise_lower; + uint32_t phy_poll_interval; + uint8_t defer_rxp_to_queue; + uint8_t defer_txd_to_queue; + + enum eth_xlnx_amba_dbus_width amba_dbus_width; + enum eth_xlnx_ahb_burst_length ahb_burst_length; + enum eth_xlnx_hwrx_buffer_size hw_rx_buffer_size; + uint8_t hw_rx_buffer_offset; + + uint8_t rxbd_count; + uint8_t txbd_count; + uint16_t rx_buffer_size; + uint16_t tx_buffer_size; + + bool ignore_ipg_rxer : 1; + bool disable_reject_nsp : 1; + bool enable_ipg_stretch : 1; + bool enable_sgmii_mode : 1; + bool disable_reject_fcs_crc_errors : 1; + bool enable_rx_halfdup_while_tx : 1; + bool enable_rx_chksum_offload : 1; + bool disable_pause_copy : 1; + bool discard_rx_fcs : 1; + bool discard_rx_length_errors : 1; + bool enable_pause : 1; + bool enable_tbi : 1; + bool ext_addr_match : 1; + bool enable_1536_frames : 1; + bool enable_ucast_hash : 1; + bool enable_mcast_hash : 1; + bool disable_bcast : 1; + bool copy_all_frames : 1; + bool discard_non_vlan : 1; + bool enable_fdx : 1; + bool disc_rx_ahb_unavail : 1; + bool enable_tx_chksum_offload : 1; + bool tx_buffer_size_full : 1; + bool enable_ahb_packet_endian_swap : 1; + bool enable_ahb_md_endian_swap : 1; +}; + +/** + * @brief Run-time device configuration data structure. + * + * This struct contains all device configuration data for a GEM + * controller instance which is modifyable at run-time, such as + * data relating to the attached PHY or the auxiliary thread. 
+ */ +struct eth_xlnx_gem_dev_data { + struct net_if *iface; + uint8_t mac_addr[6]; + enum eth_xlnx_link_speed eff_link_speed; + + struct k_work tx_done_work; + struct k_work rx_pend_work; + struct k_sem tx_done_sem; + + uint8_t phy_addr; + uint32_t phy_id; + struct k_work_delayable phy_poll_delayed_work; + struct phy_xlnx_gem_api *phy_access_api; + + uint8_t *first_rx_buffer; + uint8_t *first_tx_buffer; + + struct eth_xlnx_gem_bdring rxbd_ring; + struct eth_xlnx_gem_bdring txbd_ring; + +#ifdef CONFIG_NET_STATISTICS_ETHERNET + struct net_stats_eth stats; +#endif + + bool started; +}; + +#endif /* _ZEPHYR_DRIVERS_ETHERNET_ETH_XLNX_GEM_PRIV_H_ */ + +/* EOF */ diff --git a/drivers/ethernet/phy_xlnx_gem.c b/drivers/ethernet/phy_xlnx_gem.c new file mode 100644 index 00000000000..e42c8f4832e --- /dev/null +++ b/drivers/ethernet/phy_xlnx_gem.c @@ -0,0 +1,978 @@ +/* + * Xilinx Processor System Gigabit Ethernet controller (GEM) driver + * + * PHY management interface implementation + * Models currently supported: + * - Marvell Alaska 88E1111 (QEMU simulated PHY) + * - Marvell Alaska 88E1510/88E1518/88E1512/88E1514 (Zedboard) + * - Texas Instruments TLK105 + * - Texas Instruments DP83822 + * + * Copyright (c) 2021, Weidmueller Interface GmbH & Co. KG + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include "eth_xlnx_gem_priv.h" + +#define LOG_MODULE_NAME phy_xlnx_gem +#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL +#include +LOG_MODULE_REGISTER(LOG_MODULE_NAME); + +/* Basic MDIO read / write functions for PHY access */ + +/** + * @brief Read PHY data via the MDIO interface + * Reads data from a PHY attached to the respective GEM's MDIO interface + * + * @param base_addr Base address of the GEM's register space + * @param phy_addr MDIO address of the PHY to be accessed + * @param reg_addr Index of the PHY register to be read + * @return 16-bit data word received from the PHY + */ +static uint16_t phy_xlnx_gem_mdio_read( + uint32_t base_addr, uint8_t phy_addr, + uint8_t reg_addr) +{ + uint32_t reg_val; + uint32_t poll_cnt = 0; + + /* + * MDIO read operation as described in Zynq-7000 TRM, + * chapter 16.3.4, p. 517. + */ + + /* + * Wait until gem.net_status[phy_mgmt_idle] == 1 before issuing the + * current command. + */ + do { + if (poll_cnt++ > 0) + k_busy_wait(100); + reg_val = sys_read32(base_addr + ETH_XLNX_GEM_NWSR_OFFSET); + } while ((reg_val & ETH_XLNX_GEM_MDIO_IDLE_BIT) == 0 && poll_cnt < 10); + if (poll_cnt == 10) { + LOG_ERR("GEM@0x%08X read from PHY address %hhu, " + "register address %hhu timed out", + base_addr, phy_addr, reg_addr); + return 0; + } + + /* Assemble & write the read command to the gem.phy_maint register */ + + /* Set the bits constant for any operation */ + reg_val = ETH_XLNX_GEM_PHY_MAINT_CONST_BITS; + /* Indicate a read operation */ + reg_val |= ETH_XLNX_GEM_PHY_MAINT_READ_OP_BIT; + /* PHY address */ + reg_val |= (((uint32_t)phy_addr & ETH_XLNX_GEM_PHY_MAINT_PHY_ADDRESS_MASK) << + ETH_XLNX_GEM_PHY_MAINT_PHY_ADDRESS_SHIFT); + /* Register address */ + reg_val |= (((uint32_t)reg_addr & ETH_XLNX_GEM_PHY_MAINT_REGISTER_ID_MASK) << + ETH_XLNX_GEM_PHY_MAINT_REGISTER_ID_SHIFT); + + sys_write32(reg_val, base_addr + ETH_XLNX_GEM_PHY_MAINTENANCE_OFFSET); + + /* + * Wait until gem.net_status[phy_mgmt_idle] == 1 -> current command + * completed. 
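+ *
+ * For illustration: with e.g. phy_addr = 3 and reg_addr = 2 (both
+ * arbitrary example values chosen only for this calculation), the
+ * command word assembled above would be
+ *   0x40020000 | 0x20000000 | (3 << 23) | (2 << 18) = 0x618A0000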
+ */
+ poll_cnt = 0;
+ do {
+ if (poll_cnt++ > 0)
+ k_busy_wait(100);
+ reg_val = sys_read32(base_addr + ETH_XLNX_GEM_NWSR_OFFSET);
+ } while ((reg_val & ETH_XLNX_GEM_MDIO_IDLE_BIT) == 0 && poll_cnt < 10);
+ if (poll_cnt == 10) {
+ LOG_ERR("GEM@0x%08X read from PHY address %hhu, "
+ "register address %hhu timed out",
+ base_addr, phy_addr, reg_addr);
+ return 0;
+ }
+
+ /*
+ * Read the data returned by the PHY -> lower 16 bits of the PHY main-
+ * tenance register
+ */
+ reg_val = sys_read32(base_addr + ETH_XLNX_GEM_PHY_MAINTENANCE_OFFSET);
+ return (uint16_t)reg_val;
+}
+
+/**
+ * @brief Writes PHY data via the MDIO interface
+ * Writes data to a PHY attached to the respective GEM's MDIO interface
+ *
+ * @param base_addr Base address of the GEM's register space
+ * @param phy_addr MDIO address of the PHY to be accessed
+ * @param reg_addr Index of the PHY register to be written to
+ * @param value 16-bit data word to be written to the target register
+ */
+static void phy_xlnx_gem_mdio_write(
+ uint32_t base_addr, uint8_t phy_addr,
+ uint8_t reg_addr, uint16_t value)
+{
+ uint32_t reg_val;
+ uint32_t poll_cnt = 0;
+
+ /*
+ * MDIO write operation as described in Zynq-7000 TRM,
+ * chapter 16.3.4, p. 517.
+ */
+
+ /*
+ * Wait until gem.net_status[phy_mgmt_idle] == 1 before issuing the
+ * current command.
+ */
+ do {
+ if (poll_cnt++ > 0)
+ k_busy_wait(100);
+ reg_val = sys_read32(base_addr + ETH_XLNX_GEM_NWSR_OFFSET);
+ } while ((reg_val & ETH_XLNX_GEM_MDIO_IDLE_BIT) == 0 && poll_cnt < 10);
+ if (poll_cnt == 10) {
+ LOG_ERR("GEM@0x%08X write to PHY address %hhu, "
+ "register address %hhu timed out",
+ base_addr, phy_addr, reg_addr);
+ return;
+ }
+
+ /* Assemble & write the write command to the gem.phy_maint register */
+
+ /* Set the bits constant for any operation */
+ reg_val = ETH_XLNX_GEM_PHY_MAINT_CONST_BITS;
+ /* Indicate a write operation */
+ reg_val |= ETH_XLNX_GEM_PHY_MAINT_WRITE_OP_BIT;
+ /* PHY address */
+ reg_val |= (((uint32_t)phy_addr & ETH_XLNX_GEM_PHY_MAINT_PHY_ADDRESS_MASK) <<
+ ETH_XLNX_GEM_PHY_MAINT_PHY_ADDRESS_SHIFT);
+ /* Register address */
+ reg_val |= (((uint32_t)reg_addr & ETH_XLNX_GEM_PHY_MAINT_REGISTER_ID_MASK) <<
+ ETH_XLNX_GEM_PHY_MAINT_REGISTER_ID_SHIFT);
+ /* 16 bits of data for the destination register */
+ reg_val |= ((uint32_t)value & ETH_XLNX_GEM_PHY_MAINT_DATA_MASK);
+
+ sys_write32(reg_val, base_addr + ETH_XLNX_GEM_PHY_MAINTENANCE_OFFSET);
+
+ /*
+ * Wait until gem.net_status[phy_mgmt_idle] == 1 -> current command
+ * completed.
+ */
+ poll_cnt = 0;
+ do {
+ if (poll_cnt++ > 0)
+ k_busy_wait(100);
+ reg_val = sys_read32(base_addr + ETH_XLNX_GEM_NWSR_OFFSET);
+ } while ((reg_val & ETH_XLNX_GEM_MDIO_IDLE_BIT) == 0 && poll_cnt < 10);
+ if (poll_cnt == 10) {
+ LOG_ERR("GEM@0x%08X write to PHY address %hhu, "
+ "register address %hhu timed out",
+ base_addr, phy_addr, reg_addr);
+ }
+}
+
+/*
+ * Vendor-specific PHY management functions for:
+ * Marvell Alaska 88E1111 (QEMU simulated PHY)
+ * Marvell Alaska 88E1510/88E1518/88E1512/88E1514 (Zedboard)
+ * Register IDs & procedures are based on the corresponding datasheets:
+ * https://www.marvell.com/content/dam/marvell/en/public-collateral/transceivers/marvell-phys-transceivers-alaska-88e1111-datasheet.pdf
+ * https://www.marvell.com/content/dam/marvell/en/public-collateral/transceivers/marvell-phys-transceivers-alaska-88e151x-datasheet.pdf
+ *
+ * NOTICE: Unless indicated otherwise, page/table source references refer to
+ * the 88E151x datasheet. 
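+ *
+ * The functions below are intended to be reached through the function
+ * pointer table declared further down (struct phy_xlnx_gem_api). A rough
+ * usage sketch from the GEM driver's perspective (the actual call sites
+ * are in eth_xlnx_gem.c and are not shown in this file):
+ *
+ *   if (dev_data->phy_access_api != NULL) {
+ *     dev_data->phy_access_api->phy_configure_func(dev);
+ *     speed = dev_data->phy_access_api->phy_poll_link_speed_func(dev);
+ *   }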
+ */ + +/** + * @brief Marvell Alaska PHY reset function + * Reset function for the Marvell Alaska PHY series + * + * @param dev Pointer to the device data + */ +static void phy_xlnx_gem_marvell_alaska_reset(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + uint16_t phy_data; + uint32_t retries = 0; + + /* + * Page 0, register address 0 = Copper control register, + * bit [15] = PHY reset. Register 0/0 access is R/M/W. Comp. + * datasheet chapter 2.6 and table 64 "Copper Control Register". + */ + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_CONTROL_REGISTER); + phy_data |= PHY_MRVL_COPPER_CONTROL_RESET_BIT; + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_CONTROL_REGISTER, phy_data); + + /* Bit [15] reverts to 0 once the reset is complete. */ + while (((phy_data & PHY_MRVL_COPPER_CONTROL_RESET_BIT) != 0) && (retries++ < 10)) { + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_CONTROL_REGISTER); + } + if (retries == 10) { + LOG_ERR("%s reset PHY address %hhu (Marvell Alaska) timed out", + dev->name, dev_data->phy_addr); + } +} + +/** + * @brief Marvell Alaska PHY configuration function + * Configuration function for the Marvell Alaska PHY series + * + * @param dev Pointer to the device data + */ +static void phy_xlnx_gem_marvell_alaska_cfg(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + uint16_t phy_data; + uint16_t phy_data_gbit; + uint32_t retries = 0; + + /* + * Page 0, register address 0 = Copper control register, + * bit [12] = auto-negotiation enable bit is to be cleared + * for now, afterwards, trigger a PHY reset. + * Register 0/0 access is R/M/W. Comp. datasheet chapter 2.6 + * and table 64 "Copper Control Register". + */ + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_CONTROL_REGISTER); + phy_data &= ~PHY_MRVL_COPPER_CONTROL_AUTONEG_ENABLE_BIT; + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_CONTROL_REGISTER, phy_data); + phy_xlnx_gem_marvell_alaska_reset(dev); + + if ((dev_data->phy_id & PHY_MRVL_PHY_ID_MODEL_MASK) == + PHY_MRVL_PHY_ID_MODEL_88E151X) { + /* + * 88E151x only: onfigure the system interface and media type + * (i.e. "RGMII to Copper", 0x0). On the 88E1111, this setting + * is configured using I/O pins on the device. + * TODO: Make this value configurable via KConfig or DT? + * Page 18, register address 20 = General Control Register 1, + * bits [2..0] = mode configuration + * Comp. datasheet table 129 "General Control Register 1" + * NOTICE: a change of this value requires a subsequent software + * reset command via the same register's bit [15]. + */ + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_PAGE_SWITCH_REGISTER, + PHY_MRVL_GENERAL_CONTROL_1_PAGE); + + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_GENERAL_CONTROL_1_REGISTER); + phy_data &= ~(PHY_MRVL_MODE_CONFIG_MASK << PHY_MRVL_MODE_CONFIG_SHIFT); + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_GENERAL_CONTROL_1_REGISTER, phy_data); + + /* + * [15] Mode Software Reset bit, affecting pages 6 and 18 + * Reset is performed immediately, bit [15] is self-clearing. 
+ * This reset bit is not to be confused with the actual PHY + * reset in register 0/0! + */ + phy_data |= PHY_MRVL_GENERAL_CONTROL_1_RESET_BIT; + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_GENERAL_CONTROL_1_REGISTER, phy_data); + + /* Bit [15] reverts to 0 once the reset is complete. */ + while (((phy_data & PHY_MRVL_GENERAL_CONTROL_1_RESET_BIT) != 0) && + (retries++ < 10)) { + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, + dev_data->phy_addr, + PHY_MRVL_GENERAL_CONTROL_1_REGISTER); + } + if (retries == 10) { + LOG_ERR("%s configure PHY address %hhu (Marvell Alaska) timed out", + dev->name, dev_data->phy_addr); + return; + } + + /* Revert to register page 0 */ + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_PAGE_SWITCH_REGISTER, + PHY_MRVL_BASE_REGISTERS_PAGE); + } + + /* + * Configure MDIX + * TODO: Make this value configurable via KConfig or DT? + * 88E151x: Page 0, register address 16 = Copper specific control register 1, + * 88E1111: Page any, register address 16 = PHY specific control register, + * bits [6..5] = MDIO crossover mode. Comp. datasheet table 76. + * NOTICE: a change of this value requires a subsequent software + * reset command via Copper Control Register's bit [15]. + */ + + /* [6..5] 11 = Enable auto cross over detection */ + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_CONTROL_1_REGISTER); + phy_data &= ~(PHY_MRVL_MDIX_CONFIG_MASK << PHY_MRVL_MDIX_CONFIG_SHIFT); + phy_data |= (PHY_MRVL_MDIX_AUTO_CROSSOVER_ENABLE << PHY_MRVL_MDIX_CONFIG_SHIFT); + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_CONTROL_1_REGISTER, phy_data); + + /* + * Configure the Copper Specific Interrupt Enable Register + * (88E151x) / Interrupt Enable Register (88E1111). + * The interrupt status register provides a convenient way to + * detect relevant state changes, also, PHY management could + * eventually be changed from polling to interrupt-driven. + * There's just one big catch: at least on the Zedboard, the + * PHY interrupt line isn't wired up, therefore, the GEM can + * never trigger a PHY interrupt. Still, the PHY interrupts + * are configured & enabled in order to obtain all relevant + * status data from a single source. + * + * -> all bits contained herein will be retained during the + * upcoming software reset operation. + * Page 0, register address 18 = (Copper Specific) Interrupt + * Enable Register, + * bit [14] = Speed changed interrupt enable, + * bit [13] = Duplex changed interrupt enable, + * bit [11] = Auto-negotiation completed interrupt enable, + * bit [10] = Link status changed interrupt enable. + * Comp. datasheet table 78 + */ + phy_data = PHY_MRVL_COPPER_SPEED_CHANGED_INT_BIT | + PHY_MRVL_COPPER_DUPLEX_CHANGED_INT_BIT | + PHY_MRVL_COPPER_AUTONEG_COMPLETED_INT_BIT | + PHY_MRVL_COPPER_LINK_STATUS_CHANGED_INT_BIT; + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_INT_ENABLE_REGISTER, phy_data); + + /* Trigger a PHY Reset, affecting pages 0, 2, 3, 5, 7. */ + phy_xlnx_gem_marvell_alaska_reset(dev); + + /* + * Clear the interrupt status register before advertising the + * supported link speed(s). + */ + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_INT_STATUS_REGISTER); + + /* + * Set which link speeds and duplex modes shall be advertised during + * auto-negotiation, then re-enable auto-negotiation. 
PHY link speed + * advertisement configuration as described in Zynq-7000 TRM, chapter + * 16.3.4, p. 517. + */ + + /* + * Advertise the link speed from the device configuration & perform + * auto-negotiation. This process involves: + * + * Page 0, register address 4 = + * Copper Auto-Negotiation Advertisement Register, + * Page 0, register address 0 = + * Copper Control Register, bit [15] = Reset -> apply all changes + * made regarding advertisement, + * Page 0, register address 9 = + * 1000BASE-T Control Register (if link speed = 1GBit/s), + * Page 0, register address 1 = + * Copper Status Register, bit [5] = Copper Auto-Negotiation + * Complete. + * + * Comp. datasheet tables 68 & 73. + */ + + /* + * 88E151x only: + * Register 4, bits [4..0] = Selector field, 00001 = 802.3. Those bits + * are reserved in other Marvell PHYs. + */ + if ((dev_data->phy_id & PHY_MRVL_PHY_ID_MODEL_MASK) == + PHY_MRVL_PHY_ID_MODEL_88E151X) { + phy_data = PHY_MRVL_ADV_SELECTOR_802_3; + } else { + phy_data = 0x0000; + } + + /* + * Clear the 1 GBit/s FDX/HDX advertisement bits from reg. 9's current + * contents in case we're going to advertise anything below 1 GBit/s + * as maximum / nominal link speed. + */ + phy_data_gbit = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_1000BASET_CONTROL_REGISTER); + phy_data_gbit &= ~PHY_MRVL_ADV_1000BASET_FDX_BIT; + phy_data_gbit &= ~PHY_MRVL_ADV_1000BASET_HDX_BIT; + + if (dev_conf->enable_fdx) { + if (dev_conf->max_link_speed == LINK_1GBIT) { + /* Advertise 1 GBit/s, full duplex */ + phy_data_gbit |= PHY_MRVL_ADV_1000BASET_FDX_BIT; + if (dev_conf->phy_advertise_lower) { + /* + 100 MBit/s, full duplex */ + phy_data |= PHY_MRVL_ADV_100BASET_FDX_BIT; + /* + 10 MBit/s, full duplex */ + phy_data |= PHY_MRVL_ADV_10BASET_FDX_BIT; + } + } else if (dev_conf->max_link_speed == LINK_100MBIT) { + /* Advertise 100 MBit/s, full duplex */ + phy_data |= PHY_MRVL_ADV_100BASET_FDX_BIT; + if (dev_conf->phy_advertise_lower) { + /* + 10 MBit/s, full duplex */ + phy_data |= PHY_MRVL_ADV_10BASET_FDX_BIT; + } + } else if (dev_conf->max_link_speed == LINK_10MBIT) { + /* Advertise 10 MBit/s, full duplex */ + phy_data |= PHY_MRVL_ADV_10BASET_FDX_BIT; + } + } else { + if (dev_conf->max_link_speed == LINK_1GBIT) { + /* Advertise 1 GBit/s, half duplex */ + phy_data_gbit = PHY_MRVL_ADV_1000BASET_HDX_BIT; + if (dev_conf->phy_advertise_lower) { + /* + 100 MBit/s, half duplex */ + phy_data |= PHY_MRVL_ADV_100BASET_HDX_BIT; + /* + 10 MBit/s, half duplex */ + phy_data |= PHY_MRVL_ADV_10BASET_HDX_BIT; + } + } else if (dev_conf->max_link_speed == LINK_100MBIT) { + /* Advertise 100 MBit/s, half duplex */ + phy_data |= PHY_MRVL_ADV_100BASET_HDX_BIT; + if (dev_conf->phy_advertise_lower) { + /* + 10 MBit/s, half duplex */ + phy_data |= PHY_MRVL_ADV_10BASET_HDX_BIT; + } + } else if (dev_conf->max_link_speed == LINK_10MBIT) { + /* Advertise 10 MBit/s, half duplex */ + phy_data |= PHY_MRVL_ADV_10BASET_HDX_BIT; + } + } + + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_1000BASET_CONTROL_REGISTER, phy_data_gbit); + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_AUTONEG_ADV_REGISTER, phy_data); + + /* + * Trigger a PHY reset, affecting pages 0, 2, 3, 5, 7. + * Afterwards, set the auto-negotiation enable bit [12] in the + * Copper Control Register. 
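+ *
+ * As a worked example (the chosen configuration values are assumptions
+ * for illustration only): with max_link_speed = LINK_1GBIT,
+ * enable_fdx = true and phy_advertise_lower = true, the code above sets
+ * PHY_MRVL_ADV_1000BASET_FDX_BIT in register 9 and writes
+ * PHY_MRVL_ADV_SELECTOR_802_3 | PHY_MRVL_ADV_100BASET_FDX_BIT |
+ * PHY_MRVL_ADV_10BASET_FDX_BIT to register 4 (on a 88E151x) before the
+ * reset and the auto-negotiation restart below take effect.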
+ */ + phy_xlnx_gem_marvell_alaska_reset(dev); + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_CONTROL_REGISTER); + phy_data |= PHY_MRVL_COPPER_CONTROL_AUTONEG_ENABLE_BIT; + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_CONTROL_REGISTER, phy_data); + + /* + * Set the link speed to 'link down' for now, once auto-negotiation + * is complete, the result will be handled by the system work queue. + */ + dev_data->eff_link_speed = LINK_DOWN; +} + +/** + * @brief Marvell Alaska PHY status change polling function + * Status change polling function for the Marvell Alaska PHY series + * + * @param dev Pointer to the device data + * @return A set of bits indicating whether one or more of the following + * events has occurred: auto-negotiation completed, link state + * changed, link speed changed. + */ +static uint16_t phy_xlnx_gem_marvell_alaska_poll_sc(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + uint16_t phy_data; + uint16_t phy_status = 0; + + /* + * PHY status change detection is implemented by reading the + * interrupt status register. + * Page 0, register address 19 = Copper Interrupt Status Register + * bit [14] = Speed changed interrupt, + * bit [13] = Duplex changed interrupt, + * bit [11] = Auto-negotiation completed interrupt, + * bit [10] = Link status changed interrupt. + * Comp. datasheet table 79 + */ + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_INT_STATUS_REGISTER); + + if ((phy_data & PHY_MRVL_COPPER_AUTONEG_COMPLETED_INT_BIT) != 0) { + phy_status |= PHY_XLNX_GEM_EVENT_AUTONEG_COMPLETE; + } + if (((phy_data & PHY_MRVL_COPPER_DUPLEX_CHANGED_INT_BIT) != 0) || + ((phy_data & PHY_MRVL_COPPER_LINK_STATUS_CHANGED_INT_BIT) != 0)) { + phy_status |= PHY_XLNX_GEM_EVENT_LINK_STATE_CHANGED; + } + if ((phy_data & PHY_MRVL_COPPER_SPEED_CHANGED_INT_BIT) != 0) { + phy_status |= PHY_XLNX_GEM_EVENT_LINK_SPEED_CHANGED; + } + + /* + * Clear the status register, preserve reserved bit [3] as indicated + * by the datasheet + */ + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_INT_STATUS_REGISTER, (phy_data & 0x8)); + + return phy_status; +} + +/** + * @brief Marvell Alaska PHY link status polling function + * Link status polling function for the Marvell Alaska PHY series + * + * @param dev Pointer to the device data + * @return 1 if the PHY indicates link up, 0 if the link is down + */ +static uint8_t phy_xlnx_gem_marvell_alaska_poll_lsts(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + uint16_t phy_data; + + /* + * Current link status is obtained from: + * Page 0, register address 1 = Copper Status Register + * bit [2] = Copper Link Status + */ + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_STATUS_REGISTER); + + return ((phy_data >> PHY_MRVL_COPPER_LINK_STATUS_BIT_SHIFT) & 0x0001); +} + +/** + * @brief Marvell Alaska PHY link speed polling function + * Link speed polling function for the Marvell Alaska PHY series + * + * @param dev Pointer to the device data + * @return Enum containing the current link speed reported by the PHY + */ +static enum eth_xlnx_link_speed phy_xlnx_gem_marvell_alaska_poll_lspd( + const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = 
DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + enum eth_xlnx_link_speed link_speed; + uint16_t phy_data; + + /* + * Current link speed is obtained from: + * Page 0, register address 17 = Copper Specific Status Register 1 + * bits [15 .. 14] = Speed. + */ + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_MRVL_COPPER_STATUS_1_REGISTER); + phy_data >>= PHY_MRVL_LINK_SPEED_SHIFT; + phy_data &= PHY_MRVL_LINK_SPEED_MASK; + + /* + * Link speed bit masks: comp. datasheet, table 77 @ description + * of the 'Speed' bits. + */ + switch (phy_data) { + case PHY_MRVL_LINK_SPEED_10MBIT: + link_speed = LINK_10MBIT; + break; + case PHY_MRVL_LINK_SPEED_100MBIT: + link_speed = LINK_100MBIT; + break; + case PHY_MRVL_LINK_SPEED_1GBIT: + link_speed = LINK_1GBIT; + break; + default: + link_speed = LINK_DOWN; + break; + }; + + return link_speed; +} + +/* + * Vendor-specific PHY management functions for: + * Texas Instruments TLK105 + * Texas Instruments DP83822 + * with the DP83822 being the successor to the deprecated TLK105. + * Register IDs & procedures are based on the corresponding datasheets: + * https://www.ti.com/lit/gpn/tlk105 + * https://www.ti.com/lit/gpn/dp83822i + */ + +/** + * @brief TI TLK105 & DP83822 PHY reset function + * Reset function for the TI TLK105 & DP83822 PHYs + * + * @param dev Pointer to the device data + */ +static void phy_xlnx_gem_ti_dp83822_reset(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + uint16_t phy_data; + uint32_t retries = 0; + + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_TI_BASIC_MODE_CONTROL_REGISTER); + phy_data |= PHY_TI_BASIC_MODE_CONTROL_RESET_BIT; + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_TI_BASIC_MODE_CONTROL_REGISTER, phy_data); + + while (((phy_data & PHY_TI_BASIC_MODE_CONTROL_RESET_BIT) != 0) && (retries++ < 10)) { + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_TI_BASIC_MODE_CONTROL_REGISTER); + } + if (retries == 10) { + LOG_ERR("%s reset PHY address %hhu (TI TLK105/DP83822) timed out", + dev->name, dev_data->phy_addr); + } +} + +/** + * @brief TI TLK105 & DP83822 PHY configuration function + * Configuration function for the TI TLK105 & DP83822 PHYs + * + * @param dev Pointer to the device data + */ +static void phy_xlnx_gem_ti_dp83822_cfg(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + uint16_t phy_data = PHY_TI_ADV_SELECTOR_802_3; + + /* Configure link advertisement */ + if (dev_conf->enable_fdx) { + if (dev_conf->max_link_speed == LINK_100MBIT) { + /* Advertise 100BASE-TX, full duplex */ + phy_data |= PHY_TI_ADV_100BASET_FDX_BIT; + if (dev_conf->phy_advertise_lower) { + /* + 10BASE-TX, full duplex */ + phy_data |= PHY_TI_ADV_10BASET_FDX_BIT; + } + } else if (dev_conf->max_link_speed == LINK_10MBIT) { + /* Advertise 10BASE-TX, full duplex */ + phy_data |= PHY_TI_ADV_10BASET_FDX_BIT; + } + } else { + if (dev_conf->max_link_speed == LINK_100MBIT) { + /* Advertise 100BASE-TX, half duplex */ + phy_data |= PHY_TI_ADV_100BASET_HDX_BIT; + if (dev_conf->phy_advertise_lower) { + /* + 10BASE-TX, half duplex */ + phy_data |= PHY_TI_ADV_10BASET_HDX_BIT; + } + } else if (dev_conf->max_link_speed == LINK_10MBIT) { + /* Advertise 10BASE-TX, half duplex */ + phy_data |= PHY_TI_ADV_10BASET_HDX_BIT; + } + } + 
phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_TI_AUTONEG_ADV_REGISTER, phy_data); + + /* Enable auto-negotiation */ + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_TI_BASIC_MODE_CONTROL_REGISTER); + phy_data |= PHY_TI_BASIC_MODE_CONTROL_AUTONEG_ENABLE_BIT; + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_TI_BASIC_MODE_CONTROL_REGISTER, phy_data); + + /* Robust Auto MDIX */ + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_TI_CONTROL_REGISTER_1); + phy_data |= PHY_TI_CR1_ROBUST_AUTO_MDIX_BIT; + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_TI_CONTROL_REGISTER_1, phy_data); + + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_TI_PHY_CONTROL_REGISTER); + /* Auto MDIX enable */ + phy_data |= PHY_TI_PHY_CONTROL_AUTO_MDIX_ENABLE_BIT; + /* Link LED shall only indicate link up or down, no RX/TX activity */ + phy_data |= PHY_TI_PHY_CONTROL_LED_CONFIG_LINK_ONLY_BIT; + /* Force MDIX disable */ + phy_data &= ~PHY_TI_PHY_CONTROL_FORCE_MDIX_BIT; + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_TI_PHY_CONTROL_REGISTER, phy_data); + + /* Set blink rate to 5 Hz */ + phy_data = (PHY_TI_LED_CONTROL_BLINK_RATE_5HZ << + PHY_TI_LED_CONTROL_BLINK_RATE_SHIFT); + phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, + PHY_TI_LED_CONTROL_REGISTER, phy_data); + + /* + * Set the link speed to 'link down' for now, once auto-negotiation + * is complete, the result will be handled by the system work queue. + */ + dev_data->eff_link_speed = LINK_DOWN; +} + +/** + * @brief TI TLK105 & DP83822 PHY status change polling function + * Status change polling function for the TI TLK105 & DP83822 PHYs + * + * @param dev Pointer to the device data + * @return A set of bits indicating whether one or more of the following + * events has occurred: auto-negotiation completed, link state + * changed, link speed changed. + */ +static uint16_t phy_xlnx_gem_ti_dp83822_poll_sc(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + uint16_t phy_data; + uint16_t phy_status = 0; + + /* + * The relevant status bits are obtained from the MII Interrupt + * Status Register 1. The upper byte of the register's data word + * contains the status bits which are set regardless of whether + * the corresponding interrupt enable bits are set in the lower + * byte or not (comp. TLK105 documentation, chapter 8.1.16). 
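+ *
+ * Illustrative sketch of how the event bits returned by this function
+ * are meant to be consumed (presumably by the PHY polling work item in
+ * eth_xlnx_gem.c; the exact consumer code is not part of this file):
+ *
+ *   uint16_t events = api->phy_poll_status_change_func(dev);
+ *   if (events & PHY_XLNX_GEM_EVENT_LINK_STATE_CHANGED) {
+ *     link_up = api->phy_poll_link_status_func(dev);
+ *   }
+ *   if (events & PHY_XLNX_GEM_EVENT_LINK_SPEED_CHANGED) {
+ *     speed = api->phy_poll_link_speed_func(dev);
+ *   }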
+ */ + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_TI_MII_INTERRUPT_STATUS_REGISTER_1); + + if ((phy_data & PHY_TI_AUTONEG_COMPLETED_INT_BIT) != 0) { + phy_status |= PHY_XLNX_GEM_EVENT_AUTONEG_COMPLETE; + } + if ((phy_data & PHY_TI_DUPLEX_CHANGED_INT_BIT) != 0) { + phy_status |= PHY_XLNX_GEM_EVENT_LINK_STATE_CHANGED; + } + if ((phy_data & PHY_TI_LINK_STATUS_CHANGED_INT_BIT) != 0) { + phy_status |= PHY_XLNX_GEM_EVENT_LINK_STATE_CHANGED; + } + if ((phy_data & PHY_TI_SPEED_CHANGED_INT_BIT) != 0) { + phy_status |= PHY_XLNX_GEM_EVENT_LINK_SPEED_CHANGED; + } + + return phy_status; +} + +/** + * @brief TI TLK105 & DP83822 PHY link status polling function + * Link status polling function for the TI TLK105 & DP83822 PHYs + * + * @param dev Pointer to the device data + * @return 1 if the PHY indicates link up, 0 if the link is down + */ +static uint8_t phy_xlnx_gem_ti_dp83822_poll_lsts(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + uint16_t phy_data; + + /* + * Double read of the BMSR is intentional - the relevant bit is latched + * low so that after a link down -> link up transition, the first read + * of the BMSR will still return the latched link down status rather + * than the current status. + */ + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_TI_BASIC_MODE_STATUS_REGISTER); + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_TI_BASIC_MODE_STATUS_REGISTER); + + return ((phy_data & PHY_TI_BASIC_MODE_STATUS_LINK_STATUS_BIT) != 0); +} + +/** + * @brief TI TLK105 & DP83822 PHY link speed polling function + * Link speed polling function for the TI TLK105 & DP83822 PHYs + * + * @param dev Pointer to the device data + * @return Enum containing the current link speed reported by the PHY + */ +static enum eth_xlnx_link_speed phy_xlnx_gem_ti_dp83822_poll_lspd( + const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + enum eth_xlnx_link_speed link_speed; + uint16_t phy_data; + + phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, + PHY_TI_PHY_STATUS_REGISTER); + + /* PHYSCR[0] is the link established indication bit */ + if ((phy_data & PHY_TI_PHY_STATUS_LINK_BIT) != 0) { + /* PHYSCR[1] is the speed status bit: 0 = 100 Mbps, 1 = 10 Mbps. 
*/ + if ((phy_data & PHY_TI_PHY_STATUS_SPEED_BIT) != 0) { + link_speed = LINK_10MBIT; + } else { + link_speed = LINK_100MBIT; + } + } else { + link_speed = LINK_DOWN; + } + + return link_speed; +} + +/** + * @brief Marvell Alaska PHY function pointer table + * Function pointer table for the Marvell Alaska PHY series + * specific management functions + */ +static struct phy_xlnx_gem_api phy_xlnx_gem_marvell_alaska_api = { + .phy_reset_func = phy_xlnx_gem_marvell_alaska_reset, + .phy_configure_func = phy_xlnx_gem_marvell_alaska_cfg, + .phy_poll_status_change_func = phy_xlnx_gem_marvell_alaska_poll_sc, + .phy_poll_link_status_func = phy_xlnx_gem_marvell_alaska_poll_lsts, + .phy_poll_link_speed_func = phy_xlnx_gem_marvell_alaska_poll_lspd +}; + +/** + * @brief Texas Instruments TLK105 & DP83822 PHY function pointer table + * Function pointer table for the Texas Instruments TLK105 / DP83822 PHY + * series specific management functions + */ +static struct phy_xlnx_gem_api phy_xlnx_gem_ti_dp83822_api = { + .phy_reset_func = phy_xlnx_gem_ti_dp83822_reset, + .phy_configure_func = phy_xlnx_gem_ti_dp83822_cfg, + .phy_poll_status_change_func = phy_xlnx_gem_ti_dp83822_poll_sc, + .phy_poll_link_status_func = phy_xlnx_gem_ti_dp83822_poll_lsts, + .phy_poll_link_speed_func = phy_xlnx_gem_ti_dp83822_poll_lspd +}; + +/* + * All vendor-specific API structs & code are located above + * -> assemble the top-level list of supported devices the + * upcoming function phy_xlnx_gem_detect will work with. + */ + +/** + * @brief Top-level table of supported PHYs + * Top-level table of PHYs supported by the GEM driver. Contains 1..n + * supported PHY specifications, consisting of the PHY ID plus a mask + * for masking out variable parts of the PHY ID such as hardware revisions, + * as well as a textual description of the PHY model and a pointer to + * the corresponding PHY management function pointer table. + */ +static struct phy_xlnx_gem_supported_dev phy_xlnx_gem_supported_devs[] = { + { + .phy_id = PHY_MRVL_PHY_ID_MODEL_88E1111, + .phy_id_mask = PHY_MRVL_PHY_ID_MODEL_MASK, + .api = &phy_xlnx_gem_marvell_alaska_api, + .identifier = "Marvell Alaska 88E1111" + }, + { + .phy_id = PHY_MRVL_PHY_ID_MODEL_88E151X, + .phy_id_mask = PHY_MRVL_PHY_ID_MODEL_MASK, + .api = &phy_xlnx_gem_marvell_alaska_api, + .identifier = "Marvell Alaska 88E151x" + }, + { + .phy_id = PHY_TI_PHY_ID_MODEL_DP83822, + .phy_id_mask = PHY_TI_PHY_ID_MODEL_MASK, + .api = &phy_xlnx_gem_ti_dp83822_api, + .identifier = "Texas Instruments DP83822" + }, + { + .phy_id = PHY_TI_PHY_ID_MODEL_TLK105, + .phy_id_mask = PHY_TI_PHY_ID_MODEL_MASK, + .api = &phy_xlnx_gem_ti_dp83822_api, + .identifier = "Texas Instruments TLK105" + } +}; + +/** + * @brief Top-level PHY detection function + * Top-level PHY detection function called by the GEM driver if PHY management + * is enabled for the current GEM device instance. This function is generic + * and does not require any knowledge regarding PHY vendors, models etc. + * + * @param dev Pointer to the device data + * @retval -ENOTSUP if PHY management is disabled for the current GEM + * device instance + * @retval -EIO if no (supported) PHY was detected + * @retval 0 if a supported PHY has been detected + */ +int phy_xlnx_gem_detect(const struct device *dev) +{ + const struct eth_xlnx_gem_dev_cfg *dev_conf = DEV_CFG(dev); + struct eth_xlnx_gem_dev_data *dev_data = DEV_DATA(dev); + + uint8_t phy_curr_addr; + uint8_t phy_first_addr = dev_conf->phy_mdio_addr_fix; + uint8_t phy_last_addr = (dev_conf->phy_mdio_addr_fix != 0) ? 
+ dev_conf->phy_mdio_addr_fix : 31; + uint32_t phy_id; + uint16_t phy_data; + uint32_t list_iter; + + /* + * Clear the PHY address & ID in the device data struct -> may be + * pre-initialized with a non-zero address meaning auto detection + * is disabled. If eventually a supported PHY is found, a non- + * zero address will be written back to the data struct. + */ + dev_data->phy_addr = 0; + dev_data->phy_id = 0; + dev_data->phy_access_api = NULL; + + if (!dev_conf->init_phy) { + return -ENOTSUP; + } + + /* + * PHY detection as described in Zynq-7000 TRM, chapter 16.3.4, + * p. 517 + */ + for (phy_curr_addr = phy_first_addr; + phy_curr_addr <= phy_last_addr; + phy_curr_addr++) { + /* Read the upper & lower PHY ID 16-bit words */ + phy_data = phy_xlnx_gem_mdio_read( + dev_conf->base_addr, phy_curr_addr, + PHY_IDENTIFIER_1_REGISTER); + phy_id = (((uint32_t)phy_data << 16) & 0xFFFF0000); + phy_data = phy_xlnx_gem_mdio_read( + dev_conf->base_addr, phy_curr_addr, + PHY_IDENTIFIER_2_REGISTER); + phy_id |= ((uint32_t)phy_data & 0x0000FFFF); + + if (phy_id != 0x00000000 && phy_id != 0xFFFFFFFF) { + LOG_DBG("%s detected PHY at address %hhu: " + "ID 0x%08X", + dev->name, + phy_curr_addr, phy_id); + + /* + * Iterate the list of all supported PHYs -> if the + * current PHY is supported, store all related data + * in the device's run-time data struct. + */ + for (list_iter = 0; list_iter < ARRAY_SIZE(phy_xlnx_gem_supported_devs); + list_iter++) { + if (phy_xlnx_gem_supported_devs[list_iter].phy_id == + (phy_xlnx_gem_supported_devs[list_iter].phy_id_mask + & phy_id)) { + LOG_DBG("%s identified supported PHY: %s", + dev->name, + phy_xlnx_gem_supported_devs[list_iter].identifier); + + /* + * Store the numeric values of the PHY ID and address + * as well as the corresponding set of function pointers + * in the device's run-time data struct. + */ + dev_data->phy_addr = phy_curr_addr; + dev_data->phy_id = phy_id; + dev_data->phy_access_api = + phy_xlnx_gem_supported_devs[list_iter].api; + + return 0; + } + } + } + } + + LOG_ERR("%s PHY detection failed", dev->name); + return -EIO; +} + +/* EOF */ diff --git a/drivers/ethernet/phy_xlnx_gem.h b/drivers/ethernet/phy_xlnx_gem.h new file mode 100644 index 00000000000..017a192a1b8 --- /dev/null +++ b/drivers/ethernet/phy_xlnx_gem.h @@ -0,0 +1,157 @@ +/* + * Xilinx Processor System Gigabit Ethernet controller (GEM) driver + * + * PHY management interface and related data + * + * Copyright (c) 2021, Weidmueller Interface GmbH & Co. 
KG + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _ZEPHYR_DRIVERS_ETHERNET_PHY_XLNX_GEM_H_ +#define _ZEPHYR_DRIVERS_ETHERNET_PHY_XLNX_GEM_H_ + +#include +#include + +/* Event codes used to indicate a particular state change to the driver */ +#define PHY_XLNX_GEM_EVENT_LINK_SPEED_CHANGED (1 << 0) +#define PHY_XLNX_GEM_EVENT_LINK_STATE_CHANGED (1 << 1) +#define PHY_XLNX_GEM_EVENT_AUTONEG_COMPLETE (1 << 2) + +/* PHY register addresses & constants that are not vendor-specific */ +#define PHY_IDENTIFIER_1_REGISTER 2 +#define PHY_IDENTIFIER_2_REGISTER 3 + +/* PHY registers & constants -> Marvell Alaska specific */ + +/* Marvell PHY ID bits [3..0] = revision -> discard during ID check */ +#define PHY_MRVL_PHY_ID_MODEL_MASK 0xFFFFFFF0 +#define PHY_MRVL_PHY_ID_MODEL_88E1111 0x01410CC0 +#define PHY_MRVL_PHY_ID_MODEL_88E151X 0x01410DD0 + +#define PHY_MRVL_BASE_REGISTERS_PAGE 0 +#define PHY_MRVL_COPPER_CONTROL_REGISTER 0 +#define PHY_MRVL_COPPER_STATUS_REGISTER 1 +#define PHY_MRVL_COPPER_AUTONEG_ADV_REGISTER 4 +#define PHY_MRVL_COPPER_LINK_PARTNER_ABILITY_REGISTER 5 +#define PHY_MRVL_1000BASET_CONTROL_REGISTER 9 +#define PHY_MRVL_COPPER_CONTROL_1_REGISTER 16 +#define PHY_MRVL_COPPER_STATUS_1_REGISTER 17 +#define PHY_MRVL_COPPER_INT_ENABLE_REGISTER 18 +#define PHY_MRVL_COPPER_INT_STATUS_REGISTER 19 +#define PHY_MRVL_COPPER_PAGE_SWITCH_REGISTER 22 +#define PHY_MRVL_GENERAL_CONTROL_1_REGISTER 20 +#define PHY_MRVL_GENERAL_CONTROL_1_PAGE 18 + +#define PHY_MRVL_GENERAL_CONTROL_1_RESET_BIT (1 << 15) + +#define PHY_MRVL_COPPER_CONTROL_RESET_BIT (1 << 15) +#define PHY_MRVL_COPPER_CONTROL_AUTONEG_ENABLE_BIT (1 << 12) + +#define PHY_MRVL_ADV_1000BASET_FDX_BIT (1 << 9) +#define PHY_MRVL_ADV_1000BASET_HDX_BIT (1 << 8) +#define PHY_MRVL_ADV_100BASET_FDX_BIT (1 << 8) +#define PHY_MRVL_ADV_100BASET_HDX_BIT (1 << 7) +#define PHY_MRVL_ADV_10BASET_FDX_BIT (1 << 6) +#define PHY_MRVL_ADV_10BASET_HDX_BIT (1 << 5) +#define PHY_MRVL_ADV_SELECTOR_802_3 0x0001 + +#define PHY_MRVL_MDIX_CONFIG_MASK 0x0003 +#define PHY_MRVL_MDIX_CONFIG_SHIFT 5 +#define PHY_MRVL_MDIX_AUTO_CROSSOVER_ENABLE 0x0003 +#define PHY_MRVL_MODE_CONFIG_MASK 0x0003 +#define PHY_MRVL_MODE_CONFIG_SHIFT 0 + +#define PHY_MRVL_COPPER_SPEED_CHANGED_INT_BIT (1 << 14) +#define PHY_MRVL_COPPER_DUPLEX_CHANGED_INT_BIT (1 << 13) +#define PHY_MRVL_COPPER_AUTONEG_COMPLETED_INT_BIT (1 << 11) +#define PHY_MRVL_COPPER_LINK_STATUS_CHANGED_INT_BIT (1 << 10) +#define PHY_MRVL_COPPER_LINK_STATUS_BIT_SHIFT 5 + +#define PHY_MRVL_LINK_SPEED_SHIFT 14 +#define PHY_MRVL_LINK_SPEED_MASK 0x3 +#define PHY_MRVL_LINK_SPEED_10MBIT 0 +#define PHY_MRVL_LINK_SPEED_100MBIT 1 +#define PHY_MRVL_LINK_SPEED_1GBIT 2 + +/*TI TLK105 & DP83822*/ + +/* TI PHY ID bits [3..0] = revision -> discard during ID check */ +#define PHY_TI_PHY_ID_MODEL_MASK 0xFFFFFFF0 +#define PHY_TI_PHY_ID_MODEL_DP83822 0x2000A240 +#define PHY_TI_PHY_ID_MODEL_TLK105 0x2000A210 + +#define PHY_TI_PHY_SPECIFIC_CONTROL_REGISTER 0x0010 +#define PHY_TI_BASIC_MODE_CONTROL_REGISTER 0x0000 +#define PHY_TI_BASIC_MODE_STATUS_REGISTER 0x0001 +#define PHY_TI_AUTONEG_ADV_REGISTER 0x0004 +#define PHY_TI_CONTROL_REGISTER_1 0x0009 +#define PHY_TI_PHY_STATUS_REGISTER 0x0010 +#define PHY_TI_MII_INTERRUPT_STATUS_REGISTER_1 0x0012 +#define PHY_TI_LED_CONTROL_REGISTER 0x0018 +#define PHY_TI_PHY_CONTROL_REGISTER 0x0019 + +#define PHY_TI_BASIC_MODE_CONTROL_RESET_BIT (1 << 15) +#define PHY_TI_BASIC_MODE_CONTROL_AUTONEG_ENABLE_BIT (1 << 12) + +#define PHY_TI_BASIC_MODE_STATUS_LINK_STATUS_BIT (1 << 2) + +#define PHY_TI_LINK_STATUS_CHANGED_INT_BIT 
(1 << 13) +#define PHY_TI_SPEED_CHANGED_INT_BIT (1 << 12) +#define PHY_TI_DUPLEX_CHANGED_INT_BIT (1 << 11) +#define PHY_TI_AUTONEG_COMPLETED_INT_BIT (1 << 10) + +#define PHY_TI_ADV_SELECTOR_802_3 0x0001 +#define PHY_TI_ADV_100BASET_FDX_BIT (1 << 8) +#define PHY_TI_ADV_100BASET_HDX_BIT (1 << 7) +#define PHY_TI_ADV_10BASET_FDX_BIT (1 << 6) +#define PHY_TI_ADV_10BASET_HDX_BIT (1 << 5) + +#define PHY_TI_CR1_ROBUST_AUTO_MDIX_BIT (1 << 5) + +#define PHY_TI_PHY_CONTROL_AUTO_MDIX_ENABLE_BIT (1 << 15) +#define PHY_TI_PHY_CONTROL_FORCE_MDIX_BIT (1 << 14) +#define PHY_TI_PHY_CONTROL_LED_CONFIG_LINK_ONLY_BIT (1 << 5) + +#define PHY_TI_LED_CONTROL_BLINK_RATE_SHIFT 9 +#define PHY_TI_LED_CONTROL_BLINK_RATE_20HZ 0 +#define PHY_TI_LED_CONTROL_BLINK_RATE_10HZ 1 +#define PHY_TI_LED_CONTROL_BLINK_RATE_5HZ 2 +#define PHY_TI_LED_CONTROL_BLINK_RATE_2HZ 3 + +#define PHY_TI_PHY_STATUS_LINK_BIT (1 << 0) +#define PHY_TI_PHY_STATUS_SPEED_BIT (1 << 1) + +/** + * @brief Vendor-specific PHY management function pointer table struct + * + * Contains the PHY management function pointers for a specific PHY + * make or model. + */ +struct phy_xlnx_gem_api { + void (*phy_reset_func)(const struct device *dev); + void (*phy_configure_func)(const struct device *dev); + uint16_t (*phy_poll_status_change_func)(const struct device *dev); + uint8_t (*phy_poll_link_status_func)(const struct device *dev); + enum eth_xlnx_link_speed (*phy_poll_link_speed_func)(const struct device *dev); +}; + +/** + * @brief Supported PHY list entry struct + * + * Contains the PHY management function pointers for a specific PHY + * make or model. + */ +struct phy_xlnx_gem_supported_dev { + uint32_t phy_id; + uint32_t phy_id_mask; + struct phy_xlnx_gem_api *api; + const char *identifier; +}; + +/* PHY identification function -> generic, not vendor-specific */ +int phy_xlnx_gem_detect(const struct device *dev); + +#endif /* _ZEPHYR_DRIVERS_ETHERNET_PHY_XLNX_GEM_H_ */ + +/* EOF */ diff --git a/dts/bindings/ethernet/xlnx,gem.yaml b/dts/bindings/ethernet/xlnx,gem.yaml new file mode 100644 index 00000000000..f6dfd95c5ef --- /dev/null +++ b/dts/bindings/ethernet/xlnx,gem.yaml @@ -0,0 +1,378 @@ +# +# Copyright (c) 2021, Weidmueller Interface GmbH & Co. KG +# SPDX-License-Identifier: Apache-2.0 +# + +description: Xilinx GEM Ethernet controller + +compatible: "xlnx,gem" + +include: ethernet.yaml + +properties: + reg: + required: true + + interrupts: + required: true + + clock-frequency: + type: int + required: true + description: | + Specifies the base clock frequency from which the GEM's TX clock + frequency will be derived using two dividers in the respective GEM's + clock control register in the CRL_APB. The GEM's TX clock frequency + is determined by the current link speed reported by the PHY, to + which it will be adjusted at run-time. Therefore, the value of this + item must be set to the clock frequency of the PLL supplying the + respective GEM's TX clock - by default, this is the IO PLL. + + mdc-divider: + type: int + required: true + description: | + The MDC clock divider for the respective GEM. This is the divider + applied to the LPD_LSBUS clock in order to derive MDIO interface + clock driving communications with the attached PHY. Refer to the + ZynqMP register documentation (ug1087), network_config (GEM) Register + Description, bits [20:18] to determine the appropriate divider for + the current target's LPD LSBUS clock frequency. 
+
+  init-mdio-phy:
+    type: boolean
+    required: false
+    description: |
+      Activates the management of a PHY associated with the controller
+      instance. If this parameter is activated at the board level, the
+      default values of the associated parameters mdio-phy-address,
+      phy-poll-interval, link-speed and advertise-lower-link-speeds
+      should be checked and overwritten at the board level if required.
+
+  mdio-phy-address:
+    type: int
+    required: true
+    description: |
+      The address on the MDIO bus of the PHY associated with the controller
+      instance. Set the address to 0 for auto-detection (the first responding
+      PHY will be claimed by the driver, watch out in case of shared MDIO
+      use), or to a fixed address between 1 and 32.
+
+  phy-poll-interval:
+    type: int
+    required: true
+    description: |
+      PHY status polling interval in milliseconds for a driver instance
+      managing an associated PHY.
+
+  link-speed:
+    type: int
+    required: true
+    description: |
+      Nominal link speed. If no PHY is managed by an instance of this driver,
+      the respective controller will be configured to match the link speed
+      specified here. If a PHY is managed by the driver, advertisement of
+      the link speed specified here will be requested. If the optional
+      property advertise-lower-link-speeds is set, advertisement of the link
+      speed specified here plus any valid link speed below this value will
+      be requested.
+    enum:
+      - 1
+      - 2
+      - 3
+
+  advertise-lower-link-speeds:
+    type: boolean
+    required: false
+    description: |
+      Indicates to a driver instance which manages an associated PHY on
+      the MDIO bus to include link speeds lower than the nominal value
+      set in the link-speed property in the advertisement when requesting
+      link speed auto-negotiation with a peer system.
+
+  handle-rx-in-isr:
+    type: boolean
+    required: false
+    description: |
+      Moves the handling of the frame received interrupt, including the
+      transfer of packet data from the DMA to network packet buffers and
+      the subsequent propagation of the received packets to the network
+      stack, into the context of the ISR. Due to the unpredictability of
+      the runtime of the ISR whenever large amounts of data are received,
+      handling of the RX interrupt is normally deferred to the context
+      of the system work queue.
+
+  handle-tx-in-workq:
+    type: boolean
+    required: false
+    description: |
+      Moves the handling of the frame transmission done interrupt into the
+      context of the system work queue. By default, TX done handling is
+      performed in the context of the ISR, as it only involves a limited
+      number of memory accesses. This option CAN NOT be used if any
+      component exists within the current system setup that triggers the
+      transmission of packets from within the context of the system work
+      queue!
+
+  amba-ahb-dbus-width:
+    type: int
+    required: true
+    description: AMBA AHB data bus width.
+    enum:
+      - 0
+      - 1
+      - 2
+
+  amba-ahb-burst-length:
+    type: int
+    required: true
+    description: AMBA AHB burst length for DMA operations.
+    enum:
+      - 1
+      - 4
+      - 8
+      - 16
+
+  hw-rx-buffer-size:
+    type: int
+    required: true
+    description: |
+      Hardware RX buffer size, scalable between 1 kB and 8 kB, where the full
+      8 kB should be the default.
+    enum:
+      - 0
+      - 1
+      - 2
+      - 3
+
+  hw-rx-buffer-offset:
+    type: int
+    required: true
+    description: |
+      Data offset in the hardware RX packet buffer (in bytes). Valid range is
+      0-3 bytes.
+ + hw-tx-buffer-size-full: + type: boolean + required: false + description: | + When set, the hardware TX data buffer will make use of the full 4 kB + that are available. If unset, the hardware TX data buffer will be + limited to 2 kB. + + rx-buffer-descriptors: + type: int + required: true + description: | + The number of descriptors to be allocated in the RX buffer descriptor + ring. Must be <= 255. + + rx-buffer-size: + type: int + required: true + description: | + The size of each receive data buffer, must be a multiple of 8, highest + valid value is 16320, values less than 64 are not really useful. + + tx-buffer-descriptors: + type: int + required: true + description: | + The number of descriptors to be allocated in the TX buffer descriptor + ring. Must be <= 255. + + tx-buffer-size: + type: int + required: true + description: | + The size of each transmit data buffer, highest valid value is 16380, + values less than 64 are not really useful. + + ignore-ipg-rxer: + type: boolean + required: false + description: | + Optional feature flag - Ignore IPG rx_er. When set, rx_er has no + effect on the GEM's operation when rx_dv is low. Set this when using + the RGMII wrapper in half-duplex mode. + + disable-reject-nsp: + type: boolean + required: false + description: | + Optional feature flag - Receive bad preamble. When set, frames with + non-standard preamble will not be rejected. + + ipg-stretch: + type: boolean + required: false + description: | + Optional feature flag - Enable IPG stretch. When set, the transmit + IPG can be increased above 96 bit times depending on the previous + frame length using the IPG stretch register. + + sgmii-mode: + type: boolean + required: false + description: | + Optional feature flag - Enable SGMII mode. Changes the behaviour of + the auto-negotiation advertisement and link partner ability registers + to meet the requirements of SGMII and reduces the duration of the link + timer from 10 ms to 1.6 ms. + + disable-reject-fcs-crc-errors: + type: boolean + required: false + description: | + Optional feature flag - Disable rejection of FCS/CRC errors. + When set, frames with FCS/CRC errors will not be rejected. FCS error + statistics will still be collected for frames with bad FCS and FCS + status will be recorded in the frame's DMA descriptor. This option + should not be activated for normal operation. + + rx-halfdup-while-tx: + type: boolean + required: false + description: | + Optional feature flag - Enable frames to be received in half-duplex + mode while transmitting. + + rx-checksum-offload: + type: boolean + required: false + description: | + Optional feature flag - Enable RX IP/TCP/UDP checksum offload to + hardware. Frames with bad IP, TCP or UDP checksums will be discarded. + This option is NOT supported by the QEMU implementation of the GEM! + + tx-checksum-offload: + type: boolean + required: false + description: | + Optional feature flag - Enable TX IP/TCP/UDP checksum offload to + hardware. This option is NOT supported by the QEMU implementation + of the GEM! + + disable-pause-copy: + type: boolean + required: false + description: | + Optional feature flag - Do not copy received pause frames to memory. + Set this option in order to prevent valid pause frames from being + copied to memory. When set, pause frames are not copied to memory + regardless of the state of the copy all frames bit, whether a hash + match is found or whether a type ID match is identified. 
+      If a destination address match is found, the pause frame will be copied
+      to memory. Note that valid pause frames received will still increment
+      pause statistics and pause the transmission of frames as required.
+
+  discard-rx-fcs:
+    type: boolean
+    required: false
+    description: |
+      Optional feature flag - Remove FCS of received frames.
+      When set, received frames will be written to memory without their
+      frame check sequence (last 4 bytes). The frame length indicated will
+      be reduced by four bytes in this mode.
+
+  discard-rx-length-errors:
+    type: boolean
+    required: false
+    description: |
+      Optional feature flag - Discard frames with length field errors.
+      When set, frames with a measured length shorter than the extracted
+      length field (as indicated by bytes 13 and 14 in a non-VLAN tagged
+      frame) will be discarded. This only applies to frames with a length
+      field less than 0x0600.
+
+  pause-frame:
+    type: boolean
+    required: false
+    description: |
+      Optional feature flag - Enable pause. When set, transmission will
+      pause if a non-zero 802.3 classic pause frame is received and PFC
+      has not been negotiated.
+
+  tbi:
+    type: boolean
+    required: false
+    description: |
+      Optional feature flag - Enable TBI. When set, the TBI interface is
+      enabled instead of the GMII/MII interface.
+
+  ext-address-match:
+    type: boolean
+    required: false
+    description: |
+      Optional feature flag - Enable external address match. When set, the
+      external address match interface can be used to copy frames to memory.
+
+  long-frame-rx-support:
+    type: boolean
+    required: false
+    description: |
+      Optional feature flag - Enable reception of 1536 byte frames.
+      Normally, the GEM rejects any frame above 1518 bytes.
+
+  unicast-hash:
+    type: boolean
+    required: false
+    description: |
+      Optional feature flag - Enable unicast hash. When set, unicast frames
+      will be accepted when the 6 bit hash function of the destination
+      address points to a bit that is set in the hash register.
+
+  multicast-hash:
+    type: boolean
+    required: false
+    description: |
+      Optional feature flag - Enable multicast hash. When set, multicast
+      frames will be accepted when the 6 bit hash function of the
+      destination address points to a bit that is set in the hash register.
+
+  reject-broadcast:
+    type: boolean
+    required: false
+    description: |
+      Optional feature flag - Reject broadcast frames. When set, frames
+      addressed to the all-ones broadcast address will be rejected.
+
+  promiscuous-mode:
+    type: boolean
+    required: false
+    description: |
+      Optional feature flag - Enable promiscuous mode. When set, all valid
+      frames will be accepted.
+
+  discard-non-vlan:
+    type: boolean
+    required: false
+    description: |
+      Optional feature flag - Discard non-VLAN frames. When set, only VLAN
+      tagged frames will be passed to the address matching logic.
+
+  full-duplex:
+    type: boolean
+    required: false
+    description: |
+      Optional feature flag - Enables full duplex reception and transmission.
+
+  discard-rx-frame-ahb-unavail:
+    type: boolean
+    required: false
+    description: |
+      Optional feature flag - Discard received packets when no AHB resource
+      is available.
+
+  ahb-packet-endian-swap:
+    type: boolean
+    required: false
+    description: |
+      Optional feature flag - Enable AHB packet data endianness swap to big
+      endian. If this flag is not set, data will be little endian.
+
+  ahb-md-endian-swap:
+    type: boolean
+    required: false
+    description: |
+      Optional feature flag - Enable AHB management descriptor data
+      endianness swap to big endian.
If this flag is not set, data will be little + endian. diff --git a/include/dt-bindings/ethernet/xlnx_gem.h b/include/dt-bindings/ethernet/xlnx_gem.h new file mode 100644 index 00000000000..1ebe2b3f688 --- /dev/null +++ b/include/dt-bindings/ethernet/xlnx_gem.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2021, Weidmueller Interface GmbH & Co. KG + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_DT_BINDINGS_ETHERNET_XLNX_GEM_H_ +#define ZEPHYR_INCLUDE_DT_BINDINGS_ETHERNET_XLNX_GEM_H_ + +/* PHY auto-detection alias */ +#define XLNX_GEM_PHY_AUTO_DETECT 0 + +/* MDC divider values */ +#define XLNX_GEM_MDC_DIVIDER_8 0 /* LPD_LSBUS_CLK < 20 MHz */ +#define XLNX_GEM_MDC_DIVIDER_16 1 /* LPD_LSBUS_CLK 20 - 40 MHz */ +#define XLNX_GEM_MDC_DIVIDER_32 2 /* LPD_LSBUS_CLK 40 - 80 MHz */ +/* + * According to the ZynqMP's gem.network_config register documentation, + * divider /32 is to be used for a 100 MHz LPD LSBUS clock. + */ +#define XLNX_GEM_MDC_DIVIDER_48 3 /* LPD_LSBUS_CLK 80 - 120 MHz */ + +/* Link speed values */ +#define XLNX_GEM_LINK_SPEED_10MBIT 1 +#define XLNX_GEM_LINK_SPEED_100MBIT 2 +#define XLNX_GEM_LINK_SPEED_1GBIT 3 + +/* AMBA AHB data bus width */ +#define XLNX_GEM_AMBA_AHB_DBUS_WIDTH_32BIT 0 +#define XLNX_GEM_AMBA_AHB_DBUS_WIDTH_64BIT 1 +#define XLNX_GEM_AMBA_AHB_DBUS_WIDTH_128BIT 2 + +/* AMBA AHB burst length */ +#define XLNX_GEM_AMBA_AHB_BURST_SINGLE 1 +#define XLNX_GEM_AMBA_AHB_BURST_INCR4 4 +#define XLNX_GEM_AMBA_AHB_BURST_INCR8 8 +#define XLNX_GEM_AMBA_AHB_BURST_INCR16 16 + +/* Hardware RX buffer size */ +#define XLNX_GEM_HW_RX_BUFFER_SIZE_1KB 0 +#define XLNX_GEM_HW_RX_BUFFER_SIZE_2KB 1 +#define XLNX_GEM_HW_RX_BUFFER_SIZE_4KB 2 +#define XLNX_GEM_HW_RX_BUFFER_SIZE_8KB 3 + +#endif /* ZEPHYR_INCLUDE_DT_BINDINGS_ETHERNET_XLNX_GEM_H_ */
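Note (usage illustration, not part of the patch): the following devicetree fragment sketches how the binding and the XLNX_GEM_* dt-binding macros introduced above might be used in an SoC .dtsi or board overlay. The node label, unit address, reg block, interrupt specifier, clock-frequency value and buffer dimensions are illustrative placeholders loosely modelled on a ZynqMP GEM instance and must be taken from the target SoC/board documentation; only the property names and macros are defined by the files in this patch.

/*
 * Illustrative sketch only - reg, interrupts and clock-frequency are
 * placeholders, not values defined by this patch.
 */
#include <dt-bindings/ethernet/xlnx_gem.h>

gem0: ethernet@ff0b0000 {
	compatible = "xlnx,gem";
	reg = <0xff0b0000 0x1000>;
	interrupts = <0 57 4>;
	/* Frequency of the PLL feeding the GEM TX clock (placeholder) */
	clock-frequency = <1000000000>;
	/* Assuming an LPD_LSBUS clock in the 80 - 120 MHz range */
	mdc-divider = <XLNX_GEM_MDC_DIVIDER_48>;
	init-mdio-phy;
	mdio-phy-address = <XLNX_GEM_PHY_AUTO_DETECT>;
	phy-poll-interval = <1000>;
	link-speed = <XLNX_GEM_LINK_SPEED_1GBIT>;
	advertise-lower-link-speeds;
	amba-ahb-dbus-width = <XLNX_GEM_AMBA_AHB_DBUS_WIDTH_32BIT>;
	amba-ahb-burst-length = <XLNX_GEM_AMBA_AHB_BURST_INCR16>;
	hw-rx-buffer-size = <XLNX_GEM_HW_RX_BUFFER_SIZE_8KB>;
	hw-rx-buffer-offset = <0>;
	rx-buffer-descriptors = <32>;
	rx-buffer-size = <512>;
	tx-buffer-descriptors = <32>;
	tx-buffer-size = <512>;
	full-duplex;
	status = "okay";
};

mdio-phy-address is set to the auto-detect alias here; on a shared MDIO bus a fixed address between 1 and 32 may be preferable, as the auto-detection claims the first responding PHY.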