author		Adrian Chadd <adrian@FreeBSD.org>	2023-04-05 04:36:52 +0000
committer	Adrian Chadd <adrian@FreeBSD.org>	2025-02-21 02:48:12 +0000
commit		9f32893b05dabedc7f8332ec12e2a944b6543158 (patch)
tree		0a2c99c953ed363febcd09dcbcafcba9af8d47b2
parent		79979aa44d1b7fd5d04fd574ba8702f76c81c80c (diff)
qcom_ess_edma: Add the IPQ4018/IPQ4019 ethernet MAC/MDIO driver.
This adds the ESS EDMA driver introduced with the IPQ4018/IPQ4019. It provides a number of transmit and receive rings which can be mapped into virtual ethernet devices, which this driver supports.

It's partially integrated with the ar40xx etherswitch, which supplies the port and some filtering/VLAN offload functionality. This driver currently only supports the per-port options which allow for the virtual ethernet device mapping.

This was written by reverse engineering the functionality of the ethernet switch and ethernet driver support provided by Qualcomm Atheros via their OpenWrt contributions. All of the code was originally written by me.

Differential Revision: https://reviews.freebsd.org/D49027
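The attach path sizes itself from a handful of device tree properties: qcom,num_gmac on the EDMA node, plus per-GMAC child nodes each carrying a local-mac-address and a vlan_tag = <vlan-id port-mask> tuple (see qcom_ess_edma_gmac_parse() below). A hypothetical fragment for illustration only — the node name, unit address and values here are made up, not taken from any shipped dts:

    edma@c080000 {
        compatible = "qcom,ess-edma";
        /* reg/interrupts elided */
        qcom,num_gmac = <2>;

        gmac0 {
            local-mac-address = [00 00 00 00 00 00];
            vlan_tag = <1 0x1e>;  /* VLAN 1, switch ports 1-4 */
        };
        gmac1 {
            local-mac-address = [00 00 00 00 00 00];
            vlan_tag = <2 0x20>;  /* VLAN 2, switch port 5 */
        };
    };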
-rw-r--r--  sys/arm/qualcomm/std.ipq4018                 |   7
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma.c        | 985
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_debug.h  |  52
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_desc.c   | 351
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_desc.h   |  63
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_gmac.c   | 462
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_gmac.h   |  46
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c     | 752
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_hw.h     |  86
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_reg.h    | 429
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c     | 514
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_rx.h     |  51
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c     | 454
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_tx.h     |  50
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_var.h    | 258
15 files changed, 4560 insertions(+), 0 deletions(-)
diff --git a/sys/arm/qualcomm/std.ipq4018 b/sys/arm/qualcomm/std.ipq4018
index e6719efb9bce..6360a3ce25db 100644
--- a/sys/arm/qualcomm/std.ipq4018
+++ b/sys/arm/qualcomm/std.ipq4018
@@ -55,3 +55,10 @@ dev/qcom_tlmm/qcom_tlmm_pinmux.c optional qcom_tlmm_ipq4018
dev/qcom_tcsr/qcom_tcsr.c optional qcom_tcsr
dev/qcom_mdio/qcom_mdio_ipq4018.c optional qcom_mdio_ipq4018
+
+dev/qcom_ess_edma/qcom_ess_edma.c optional qcom_ess_edma
+dev/qcom_ess_edma/qcom_ess_edma_desc.c optional qcom_ess_edma
+dev/qcom_ess_edma/qcom_ess_edma_gmac.c optional qcom_ess_edma
+dev/qcom_ess_edma/qcom_ess_edma_hw.c optional qcom_ess_edma
+dev/qcom_ess_edma/qcom_ess_edma_rx.c optional qcom_ess_edma
+dev/qcom_ess_edma/qcom_ess_edma_tx.c optional qcom_ess_edma
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma.c b/sys/dev/qcom_ess_edma/qcom_ess_edma.c
new file mode 100644
index 000000000000..990bfe5ee074
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma.c
@@ -0,0 +1,985 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/smp.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+#include <net/ethernet.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/qcom_ess_edma/qcom_ess_edma_var.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_reg.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_hw.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_desc.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_rx.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_tx.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_debug.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_gmac.h>
+
+static int
+qcom_ess_edma_probe(device_t dev)
+{
+
+ if (! ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_is_compatible(dev, "qcom,ess-edma") == 0)
+ return (ENXIO);
+
+ device_set_desc(dev,
+ "Qualcomm Atheros IPQ4018/IPQ4019 Ethernet driver");
+ return (0);
+}
+
+static int
+qcom_ess_edma_release_intr(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_intr *intr)
+{
+
+ if (intr->irq_res == NULL)
+ return (0);
+
+ if (intr->irq_intr != NULL)
+ bus_teardown_intr(sc->sc_dev, intr->irq_res, intr->irq_intr);
+ if (intr->irq_res != NULL)
+ bus_release_resource(sc->sc_dev, SYS_RES_IRQ, intr->irq_rid,
+ intr->irq_res);
+
+ return (0);
+}
+
+static void
+qcom_ess_edma_tx_queue_xmit(struct qcom_ess_edma_softc *sc, int queue_id)
+{
+ struct qcom_ess_edma_tx_state *txs = &sc->sc_tx_state[queue_id];
+ int n = 0;
+ int ret;
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_TASK,
+ "%s: called; TX queue %d\n", __func__, queue_id);
+
+ EDMA_RING_LOCK_ASSERT(&sc->sc_tx_ring[queue_id]);
+
+ sc->sc_tx_ring[queue_id].stats.num_tx_xmit_defer++;
+
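+	/*
+	 * Clear the enqueue-pending flag before draining the ring so
+	 * an if_transmit caller that enqueues after this point will
+	 * schedule a fresh xmit task, rather than relying on this
+	 * pass seeing its frame.
+	 */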
+ (void) atomic_cmpset_int(&txs->enqueue_is_running, 1, 0);
+
+ /* Don't do any work if the ring is empty */
+ if (buf_ring_empty(txs->br))
+ return;
+
+ /*
+ * The ring isn't empty, dequeue frames and hand
+ * them to the hardware; defer updating the
+ * transmit ring pointer until we're done.
+ */
+ while (! buf_ring_empty(txs->br)) {
+ if_t ifp;
+ struct qcom_ess_edma_gmac *gmac;
+ struct mbuf *m;
+
+ m = buf_ring_peek_clear_sc(txs->br);
+ if (m == NULL)
+ break;
+
+ ifp = m->m_pkthdr.rcvif;
+ gmac = if_getsoftc(ifp);
+
+ /*
+ * The only way we'll know if we have space is to
+	 * try and transmit it.
+ */
+ ret = qcom_ess_edma_tx_ring_frame(sc, queue_id, &m,
+ gmac->port_mask, gmac->vlan_id);
+ if (ret == 0) {
+ if_inc_counter(gmac->ifp, IFCOUNTER_OPACKETS, 1);
+ buf_ring_advance_sc(txs->br);
+ } else {
+ /* Put whatever we tried to transmit back */
+ if_inc_counter(gmac->ifp, IFCOUNTER_OERRORS, 1);
+ buf_ring_putback_sc(txs->br, m);
+ break;
+ }
+ n++;
+ }
+
+ /*
+ * Only push the updated descriptor ring stuff to the hardware
+ * if we actually queued something.
+ */
+ if (n != 0)
+ (void) qcom_ess_edma_tx_ring_frame_update(sc, queue_id);
+}
+
+/*
+ * Enqueued when a deferred TX needs to happen.
+ */
+static void
+qcom_ess_edma_tx_queue_xmit_task(void *arg, int npending)
+{
+ struct qcom_ess_edma_tx_state *txs = arg;
+ struct qcom_ess_edma_softc *sc = txs->sc;
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
+ "%s: called; TX queue %d\n", __func__, txs->queue_id);
+
+ EDMA_RING_LOCK(&sc->sc_tx_ring[txs->queue_id]);
+
+ sc->sc_tx_ring[txs->queue_id].stats.num_tx_xmit_task++;
+ qcom_ess_edma_tx_queue_xmit(sc, txs->queue_id);
+
+ EDMA_RING_UNLOCK(&sc->sc_tx_ring[txs->queue_id]);
+}
+
+/*
+ * Enqueued when a TX completion interrupt occurs.
+ */
+static void
+qcom_ess_edma_tx_queue_complete_task(void *arg, int npending)
+{
+ struct qcom_ess_edma_tx_state *txs = arg;
+ struct qcom_ess_edma_softc *sc = txs->sc;
+
+ /* Transmit queue */
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
+ "%s: called; TX queue %d\n", __func__, txs->queue_id);
+
+ EDMA_RING_LOCK(&sc->sc_tx_ring[txs->queue_id]);
+
+ /*
+ * Complete/free tx mbufs.
+ */
+ (void) qcom_ess_edma_tx_ring_complete(sc, txs->queue_id);
+
+ /*
+ * ACK the interrupt.
+ */
+ (void) qcom_ess_edma_hw_intr_tx_ack(sc, txs->queue_id);
+
+ /*
+ * Re-enable the interrupt.
+ */
+ (void) qcom_ess_edma_hw_intr_tx_intr_set_enable(sc, txs->queue_id,
+ true);
+
+ /*
+ * Do any pending TX work if there's any buffers in the ring.
+ */
+ if (! buf_ring_empty(txs->br))
+ qcom_ess_edma_tx_queue_xmit(sc, txs->queue_id);
+
+ EDMA_RING_UNLOCK(&sc->sc_tx_ring[txs->queue_id]);
+}
+
+static int
+qcom_ess_edma_setup_tx_state(struct qcom_ess_edma_softc *sc, int txq, int cpu)
+{
+ struct qcom_ess_edma_tx_state *txs;
+ struct qcom_ess_edma_desc_ring *ring;
+ cpuset_t mask;
+
+ txs = &sc->sc_tx_state[txq];
+ ring = &sc->sc_tx_ring[txq];
+
+ snprintf(txs->label, QCOM_ESS_EDMA_LABEL_SZ - 1, "txq%d_compl", txq);
+
+ CPU_ZERO(&mask);
+ CPU_SET(cpu, &mask);
+
+ txs->queue_id = txq;
+ txs->sc = sc;
+ txs->completion_tq = taskqueue_create_fast(txs->label, M_NOWAIT,
+ taskqueue_thread_enqueue, &txs->completion_tq);
+#if 0
+ taskqueue_start_threads_cpuset(&txs->completion_tq, 1, PI_NET,
+ &mask, "%s", txs->label);
+#else
+ taskqueue_start_threads(&txs->completion_tq, 1, PI_NET,
+ "%s", txs->label);
+#endif
+
+ TASK_INIT(&txs->completion_task, 0,
+ qcom_ess_edma_tx_queue_complete_task, txs);
+ TASK_INIT(&txs->xmit_task, 0,
+ qcom_ess_edma_tx_queue_xmit_task, txs);
+
+ txs->br = buf_ring_alloc(EDMA_TX_BUFRING_SIZE, M_DEVBUF, M_WAITOK,
+ &ring->mtx);
+
+ return (0);
+}
+
+/*
+ * Free the transmit ring state.
+ *
+ * This assumes that the taskqueues have been drained and DMA has
+ * stopped - all we're doing here is freeing the allocated resources.
+ */
+static int
+qcom_ess_edma_free_tx_state(struct qcom_ess_edma_softc *sc, int txq)
+{
+ struct qcom_ess_edma_tx_state *txs;
+
+ txs = &sc->sc_tx_state[txq];
+
+ taskqueue_free(txs->completion_tq);
+
+ while (! buf_ring_empty(txs->br)) {
+ struct mbuf *m;
+
+ m = buf_ring_dequeue_sc(txs->br);
+ m_freem(m);
+ }
+
+ buf_ring_free(txs->br, M_DEVBUF);
+
+ return (0);
+}
+
+static void
+qcom_ess_edma_rx_queue_complete_task(void *arg, int npending)
+{
+ struct qcom_ess_edma_rx_state *rxs = arg;
+ struct qcom_ess_edma_softc *sc = rxs->sc;
+ struct mbufq mq;
+
+ mbufq_init(&mq, EDMA_RX_RING_SIZE);
+
+ /* Receive queue */
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
+ "%s: called; RX queue %d\n",
+ __func__, rxs->queue_id);
+
+ EDMA_RING_LOCK(&sc->sc_rx_ring[rxs->queue_id]);
+
+ /*
+ * Do receive work, get completed mbufs.
+ */
+ (void) qcom_ess_edma_rx_ring_complete(sc, rxs->queue_id, &mq);
+
+ /*
+ * ACK the interrupt.
+ */
+ (void) qcom_ess_edma_hw_intr_rx_ack(sc, rxs->queue_id);
+
+ /*
+ * Re-enable interrupt for this ring.
+ */
+ (void) qcom_ess_edma_hw_intr_rx_intr_set_enable(sc, rxs->queue_id,
+ true);
+
+ EDMA_RING_UNLOCK(&sc->sc_rx_ring[rxs->queue_id]);
+
+ /* Push frames into networking stack */
+ (void) qcom_ess_edma_gmac_receive_frames(sc, rxs->queue_id, &mq);
+}
+
+static int
+qcom_ess_edma_setup_rx_state(struct qcom_ess_edma_softc *sc, int rxq, int cpu)
+{
+ struct qcom_ess_edma_rx_state *rxs;
+ cpuset_t mask;
+
+ rxs = &sc->sc_rx_state[rxq];
+
+ snprintf(rxs->label, QCOM_ESS_EDMA_LABEL_SZ - 1, "rxq%d_compl", rxq);
+
+ CPU_ZERO(&mask);
+ CPU_SET(cpu, &mask);
+
+ rxs->queue_id = rxq;
+ rxs->sc = sc;
+ rxs->completion_tq = taskqueue_create_fast(rxs->label, M_NOWAIT,
+ taskqueue_thread_enqueue, &rxs->completion_tq);
+#if 0
+ taskqueue_start_threads_cpuset(&rxs->completion_tq, 1, PI_NET,
+ &mask, "%s", rxs->label);
+#else
+ taskqueue_start_threads(&rxs->completion_tq, 1, PI_NET,
+ "%s", rxs->label);
+#endif
+
+ TASK_INIT(&rxs->completion_task, 0,
+ qcom_ess_edma_rx_queue_complete_task, rxs);
+ return (0);
+}
+
+/*
+ * Free the receive ring state.
+ *
+ * This assumes that the taskqueues have been drained and DMA has
+ * stopped - all we're doing here is freeing the allocated resources.
+ */
+
+static int
+qcom_ess_edma_free_rx_state(struct qcom_ess_edma_softc *sc, int rxq)
+{
+ struct qcom_ess_edma_rx_state *rxs;
+
+ rxs = &sc->sc_rx_state[rxq];
+
+ taskqueue_free(rxs->completion_tq);
+
+ return (0);
+}
+
+
+static int
+qcom_ess_edma_detach(device_t dev)
+{
+ struct qcom_ess_edma_softc *sc = device_get_softc(dev);
+ int i;
+
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_IRQS; i++) {
+ (void) qcom_ess_edma_release_intr(sc, &sc->sc_tx_irq[i]);
+ }
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_IRQS; i++) {
+ (void) qcom_ess_edma_release_intr(sc, &sc->sc_rx_irq[i]);
+ }
+
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_RINGS; i++) {
+ (void) qcom_ess_edma_free_tx_state(sc, i);
+		(void) qcom_ess_edma_tx_ring_clean(sc, &sc->sc_tx_ring[i]);
+ (void) qcom_ess_edma_desc_ring_free(sc, &sc->sc_tx_ring[i]);
+ }
+
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
+ (void) qcom_ess_edma_free_rx_state(sc, i);
+ (void) qcom_ess_edma_rx_ring_clean(sc, &sc->sc_rx_ring[i]);
+ (void) qcom_ess_edma_desc_ring_free(sc, &sc->sc_rx_ring[i]);
+ }
+
+ if (sc->sc_dma_tag) {
+ bus_dma_tag_destroy(sc->sc_dma_tag);
+ sc->sc_dma_tag = NULL;
+ }
+
+ if (sc->sc_mem_res)
+ bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
+ sc->sc_mem_res);
+ mtx_destroy(&sc->sc_mtx);
+
+	return (0);
+}
+
+static int
+qcom_ess_edma_filter(void *arg)
+{
+ struct qcom_ess_edma_intr *intr = arg;
+ struct qcom_ess_edma_softc *sc = intr->sc;
+
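+	/*
+	 * IRQ rids are allocated with the TX completion interrupts
+	 * first (0..QCOM_ESS_EDMA_NUM_TX_IRQS-1) and the RX
+	 * interrupts directly after them; recover the queue id
+	 * from the rid accordingly.
+	 */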
+ if (intr->irq_rid < QCOM_ESS_EDMA_NUM_TX_IRQS) {
+ int tx_queue = intr->irq_rid;
+
+ intr->stats.num_intr++;
+
+ /*
+ * Disable the interrupt for this ring.
+ */
+ (void) qcom_ess_edma_hw_intr_tx_intr_set_enable(sc, tx_queue,
+ false);
+
+ /*
+ * Schedule taskqueue to run for this queue.
+ */
+ taskqueue_enqueue(sc->sc_tx_state[tx_queue].completion_tq,
+ &sc->sc_tx_state[tx_queue].completion_task);
+
+ return (FILTER_HANDLED);
+ } else {
+ int rx_queue = intr->irq_rid - QCOM_ESS_EDMA_NUM_TX_IRQS;
+
+ intr->stats.num_intr++;
+
+ /*
+ * Disable the interrupt for this ring.
+ */
+ (void) qcom_ess_edma_hw_intr_rx_intr_set_enable(sc, rx_queue,
+ false);
+
+ /*
+ * Schedule taskqueue to run for this queue.
+ */
+ taskqueue_enqueue(sc->sc_rx_state[rx_queue].completion_tq,
+ &sc->sc_rx_state[rx_queue].completion_task);
+
+ return (FILTER_HANDLED);
+ }
+}
+
+static int
+qcom_ess_edma_setup_intr(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_intr *intr, int rid, int cpu_id)
+{
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
+ "%s: setting up interrupt id %d\n", __func__, rid);
+ intr->sc = sc;
+ intr->irq_rid = rid;
+ intr->irq_res = bus_alloc_resource_any(sc->sc_dev,
+ SYS_RES_IRQ, &intr->irq_rid, RF_ACTIVE);
+ if (intr->irq_res == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR: couldn't allocate IRQ %d\n",
+ rid);
+ return (ENXIO);
+ }
+
+ if ((bus_setup_intr(sc->sc_dev, intr->irq_res,
+ INTR_TYPE_NET | INTR_MPSAFE,
+ qcom_ess_edma_filter, NULL, intr,
+ &intr->irq_intr))) {
+ device_printf(sc->sc_dev,
+ "ERROR: unable to register interrupt handler for"
+ " IRQ %d\n", rid);
+ return (ENXIO);
+ }
+
+ /* If requested, bind the interrupt to the given CPU. */
+ if (cpu_id != -1) {
+ if (intr_bind_irq(sc->sc_dev, intr->irq_res, cpu_id) != 0) {
+ device_printf(sc->sc_dev,
+ "ERROR: unable to bind IRQ %d to CPU %d\n",
+ rid, cpu_id);
+ }
+ /* Note: don't completely error out here */
+ }
+
+ return (0);
+}
+
+static int
+qcom_ess_edma_sysctl_dump_state(SYSCTL_HANDLER_ARGS)
+{
+ struct qcom_ess_edma_softc *sc = arg1;
+ int val = 0;
+ int error;
+ int i;
+
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ if (val == 0)
+ return (0);
+
+ EDMA_LOCK(sc);
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
+ device_printf(sc->sc_dev,
+ "RXQ[%d]: prod=%u, cons=%u, hw prod=%u, hw cons=%u,"
+ " REG_SW_CONS_IDX=0x%08x\n",
+ i,
+ sc->sc_rx_ring[i].next_to_fill,
+ sc->sc_rx_ring[i].next_to_clean,
+ EDMA_REG_READ(sc,
+ EDMA_REG_RFD_IDX_Q(i)) & EDMA_RFD_PROD_IDX_BITS,
+ qcom_ess_edma_hw_rfd_get_cons_index(sc, i),
+ EDMA_REG_READ(sc, EDMA_REG_RX_SW_CONS_IDX_Q(i)));
+ }
+
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_RINGS; i++) {
+ device_printf(sc->sc_dev,
+ "TXQ[%d]: prod=%u, cons=%u, hw prod=%u, hw cons=%u\n",
+ i,
+ sc->sc_tx_ring[i].next_to_fill,
+ sc->sc_tx_ring[i].next_to_clean,
+ (EDMA_REG_READ(sc, EDMA_REG_TPD_IDX_Q(i))
+ >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK,
+ EDMA_REG_READ(sc, EDMA_REG_TX_SW_CONS_IDX_Q(i)));
+ }
+
+ device_printf(sc->sc_dev, "EDMA_REG_TXQ_CTRL=0x%08x\n",
+ EDMA_REG_READ(sc, EDMA_REG_TXQ_CTRL));
+ device_printf(sc->sc_dev, "EDMA_REG_RXQ_CTRL=0x%08x\n",
+ EDMA_REG_READ(sc, EDMA_REG_RXQ_CTRL));
+ device_printf(sc->sc_dev, "EDMA_REG_RX_DESC0=0x%08x\n",
+ EDMA_REG_READ(sc, EDMA_REG_RX_DESC0));
+ device_printf(sc->sc_dev, "EDMA_REG_RX_DESC1=0x%08x\n",
+ EDMA_REG_READ(sc, EDMA_REG_RX_DESC1));
+ device_printf(sc->sc_dev, "EDMA_REG_RX_ISR=0x%08x\n",
+ EDMA_REG_READ(sc, EDMA_REG_RX_ISR));
+ device_printf(sc->sc_dev, "EDMA_REG_TX_ISR=0x%08x\n",
+ EDMA_REG_READ(sc, EDMA_REG_TX_ISR));
+ device_printf(sc->sc_dev, "EDMA_REG_MISC_ISR=0x%08x\n",
+ EDMA_REG_READ(sc, EDMA_REG_MISC_ISR));
+ device_printf(sc->sc_dev, "EDMA_REG_WOL_ISR=0x%08x\n",
+ EDMA_REG_READ(sc, EDMA_REG_WOL_ISR));
+
+ EDMA_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+qcom_ess_edma_sysctl_dump_stats(SYSCTL_HANDLER_ARGS)
+{
+ struct qcom_ess_edma_softc *sc = arg1;
+ int val = 0;
+ int error;
+ int i;
+
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ if (val == 0)
+ return (0);
+
+ EDMA_LOCK(sc);
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
+ device_printf(sc->sc_dev,
+ "RXQ[%d]: num_added=%llu, num_cleaned=%llu,"
+ " num_dropped=%llu, num_enqueue_full=%llu,"
+ " num_rx_no_gmac=%llu, tx_mapfail=%llu,"
+ " num_tx_maxfrags=%llu, num_rx_ok=%llu\n",
+ i,
+ sc->sc_rx_ring[i].stats.num_added,
+ sc->sc_rx_ring[i].stats.num_cleaned,
+ sc->sc_rx_ring[i].stats.num_dropped,
+ sc->sc_rx_ring[i].stats.num_enqueue_full,
+ sc->sc_rx_ring[i].stats.num_rx_no_gmac,
+ sc->sc_rx_ring[i].stats.num_tx_mapfail,
+ sc->sc_rx_ring[i].stats.num_tx_maxfrags,
+ sc->sc_rx_ring[i].stats.num_rx_ok);
+ }
+
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_RINGS; i++) {
+ device_printf(sc->sc_dev,
+ "TXQ[%d]: num_added=%llu, num_cleaned=%llu,"
+ " num_dropped=%llu, num_enqueue_full=%llu,"
+ " tx_mapfail=%llu, tx_complete=%llu, tx_xmit_defer=%llu,"
+ " tx_xmit_task=%llu, num_tx_maxfrags=%llu,"
+ " num_tx_ok=%llu\n",
+ i,
+ sc->sc_tx_ring[i].stats.num_added,
+ sc->sc_tx_ring[i].stats.num_cleaned,
+ sc->sc_tx_ring[i].stats.num_dropped,
+ sc->sc_tx_ring[i].stats.num_enqueue_full,
+ sc->sc_tx_ring[i].stats.num_tx_mapfail,
+ sc->sc_tx_ring[i].stats.num_tx_complete,
+ sc->sc_tx_ring[i].stats.num_tx_xmit_defer,
+ sc->sc_tx_ring[i].stats.num_tx_xmit_task,
+ sc->sc_tx_ring[i].stats.num_tx_maxfrags,
+ sc->sc_tx_ring[i].stats.num_tx_ok);
+ }
+
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_IRQS; i++) {
+ device_printf(sc->sc_dev, "INTR_RXQ[%d]: num_intr=%llu\n",
+ i,
+ sc->sc_rx_irq[i].stats.num_intr);
+ }
+
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_IRQS; i++) {
+ device_printf(sc->sc_dev, "INTR_TXQ[%d]: num_intr=%llu\n",
+ i,
+ sc->sc_tx_irq[i].stats.num_intr);
+ }
+
+ EDMA_UNLOCK(sc);
+
+ return (0);
+}
+
+
+static int
+qcom_ess_edma_sysctl_tx_intmit(SYSCTL_HANDLER_ARGS)
+{
+ struct qcom_ess_edma_softc *sc = arg1;
+ uint32_t usec;
+ int val = 0;
+ int error;
+
+ EDMA_LOCK(sc);
+ (void) qcom_ess_edma_hw_get_tx_intr_moderation(sc, &usec);
+ EDMA_UNLOCK(sc);
+
+ val = usec;
+
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ goto finish;
+
+ EDMA_LOCK(sc);
+ error = qcom_ess_edma_hw_set_tx_intr_moderation(sc, (uint32_t) val);
+ EDMA_UNLOCK(sc);
+finish:
+	return (error);
+}
+
+
+static int
+qcom_ess_edma_attach_sysctl(struct qcom_ess_edma_softc *sc)
+{
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
+
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "debug", CTLFLAG_RW, &sc->sc_debug, 0,
+ "debugging flags");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "state", CTLTYPE_INT | CTLFLAG_RW, sc,
+ 0, qcom_ess_edma_sysctl_dump_state, "I", "");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "stats", CTLTYPE_INT | CTLFLAG_RW, sc,
+ 0, qcom_ess_edma_sysctl_dump_stats, "I", "");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "tx_intmit", CTLTYPE_INT | CTLFLAG_RW, sc,
+ 0, qcom_ess_edma_sysctl_tx_intmit, "I", "");
+
+ return (0);
+}
+
+static int
+qcom_ess_edma_attach(device_t dev)
+{
+ struct qcom_ess_edma_softc *sc = device_get_softc(dev);
+ int i, ret;
+
+ mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
+
+ sc->sc_dev = dev;
+ sc->sc_debug = 0;
+
+ (void) qcom_ess_edma_attach_sysctl(sc);
+
+ /* Create parent DMA tag. */
+ ret = bus_dma_tag_create(
+ bus_get_dma_tag(sc->sc_dev), /* parent */
+ 1, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
+ 0, /* nsegments */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->sc_dma_tag);
+ if (ret != 0) {
+ device_printf(sc->sc_dev,
+ "ERROR: failed to create parent DMA tag\n");
+ goto error;
+ }
+
+ /* Map control/status registers. */
+ sc->sc_mem_rid = 0;
+ sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &sc->sc_mem_rid, RF_ACTIVE);
+
+ if (sc->sc_mem_res == NULL) {
+ device_printf(dev, "ERROR: couldn't map MMIO space\n");
+ goto error;
+ }
+
+ sc->sc_mem_res_size = (size_t) bus_get_resource_count(dev,
+ SYS_RES_MEMORY, sc->sc_mem_rid);
+ if (sc->sc_mem_res_size == 0) {
+ device_printf(dev, "%s: failed to get device memory size\n",
+ __func__);
+ goto error;
+ }
+
+ /*
+ * How many TX queues per CPU, for figuring out flowid/CPU
+ * mapping.
+ */
+ sc->sc_config.num_tx_queue_per_cpu =
+ QCOM_ESS_EDMA_NUM_TX_RINGS / mp_ncpus;
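+	/* e.g. 16 TX rings on a 4 CPU system gives 4 queues per CPU. */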
+
+ /* Allocate TX IRQs */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_IRQS; i++) {
+ int cpu_id;
+
+ /*
+ * The current mapping in the if_transmit() path
+ * will map mp_ncpu groups of flowids to the TXQs.
+ * So for a 4 CPU system the first four will be CPU 0,
+ * the second four will be CPU 1, etc.
+ */
+ cpu_id = qcom_ess_edma_tx_queue_to_cpu(sc, i);
+ if (qcom_ess_edma_setup_intr(sc, &sc->sc_tx_irq[i],
+ i, cpu_id) != 0)
+ goto error;
+ if (bootverbose)
+ device_printf(sc->sc_dev,
+ "mapping TX IRQ %d to CPU %d\n",
+ i, cpu_id);
+ }
+
+ /* Allocate RX IRQs */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_IRQS; i++) {
+ int cpu_id = qcom_ess_edma_rx_queue_to_cpu(sc, i);
+ if (qcom_ess_edma_setup_intr(sc, &sc->sc_rx_irq[i],
+ i + QCOM_ESS_EDMA_NUM_TX_IRQS, cpu_id) != 0)
+ goto error;
+ if (bootverbose)
+ device_printf(sc->sc_dev,
+ "mapping RX IRQ %d to CPU %d\n",
+ i, cpu_id);
+ }
+
+ /* Default receive frame size - before ETHER_ALIGN hack */
+ sc->sc_config.rx_buf_size = 2048;
+ sc->sc_config.rx_buf_ether_align = true;
+
+	/* Default RSS parameters */
+ sc->sc_config.rss_type =
+ EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP
+ | EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP
+ | EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;
+
+ /* Default queue parameters */
+ sc->sc_config.tx_ring_count = EDMA_TX_RING_SIZE;
+ sc->sc_config.rx_ring_count = EDMA_RX_RING_SIZE;
+
+ /* Default interrupt masks */
+ sc->sc_config.rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;
+ sc->sc_config.tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
+ sc->sc_state.misc_intr_mask = 0;
+ sc->sc_state.wol_intr_mask = 0;
+ sc->sc_state.intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;
+
+ /*
+ * Parse out the gmac count so we can start parsing out
+ * the gmac list and create us some ifnets.
+ */
+ if (OF_getencprop(ofw_bus_get_node(dev), "qcom,num_gmac",
+ &sc->sc_config.num_gmac, sizeof(uint32_t)) > 0) {
+ device_printf(sc->sc_dev, "Creating %d GMACs\n",
+ sc->sc_config.num_gmac);
+ } else {
+ device_printf(sc->sc_dev, "Defaulting to 1 GMAC\n");
+ sc->sc_config.num_gmac = 1;
+ }
+ if (sc->sc_config.num_gmac > QCOM_ESS_EDMA_MAX_NUM_GMACS) {
+ device_printf(sc->sc_dev, "Capping GMACs to %d\n",
+ QCOM_ESS_EDMA_MAX_NUM_GMACS);
+ sc->sc_config.num_gmac = QCOM_ESS_EDMA_MAX_NUM_GMACS;
+ }
+
+ /*
+ * And now, create some gmac entries here; we'll create the
+ * ifnet's once this is all done.
+ */
+ for (i = 0; i < sc->sc_config.num_gmac; i++) {
+ ret = qcom_ess_edma_gmac_parse(sc, i);
+ if (ret != 0) {
+ device_printf(sc->sc_dev,
+ "Failed to parse gmac%d\n", i);
+ goto error;
+ }
+ }
+
+ /* allocate tx rings */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_RINGS; i++) {
+ char label[QCOM_ESS_EDMA_LABEL_SZ];
+ int cpu_id;
+
+ snprintf(label, QCOM_ESS_EDMA_LABEL_SZ - 1, "tx_ring%d", i);
+ if (qcom_ess_edma_desc_ring_setup(sc, &sc->sc_tx_ring[i],
+ label,
+ sc->sc_config.tx_ring_count,
+ sizeof(struct qcom_ess_edma_sw_desc_tx),
+ sizeof(struct qcom_ess_edma_tx_desc),
+ QCOM_ESS_EDMA_MAX_TXFRAGS,
+ ESS_EDMA_TX_BUFFER_ALIGN) != 0)
+ goto error;
+ if (qcom_ess_edma_tx_ring_setup(sc, &sc->sc_tx_ring[i]) != 0)
+ goto error;
+
+ /* Same CPU as the interrupts for now */
+ cpu_id = qcom_ess_edma_tx_queue_to_cpu(sc, i);
+
+ if (qcom_ess_edma_setup_tx_state(sc, i, cpu_id) != 0)
+ goto error;
+ }
+
+ /* allocate rx rings */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
+ char label[QCOM_ESS_EDMA_LABEL_SZ];
+ int cpu_id;
+
+ snprintf(label, QCOM_ESS_EDMA_LABEL_SZ - 1, "rx_ring%d", i);
+ if (qcom_ess_edma_desc_ring_setup(sc, &sc->sc_rx_ring[i],
+ label,
+ sc->sc_config.rx_ring_count,
+ sizeof(struct qcom_ess_edma_sw_desc_rx),
+ sizeof(struct qcom_ess_edma_rx_free_desc),
+ 1,
+ ESS_EDMA_RX_BUFFER_ALIGN) != 0)
+ goto error;
+ if (qcom_ess_edma_rx_ring_setup(sc, &sc->sc_rx_ring[i]) != 0)
+ goto error;
+
+ /* Same CPU as the interrupts for now */
+ cpu_id = qcom_ess_edma_rx_queue_to_cpu(sc, i);
+
+ if (qcom_ess_edma_setup_rx_state(sc, i, cpu_id) != 0)
+ goto error;
+ }
+
+ /*
+ * map the gmac instances <-> port masks, so incoming frames know
+ * where they need to be forwarded to.
+ */
+ for (i = 0; i < QCOM_ESS_EDMA_MAX_NUM_PORTS; i++)
+ sc->sc_gmac_port_map[i] = -1;
+ for (i = 0; i < sc->sc_config.num_gmac; i++) {
+ ret = qcom_ess_edma_gmac_setup_port_mapping(sc, i);
+ if (ret != 0) {
+ device_printf(sc->sc_dev,
+			    "Failed to setup port mapping for gmac%d\n", i);
+ goto error;
+ }
+ }
+
+
+ /* Create ifnets */
+ for (i = 0; i < sc->sc_config.num_gmac; i++) {
+ ret = qcom_ess_edma_gmac_create_ifnet(sc, i);
+ if (ret != 0) {
+ device_printf(sc->sc_dev,
+ "Failed to create ifnet for gmac%d\n", i);
+ goto error;
+ }
+ }
+
+ /*
+ * NOTE: If there's no ess-switch / we're a single phy, we
+ * still need to reset the ess fabric to a fixed useful state.
+ * Otherwise we won't be able to pass packets to anything.
+ *
+ * Worry about this later.
+ */
+
+ EDMA_LOCK(sc);
+
+ /* disable all interrupts */
+ ret = qcom_ess_edma_hw_intr_disable(sc);
+ if (ret != 0) {
+ device_printf(sc->sc_dev,
+ "Failed to disable interrupts (%d)\n",
+ ret);
+ goto error_locked;
+ }
+
+ /* reset edma */
+ ret = qcom_ess_edma_hw_stop(sc);
+
+ /* fill RX ring here, explicitly */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
+ EDMA_RING_LOCK(&sc->sc_rx_ring[i]);
+ (void) qcom_ess_edma_rx_ring_fill(sc, i,
+ sc->sc_config.rx_ring_count);
+ EDMA_RING_UNLOCK(&sc->sc_rx_ring[i]);
+ }
+
+ /* configure TX/RX rings; RSS config; initial interrupt rates, etc */
+ ret = qcom_ess_edma_hw_setup(sc);
+ ret = qcom_ess_edma_hw_setup_tx(sc);
+ ret = qcom_ess_edma_hw_setup_rx(sc);
+ ret = qcom_ess_edma_hw_setup_txrx_desc_rings(sc);
+
+ /* setup rss indirection table */
+ ret = qcom_ess_edma_hw_configure_rss_table(sc);
+
+ /* setup load balancing table */
+ ret = qcom_ess_edma_hw_configure_load_balance_table(sc);
+
+ /* configure virtual queue */
+ ret = qcom_ess_edma_hw_configure_tx_virtual_queue(sc);
+
+ /* configure AXI burst max */
+ ret = qcom_ess_edma_hw_configure_default_axi_transaction_size(sc);
+
+ /* enable IRQs */
+ ret = qcom_ess_edma_hw_intr_enable(sc);
+
+ /* enable TX control */
+ ret = qcom_ess_edma_hw_tx_enable(sc);
+
+ /* enable RX control */
+ ret = qcom_ess_edma_hw_rx_enable(sc);
+
+ EDMA_UNLOCK(sc);
+
+ return (0);
+
+error_locked:
+ EDMA_UNLOCK(sc);
+error:
+ qcom_ess_edma_detach(dev);
+ return (ENXIO);
+}
+
+static device_method_t qcom_ess_edma_methods[] = {
+ /* Driver */
+ DEVMETHOD(device_probe, qcom_ess_edma_probe),
+ DEVMETHOD(device_attach, qcom_ess_edma_attach),
+ DEVMETHOD(device_detach, qcom_ess_edma_detach),
+
+ {0, 0},
+};
+
+static driver_t qcom_ess_edma_driver = {
+ "essedma",
+ qcom_ess_edma_methods,
+ sizeof(struct qcom_ess_edma_softc),
+};
+
+DRIVER_MODULE(qcom_ess_edma, simplebus, qcom_ess_edma_driver, NULL, 0);
+DRIVER_MODULE(qcom_ess_edma, ofwbus, qcom_ess_edma_driver, NULL, 0);
+MODULE_DEPEND(qcom_ess_edma, ether, 1, 1, 1);
+MODULE_VERSION(qcom_ess_edma, 1);
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_debug.h b/sys/dev/qcom_ess_edma/qcom_ess_edma_debug.h
new file mode 100644
index 000000000000..325f03f12cb0
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_debug.h
@@ -0,0 +1,52 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __QCOM_ESS_EDMA_DEBUG_H__
+#define __QCOM_ESS_EDMA_DEBUG_H__
+
+#define QCOM_ESS_EDMA_DBG_INTERRUPT 0x00000001
+#define QCOM_ESS_EDMA_DBG_DESCRIPTOR_SETUP 0x00000002
+#define QCOM_ESS_EDMA_DBG_RX_RING_MGMT 0x00000004
+#define QCOM_ESS_EDMA_DBG_TX_RING_MGMT 0x00000008
+#define QCOM_ESS_EDMA_DBG_RX_FRAME 0x00000010
+#define QCOM_ESS_EDMA_DBG_RX_RING 0x00000020
+#define QCOM_ESS_EDMA_DBG_TX_FRAME 0x00000040
+#define QCOM_ESS_EDMA_DBG_TX_RING 0x00000080
+#define QCOM_ESS_EDMA_DBG_TX_RING_COMPLETE 0x00000100
+#define QCOM_ESS_EDMA_DBG_TX_TASK 0x00000200
+#define QCOM_ESS_EDMA_DBG_TX_FRAME_ERROR 0x00000400
+#define QCOM_ESS_EDMA_DBG_STATE 0x00000800
+
+#define QCOM_ESS_EDMA_DPRINTF(sc, flags, ...) \
+ do { \
+ if ((sc)->sc_debug & (flags)) \
+ device_printf((sc)->sc_dev, __VA_ARGS__); \
+ } while (0)
+
+#endif /* __QCOM_ESS_EDMA_DEBUG_H__ */
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_desc.c b/sys/dev/qcom_ess_edma/qcom_ess_edma_desc.c
new file mode 100644
index 000000000000..11ce74137c32
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_desc.c
@@ -0,0 +1,351 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+#include <net/ethernet.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/qcom_ess_edma/qcom_ess_edma_var.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_reg.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_hw.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_desc.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_debug.h>
+
+static void
+qcom_ess_edma_desc_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
+ int error)
+{
+ if (error != 0)
+ return;
+ KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
+ *(bus_addr_t *)arg = segs[0].ds_addr;
+}
+
+/*
+ * Initialise the given descriptor ring.
+ */
+int
+qcom_ess_edma_desc_ring_setup(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring,
+ char *label,
+ int count,
+ int sw_desc_size,
+ int hw_desc_size,
+ int num_segments,
+ int buffer_align)
+{
+ int error;
+ int hw_ring_size;
+
+ ring->label = strdup(label, M_TEMP);
+ if (ring->label == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR: failed to strdup label\n");
+		/* Nothing else has been allocated yet; just return. */
+		return (ENOMEM);
+ }
+
+ mtx_init(&ring->mtx, ring->label, NULL, MTX_DEF);
+
+ hw_ring_size = count * hw_desc_size;
+
+	/*
+	 * Round the hardware ring allocation up to a whole page so
+	 * the ring doesn't share a partial cacheline with other
+	 * allocations, which would cause bounce buffers to be used.
+	 */
+	hw_ring_size = roundup(hw_ring_size, PAGE_SIZE);
+
+ /*
+ * For now set it to 4 byte alignment, no max size.
+ */
+ ring->ring_align = EDMA_DESC_RING_ALIGN;
+ error = bus_dma_tag_create(
+ sc->sc_dma_tag, /* parent */
+ EDMA_DESC_RING_ALIGN, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ hw_ring_size, /* maxsize */
+ 1, /* nsegments */
+ hw_ring_size, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &ring->hw_ring_dma_tag);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "ERROR: failed to create descriptor DMA tag (%d)\n",
+ error);
+ goto error;
+ }
+
+ /*
+	 * Buffer ring - use the caller-supplied alignment.
+ */
+ ring->buffer_align = buffer_align;
+ error = bus_dma_tag_create(
+ sc->sc_dma_tag, /* parent */
+ buffer_align, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ EDMA_DESC_MAX_BUFFER_SIZE * num_segments, /* maxsize */
+ num_segments, /* nsegments */
+ EDMA_DESC_MAX_BUFFER_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &ring->buffer_dma_tag);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "ERROR: failed to create buffer DMA tag (%d)\n",
+ error);
+ goto error;
+ }
+
+ /*
+ * Allocate software descriptors
+ */
+ ring->sw_desc = mallocarray(count, sw_desc_size, M_TEMP,
+ M_NOWAIT | M_ZERO);
+ if (ring->sw_desc == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR: failed to allocate sw_desc\n");
+		error = ENOMEM;
+		goto error;
+ }
+
+ /*
+ * Allocate hardware descriptors, initialise map, get
+ * physical address.
+ */
+ error = bus_dmamem_alloc(ring->hw_ring_dma_tag,
+ (void **)&ring->hw_desc,
+ BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
+ &ring->hw_desc_map);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "failed to allocate DMA'able memory for hw_desc ring\n");
+ goto error;
+ }
+ ring->hw_desc_paddr = 0;
+	error = bus_dmamap_load(ring->hw_ring_dma_tag, ring->hw_desc_map,
+	    ring->hw_desc, hw_ring_size, qcom_ess_edma_desc_map_addr,
+	    &ring->hw_desc_paddr, BUS_DMA_NOWAIT);
+	if (error != 0) {
+		device_printf(sc->sc_dev,
+		    "failed to load hw_desc ring DMA map (%d)\n", error);
+		goto error;
+	}
+ bus_dmamap_sync(ring->hw_ring_dma_tag, ring->hw_desc_map,
+ BUS_DMASYNC_PREWRITE);
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_DESCRIPTOR_SETUP,
+ "%s: PADDR=0x%08lx\n", __func__, ring->hw_desc_paddr);
+
+ /*
+ * All done, initialise state.
+ */
+ ring->hw_entry_size = hw_desc_size;
+ ring->sw_entry_size = sw_desc_size;
+ ring->ring_count = count;
+
+ return (0);
+error:
+ mtx_destroy(&ring->mtx);
+ if (ring->label != NULL)
+ free(ring->label, M_TEMP);
+ if (ring->hw_desc != NULL) {
+ bus_dmamap_sync(ring->hw_ring_dma_tag, ring->hw_desc_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(ring->hw_ring_dma_tag, ring->hw_desc_map);
+ bus_dmamem_free(ring->hw_ring_dma_tag, ring->hw_desc,
+ ring->hw_desc_map);
+ ring->hw_desc = NULL;
+ }
+ if (ring->sw_desc != NULL) {
+ free(ring->sw_desc, M_TEMP);
+ ring->sw_desc = NULL;
+ }
+ if (ring->hw_ring_dma_tag != NULL) {
+ bus_dma_tag_destroy(ring->hw_ring_dma_tag);
+ ring->hw_ring_dma_tag = NULL;
+ }
+ if (ring->buffer_dma_tag != NULL) {
+ bus_dma_tag_destroy(ring->buffer_dma_tag);
+ ring->buffer_dma_tag = NULL;
+ }
+
+ return (error);
+}
+
+/*
+ * Free/clean the given descriptor ring.
+ *
+ * The ring itself right now is static; so we don't free it.
+ * We just free the resources it has.
+ */
+int
+qcom_ess_edma_desc_ring_free(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring)
+{
+
+ mtx_destroy(&ring->mtx);
+ if (ring->label != NULL)
+ free(ring->label, M_TEMP);
+
+ if (ring->hw_desc != NULL) {
+ bus_dmamap_sync(ring->hw_ring_dma_tag, ring->hw_desc_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(ring->hw_ring_dma_tag, ring->hw_desc_map);
+ bus_dmamem_free(ring->hw_ring_dma_tag, ring->hw_desc,
+ ring->hw_desc_map);
+ ring->hw_desc = NULL;
+ }
+
+ if (ring->sw_desc != NULL) {
+ free(ring->sw_desc, M_TEMP);
+ ring->sw_desc = NULL;
+ }
+
+ if (ring->hw_ring_dma_tag != NULL) {
+ bus_dma_tag_destroy(ring->hw_ring_dma_tag);
+ ring->hw_ring_dma_tag = NULL;
+ }
+ if (ring->buffer_dma_tag != NULL) {
+ bus_dma_tag_destroy(ring->buffer_dma_tag);
+ ring->buffer_dma_tag = NULL;
+ }
+
+ return (0);
+}
+
+/*
+ * Fetch the given software descriptor pointer by index.
+ *
+ * Returns NULL if the index is out of bounds.
+ */
+void *
+qcom_ess_edma_desc_ring_get_sw_desc(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring, uint16_t index)
+{
+ char *p;
+
+ if (index >= ring->ring_count)
+ return (NULL);
+
+ p = (char *) ring->sw_desc;
+
+ return (void *) (p + (ring->sw_entry_size * index));
+}
+
+/*
+ * Fetch the given hardware descriptor pointer by index.
+ *
+ * Returns NULL if the index is out of bounds.
+ */
+void *
+qcom_ess_edma_desc_ring_get_hw_desc(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring, uint16_t index)
+{
+ char *p;
+
+ if (index >= ring->ring_count)
+ return (NULL);
+
+ p = (char *) ring->hw_desc;
+
+ return (void *) (p + (ring->hw_entry_size * index));
+}
+
+/*
+ * Flush the hardware ring after a write, before the hardware
+ * gets to it.
+ */
+int
+qcom_ess_edma_desc_ring_flush_preupdate(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring)
+{
+
+ bus_dmamap_sync(ring->hw_ring_dma_tag, ring->hw_desc_map,
+ BUS_DMASYNC_PREWRITE);
+
+ return (0);
+}
+
+
+/*
+ * Flush the hardware ring after the hardware writes into it,
+ * before a read.
+ */
+int
+qcom_ess_edma_desc_ring_flush_postupdate(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring)
+{
+
+ bus_dmamap_sync(ring->hw_ring_dma_tag, ring->hw_desc_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ return (0);
+}
+
+/*
+ * Get how many descriptor slots are available.
+ */
+int
+qcom_ess_edma_desc_ring_get_num_available(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring)
+{
+ uint16_t sw_next_to_fill;
+ uint16_t sw_next_to_clean;
+ uint16_t count = 0;
+
+ sw_next_to_clean = ring->next_to_clean;
+ sw_next_to_fill = ring->next_to_fill;
+
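+	/*
+	 * One slot is deliberately left unused so a completely full
+	 * ring can be told apart from an empty one; hence the "- 1"
+	 * below.  e.g. for a 16-entry ring with fill=5 and clean=2,
+	 * 16 + 2 - 5 - 1 = 12 entries are free.
+	 */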
+ if (sw_next_to_clean <= sw_next_to_fill)
+ count = ring->ring_count;
+
+ return (count + sw_next_to_clean - sw_next_to_fill - 1);
+}
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_desc.h b/sys/dev/qcom_ess_edma/qcom_ess_edma_desc.h
new file mode 100644
index 000000000000..b7213d94da5d
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_desc.h
@@ -0,0 +1,63 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __QCOM_ESS_EDMA_DESC_H__
+#define __QCOM_ESS_EDMA_DESC_H__
+
+extern int qcom_ess_edma_desc_ring_setup(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring,
+ char *label,
+ int count,
+ int sw_desc_size,
+ int hw_desc_size,
+ int num_segments,
+ int buffer_alignment);
+extern int qcom_ess_edma_desc_ring_free(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring);
+extern void * qcom_ess_edma_desc_ring_get_sw_desc(
+ struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring,
+ uint16_t index);
+extern void * qcom_ess_edma_desc_ring_get_hw_desc(
+ struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring,
+ uint16_t index);
+extern int qcom_ess_edma_desc_ring_flush_preupdate(
+ struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring);
+extern int qcom_ess_edma_desc_ring_flush_postupdate(
+ struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring);
+extern int qcom_ess_edma_desc_ring_get_num_available(
+ struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring);
+
+#endif /* __QCOM_ESS_EDMA_DESC_H__ */
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_gmac.c b/sys/dev/qcom_ess_edma/qcom_ess_edma_gmac.c
new file mode 100644
index 000000000000..6510dcc74ca2
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_gmac.c
@@ -0,0 +1,462 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/gpio.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/smp.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_vlan_var.h>
+#include <net/if_media.h>
+#include <net/ethernet.h>
+#include <net/if_types.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/atomic.h>
+
+#include <dev/gpio/gpiobusvar.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/qcom_ess_edma/qcom_ess_edma_var.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_reg.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_hw.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_desc.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_rx.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_tx.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_debug.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_gmac.h>
+
+static int
+qcom_ess_edma_gmac_mediachange(if_t ifp)
+{
+ struct qcom_ess_edma_gmac *gmac = if_getsoftc(ifp);
+ struct qcom_ess_edma_softc *sc = gmac->sc;
+ struct ifmedia *ifm = &gmac->ifm;
+ struct ifmedia_entry *ife = ifm->ifm_cur;
+
+ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+ return (EINVAL);
+
+ if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
+ device_printf(sc->sc_dev,
+		    "AUTO is not supported on this MAC\n");
+ return (EINVAL);
+ }
+
+ /*
+ * Ignore everything
+ */
+ return (0);
+}
+
+static void
+qcom_ess_edma_gmac_mediastatus(if_t ifp, struct ifmediareq *ifmr)
+{
+
+ ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
+ ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX;
+}
+
+static int
+qcom_ess_edma_gmac_ioctl(if_t ifp, u_long command, caddr_t data)
+{
+ struct qcom_ess_edma_gmac *gmac = if_getsoftc(ifp);
+ struct qcom_ess_edma_softc *sc = gmac->sc;
+ struct ifreq *ifr = (struct ifreq *) data;
+ int error, mask;
+
+ switch (command) {
+ case SIOCSIFFLAGS:
+ if ((if_getflags(ifp) & IFF_UP) != 0) {
+ /* up */
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_STATE,
+ "%s: gmac%d: IFF_UP\n",
+ __func__,
+ gmac->id);
+ if_setdrvflagbits(ifp, IFF_DRV_RUNNING,
+ IFF_DRV_OACTIVE);
+ if_link_state_change(ifp, LINK_STATE_UP);
+
+ } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
+ /* down */
+ if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_STATE,
+ "%s: gmac%d: IF down\n",
+ __func__,
+ gmac->id);
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ }
+ error = 0;
+ break;
+ case SIOCGIFMEDIA:
+ case SIOCSIFMEDIA:
+ error = ifmedia_ioctl(ifp, ifr, &gmac->ifm, command);
+ break;
+ case SIOCSIFCAP:
+ mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
+ error = 0;
+
+ if ((mask & IFCAP_RXCSUM) != 0 &&
+ (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0)
+ if_togglecapenable(ifp, IFCAP_RXCSUM);
+
+ if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
+ (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
+ if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
+
+ VLAN_CAPABILITIES(ifp);
+ break;
+ default:
+ error = ether_ioctl(ifp, command, data);
+ break;
+ }
+
+ return (error);
+}
+
+static void
+qcom_ess_edma_gmac_init(void *arg)
+{
+ struct qcom_ess_edma_gmac *gmac = arg;
+ struct qcom_ess_edma_softc *sc = gmac->sc;
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_STATE,
+ "%s: gmac%d: called\n",
+ __func__,
+ gmac->id);
+
+ if_setdrvflagbits(gmac->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
+ if_link_state_change(gmac->ifp, LINK_STATE_UP);
+}
+
+static int
+qcom_ess_edma_gmac_transmit(if_t ifp, struct mbuf *m)
+{
+ struct qcom_ess_edma_gmac *gmac = if_getsoftc(ifp);
+ struct qcom_ess_edma_softc *sc = gmac->sc;
+ struct qcom_ess_edma_tx_state *txs;
+ int ret;
+ int q;
+
+ /* Make sure our CPU doesn't change whilst we're running */
+ sched_pin();
+
+ /*
+ * Map flowid / curcpu to a given transmit queue.
+ *
+ * Since we're running on a platform with either two
+ * or four CPUs, we want to distribute the load to a set
+ * of TX queues that won't clash with any other CPU TX queue
+ * use.
+ */
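+	/*
+	 * e.g. with 16 TX rings on a 4 CPU system there are four
+	 * queues per CPU; a frame with flowid f sent from CPU c is
+	 * mapped to TX queue (f % 4) + (c * 4).
+	 */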
+ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
+ /* Map flowid to a queue */
+ q = m->m_pkthdr.flowid % sc->sc_config.num_tx_queue_per_cpu;
+
+		/* Now, offset into this CPU's group of TX queues */
+		q = q + (sc->sc_config.num_tx_queue_per_cpu * curcpu);
+
+ /* And ensure we're not overflowing */
+ q = q % QCOM_ESS_EDMA_NUM_TX_RINGS;
+ } else {
+ /*
+ * Use the first TXQ in each CPU group, so we don't
+ * hit lock contention with traffic that has flowids.
+ */
+		q = (sc->sc_config.num_tx_queue_per_cpu * curcpu) %
+		    QCOM_ESS_EDMA_NUM_TX_RINGS;
+ }
+
+ /* Attempt to enqueue in the buf_ring. */
+ /*
+ * XXX TODO: maybe move this into *tx.c so gmac.c doesn't
+ * need to reach into the tx_state stuff?
+ */
+ txs = &sc->sc_tx_state[q];
+
+ /* XXX TODO: add an mbuf tag instead? for the transmit gmac/ifp ? */
+ m->m_pkthdr.rcvif = ifp;
+
+ ret = buf_ring_enqueue(txs->br, m);
+
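+	/*
+	 * Kick the deferred transmit task, but only if it isn't
+	 * already pending; the task clears enqueue_is_running
+	 * before it drains the buf_ring.
+	 */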
+ if (ret == 0) {
+ if (atomic_cmpset_int(&txs->enqueue_is_running, 0, 1) == 1) {
+ taskqueue_enqueue(txs->completion_tq, &txs->xmit_task);
+ }
+ }
+
+ sched_unpin();
+
+	/* Don't consume the mbuf here; the if_transmit caller will if needed */
+ return (ret);
+}
+
+static void
+qcom_ess_edma_gmac_qflush(if_t ifp)
+{
+ struct qcom_ess_edma_gmac *gmac = if_getsoftc(ifp);
+ struct qcom_ess_edma_softc *sc = gmac->sc;
+
+ /* XXX TODO */
+ device_printf(sc->sc_dev, "%s: gmac%d: called\n",
+ __func__,
+ gmac->id);
+
+ /*
+ * Flushing the ifnet would, sigh, require walking each buf_ring
+ * and then removing /only/ the entries matching that ifnet.
+ * Which is a complete pain to do right now.
+ */
+}
+
+int
+qcom_ess_edma_gmac_parse(struct qcom_ess_edma_softc *sc, int gmac_id)
+{
+ struct qcom_ess_edma_gmac *gmac;
+ char gmac_name[10];
+ uint32_t vlan_tag[2];
+ phandle_t p;
+ int len;
+
+ sprintf(gmac_name, "gmac%d", gmac_id);
+
+ gmac = &sc->sc_gmac[gmac_id];
+
+ /* Find our sub-device */
+ p = ofw_bus_find_child(ofw_bus_get_node(sc->sc_dev), gmac_name);
+ if (p <= 0) {
+ device_printf(sc->sc_dev,
+ "%s: couldn't find %s\n", __func__,
+ gmac_name);
+ return (ENOENT);
+ }
+
+ /* local-mac-address */
+ len = OF_getprop(p, "local-mac-address", (void *) &gmac->eaddr,
+ sizeof(struct ether_addr));
+ if (len != sizeof(struct ether_addr)) {
+ device_printf(sc->sc_dev,
+ "gmac%d: Couldn't parse local-mac-address\n",
+ gmac_id);
+ memset(&gmac->eaddr, 0, sizeof(gmac->eaddr));
+ }
+
+	/* vlan_tag - <id portmask> tuple */
+ len = OF_getproplen(p, "vlan_tag");
+ if (len != sizeof(vlan_tag)) {
+ device_printf(sc->sc_dev,
+ "gmac%d: no vlan_tag field or invalid size/values\n",
+ gmac_id);
+ return (EINVAL);
+ }
+ len = OF_getencprop(p, "vlan_tag", (void *) &vlan_tag,
+ sizeof(vlan_tag));
+ if (len != sizeof(vlan_tag)) {
+ device_printf(sc->sc_dev,
+ "gmac%d: couldn't parse vlan_tag field\n", gmac_id);
+ return (EINVAL);
+ }
+
+ /*
+ * Setup the given gmac entry.
+ */
+ gmac->sc = sc;
+ gmac->id = gmac_id;
+ gmac->enabled = true;
+ gmac->vlan_id = vlan_tag[0];
+ gmac->port_mask = vlan_tag[1];
+
+ device_printf(sc->sc_dev,
+ "gmac%d: MAC=%6D, vlan id=%d, port_mask=0x%04x\n",
+ gmac_id,
+ &gmac->eaddr, ":",
+ gmac->vlan_id,
+ gmac->port_mask);
+
+ return (0);
+}
+
+int
+qcom_ess_edma_gmac_create_ifnet(struct qcom_ess_edma_softc *sc, int gmac_id)
+{
+ struct qcom_ess_edma_gmac *gmac;
+ char gmac_name[10];
+
+ sprintf(gmac_name, "gmac%d", gmac_id);
+
+ gmac = &sc->sc_gmac[gmac_id];
+
+ /* Skip non-setup gmacs */
+ if (gmac->enabled == false)
+ return (0);
+
+ gmac->ifp = if_alloc(IFT_ETHER);
+ if (gmac->ifp == NULL) {
+ device_printf(sc->sc_dev, "gmac%d: couldn't allocate ifnet\n",
+ gmac_id);
+ return (ENOSPC);
+ }
+
+ if_setsoftc(gmac->ifp, gmac);
+
+ if_initname(gmac->ifp, "gmac", gmac_id);
+
+ if (ETHER_IS_ZERO(gmac->eaddr.octet)) {
+ device_printf(sc->sc_dev, "gmac%d: generating random MAC\n",
+ gmac_id);
+ ether_gen_addr(gmac->ifp, (void *) &gmac->eaddr.octet);
+ }
+
+ if_setflags(gmac->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
+
+ if_setioctlfn(gmac->ifp, qcom_ess_edma_gmac_ioctl);
+ if_setinitfn(gmac->ifp, qcom_ess_edma_gmac_init);
+ if_settransmitfn(gmac->ifp, qcom_ess_edma_gmac_transmit);
+ if_setqflushfn(gmac->ifp, qcom_ess_edma_gmac_qflush);
+
+ if_setcapabilitiesbit(gmac->ifp, IFCAP_VLAN_MTU |
+ IFCAP_VLAN_HWTAGGING, 0);
+
+ if_setcapabilitiesbit(gmac->ifp, IFCAP_RXCSUM, 0);
+
+ /* CSUM_TCP | CSUM_UDP for TX checksum offload */
+ if_clearhwassist(gmac->ifp);
+
+ /* Configure a hard-coded media */
+ ifmedia_init(&gmac->ifm, 0, qcom_ess_edma_gmac_mediachange,
+ qcom_ess_edma_gmac_mediastatus);
+ ifmedia_add(&gmac->ifm, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
+ ifmedia_set(&gmac->ifm, IFM_ETHER | IFM_1000_T | IFM_FDX);
+
+ ether_ifattach(gmac->ifp, (char *) &gmac->eaddr);
+
+ if_setcapenable(gmac->ifp, if_getcapabilities(gmac->ifp));
+
+ return (0);
+}
+
+/*
+ * Setup the port mapping for the given GMAC.
+ *
+ * This populates sc->sc_gmac_port_map[] to point the given port
+ * entry to this gmac index. The receive path code can then use
+ * this to figure out which gmac ifp to push a receive frame into.
+ */
+int
+qcom_ess_edma_gmac_setup_port_mapping(struct qcom_ess_edma_softc *sc,
+ int gmac_id)
+{
+ struct qcom_ess_edma_gmac *gmac;
+ int i;
+
+ gmac = &sc->sc_gmac[gmac_id];
+
+ /* Skip non-setup gmacs */
+ if (gmac->enabled == false)
+ return (0);
+
+ for (i = 0; i < QCOM_ESS_EDMA_MAX_NUM_PORTS; i++) {
+ if ((gmac->port_mask & (1U << i)) == 0)
+ continue;
+ if (sc->sc_gmac_port_map[i] != -1) {
+ device_printf(sc->sc_dev,
+ "DUPLICATE GMAC port map (port %d)\n",
+ i);
+ return (ENXIO);
+ }
+
+ sc->sc_gmac_port_map[i] = gmac_id;
+
+ if (bootverbose)
+ device_printf(sc->sc_dev,
+ "ESS port %d maps to gmac%d\n",
+ i, gmac_id);
+ }
+
+ return (0);
+}
+
+/*
+ * Receive frames into the network stack.
+ *
+ * This takes a list of mbufs in the mbufq and receives them
+ * up into the appropriate ifnet context. It takes care of
+ * the network epoch as well.
+ *
+ * This must be called with no locks held.
+ */
+int
+qcom_ess_edma_gmac_receive_frames(struct qcom_ess_edma_softc *sc,
+ int rx_queue, struct mbufq *mq)
+{
+ struct qcom_ess_edma_desc_ring *ring;
+ struct epoch_tracker et;
+ struct mbuf *m;
+ if_t ifp;
+
+ ring = &sc->sc_rx_ring[rx_queue];
+
+ NET_EPOCH_ENTER(et);
+ while ((m = mbufq_dequeue(mq)) != NULL) {
+ if (m->m_pkthdr.rcvif == NULL) {
+ ring->stats.num_rx_no_gmac++;
+ m_freem(m);
+ } else {
+ ring->stats.num_rx_ok++;
+ ifp = m->m_pkthdr.rcvif;
+ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
+ if_input(ifp, m);
+ }
+ }
+ NET_EPOCH_EXIT(et);
+ return (0);
+}
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_gmac.h b/sys/dev/qcom_ess_edma/qcom_ess_edma_gmac.h
new file mode 100644
index 000000000000..48862d058d99
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_gmac.h
@@ -0,0 +1,46 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __QCOM_ESS_EDMA_GMAC_H__
+#define __QCOM_ESS_EDMA_GMAC_H__
+
+extern int qcom_ess_edma_gmac_parse(struct qcom_ess_edma_softc *sc,
+ int gmac_id);
+extern int qcom_ess_edma_gmac_create_ifnet(struct qcom_ess_edma_softc *sc,
+ int gmac_id);
+extern int qcom_ess_edma_gmac_setup_port_mapping(
+ struct qcom_ess_edma_softc *sc, int gmac_id);
+
+extern int qcom_ess_edma_gmac_receive_frames(struct qcom_ess_edma_softc *sc,
+ int rx_queue,
+ struct mbufq *mq);
+
+#endif /* __QCOM_ESS_EDMA_GMAC_H__ */
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c b/sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c
new file mode 100644
index 000000000000..1ba11db248e5
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c
@@ -0,0 +1,752 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+#include <net/ethernet.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/qcom_ess_edma/qcom_ess_edma_var.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_reg.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_hw.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_debug.h>
+
+/*
+ * Reset the ESS EDMA core.
+ *
+ * This is ... problematic. There's only a single clock control
+ * for the ESS core - and that includes both the EDMA (ethernet)
+ * and switch hardware.
+ *
+ * AND, it's a placeholder for what the linux ess-edma driver
+ * is doing directly to the ess core because in some instances
+ * where there's a single PHY hooked up, it's possible that
+ * ess-switch won't be initialised. In that case it defaults
+ * to a very minimal switch config. Now, that's honestly pretty
+ * bad, and instead we should be doing that kind of awareness
+ * in ar40xx_switch.
+ *
+ * So, for now this is a big no-op, at least until everything
+ * is implemented enough that I can get the switch/phy code and
+ * this EDMA driver code to co-exist.
+ */
+int
+qcom_ess_edma_hw_reset(struct qcom_ess_edma_softc *sc)
+{
+
+ EDMA_LOCK_ASSERT(sc);
+
+ device_printf(sc->sc_dev, "%s: called, TODO!\n", __func__);
+
+ /*
+ * This is where the linux ess-edma driver would reset the
+ * ESS core.
+ */
+
+ /*
+ * and here's where the linux ess-edma driver would program
+ * in the initial port config, rgmii control, traffic
+ * port forwarding and broadcast/multicast traffic forwarding.
+ *
+ * instead, this should be done by the ar40xx_switch driver!
+ */
+
+ return (0);
+}
+
+/*
+ * Get the TX interrupt moderation timer.
+ *
+ * The resolution of this register is 2 microseconds.
+ */
+int
+qcom_ess_edma_hw_get_tx_intr_moderation(struct qcom_ess_edma_softc *sc,
+ uint32_t *usec)
+{
+ uint32_t reg;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT);
+ reg = reg >> EDMA_IRQ_MODRT_TX_TIMER_SHIFT;
+ reg &= EDMA_IRQ_MODRT_TIMER_MASK;
+
+ *usec = reg * 2;
+
+ return (0);
+}
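+
+/*
+ * Worked example (editor's illustration): the default EDMA_TX_IMT of
+ * 0x50 (80 decimal) yields 80 * 2 = 160 microseconds; conversely a
+ * requested 100 microseconds is programmed as 100 / 2 = 50.
+ */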
+
+/*
+ * Set the TX interrupt moderation timer.
+ *
+ * The resolution of this register is 2 microseconds.
+ */
+int
+qcom_ess_edma_hw_set_tx_intr_moderation(struct qcom_ess_edma_softc *sc,
+ uint32_t usec)
+{
+ uint32_t reg;
+
+ usec = usec / 2;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT);
+ reg &= ~(EDMA_IRQ_MODRT_TIMER_MASK << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
+ reg |= (usec & EDMA_IRQ_MODRT_TIMER_MASK)
+ << EDMA_IRQ_MODRT_TX_TIMER_SHIFT;
+ EDMA_REG_WRITE(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Set the RX interrupt moderation timer.
+ *
+ * The resolution of this register is 2 microseconds.
+ */
+int
+qcom_ess_edma_hw_set_rx_intr_moderation(struct qcom_ess_edma_softc *sc,
+ uint32_t usec)
+{
+ uint32_t reg;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT);
+ reg &= ~(EDMA_IRQ_MODRT_TIMER_MASK << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
+ reg |= (usec & EDMA_IRQ_MODRT_TIMER_MASK)
+ << EDMA_IRQ_MODRT_RX_TIMER_SHIFT;
+ EDMA_REG_WRITE(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Disable all interrupts.
+ */
+int
+qcom_ess_edma_hw_intr_disable(struct qcom_ess_edma_softc *sc)
+{
+ int i;
+
+ /* Disable TX interrupts */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_IRQS; i++) {
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_INT_MASK_Q(i), 0);
+ }
+
+ /* Disable RX interrupts */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_IRQS; i++) {
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_INT_MASK_Q(i), 0);
+ }
+
+ /* Disable misc/WOL interrupts */
+ EDMA_REG_WRITE(sc, EDMA_REG_MISC_IMR, 0);
+ EDMA_REG_WRITE(sc, EDMA_REG_WOL_IMR, 0);
+
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Enable/disable the given RX ring interrupt.
+ */
+int
+qcom_ess_edma_hw_intr_rx_intr_set_enable(struct qcom_ess_edma_softc *sc,
+ int rxq, bool state)
+{
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_INT_MASK_Q(rxq), state ? 1 : 0);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Enable/disable the given TX ring interrupt.
+ */
+int
+qcom_ess_edma_hw_intr_tx_intr_set_enable(struct qcom_ess_edma_softc *sc,
+ int txq, bool state)
+{
+
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_INT_MASK_Q(txq), state ? 1 : 0);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Enable interrupts.
+ */
+int
+qcom_ess_edma_hw_intr_enable(struct qcom_ess_edma_softc *sc)
+{
+ int i;
+
+ /* ACK, then Enable TX interrupts */
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_ISR, 0xffff);
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_IRQS; i++) {
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_INT_MASK_Q(i),
+ sc->sc_config.tx_intr_mask);
+ }
+
+ /* ACK, then Enable RX interrupts */
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_ISR, 0xff);
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_IRQS; i++) {
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_INT_MASK_Q(i),
+ sc->sc_config.rx_intr_mask);
+ }
+
+ /* Disable misc/WOL interrupts */
+ EDMA_REG_WRITE(sc, EDMA_REG_MISC_IMR, 0);
+ EDMA_REG_WRITE(sc, EDMA_REG_WOL_IMR, 0);
+
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Clear interrupt status.
+ */
+int
+qcom_ess_edma_hw_intr_status_clear(struct qcom_ess_edma_softc *sc)
+{
+
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_ISR, 0xff);
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_ISR, 0xffff);
+ EDMA_REG_WRITE(sc, EDMA_REG_MISC_ISR, 0x1fff);
+ EDMA_REG_WRITE(sc, EDMA_REG_WOL_ISR, 0x1);
+
+ return (0);
+}
+
+/*
+ * ACK the given RX queue ISR.
+ *
+ * Must be called with the RX ring lock held!
+ */
+int
+qcom_ess_edma_hw_intr_rx_ack(struct qcom_ess_edma_softc *sc, int rx_queue)
+{
+
+ EDMA_RING_LOCK_ASSERT(&sc->sc_rx_ring[rx_queue]);
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_ISR, (1U << rx_queue));
+ (void) EDMA_REG_READ(sc, EDMA_REG_RX_ISR);
+
+ return (0);
+}
+
+/*
+ * ACK the given TX queue ISR.
+ *
+ * Must be called with the TX ring lock held!
+ */
+int
+qcom_ess_edma_hw_intr_tx_ack(struct qcom_ess_edma_softc *sc, int tx_queue)
+{
+
+ EDMA_RING_LOCK_ASSERT(&sc->sc_tx_ring[tx_queue]);
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_ISR, (1U << tx_queue));
+ (void) EDMA_REG_READ(sc, EDMA_REG_TX_ISR);
+
+ return (0);
+}
+
+/*
+ * Configure the default RSS indirection table.
+ */
+int
+qcom_ess_edma_hw_configure_rss_table(struct qcom_ess_edma_softc *sc)
+{
+ int i;
+
+ /*
+ * The default IDT value configures the hash buckets
+ * to a repeating pattern of q0, q2, q4, q6.
+ */
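+	/*
+	 * (Editor's note) EDMA_RSS_IDT_VALUE is 0x64206420; reading its
+	 * nibbles from the least significant end gives 0,2,4,6,0,2,4,6,
+	 * i.e. eight hash buckets per register cycling through those
+	 * queues.
+	 */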
+ for (i = 0; i < EDMA_NUM_IDT; i++) {
+ EDMA_REG_WRITE(sc, EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE);
+ }
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Configure the default load balance mapping table.
+ */
+int
+qcom_ess_edma_hw_configure_load_balance_table(struct qcom_ess_edma_softc *sc)
+{
+
+ /*
+ * I think this is mapping things to queues 0,2,4,6.
+ * Linux says it's 0,1,3,4 but that doesn't match the
+ * EDMA_LB_REG_VALUE field.
+ */
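+	/*
+	 * (Editor's note) EDMA_LB_RING_ENTRY_BIT_OFFSET in
+	 * qcom_ess_edma_reg.h is 8, so EDMA_LB_REG_VALUE (0x6040200)
+	 * packs the bytes 0x00, 0x02, 0x04, 0x06 - rings 0, 2, 4 and
+	 * 6 - which supports the 0,2,4,6 reading.
+	 */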
+ EDMA_REG_WRITE(sc, EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);
+ EDMA_REG_BARRIER_WRITE(sc);
+ return (0);
+}
+
+/*
+ * Configure the default virtual tx ring queues.
+ */
+int
+qcom_ess_edma_hw_configure_tx_virtual_queue(struct qcom_ess_edma_softc *sc)
+{
+
+ EDMA_REG_WRITE(sc, EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
+ EDMA_REG_WRITE(sc, EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);
+
+ EDMA_REG_BARRIER_WRITE(sc);
+ return (0);
+}
+
+/*
+ * Configure the default maximum AXI bus transaction size.
+ */
+int
+qcom_ess_edma_hw_configure_default_axi_transaction_size(
+ struct qcom_ess_edma_softc *sc)
+{
+
+ EDMA_REG_WRITE(sc, EDMA_REG_AXIW_CTRL_MAXWRSIZE,
+ EDMA_AXIW_MAXWRSIZE_VALUE);
+ return (0);
+}
+
+/*
+ * Stop the TX/RX queues.
+ */
+int
+qcom_ess_edma_hw_stop_txrx_queues(struct qcom_ess_edma_softc *sc)
+{
+ uint32_t reg;
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_RXQ_CTRL);
+ reg &= ~EDMA_RXQ_CTRL_EN;
+ EDMA_REG_WRITE(sc, EDMA_REG_RXQ_CTRL, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_TXQ_CTRL);
+ reg &= ~EDMA_TXQ_CTRL_TXQ_EN;
+ EDMA_REG_WRITE(sc, EDMA_REG_TXQ_CTRL, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+ return (0);
+}
+
+/*
+ * Stop the EDMA block, disable interrupts.
+ */
+int
+qcom_ess_edma_hw_stop(struct qcom_ess_edma_softc *sc)
+{
+ int ret;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ ret = qcom_ess_edma_hw_intr_disable(sc);
+ if (ret != 0) {
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
+ "%s: hw_intr_disable failed (%d)\n",
+ __func__,
+ ret);
+ }
+
+ ret = qcom_ess_edma_hw_intr_status_clear(sc);
+ if (ret != 0) {
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
+ "%s: hw_intr_status_clear failed (%d)\n",
+ __func__,
+ ret);
+ }
+
+ ret = qcom_ess_edma_hw_stop_txrx_queues(sc);
+ if (ret != 0) {
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
+ "%s: hw_stop_txrx_queues failed (%d)\n",
+ __func__,
+ ret);
+ }
+
+ return (0);
+}
+
+/*
+ * Update the producer index for the given receive queue.
+ *
+ * Note: the RX ring lock must be held!
+ *
+ * Return 0 if OK, an error number if there's an error.
+ */
+int
+qcom_ess_edma_hw_rfd_prod_index_update(struct qcom_ess_edma_softc *sc,
+ int queue, int idx)
+{
+ uint32_t reg;
+
+ EDMA_RING_LOCK_ASSERT(&sc->sc_rx_ring[queue]);
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING_MGMT,
+ "%s: called; q=%d idx=0x%x\n",
+ __func__, queue, idx);
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_RFD_IDX_Q(queue));
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING_MGMT,
+ "%s: q=%d reg was 0x%08x\n", __func__, queue, reg);
+ reg &= ~EDMA_RFD_PROD_IDX_BITS;
+ reg |= idx;
+ EDMA_REG_WRITE(sc, EDMA_REG_RFD_IDX_Q(queue), reg);
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING_MGMT,
+ "%s: q=%d reg now 0x%08x\n", __func__, queue, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Fetch the consumer index for the given receive queue.
+ *
+ * Returns the current consumer index.
+ *
+ * Note - since it's used in statistics/debugging it isn't asserting the
+ * RX ring lock, so be careful when/how you use this!
+ */
+int
+qcom_ess_edma_hw_rfd_get_cons_index(struct qcom_ess_edma_softc *sc, int queue)
+{
+ uint32_t reg;
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_RFD_IDX_Q(queue));
+	return ((reg >> EDMA_RFD_CONS_IDX_SHIFT) & EDMA_RFD_CONS_IDX_MASK);
+}
+
+/*
+ * Update the software consumed index to the hardware, so
+ * it knows what we've read.
+ *
+ * Note: the RX ring lock must be held when calling this!
+ *
+ * Returns 0 if OK, error number if error.
+ */
+int
+qcom_ess_edma_hw_rfd_sw_cons_index_update(struct qcom_ess_edma_softc *sc,
+ int queue, int idx)
+{
+ EDMA_RING_LOCK_ASSERT(&sc->sc_rx_ring[queue]);
+
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_SW_CONS_IDX_Q(queue), idx);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Setup initial hardware configuration.
+ */
+int
+qcom_ess_edma_hw_setup(struct qcom_ess_edma_softc *sc)
+{
+ uint32_t reg;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_INTR_CTRL);
+ reg &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT);
+ reg |= sc->sc_state.intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT;
+ EDMA_REG_WRITE(sc, EDMA_REG_INTR_CTRL, reg);
+
+ /* Clear wake-on-lan config */
+ EDMA_REG_WRITE(sc, EDMA_REG_WOL_CTRL, 0);
+
+ /* configure initial interrupt moderation config */
+ reg = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
+ reg |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
+ EDMA_REG_WRITE(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT, reg);
+
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Setup TX DMA burst configuration.
+ */
+int
+qcom_ess_edma_hw_setup_tx(struct qcom_ess_edma_softc *sc)
+{
+ uint32_t reg;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ reg = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT);
+ reg |= EDMA_TXQ_CTRL_TPD_BURST_EN;
+ reg |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT);
+ EDMA_REG_WRITE(sc, EDMA_REG_TXQ_CTRL, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Setup default RSS, RX burst/prefetch/interrupt thresholds.
+ *
+ * Strip VLANs, those are offloaded in the RX descriptor.
+ */
+int
+qcom_ess_edma_hw_setup_rx(struct qcom_ess_edma_softc *sc)
+{
+ uint32_t reg;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ /* Configure RSS types */
+ EDMA_REG_WRITE(sc, EDMA_REG_RSS_TYPE, sc->sc_config.rss_type);
+
+ /* Configure RFD burst */
+ reg = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT);
+ /* .. and RFD prefetch threshold */
+ reg |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT);
+ /* ... and threshold to generate RFD interrupt */
+ reg |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT);
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_DESC1, reg);
+
+ /* Set RX FIFO threshold to begin DMAing data to host */
+ reg = EDMA_FIFO_THRESH_128_BYTE;
+	/* Strip VLAN tags; the tag is delivered in the RX descriptor */
+ reg |= EDMA_RXQ_CTRL_RMV_VLAN;
+ EDMA_REG_WRITE(sc, EDMA_REG_RXQ_CTRL, reg);
+
+ EDMA_REG_BARRIER_WRITE(sc);
+ return (0);
+}
+
+/*
+ * XXX TODO: this particular routine is a bit big and likely should be split
+ * across main, hw, desc, rx and tx. But to expedite initial bring-up,
+ * let's just commit the sins here and get receive up and going.
+ */
+int
+qcom_ess_edma_hw_setup_txrx_desc_rings(struct qcom_ess_edma_softc *sc)
+{
+ uint32_t reg, i, idx;
+ int len;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ /*
+ * setup base addresses for each transmit ring, and
+ * read in the initial index to use for transmit.
+ */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_RINGS; i++) {
+ /* Descriptor ring based address */
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_RING_MGMT,
+ "TXQ[%d]: ring paddr=0x%08lx\n",
+ i, sc->sc_tx_ring[i].hw_desc_paddr);
+ EDMA_REG_WRITE(sc, EDMA_REG_TPD_BASE_ADDR_Q(i),
+ sc->sc_tx_ring[i].hw_desc_paddr);
+
+ /* And now, grab the consumer index */
+ reg = EDMA_REG_READ(sc, EDMA_REG_TPD_IDX_Q(i));
+		idx = (reg >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;
+
+ sc->sc_tx_ring[i].next_to_fill = idx;
+ sc->sc_tx_ring[i].next_to_clean = idx;
+
+ /* Update prod and sw consumer indexes */
+ reg &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT);
+ reg |= idx;
+ EDMA_REG_WRITE(sc, EDMA_REG_TPD_IDX_Q(i), reg);
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_SW_CONS_IDX_Q(i), idx);
+
+ /* Set the ring size */
+ EDMA_REG_WRITE(sc, EDMA_REG_TPD_RING_SIZE,
+ sc->sc_config.tx_ring_count & EDMA_TPD_RING_SIZE_MASK);
+	}
+
+ /* Set base addresses for each RFD ring */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING_MGMT,
+ "RXQ[%d]: ring paddr=0x%08lx\n",
+ i, sc->sc_rx_ring[i].hw_desc_paddr);
+ EDMA_REG_WRITE(sc, EDMA_REG_RFD_BASE_ADDR_Q(i),
+ sc->sc_rx_ring[i].hw_desc_paddr);
+ }
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ /* Configure RX buffer size */
+ len = sc->sc_config.rx_buf_size;
+ if (sc->sc_config.rx_buf_ether_align)
+ len -= ETHER_ALIGN;
+ reg = (len & EDMA_RX_BUF_SIZE_MASK)
+ << EDMA_RX_BUF_SIZE_SHIFT;
+ /* .. and RFD ring size */
+ reg |= (sc->sc_config.rx_ring_count & EDMA_RFD_RING_SIZE_MASK)
+ << EDMA_RFD_RING_SIZE_SHIFT;
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_DESC0, reg);
+
+ /* Disable the TX low/high watermark (for interrupts?) */
+ EDMA_REG_WRITE(sc, EDMA_REG_TXF_WATER_MARK, 0);
+
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ /* Load all the ring base addresses into the hardware */
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_TX_SRAM_PART);
+ reg |= 1 << EDMA_LOAD_PTR_SHIFT;
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_SRAM_PART, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Enable general MAC TX DMA.
+ */
+int
+qcom_ess_edma_hw_tx_enable(struct qcom_ess_edma_softc *sc)
+{
+ uint32_t reg;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_TXQ_CTRL);
+ reg |= EDMA_TXQ_CTRL_TXQ_EN;
+ EDMA_REG_WRITE(sc, EDMA_REG_TXQ_CTRL, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Enable general MAC RX DMA.
+ */
+int
+qcom_ess_edma_hw_rx_enable(struct qcom_ess_edma_softc *sc)
+{
+	uint32_t reg;
+
+	EDMA_LOCK_ASSERT(sc);
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_RXQ_CTRL);
+ reg |= EDMA_RXQ_CTRL_EN;
+ EDMA_REG_WRITE(sc, EDMA_REG_RXQ_CTRL, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Read the TPD consumer index register for the given transmit ring.
+ */
+int
+qcom_ess_edma_hw_tx_read_tpd_cons_idx(struct qcom_ess_edma_softc *sc,
+ int queue_id, uint16_t *idx)
+{
+ uint32_t reg;
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_TPD_IDX_Q(queue_id));
+ *idx = (reg >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;
+
+ return (0);
+}
+
+/*
+ * Update the TPD producer index for the given transmit ring.
+ */
+int
+qcom_ess_edma_hw_tx_update_tpd_prod_idx(struct qcom_ess_edma_softc *sc,
+ int queue_id, uint16_t idx)
+{
+ uint32_t reg;
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_TPD_IDX_Q(queue_id));
+ reg &= ~EDMA_TPD_PROD_IDX_BITS;
+ reg |= (idx & EDMA_TPD_PROD_IDX_MASK) << EDMA_TPD_PROD_IDX_SHIFT;
+ EDMA_REG_WRITE(sc, EDMA_REG_TPD_IDX_Q(queue_id), reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Update the TPD software consumer index register for the given
+ * transmit ring - ie, what software has cleaned.
+ */
+int
+qcom_ess_edma_hw_tx_update_cons_idx(struct qcom_ess_edma_softc *sc,
+ int queue_id, uint16_t idx)
+{
+
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), idx);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_hw.h b/sys/dev/qcom_ess_edma/qcom_ess_edma_hw.h
new file mode 100644
index 000000000000..3ee3bc64b658
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_hw.h
@@ -0,0 +1,86 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __QCOM_ESS_EDMA_HW_H__
+#define __QCOM_ESS_EDMA_HW_H__
+
+extern int qcom_ess_edma_hw_reset(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_get_tx_intr_moderation(
+ struct qcom_ess_edma_softc *sc, uint32_t *usec);
+extern int qcom_ess_edma_hw_set_tx_intr_moderation(
+ struct qcom_ess_edma_softc *sc, uint32_t usec);
+extern int qcom_ess_edma_hw_set_rx_intr_moderation(
+ struct qcom_ess_edma_softc *sc, uint32_t usec);
+extern int qcom_ess_edma_hw_intr_disable(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_intr_rx_intr_set_enable(
+ struct qcom_ess_edma_softc *sc, int rxq, bool state);
+extern int qcom_ess_edma_hw_intr_tx_intr_set_enable(
+ struct qcom_ess_edma_softc *sc, int txq, bool state);
+extern int qcom_ess_edma_hw_intr_enable(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_intr_status_clear(
+ struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_intr_rx_ack(struct qcom_ess_edma_softc *sc,
+ int rx_queue);
+extern int qcom_ess_edma_hw_intr_tx_ack(struct qcom_ess_edma_softc *sc,
+ int tx_queue);
+extern int qcom_ess_edma_hw_configure_rss_table(
+ struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_configure_load_balance_table(
+ struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_configure_tx_virtual_queue(
+ struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_configure_default_axi_transaction_size(
+ struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_stop_txrx_queues(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_stop(struct qcom_ess_edma_softc *sc);
+
+extern int qcom_ess_edma_hw_rfd_prod_index_update(
+ struct qcom_ess_edma_softc *sc, int queue, int idx);
+extern int qcom_ess_edma_hw_rfd_get_cons_index(
+ struct qcom_ess_edma_softc *sc, int queue);
+extern int qcom_ess_edma_hw_rfd_sw_cons_index_update(
+ struct qcom_ess_edma_softc *sc, int queue, int idx);
+
+extern int qcom_ess_edma_hw_setup(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_setup_tx(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_setup_rx(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_setup_txrx_desc_rings(
+ struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_tx_enable(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_rx_enable(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_tx_read_tpd_cons_idx(
+ struct qcom_ess_edma_softc *sc, int queue_id, uint16_t *idx);
+extern int qcom_ess_edma_hw_tx_update_tpd_prod_idx(
+ struct qcom_ess_edma_softc *sc, int queue_id, uint16_t idx);
+extern int qcom_ess_edma_hw_tx_update_cons_idx(
+ struct qcom_ess_edma_softc *sc, int queue_id, uint16_t idx);
+
+#endif	/* __QCOM_ESS_EDMA_HW_H__ */
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_reg.h b/sys/dev/qcom_ess_edma/qcom_ess_edma_reg.h
new file mode 100644
index 000000000000..0fa1e37f7e5b
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_reg.h
@@ -0,0 +1,429 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
+ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __QCOM_ESS_EDMA_REG_H__
+#define __QCOM_ESS_EDMA_REG_H__
+
+/*
+ * Alignment of descriptor ring memory allocation.
+ */
+#define EDMA_DESC_RING_ALIGN PAGE_SIZE
+
+/* Not sure if this is really valid or not */
+#define EDMA_DESC_MAX_BUFFER_SIZE 4096
+
+/* The hardware can accept both of these, so we don't need bounce buffers! */
+#define ESS_EDMA_TX_BUFFER_ALIGN 1
+#define ESS_EDMA_RX_BUFFER_ALIGN 1
+
+/* register definition */
+#define EDMA_REG_MAS_CTRL 0x0
+#define EDMA_REG_TIMEOUT_CTRL 0x004
+#define EDMA_REG_DBG0 0x008
+#define EDMA_REG_DBG1 0x00C
+#define EDMA_REG_SW_CTRL0 0x100
+#define EDMA_REG_SW_CTRL1 0x104
+
+/* Interrupt Status Register */
+#define EDMA_REG_RX_ISR 0x200
+#define EDMA_REG_TX_ISR 0x208
+#define EDMA_REG_MISC_ISR 0x210
+#define EDMA_REG_WOL_ISR 0x218
+
+#define	EDMA_MISC_ISR_RX_URG_Q(x)	(1U << (x))
+
+#define EDMA_MISC_ISR_AXIR_TIMEOUT 0x00000100
+#define EDMA_MISC_ISR_AXIR_ERR 0x00000200
+#define EDMA_MISC_ISR_TXF_DEAD 0x00000400
+#define EDMA_MISC_ISR_AXIW_ERR 0x00000800
+#define EDMA_MISC_ISR_AXIW_TIMEOUT 0x00001000
+
+#define EDMA_WOL_ISR 0x00000001
+
+/* Interrupt Mask Register */
+#define EDMA_REG_MISC_IMR 0x214
+#define EDMA_REG_WOL_IMR 0x218
+
+#define EDMA_RX_IMR_NORMAL_MASK 0x1
+#define EDMA_TX_IMR_NORMAL_MASK 0x1
+#define EDMA_MISC_IMR_NORMAL_MASK 0x80001FFF
+#define EDMA_WOL_IMR_NORMAL_MASK 0x1
+
+/* EDMA receive consumer index */
+#define EDMA_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) /* x is the queue id */
+/* EDMA transmit consumer index */
+#define EDMA_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */
+
+/* IRQ Moderator Initial Timer Register */
+#define EDMA_REG_IRQ_MODRT_TIMER_INIT 0x280
+#define EDMA_IRQ_MODRT_TIMER_MASK 0xFFFF
+#define EDMA_IRQ_MODRT_RX_TIMER_SHIFT 0
+#define EDMA_IRQ_MODRT_TX_TIMER_SHIFT 16
+
+/* Interrupt Control Register */
+#define EDMA_REG_INTR_CTRL 0x284
+#define EDMA_INTR_CLR_TYP_SHIFT 0
+#define EDMA_INTR_SW_IDX_W_TYP_SHIFT 1
+#define EDMA_INTR_CLEAR_TYPE_W1 0
+#define EDMA_INTR_CLEAR_TYPE_R 1
+
+/* RX Interrupt Mask Register */
+#define EDMA_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 2)) /* x = queue id */
+
+/* TX Interrupt mask register */
+#define EDMA_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */
+
+/* Load Ptr Register
+ * Software sets this bit after the initialization of the head and tail
+ */
+#define EDMA_REG_TX_SRAM_PART 0x400
+#define EDMA_LOAD_PTR_SHIFT 16
+
+/* TXQ Control Register */
+#define EDMA_REG_TXQ_CTRL 0x404
+#define EDMA_TXQ_CTRL_IP_OPTION_EN 0x10
+#define EDMA_TXQ_CTRL_TXQ_EN 0x20
+#define EDMA_TXQ_CTRL_ENH_MODE 0x40
+#define EDMA_TXQ_CTRL_LS_8023_EN 0x80
+#define EDMA_TXQ_CTRL_TPD_BURST_EN 0x100
+#define EDMA_TXQ_CTRL_LSO_BREAK_EN 0x200
+#define EDMA_TXQ_NUM_TPD_BURST_MASK 0xF
+#define EDMA_TXQ_TXF_BURST_NUM_MASK 0xFFFF
+#define EDMA_TXQ_NUM_TPD_BURST_SHIFT 0
+#define EDMA_TXQ_TXF_BURST_NUM_SHIFT 16
+
+#define EDMA_REG_TXF_WATER_MARK 0x408 /* In 8-bytes */
+#define EDMA_TXF_WATER_MARK_MASK 0x0FFF
+#define EDMA_TXF_LOW_WATER_MARK_SHIFT 0
+#define EDMA_TXF_HIGH_WATER_MARK_SHIFT 16
+#define EDMA_TXQ_CTRL_BURST_MODE_EN 0x80000000
+
+/* WRR Control Register */
+#define EDMA_REG_WRR_CTRL_Q0_Q3 0x40c
+#define EDMA_REG_WRR_CTRL_Q4_Q7 0x410
+#define EDMA_REG_WRR_CTRL_Q8_Q11 0x414
+#define EDMA_REG_WRR_CTRL_Q12_Q15 0x418
+
+/* Weighted round robin (WRR): takes a queue as input and computes
+ * the starting bit position at which to write the weight for that
+ * particular queue
+ */
+#define EDMA_WRR_SHIFT(x) (((x) * 5) % 20)
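+
+/*
+ * e.g. (editor's illustration) EDMA_WRR_SHIFT(7) = (7 * 5) % 20 = 15,
+ * while EDMA_WRR_SHIFT(4) = 20 % 20 = 0; queue 4's weight starts at
+ * bit 0 of the next WRR control register (EDMA_REG_WRR_CTRL_Q4_Q7).
+ */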
+
+/* Tx Descriptor Control Register */
+#define EDMA_REG_TPD_RING_SIZE 0x41C
+#define EDMA_TPD_RING_SIZE_SHIFT 0
+#define EDMA_TPD_RING_SIZE_MASK 0xFFFF
+
+/* Transmit descriptor base address */
+#define EDMA_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */
+
+/* TPD Index Register */
+#define EDMA_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */
+
+#define EDMA_TPD_PROD_IDX_BITS 0x0000FFFF
+#define EDMA_TPD_CONS_IDX_BITS 0xFFFF0000
+#define EDMA_TPD_PROD_IDX_MASK 0xFFFF
+#define EDMA_TPD_CONS_IDX_MASK 0xFFFF
+#define EDMA_TPD_PROD_IDX_SHIFT 0
+#define EDMA_TPD_CONS_IDX_SHIFT 16
+
+/* TX Virtual Queue Mapping Control Register */
+#define EDMA_REG_VQ_CTRL0 0x4A0
+#define EDMA_REG_VQ_CTRL1 0x4A4
+
+/* Virtual QID shift: takes a queue as input and computes the virtual
+ * QID position in the virtual QID control register
+ */
+#define EDMA_VQ_ID_SHIFT(i) (((i) * 3) % 24)
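+
+/*
+ * e.g. (editor's illustration) EDMA_VQ_ID_SHIFT(3) = 9 within
+ * EDMA_REG_VQ_CTRL0, while EDMA_VQ_ID_SHIFT(8) = 24 % 24 = 0, i.e.
+ * queue 8 starts at bit 0 of EDMA_REG_VQ_CTRL1.
+ */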
+
+/* Virtual Queue Default Value */
+#define EDMA_VQ_REG_VALUE 0x240240
+
+/* Tx side Port Interface Control Register */
+#define EDMA_REG_PORT_CTRL 0x4A8
+#define EDMA_PAD_EN_SHIFT 15
+
+/* Tx side VLAN Configuration Register */
+#define EDMA_REG_VLAN_CFG 0x4AC
+
+#define EDMA_TX_CVLAN 16
+#define EDMA_TX_INS_CVLAN 17
+#define EDMA_TX_CVLAN_TAG_SHIFT 0
+
+#define EDMA_TX_SVLAN 14
+#define EDMA_TX_INS_SVLAN 15
+#define EDMA_TX_SVLAN_TAG_SHIFT 16
+
+/* Tx Queue Packet Statistic Register */
+#define EDMA_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */
+
+#define EDMA_TX_STAT_PKT_MASK 0xFFFFFF
+
+/* Tx Queue Byte Statistic Register */
+#define EDMA_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */
+
+/* Load Balance Based Ring Offset Register */
+#define EDMA_REG_LB_RING 0x800
+#define EDMA_LB_RING_ENTRY_MASK 0xff
+#define EDMA_LB_RING_ID_MASK 0x7
+#define EDMA_LB_RING_PROFILE_ID_MASK 0x3
+#define EDMA_LB_RING_ENTRY_BIT_OFFSET 8
+#define EDMA_LB_RING_ID_OFFSET 0
+#define EDMA_LB_RING_PROFILE_ID_OFFSET 3
+#define EDMA_LB_REG_VALUE 0x6040200
+
+/* Load Balance Priority Mapping Register */
+#define EDMA_REG_LB_PRI_START 0x804
+#define EDMA_REG_LB_PRI_END 0x810
+#define EDMA_LB_PRI_REG_INC 4
+#define EDMA_LB_PRI_ENTRY_BIT_OFFSET 4
+#define EDMA_LB_PRI_ENTRY_MASK 0xf
+
+/* RSS Priority Mapping Register */
+#define EDMA_REG_RSS_PRI 0x820
+#define EDMA_RSS_PRI_ENTRY_MASK 0xf
+#define EDMA_RSS_RING_ID_MASK 0x7
+#define EDMA_RSS_PRI_ENTRY_BIT_OFFSET 4
+
+/* RSS Indirection Register */
+#define EDMA_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = No. of indirection table */
+#define EDMA_NUM_IDT 16
+#define EDMA_RSS_IDT_VALUE 0x64206420
+
+/* Default RSS Ring Register */
+#define EDMA_REG_DEF_RSS 0x890
+#define EDMA_DEF_RSS_MASK 0x7
+
+/* RSS Hash Function Type Register */
+#define EDMA_REG_RSS_TYPE 0x894
+#define EDMA_RSS_TYPE_NONE 0x01
+#define EDMA_RSS_TYPE_IPV4TCP 0x02
+#define EDMA_RSS_TYPE_IPV6_TCP 0x04
+#define EDMA_RSS_TYPE_IPV4_UDP 0x08
+#define EDMA_RSS_TYPE_IPV6UDP 0x10
+#define EDMA_RSS_TYPE_IPV4 0x20
+#define EDMA_RSS_TYPE_IPV6 0x40
+#define EDMA_RSS_HASH_MODE_MASK 0x7f
+
+#define EDMA_REG_RSS_HASH_VALUE 0x8C0
+
+#define EDMA_REG_RSS_TYPE_RESULT 0x8C4
+
+/* rrd5 */
+#define EDMA_HASH_TYPE_SHIFT 12
+#define EDMA_HASH_TYPE_MASK 0xf
+#define EDMA_RRD_RSS_TYPE_NONE 0
+#define EDMA_RRD_RSS_TYPE_IPV4TCP 1
+#define EDMA_RRD_RSS_TYPE_IPV6_TCP 2
+#define EDMA_RRD_RSS_TYPE_IPV4_UDP 3
+#define EDMA_RRD_RSS_TYPE_IPV6UDP 4
+#define EDMA_RRD_RSS_TYPE_IPV4 5
+#define EDMA_RRD_RSS_TYPE_IPV6 6
+
+#define EDMA_RFS_FLOW_ENTRIES 1024
+#define EDMA_RFS_FLOW_ENTRIES_MASK (EDMA_RFS_FLOW_ENTRIES - 1)
+#define EDMA_RFS_EXPIRE_COUNT_PER_CALL 128
+
+/* RFD Base Address Register */
+#define EDMA_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) /* x = queue id */
+
+/* RFD Index Register */
+#define EDMA_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2))
+
+#define EDMA_RFD_PROD_IDX_BITS 0x00000FFF
+#define EDMA_RFD_CONS_IDX_BITS 0x0FFF0000
+#define EDMA_RFD_PROD_IDX_MASK 0xFFF
+#define EDMA_RFD_CONS_IDX_MASK 0xFFF
+#define EDMA_RFD_PROD_IDX_SHIFT 0
+#define EDMA_RFD_CONS_IDX_SHIFT 16
+
+/* Rx Descriptor Control Register */
+#define EDMA_REG_RX_DESC0 0xA10
+#define EDMA_RFD_RING_SIZE_MASK 0xFFF
+#define EDMA_RX_BUF_SIZE_MASK 0xFFFF
+#define EDMA_RFD_RING_SIZE_SHIFT 0
+#define EDMA_RX_BUF_SIZE_SHIFT 16
+
+#define EDMA_REG_RX_DESC1 0xA14
+#define EDMA_RXQ_RFD_BURST_NUM_MASK 0x3F
+#define EDMA_RXQ_RFD_PF_THRESH_MASK 0x1F
+#define EDMA_RXQ_RFD_LOW_THRESH_MASK 0xFFF
+#define EDMA_RXQ_RFD_BURST_NUM_SHIFT 0
+#define EDMA_RXQ_RFD_PF_THRESH_SHIFT 8
+#define EDMA_RXQ_RFD_LOW_THRESH_SHIFT 16
+
+/* RXQ Control Register */
+#define EDMA_REG_RXQ_CTRL 0xA18
+#define EDMA_FIFO_THRESH_TYPE_SHIF 0
+#define EDMA_FIFO_THRESH_128_BYTE 0x0
+#define EDMA_FIFO_THRESH_64_BYTE 0x1
+#define EDMA_RXQ_CTRL_RMV_VLAN 0x00000002
+#define EDMA_RXQ_CTRL_EN 0x0000FF00
+
+/* AXI Burst Size Config */
+#define EDMA_REG_AXIW_CTRL_MAXWRSIZE 0xA1C
+#define EDMA_AXIW_MAXWRSIZE_VALUE 0x0
+
+/* Rx Statistics Register */
+#define EDMA_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */
+#define EDMA_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */
+
+/* WoL Pattern Length Register */
+#define EDMA_REG_WOL_PATTERN_LEN0 0xC00
+#define EDMA_WOL_PT_LEN_MASK 0xFF
+#define EDMA_WOL_PT0_LEN_SHIFT 0
+#define EDMA_WOL_PT1_LEN_SHIFT 8
+#define EDMA_WOL_PT2_LEN_SHIFT 16
+#define EDMA_WOL_PT3_LEN_SHIFT 24
+
+#define EDMA_REG_WOL_PATTERN_LEN1 0xC04
+#define EDMA_WOL_PT4_LEN_SHIFT 0
+#define EDMA_WOL_PT5_LEN_SHIFT 8
+#define EDMA_WOL_PT6_LEN_SHIFT 16
+
+/* WoL Control Register */
+#define EDMA_REG_WOL_CTRL 0xC08
+#define EDMA_WOL_WK_EN 0x00000001
+#define EDMA_WOL_MG_EN 0x00000002
+#define EDMA_WOL_PT0_EN 0x00000004
+#define EDMA_WOL_PT1_EN 0x00000008
+#define EDMA_WOL_PT2_EN 0x00000010
+#define EDMA_WOL_PT3_EN 0x00000020
+#define EDMA_WOL_PT4_EN 0x00000040
+#define EDMA_WOL_PT5_EN 0x00000080
+#define EDMA_WOL_PT6_EN 0x00000100
+
+/* MAC Control Register */
+#define EDMA_REG_MAC_CTRL0 0xC20
+#define EDMA_REG_MAC_CTRL1 0xC24
+
+/* WoL Pattern Register */
+#define EDMA_REG_WOL_PATTERN_START 0x5000
+#define EDMA_PATTERN_PART_REG_OFFSET 0x40
+
+/* TX descriptor fields */
+#define EDMA_TPD_HDR_SHIFT 0
+#define EDMA_TPD_PPPOE_EN 0x00000100
+#define EDMA_TPD_IP_CSUM_EN 0x00000200
+#define	EDMA_TPD_TCP_CSUM_EN	0x00000400
+#define EDMA_TPD_UDP_CSUM_EN 0x00000800
+#define EDMA_TPD_CUSTOM_CSUM_EN 0x00000C00
+#define EDMA_TPD_LSO_EN 0x00001000
+#define EDMA_TPD_LSO_V2_EN 0x00002000
+#define EDMA_TPD_IPV4_EN 0x00010000
+#define EDMA_TPD_MSS_MASK 0x1FFF
+#define EDMA_TPD_MSS_SHIFT 18
+#define EDMA_TPD_CUSTOM_CSUM_SHIFT 18
+#define EDMA_TPD_EOP 0x80000000
+
+/* word3 */
+#define EDMA_TPD_PORT_BITMAP_SHIFT 18
+#define EDMA_TPD_FROM_CPU_SHIFT 25
+#define EDMA_FROM_CPU_MASK 0x80
+
+/* TX descriptor - little endian */
+struct qcom_ess_edma_tx_desc {
+ uint16_t len; /* full packet including CRC */
+ uint16_t svlan_tag; /* vlan tag */
+ uint32_t word1; /* byte 4-7 */
+ uint32_t addr; /* address of buffer */
+ uint32_t word3; /* byte 12 */
+} __packed;
+
+/* RRD descriptor fields */
+#define EDMA_RRD_NUM_RFD_MASK 0x000F
+#define EDMA_RRD_SVLAN 0x8000
+#define EDMA_RRD_FLOW_COOKIE_MASK 0x07FF
+
+#define EDMA_RRD_PKT_SIZE_MASK 0x3FFF
+#define EDMA_RRD_CSUM_FAIL_MASK 0xC000
+#define EDMA_RRD_CVLAN 0x0001
+#define EDMA_RRD_DESC_VALID 0x8000
+
+#define EDMA_RRD_PRIORITY_SHIFT 4
+#define EDMA_RRD_PRIORITY_MASK 0x7
+#define EDMA_RRD_PORT_TYPE_SHIFT 7
+#define EDMA_RRD_PORT_TYPE_MASK 0x1F
+
+#define EDMA_PORT_ID_SHIFT 12
+#define EDMA_PORT_ID_MASK 0x7
+
+/* RX RRD descriptor - 16 bytes */
+struct qcom_edma_rx_return_desc {
+ uint16_t rrd0;
+ uint16_t rrd1;
+ uint16_t rrd2;
+ uint16_t rrd3;
+ uint16_t rrd4;
+ uint16_t rrd5;
+ uint16_t rrd6;
+ uint16_t rrd7;
+} __packed;
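+
+/*
+ * Field placement sketch (editor's summary, from the masks above):
+ *  rrd1: num_rfds[3:0], priority[6:4], port_type[11:7],
+ *        port_id[14:12], svlan flag[15]
+ *  rrd2: RSS hash value
+ *  rrd3: flow cookie[10:0]
+ *  rrd4: VLAN tag
+ *  rrd5: hash type[15:12]
+ *  rrd6: packet size[13:0], csum fail[15:14]
+ *  rrd7: cvlan flag[0], desc valid[15]
+ */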
+
+/* RX RFD descriptor - little endian */
+struct qcom_ess_edma_rx_free_desc {
+ uint32_t addr; /* buffer addr */
+} __packed;
+
+#define ESS_RGMII_CTRL 0x0004
+
+/* Configurations */
+#define EDMA_INTR_CLEAR_TYPE 0
+#define EDMA_INTR_SW_IDX_W_TYPE 0
+#define EDMA_FIFO_THRESH_TYPE 0
+#define EDMA_RSS_TYPE 0
+#define EDMA_RX_IMT 0x0020
+#define EDMA_TX_IMT 0x0050
+#define EDMA_TPD_BURST 5
+#define EDMA_TXF_BURST 0x100
+#define EDMA_RFD_BURST 8
+#define EDMA_RFD_THR 16
+#define EDMA_RFD_LTHR 0
+
+#endif /* __QCOM_ESS_EDMA_REG_H__ */
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c b/sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
new file mode 100644
index 000000000000..d39c0117133a
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
@@ -0,0 +1,514 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/mbuf.h>
+#include <sys/endian.h>
+#include <sys/smp.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+#include <net/ethernet.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/qcom_ess_edma/qcom_ess_edma_var.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_reg.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_hw.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_desc.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_rx.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_debug.h>
+
+/*
+ * Map the given RX queue to a given CPU.
+ */
+int
+qcom_ess_edma_rx_queue_to_cpu(struct qcom_ess_edma_softc *sc, int queue)
+{
+ return (queue % mp_ncpus);
+}
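+
+/*
+ * e.g. (editor's illustration) with mp_ncpus = 4, RX queues 0..7 map
+ * to CPUs 0,1,2,3,0,1,2,3.
+ */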
+
+int
+qcom_ess_edma_rx_ring_setup(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring)
+{
+ struct qcom_ess_edma_sw_desc_rx *rxd;
+ int i, ret;
+
+ for (i = 0; i < EDMA_RX_RING_SIZE; i++) {
+ rxd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, i);
+ if (rxd == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR; couldn't get sw desc (idx %d)\n", i);
+ return (EINVAL);
+ }
+ rxd->m = NULL;
+ ret = bus_dmamap_create(ring->buffer_dma_tag,
+ BUS_DMA_NOWAIT,
+ &rxd->m_dmamap);
+		if (ret != 0) {
+			device_printf(sc->sc_dev,
+			    "%s: failed to create dmamap (%d)\n",
+			    __func__, ret);
+			return (ret);
+		}
+ }
+
+ return (0);
+}
+
+int
+qcom_ess_edma_rx_ring_clean(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring)
+{
+ device_printf(sc->sc_dev, "%s: TODO\n", __func__);
+ return (0);
+}
+
+/*
+ * Allocate a receive buffer for the given ring/index, setup DMA.
+ *
+ * The caller is responsible for the ring prewrite flush, which
+ * pushes the descriptor updates out to memory before the hardware
+ * producer index is updated; it's not done here so we don't pay
+ * for it on /every/ ring slot update.
+ *
+ * Returns an error if the slot is full or unable to fill it;
+ * the caller should then figure out how to cope.
+ */
+int
+qcom_ess_edma_rx_buf_alloc(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring, int idx)
+{
+ struct mbuf *m;
+ struct qcom_ess_edma_sw_desc_rx *rxd;
+ struct qcom_ess_edma_rx_free_desc *ds;
+ bus_dma_segment_t segs[1];
+ int error;
+ int nsegs;
+
+ /* Get the software/hardware descriptors we're going to update */
+ rxd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, idx);
+ if (rxd == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR; couldn't get sw desc (idx %d)\n", idx);
+ return (EINVAL);
+ }
+ ds = qcom_ess_edma_desc_ring_get_hw_desc(sc, ring, idx);
+ if (ds == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR; couldn't get hw desc (idx %d)\n", idx);
+ return (EINVAL);
+ }
+
+ /* If this ring has an mbuf already then return error */
+ if (rxd->m != NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR: sw desc idx %d already has an mbuf\n",
+ idx);
+ return (EINVAL); /* XXX */
+ }
+
+ /* Allocate mbuf */
+ m = m_get2(sc->sc_config.rx_buf_size, M_NOWAIT, MT_DATA, M_PKTHDR);
+ if (m == NULL) {
+ /* XXX keep statistics */
+ device_printf(sc->sc_dev, "ERROR: failed to allocate mbuf\n");
+ return (ENOMEM);
+ }
+
+ /* Load dma map, get physical memory address of mbuf */
+ nsegs = 1;
+ m->m_pkthdr.len = m->m_len = sc->sc_config.rx_buf_size;
+
+ /* ETHER_ALIGN hack */
+ if (sc->sc_config.rx_buf_ether_align)
+ m_adj(m, ETHER_ALIGN);
+ error = bus_dmamap_load_mbuf_sg(ring->buffer_dma_tag, rxd->m_dmamap,
+ m, segs, &nsegs, 0);
+ if (error != 0 || nsegs != 1) {
+ device_printf(sc->sc_dev,
+ "ERROR: couldn't load mbuf dmamap (%d) (nsegs=%d)\n", error, nsegs);
+ m_freem(m);
+ return (error);
+ }
+
+ /* Populate sw and hw desc */
+ rxd->m = m;
+ rxd->m_physaddr = segs[0].ds_addr;
+
+ ds->addr = htole32(segs[0].ds_addr);
+
+ ring->stats.num_added++;
+
+ return (0);
+}
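+
+/*
+ * Typical usage (editor's sketch; qcom_ess_edma_rx_ring_fill() below
+ * is the canonical caller):
+ *
+ *	error = qcom_ess_edma_rx_buf_alloc(sc, ring, idx);
+ *	...
+ *	qcom_ess_edma_desc_ring_flush_preupdate(sc, ring);
+ *	(void) qcom_ess_edma_hw_rfd_prod_index_update(sc, queue, prod_index);
+ */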
+
+/*
+ * Remove a receive buffer from the given ring/index.
+ *
+ * This clears the software/hardware index and unmaps the mbuf;
+ * the returned mbuf will be owned by the caller.
+ */
+struct mbuf *
+qcom_ess_edma_rx_buf_clean(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring, int idx)
+{
+ struct mbuf *m;
+ struct qcom_ess_edma_sw_desc_rx *rxd;
+ struct qcom_ess_edma_rx_free_desc *ds;
+
+ /* Get the software/hardware descriptors we're going to update */
+ rxd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, idx);
+ if (rxd == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR; couldn't get sw desc (idx %d)\n", idx);
+ return (NULL);
+ }
+ ds = qcom_ess_edma_desc_ring_get_hw_desc(sc, ring, idx);
+ if (ds == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR; couldn't get hw desc (idx %d)\n", idx);
+ return (NULL);
+ }
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING,
+ "%s: idx=%u, rxd=%p, ds=0x%p, maddr=0x%08x/0x%08lx\n",
+ __func__, idx, rxd, ds, ds->addr, rxd->m_physaddr);
+
+ /* No mbuf? return null; it's fine */
+ if (rxd->m == NULL) {
+ return (NULL);
+ }
+
+ /* Flush mbuf */
+ bus_dmamap_sync(ring->buffer_dma_tag, rxd->m_dmamap,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ /* Unload */
+ bus_dmamap_unload(ring->buffer_dma_tag, rxd->m_dmamap);
+
+ /* Remove sw/hw desc entries */
+ m = rxd->m;
+ rxd->m = NULL;
+
+#ifdef ESS_EDMA_DEBUG_CLEAR_DESC
+ /*
+ * Note: removing hw entries is purely for correctness; it may be
+ * VERY SLOW!
+ */
+ ds->addr = 0;
+#endif
+
+ ring->stats.num_cleaned++;
+
+ return (m);
+}
+
+/*
+ * Fill the current ring, up to 'num' entries (or the ring is full.)
+ * It will also update the producer index for the given queue.
+ *
+ * Returns 0 if OK, error if there's a problem.
+ */
+int
+qcom_ess_edma_rx_ring_fill(struct qcom_ess_edma_softc *sc,
+ int queue, int num)
+{
+ struct qcom_ess_edma_desc_ring *ring;
+ int num_fill;
+ int idx;
+ int error;
+ int prod_index;
+ int n = 0;
+
+ ring = &sc->sc_rx_ring[queue];
+
+ EDMA_RING_LOCK_ASSERT(ring);
+
+ num_fill = num;
+	if (num_fill >= ring->ring_count)
+ num_fill = ring->ring_count - 1;
+ idx = ring->next_to_fill;
+
+ while (num_fill != 0) {
+ error = qcom_ess_edma_rx_buf_alloc(sc, ring, idx);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "ERROR: queue %d: failed to alloc rx buf (%d)\n",
+ queue, error);
+ break;
+ }
+ num_fill--;
+
+ /* Update ring index, wrap at ring_count */
+ idx++;
+ if (idx >= ring->ring_count)
+ idx = 0;
+ n++;
+ }
+
+ ring->next_to_fill = idx;
+
+ /* Flush ring updates before HW index is updated */
+ qcom_ess_edma_desc_ring_flush_preupdate(sc, ring);
+
+	/* Producer index is the fill index minus 1 (ie the slot BEFORE) */
+ if (idx == 0)
+ prod_index = ring->ring_count - 1;
+ else
+ prod_index = idx - 1;
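+	/*
+	 * e.g. (editor's note) with a 256-entry ring and next_to_fill
+	 * wrapped back to 0, the hardware producer index becomes 255.
+	 */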
+ (void) qcom_ess_edma_hw_rfd_prod_index_update(sc, queue, prod_index);
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING,
+ "%s: queue %d: added %d bufs, prod_idx=%u\n",
+ __func__, queue, n, prod_index);
+
+ return (0);
+}
+
+/*
+ * Run through the RX ring, complete frames.
+ *
+ * For now they're simply freed and the ring is re-filled.
+ * Once that logic is working soundly we'll want to populate an
+ * mbuf list for the caller with completed mbufs so they can be
+ * dispatched up to the network stack.
+ */
+int
+qcom_ess_edma_rx_ring_complete(struct qcom_ess_edma_softc *sc, int queue,
+ struct mbufq *mq)
+{
+ struct qcom_ess_edma_desc_ring *ring;
+ struct qcom_ess_edma_sw_desc_rx *rxd;
+ int n, cleaned_count, len;
+ uint16_t sw_next_to_clean, hw_next_to_clean;
+ struct mbuf *m;
+ struct qcom_edma_rx_return_desc *rrd;
+ int num_rfds, port_id, priority, hash_type, hash_val, flow_cookie, vlan;
+	bool rx_checksum = false;
+ int port_vlan = -1;
+
+ ring = &sc->sc_rx_ring[queue];
+
+ EDMA_RING_LOCK_ASSERT(ring);
+
+ qcom_ess_edma_desc_ring_flush_postupdate(sc, ring);
+
+ sw_next_to_clean = ring->next_to_clean;
+ hw_next_to_clean = 0;
+ cleaned_count = 0;
+
+ for (n = 0; n < EDMA_RX_RING_SIZE - 1; n++) {
+ rxd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring,
+ sw_next_to_clean);
+ if (rxd == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR; couldn't get sw desc (idx %d)\n",
+ sw_next_to_clean);
+ return (EINVAL);
+ }
+
+ hw_next_to_clean = qcom_ess_edma_hw_rfd_get_cons_index(sc,
+ queue);
+ if (hw_next_to_clean == sw_next_to_clean)
+ break;
+
+ /* Unmap the mbuf at this index */
+ m = qcom_ess_edma_rx_buf_clean(sc, ring, sw_next_to_clean);
+ sw_next_to_clean = (sw_next_to_clean + 1) % ring->ring_count;
+ cleaned_count++;
+
+ /* Get the RRD header */
+ rrd = mtod(m, struct qcom_edma_rx_return_desc *);
+ if (rrd->rrd7 & EDMA_RRD_DESC_VALID) {
+ len = rrd->rrd6 & EDMA_RRD_PKT_SIZE_MASK;
+			num_rfds = rrd->rrd1 & EDMA_RRD_NUM_RFD_MASK;
+ port_id = (rrd->rrd1 >> EDMA_PORT_ID_SHIFT)
+ & EDMA_PORT_ID_MASK;
+ priority = (rrd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
+ & EDMA_RRD_PRIORITY_MASK;
+ hash_type = (rrd->rrd5 >> EDMA_HASH_TYPE_SHIFT)
+ & EDMA_HASH_TYPE_MASK;
+ hash_val = rrd->rrd2;
+ flow_cookie = rrd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK;
+ vlan = rrd->rrd4;
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_FRAME,
+ "%s: len=%d, num_rfds=%d, port_id=%d,"
+ " priority=%d, hash_type=%d, hash_val=%d,"
+ " flow_cookie=%d, vlan=%d\n",
+ __func__,
+ len,
+ num_rfds,
+ port_id,
+ priority,
+ hash_type,
+ hash_val,
+ flow_cookie,
+ vlan);
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_FRAME,
+ "%s: flags: L4 checksum"
+ " fail=%d, 802.1q vlan=%d, 802.1ad vlan=%d\n",
+ __func__,
+ !! (rrd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK),
+ !! (rrd->rrd7 & EDMA_RRD_CVLAN),
+ !! (rrd->rrd1 & EDMA_RRD_SVLAN));
+		} else {
+			/* Invalid RRD: use benign defaults */
+			len = 0;
+			port_id = 0;
+			priority = 0;
+			hash_type = EDMA_RRD_RSS_TYPE_NONE;
+			hash_val = 0;
+			flow_cookie = 0;
+			vlan = 0;
+		}
+
+ /* Payload starts after the RRD header */
+ m_adj(m, sizeof(struct qcom_edma_rx_return_desc));
+
+ /* Set mbuf length now */
+ m->m_len = m->m_pkthdr.len = len;
+
+ /*
+ * Set rcvif to the relevant GMAC ifp; GMAC receive will
+ * check the field to receive it to the right place, or
+ * if it's NULL it'll drop it for us.
+ */
+ m->m_pkthdr.rcvif = NULL;
+ if (sc->sc_gmac_port_map[port_id] != -1) {
+ struct qcom_ess_edma_gmac *gmac;
+ gmac = &sc->sc_gmac[sc->sc_gmac_port_map[port_id]];
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_FRAME,
+ "%s: port_id=%d gmac=%d\n", __func__,
+ port_id, gmac->id);
+ if (gmac->enabled == true) {
+ m->m_pkthdr.rcvif = gmac->ifp;
+ if ((if_getcapenable(gmac->ifp) & IFCAP_RXCSUM) != 0)
+ rx_checksum = true;
+ }
+ port_vlan = gmac->vlan_id;
+ }
+
+ /* XXX TODO: handle multi-frame packets (ie, jumbos!) */
+ /* XXX TODO: handle 802.1ad VLAN offload field */
+ /* XXX TODO: flow offload */
+
+ /*
+ * For now we don't support disabling VLAN offload.
+ * Instead, tags are stripped by the hardware.
+ * Handle the outer VLAN tag; worry about 802.1ad
+ * later on (and hopefully by something other than
+ * adding another mbuf.)
+ */
+ if ((rrd->rrd7 & EDMA_RRD_CVLAN) != 0) {
+ /*
+ * There's an outer VLAN tag that has been
+ * decaped by the hardware. Compare it to the
+ * current port vlan, and if they don't match,
+ * add an offloaded VLAN tag to the mbuf.
+ *
+ * And yes, care about the priority field too.
+ */
+ if ((port_vlan == -1) || (port_vlan != vlan)) {
+				m->m_pkthdr.ether_vtag = (vlan & 0xfff)
+				    | ((priority & 0x7) << 13);
+ m->m_flags |= M_VLANTAG;
+ }
+ }
+
+ /*
+ * Store the hash info in the mbuf if it's there.
+ *
+ * XXX TODO: decode the RSS field and translate it to
+ * the mbuf hash entry. For now, just treat as OPAQUE.
+ */
+ if (hash_type != EDMA_RRD_RSS_TYPE_NONE) {
+ m->m_pkthdr.flowid = hash_val;
+ M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
+ }
+
+ /*
+ * Check the RX checksum flag if the destination ifp
+ * has the RXCSUM flag set.
+ */
+ if (rx_checksum) {
+ if (rrd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK) {
+ /* Fail */
+ ring->stats.num_rx_csum_fail++;
+ } else {
+ m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED
+ | CSUM_IP_VALID
+ | CSUM_DATA_VALID
+ | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xffff;
+ ring->stats.num_rx_csum_ok++;
+ }
+ }
+
+ /*
+ * Finally enqueue into the incoming receive queue
+ * to push up into the networking stack.
+ */
+ if (mbufq_enqueue(mq, m) != 0) {
+ ring->stats.num_enqueue_full++;
+ m_freem(m);
+ }
+ }
+ ring->next_to_clean = sw_next_to_clean;
+
+ /* Refill ring if needed */
+ if (cleaned_count > 0) {
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING,
+ "%s: ring=%d, cleaned=%d\n",
+ __func__, queue, cleaned_count);
+ (void) qcom_ess_edma_rx_ring_fill(sc, queue, cleaned_count);
+ (void) qcom_ess_edma_hw_rfd_sw_cons_index_update(sc, queue,
+ ring->next_to_clean);
+ }
+
+ return (0);
+}
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_rx.h b/sys/dev/qcom_ess_edma/qcom_ess_edma_rx.h
new file mode 100644
index 000000000000..e23d7f326b1d
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_rx.h
@@ -0,0 +1,51 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __QCOM_ESS_EDMA_RX_H__
+#define __QCOM_ESS_EDMA_RX_H__
+
+extern int qcom_ess_edma_rx_queue_to_cpu(struct qcom_ess_edma_softc *sc,
+ int queue);
+extern int qcom_ess_edma_rx_ring_setup(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring);
+extern int qcom_ess_edma_rx_ring_clean(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring);
+extern int qcom_ess_edma_rx_buf_alloc(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring, int idx);
+extern struct mbuf * qcom_ess_edma_rx_buf_clean(
+ struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring, int idx);
+extern int qcom_ess_edma_rx_ring_fill(struct qcom_ess_edma_softc *sc,
+ int queue, int num);
+extern int qcom_ess_edma_rx_ring_complete(struct qcom_ess_edma_softc *sc,
+ int queue, struct mbufq *mq);
+
+#endif /* __QCOM_ESS_EDMA_RX_H__ */
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c b/sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c
new file mode 100644
index 000000000000..a86ac1dfdc31
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c
@@ -0,0 +1,454 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/mbuf.h>
+#include <sys/endian.h>
+#include <sys/smp.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+#include <net/ethernet.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/qcom_ess_edma/qcom_ess_edma_var.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_reg.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_hw.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_desc.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_tx.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_debug.h>
+
+/*
+ * Map the given TX queue to a given CPU.
+ *
+ * The current mapping in the if_transmit() path
+ * assigns each consecutive group of mp_ncpus TXQs
+ * to one CPU. So for a 4 CPU system the first four
+ * will be CPU 0, the second four will be CPU 1, etc.
+ */
+int
+qcom_ess_edma_tx_queue_to_cpu(struct qcom_ess_edma_softc *sc, int queue)
+{
+
+ return (queue / mp_ncpus);
+}
+
+int
+qcom_ess_edma_tx_ring_setup(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring)
+{
+ struct qcom_ess_edma_sw_desc_tx *txd;
+ int i, ret;
+
+ for (i = 0; i < EDMA_TX_RING_SIZE; i++) {
+ txd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, i);
+ if (txd == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR; couldn't get sw desc (idx %d)\n", i);
+ return (EINVAL);
+ }
+ txd->m = NULL;
+ ret = bus_dmamap_create(ring->buffer_dma_tag,
+ BUS_DMA_NOWAIT,
+ &txd->m_dmamap);
+ if (ret != 0) {
+ device_printf(sc->sc_dev,
+ "%s: failed to create dmamap (%d)\n",
+ __func__, ret);
+ return (ret);
+ }
+ }
+
+ return (0);
+}
+
+int
+qcom_ess_edma_tx_ring_clean(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring)
+{
+ device_printf(sc->sc_dev, "%s: TODO\n", __func__);
+ return (0);
+}
+
+/*
+ * Clear the sw/hw descriptor entries, unmap/free the mbuf chain that's
+ * part of this.
+ */
+static int
+qcom_ess_edma_tx_unmap_and_clean(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring, uint16_t idx)
+{
+ struct qcom_ess_edma_sw_desc_tx *txd;
+ struct qcom_ess_edma_tx_desc *ds;
+
+ /* Get the software/hardware descriptors we're going to update */
+ txd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, idx);
+ if (txd == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR; couldn't get sw desc (idx %d)\n", idx);
+ return (EINVAL);
+ }
+
+ ds = qcom_ess_edma_desc_ring_get_hw_desc(sc, ring, idx);
+ if (ds == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR; couldn't get hw desc (idx %d)\n", idx);
+ return (EINVAL);
+ }
+
+ if (txd->m != NULL) {
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_RING,
+ "%s: idx %d, unmap/free\n", __func__, idx);
+ bus_dmamap_unload(ring->buffer_dma_tag, txd->m_dmamap);
+ m_freem(txd->m);
+ txd->m = NULL;
+ txd->is_first = txd->is_last = 0;
+ }
+
+#ifdef ESS_EDMA_DEBUG_CLEAR_DESC
+ /* This is purely for debugging/testing right now; it's slow! */
+ memset(ds, 0, sizeof(struct qcom_ess_edma_tx_desc));
+#endif
+
+ return (0);
+}
+
+/*
+ * Run through the TX ring, complete/free frames.
+ */
+int
+qcom_ess_edma_tx_ring_complete(struct qcom_ess_edma_softc *sc, int queue)
+{
+ struct qcom_ess_edma_desc_ring *ring;
+ uint32_t n;
+ uint16_t sw_next_to_clean, hw_next_to_clean;
+
+ ring = &sc->sc_tx_ring[queue];
+
+ EDMA_RING_LOCK_ASSERT(ring);
+
+ qcom_ess_edma_desc_ring_flush_postupdate(sc, ring);
+
+ sw_next_to_clean = ring->next_to_clean;
+ hw_next_to_clean = 0;
+ n = 0;
+
+ /* Get the current hardware completion index */
+ (void) qcom_ess_edma_hw_tx_read_tpd_cons_idx(sc, queue,
+ &hw_next_to_clean);
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_RING,
+ "%s: called; sw=%d, hw=%d\n", __func__,
+ sw_next_to_clean, hw_next_to_clean);
+
+ /* clean the buffer chain and descriptor(s) here */
+ while (sw_next_to_clean != hw_next_to_clean) {
+ qcom_ess_edma_tx_unmap_and_clean(sc, ring, sw_next_to_clean);
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_RING,
+ "%s cleaning %d\n", __func__, sw_next_to_clean);
+ sw_next_to_clean++;
+ if (sw_next_to_clean >= ring->ring_count)
+ sw_next_to_clean = 0;
+ n++;
+ }
+
+ ring->stats.num_cleaned += n;
+ ring->stats.num_tx_complete++;
+
+ ring->next_to_clean = sw_next_to_clean;
+
+ /* update the TPD consumer index register */
+ qcom_ess_edma_hw_tx_update_cons_idx(sc, queue, sw_next_to_clean);
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_RING_COMPLETE,
+ "%s: cleaned %d descriptors\n", __func__, n);
+
+ return (0);
+}
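+
+/*
+ * A minimal caller sketch (an assumption, not defined in this file):
+ * completion is expected to run from the per-queue completion task
+ * with the ring lock held, matching the lock assertion above:
+ *
+ *     struct qcom_ess_edma_desc_ring *ring = &sc->sc_tx_ring[queue];
+ *
+ *     EDMA_RING_LOCK(ring);
+ *     (void) qcom_ess_edma_tx_ring_complete(sc, queue);
+ *     EDMA_RING_UNLOCK(ring);
+ */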
+
+/*
+ * Attempt to enqueue a single frame.
+ *
+ * This is the MVP required to send a single ethernet mbuf / mbuf chain.
+ * VLAN tags are added/required as the default switch configuration
+ * from device-tree uses both the port bitmap and VLAN IDs for
+ * controlling LAN/WAN/etc interface traffic.
+ *
+ * Note, this does NOT update the transmit pointer to the hardware;
+ * that must be done after calling this function one or more times.
+ *
+ * The mbuf is either consumed into the ring or it is returned
+ * unsent. If we've modified it in any way then the caller should
+ * use what's returned back in m0 (e.g. to push it back).
+ */
+int
+qcom_ess_edma_tx_ring_frame(struct qcom_ess_edma_softc *sc, int queue,
+ struct mbuf **m0, uint16_t port_bitmap, int default_vlan)
+{
+ struct qcom_ess_edma_sw_desc_tx *txd_first;
+ struct qcom_ess_edma_desc_ring *ring;
+ struct ether_vlan_header *eh;
+ bus_dma_segment_t txsegs[QCOM_ESS_EDMA_MAX_TXFRAGS];
+ uint32_t word1, word3;
+ uint32_t eop;
+ int vlan_id;
+ int num_left, ret, nsegs, i;
+ uint16_t next_to_fill;
+ uint16_t svlan_tag;
+ struct mbuf *m;
+
+ ring = &sc->sc_tx_ring[queue];
+
+ EDMA_RING_LOCK_ASSERT(ring);
+
+ m = *m0;
+
+ /*
+ * Do we have ANY space? If not, return ENOBUFS, let the
+ * caller decide what to do with the mbuf.
+ */
+ num_left = qcom_ess_edma_desc_ring_get_num_available(sc, ring);
+ if (num_left < 2) {
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: num_left=%d\n", __func__, num_left);
+ ring->stats.num_enqueue_full++;
+ return (ENOBUFS);
+ }
+
+ /*
+ * Get the current sw/hw descriptor offset; we'll use its
+ * dmamap and then switch it out with the last one when
+ * the mbuf is put there.
+ */
+ next_to_fill = ring->next_to_fill;
+ txd_first = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring,
+ next_to_fill);
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: starting at idx %d\n", __func__, next_to_fill);
+
+ /*
+ * Do the initial mbuf load; see how many fragments we
+ * have. If we don't have enough descriptors available
+ * then immediately unmap and return an error.
+ */
+ ret = bus_dmamap_load_mbuf_sg(ring->buffer_dma_tag,
+ txd_first->m_dmamap,
+ m,
+ txsegs,
+ &nsegs,
+ BUS_DMA_NOWAIT);
+ if (ret != 0) {
+ ring->stats.num_tx_mapfail++;
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: map failed (%d)\n", __func__, ret);
+ return (ENOBUFS);
+ }
+ if (nsegs == 0) {
+ ring->stats.num_tx_maxfrags++;
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: too many segs\n", __func__);
+ return (ENOBUFS);
+ }
+
+ if (nsegs + 2 > num_left) {
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: nsegs=%d, num_left=%d\n", __func__, nsegs, num_left);
+ bus_dmamap_unload(ring->buffer_dma_tag, txd_first->m_dmamap);
+ ring->stats.num_enqueue_full++;
+ return (ENOBUFS);
+ }
+
+ bus_dmamap_sync(ring->buffer_dma_tag, txd_first->m_dmamap,
+ BUS_DMASYNC_PREWRITE);
+
+ /*
+ * At this point we're committed to sending the frame.
+ *
+ * Get rid of the rcvif, which was being used to track
+ * the /send/ ifnet.
+ */
+ m->m_pkthdr.rcvif = NULL;
+
+ /*
+ * Configure the various header fields that are shared
+ * between descriptors.
+ */
+ svlan_tag = 0; /* 802.1ad (S-VLAN) tag */
+ /* word1 - tx checksum, v4/v6 TSO, pppoe, 802.1ad vlan flag */
+ word1 = 0;
+ /*
+ * word3 - insert default vlan; vlan tag/flag, CPU/STP/RSTP stuff,
+ * port map
+ */
+ word3 = 0;
+ word3 |= (port_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT);
+
+ /*
+ * If VLAN offload is enabled, we can enable inserting a CVLAN
+ * tag here for the default VLAN, or the VLAN interface.
+ * The default switch configuration requires both a port_bitmap
+ * and 802.1q VLANs configured.
+ *
+ * If there's a VLAN tag on the mbuf then we leave it alone.
+ * I don't want to try and strip out the VLAN header from a packet
+ * here.
+ *
+ * There's no 802.1ad support in here yet.
+ */
+ eh = mtod(m, struct ether_vlan_header *);
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ /* Don't add a tag, just use what's here */
+ vlan_id = -1;
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: no vlan id\n", __func__);
+ } else if ((m->m_flags & M_VLANTAG) != 0) {
+ /* We have an offload VLAN tag, use it */
+ vlan_id = m->m_pkthdr.ether_vtag & 0x0fff;
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: header tag vlan id=%d\n", __func__, vlan_id);
+ } else {
+ /* No VLAN tag, no VLAN header; default VLAN */
+ vlan_id = default_vlan;
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: no vlan tag/hdr; vlan id=%d\n", __func__,
+ vlan_id);
+ }
+
+ /*
+ * Only add the offload tag if we need to.
+ */
+ if (vlan_id != -1) {
+ word3 |= (1U << EDMA_TX_INS_CVLAN);
+ word3 |= (vlan_id << EDMA_TX_CVLAN_TAG_SHIFT);
+ }
+
+ /* End of frame flag */
+ eop = 0;
+
+ /*
+ * Walk the mbuf segment list, and allocate descriptor
+ * entries. Put the mbuf in the last descriptor entry
+ * and then switch out the first/last dmamap entries.
+ */
+ for (i = 0; i < nsegs; i++) {
+ struct qcom_ess_edma_sw_desc_tx *txd;
+ struct qcom_ess_edma_tx_desc *ds;
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: filling idx %d\n", __func__, next_to_fill);
+ txd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring,
+ next_to_fill);
+ ds = qcom_ess_edma_desc_ring_get_hw_desc(sc, ring,
+ next_to_fill);
+ txd->m = NULL;
+ if (i == 0) {
+ txd->is_first = 1;
+ }
+ if (i == (nsegs - 1)) {
+ bus_dmamap_t dm;
+
+ txd->is_last = 1;
+ eop = EDMA_TPD_EOP;
+ /*
+ * Put the txmap and the mbuf in the last swdesc.
+ * That way it isn't freed until we've transmitted
+ * all the descriptors of this frame, in case the
+ * hardware decides to notify us of some half-sent
+ * stuff.
+ *
+ * Moving the pointers around here sucks a little
+ * but it DOES beat not freeing the dmamap entries
+ * correctly.
+ */
+ txd->m = m;
+ dm = txd_first->m_dmamap;
+ txd_first->m_dmamap = txd->m_dmamap;
+ txd->m_dmamap = dm;
+ }
+ ds->word1 = word1 | eop;
+ ds->word3 = word3;
+ ds->svlan_tag = svlan_tag;
+ ds->addr = htole32(txsegs[i].ds_addr);
+ ds->len = htole16(txsegs[i].ds_len);
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: addr=0x%lx len=%ld eop=0x%x\n",
+ __func__,
+ txsegs[i].ds_addr,
+ txsegs[i].ds_len,
+ eop);
+
+ next_to_fill++;
+ if (next_to_fill >= ring->ring_count)
+ next_to_fill = 0;
+ }
+
+ ring->stats.num_added += nsegs;
+
+ /* Finish, update ring tracking */
+ ring->next_to_fill = next_to_fill;
+
+ ring->stats.num_tx_ok++;
+
+ return (0);
+}
+
+/*
+ * Update the hardware with the new state of the transmit ring.
+ */
+int
+qcom_ess_edma_tx_ring_frame_update(struct qcom_ess_edma_softc *sc, int queue)
+{
+ struct qcom_ess_edma_desc_ring *ring;
+
+ ring = &sc->sc_tx_ring[queue];
+
+ EDMA_RING_LOCK_ASSERT(ring);
+
+ qcom_ess_edma_desc_ring_flush_preupdate(sc, ring);
+
+ (void) qcom_ess_edma_hw_tx_update_tpd_prod_idx(sc, queue,
+ ring->next_to_fill);
+
+ /* XXX keep stats for this specific call? */
+ return (0);
+}
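+
+/*
+ * A sketch of the expected enqueue pattern (assumed from the comments
+ * above; 'txs', 'm', 'port_bitmap' and 'default_vlan' are caller
+ * state): stage one or more frames, then push the producer index to
+ * the hardware once:
+ *
+ *     EDMA_RING_LOCK(ring);
+ *     while ((m = buf_ring_dequeue_sc(txs->br)) != NULL) {
+ *         if (qcom_ess_edma_tx_ring_frame(sc, queue, &m,
+ *             port_bitmap, default_vlan) != 0) {
+ *             break;    (ring full; push back / retry later)
+ *         }
+ *     }
+ *     (void) qcom_ess_edma_tx_ring_frame_update(sc, queue);
+ *     EDMA_RING_UNLOCK(ring);
+ */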
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_tx.h b/sys/dev/qcom_ess_edma/qcom_ess_edma_tx.h
new file mode 100644
index 000000000000..cb1dc02e0bd1
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_tx.h
@@ -0,0 +1,50 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __QCOM_ESS_EDMA_TX_H__
+#define __QCOM_ESS_EDMA_TX_H__
+
+extern int qcom_ess_edma_tx_queue_to_cpu(struct qcom_ess_edma_softc *sc,
+ int queue);
+extern int qcom_ess_edma_tx_ring_setup(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring);
+extern int qcom_ess_edma_tx_ring_clean(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring);
+extern int qcom_ess_edma_tx_ring_complete(struct qcom_ess_edma_softc *sc,
+ int queue);
+extern int qcom_ess_edma_tx_ring_frame(struct qcom_ess_edma_softc *sc,
+ int queue, struct mbuf **m0, uint16_t port_bitmap,
+ int default_vlan);
+extern int qcom_ess_edma_tx_ring_frame_update(struct qcom_ess_edma_softc *sc,
+ int queue);
+
+#endif /* __QCOM_ESS_EDMA_TX_H__ */
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_var.h b/sys/dev/qcom_ess_edma/qcom_ess_edma_var.h
new file mode 100644
index 000000000000..0e7afcfbf1c5
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_var.h
@@ -0,0 +1,258 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __QCOM_ESS_EDMA_VAR_H__
+#define __QCOM_ESS_EDMA_VAR_H__
+
+#define EDMA_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
+#define EDMA_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
+#define EDMA_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
+
+#define EDMA_RING_LOCK(_ring) mtx_lock(&(_ring)->mtx)
+#define EDMA_RING_UNLOCK(_ring) mtx_unlock(&(_ring)->mtx)
+#define EDMA_RING_LOCK_ASSERT(_ring) mtx_assert(&(_ring)->mtx, MA_OWNED)
+
+/*
+ * register space access macros
+ */
+#define EDMA_REG_WRITE(sc, reg, val) do { \
+ bus_write_4(sc->sc_mem_res, (reg), (val)); \
+ } while (0)
+
+#define EDMA_REG_READ(sc, reg) bus_read_4(sc->sc_mem_res, (reg))
+
+#define EDMA_REG_SET_BITS(sc, reg, bits) \
+ EDMA_REG_WRITE(sc, reg, EDMA_REG_READ(sc, (reg)) | (bits))
+
+#define EDMA_REG_CLEAR_BITS(sc, reg, bits) \
+ EDMA_REG_WRITE(sc, reg, EDMA_REG_READ(sc, (reg)) & ~(bits))
+
+#define EDMA_REG_BARRIER_WRITE(sc) bus_barrier((sc)->sc_mem_res, \
+ 0, (sc)->sc_mem_res_size, BUS_SPACE_BARRIER_WRITE)
+#define EDMA_REG_BARRIER_READ(sc) bus_barrier((sc)->sc_mem_res, \
+ 0, (sc)->sc_mem_res_size, BUS_SPACE_BARRIER_READ)
+#define EDMA_REG_BARRIER_RW(sc) bus_barrier((sc)->sc_mem_res, \
+ 0, (sc)->sc_mem_res_size, \
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)
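+
+/*
+ * Example (illustrative only; EDMA_REG_FOO/EDMA_FOO_ENABLE are
+ * hypothetical names, the real ones live in qcom_ess_edma_reg.h):
+ * a masked register update bracketed by explicit barriers:
+ *
+ *     EDMA_REG_BARRIER_READ(sc);
+ *     reg = EDMA_REG_READ(sc, EDMA_REG_FOO);
+ *     reg |= EDMA_FOO_ENABLE;
+ *     EDMA_REG_WRITE(sc, EDMA_REG_FOO, reg);
+ *     EDMA_REG_BARRIER_WRITE(sc);
+ *
+ * EDMA_REG_SET_BITS() collapses the read-modify-write into a single
+ * statement when no interleaved logic is needed.
+ */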
+
+/*
+ * Fixed number of interrupts - 16 TX, 8 RX.
+ *
+ * The Linux driver supports 4 or 8 RX queues.
+ */
+
+#define QCOM_ESS_EDMA_NUM_TX_IRQS 16
+#define QCOM_ESS_EDMA_NUM_RX_IRQS 8
+
+#define QCOM_ESS_EDMA_NUM_TX_RINGS 16
+#define QCOM_ESS_EDMA_NUM_RX_RINGS 8
+
+#define EDMA_TX_RING_SIZE 128
+#define EDMA_RX_RING_SIZE 128
+
+#define EDMA_TX_BUFRING_SIZE 512
+
+/* Maximum number of GMAC instances */
+#define QCOM_ESS_EDMA_MAX_NUM_GMACS 5
+
+/* Maximum number of ports to support mapping to GMACs */
+#define QCOM_ESS_EDMA_MAX_NUM_PORTS 8
+
+#define QCOM_ESS_EDMA_MAX_TXFRAGS 8
+
+struct qcom_ess_edma_softc;
+
+/*
+ * An instance of an interrupt queue.
+ */
+struct qcom_ess_edma_intr {
+ struct qcom_ess_edma_softc *sc;
+ struct resource *irq_res;
+ int irq_rid;
+ void *irq_intr;
+
+ struct {
+ uint64_t num_intr;
+ } stats;
+};
+
+/*
+ * A TX/RX descriptor ring.
+ */
+struct qcom_ess_edma_desc_ring {
+ bus_dma_tag_t hw_ring_dma_tag; /* tag for hw ring */
+ bus_dma_tag_t buffer_dma_tag; /* tag for mbufs */
+ char *label;
+
+ struct mtx mtx;
+
+ bus_dmamap_t hw_desc_map;
+ bus_addr_t hw_desc_paddr;
+ void *hw_desc;
+
+ void *sw_desc;
+ int hw_entry_size; /* hw desc entry size */
+ int sw_entry_size; /* sw desc entry size */
+ int ring_count; /* Number of entries */
+ int buffer_align;
+ int ring_align;
+
+ uint16_t next_to_fill;
+ uint16_t next_to_clean;
+ uint16_t pending_fill;
+
+ struct {
+ uint64_t num_added;
+ uint64_t num_cleaned;
+ uint64_t num_dropped;
+ uint64_t num_enqueue_full;
+ uint64_t num_rx_no_gmac;
+ uint64_t num_rx_ok;
+ uint64_t num_tx_ok;
+ uint64_t num_tx_maxfrags;
+ uint64_t num_tx_mapfail;
+ uint64_t num_rx_csum_ok;
+ uint64_t num_rx_csum_fail;
+ uint64_t num_tx_complete;
+ uint64_t num_tx_xmit_defer;
+ uint64_t num_tx_xmit_task;
+ } stats;
+};
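+
+/*
+ * Ring index arithmetic (a sketch; assuming the usual one-slot-open
+ * convention, which the "num_left < 2" guard in the TX path suggests):
+ * next_to_fill/next_to_clean wrap within [0, ring_count), and the
+ * space left for the producer would be:
+ *
+ *     avail = (next_to_clean - next_to_fill - 1 + ring_count)
+ *         % ring_count;
+ */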
+
+/*
+ * Structs for transmit and receive software
+ * ring entries.
+ */
+struct qcom_ess_edma_sw_desc_tx {
+ struct mbuf *m;
+ bus_dmamap_t m_dmamap;
+ uint32_t is_first:1;
+ uint32_t is_last:1;
+};
+
+struct qcom_ess_edma_sw_desc_rx {
+ struct mbuf *m;
+ bus_dmamap_t m_dmamap;
+ bus_addr_t m_physaddr;
+};
+
+#define QCOM_ESS_EDMA_LABEL_SZ 16
+
+/*
+ * Per transmit ring TX state for TX queue / buf_ring stuff.
+ */
+struct qcom_ess_edma_tx_state {
+ struct task completion_task;
+ struct task xmit_task;
+ struct buf_ring *br;
+ struct taskqueue *completion_tq;
+ struct qcom_ess_edma_softc *sc;
+ char label[QCOM_ESS_EDMA_LABEL_SZ];
+ int enqueue_is_running;
+ int queue_id;
+};
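+
+/*
+ * Assumed usage (descriptive only): the if_transmit() path enqueues
+ * mbufs into 'br' and either drains the ring inline or defers to
+ * 'xmit_task' on 'completion_tq'; the num_tx_xmit_defer and
+ * num_tx_xmit_task ring statistics count the two paths.
+ */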
+
+/*
+ * Per receive ring RX state for taskqueue stuff.
+ */
+struct qcom_ess_edma_rx_state {
+ struct task completion_task;
+ struct taskqueue *completion_tq;
+ struct qcom_ess_edma_softc *sc;
+ char label[QCOM_ESS_EDMA_LABEL_SZ];
+ int queue_id;
+};
+
+struct qcom_ess_edma_gmac {
+ struct qcom_ess_edma_softc *sc;
+ int id;
+ bool enabled;
+ /* Native VLAN ID */
+ int vlan_id;
+ /* Switch portmask for this instance */
+ int port_mask;
+ /* MAC address for this ifnet (from device tree) */
+ struct ether_addr eaddr;
+ /* ifnet interface! */
+ if_t ifp;
+ /* media interface */
+ struct ifmedia ifm;
+};
+
+struct qcom_ess_edma_softc {
+ device_t sc_dev;
+ struct mtx sc_mtx;
+ struct resource *sc_mem_res;
+ size_t sc_mem_res_size;
+ int sc_mem_rid;
+ uint32_t sc_debug;
+ bus_dma_tag_t sc_dma_tag;
+
+ struct qcom_ess_edma_intr sc_tx_irq[QCOM_ESS_EDMA_NUM_TX_IRQS];
+ struct qcom_ess_edma_intr sc_rx_irq[QCOM_ESS_EDMA_NUM_RX_IRQS];
+
+ struct qcom_ess_edma_desc_ring sc_tx_ring[QCOM_ESS_EDMA_NUM_TX_RINGS];
+ struct qcom_ess_edma_desc_ring sc_rx_ring[QCOM_ESS_EDMA_NUM_RX_RINGS];
+ struct qcom_ess_edma_tx_state sc_tx_state[QCOM_ESS_EDMA_NUM_TX_RINGS];
+ struct qcom_ess_edma_rx_state sc_rx_state[QCOM_ESS_EDMA_NUM_RX_RINGS];
+ struct qcom_ess_edma_gmac sc_gmac[QCOM_ESS_EDMA_MAX_NUM_GMACS];
+
+ int sc_gmac_port_map[QCOM_ESS_EDMA_MAX_NUM_PORTS];
+
+ struct {
+ uint32_t num_gmac;
+ uint32_t mdio_supported;
+ uint32_t poll_required;
+ uint32_t rss_type;
+
+ uint32_t rx_buf_size;
+ bool rx_buf_ether_align;
+
+ uint32_t tx_intr_mask;
+ uint32_t rx_intr_mask;
+
+ /* number of tx/rx descriptor entries in each ring */
+ uint32_t rx_ring_count;
+ uint32_t tx_ring_count;
+
+ /* how many queues for each CPU */
+ uint32_t num_tx_queue_per_cpu;
+ } sc_config;
+
+ struct {
+ uint32_t misc_intr_mask;
+ uint32_t wol_intr_mask;
+ uint32_t intr_sw_idx_w;
+ } sc_state;
+};
+
+#endif /* __QCOM_ESS_EDMA_VAR_H__ */