author     Dmitry Salychev <dsl@FreeBSD.org>  2023-06-18 15:03:24 +0000
committer  Dmitry Salychev <dsl@FreeBSD.org>  2023-08-20 08:17:26 +0000
commit     58983e4b0253ad38a3e1ef2166fedd3133fdb552 (patch)
tree       cc02316c25344ddf659080efeb669c823da922f0
parent     83d941e0af3720f45ab9f2c3d035519a11752bea (diff)
download   src-58983e4b0253ad38a3e1ef2166fedd3133fdb552.tar.gz
           src-58983e4b0253ad38a3e1ef2166fedd3133fdb552.zip
dpaa2: Clean up channels in separate tasks
Each channel gets its own DMA resources, cleanup and "bufferpool" tasks, and
a separate cleanup taskqueue to isolate channel operation as much as possible
in order to avoid various kernel panics under heavy network load.

As a side effect of this work, the dpaa2_buf structure is simplified, and all
of the functions to re-seed those buffers are now gathered in the dpaa2_buf.h
and .c files; functions to work with channels are extracted into the
dpaa2_channel.h and .c files as well.

Reported by:            dch
Reviewed by:            bz
Approved by:            bz (mentor)
MFC after:              1 week
Differential Revision:  https://reviews.freebsd.org/D41296
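The isolation pattern introduced here can be sketched with plain taskqueue(9)
primitives: each channel owns a task and a single-threaded taskqueue pinned to
the CPU that services the channel's DPIO, so a data availability notification
only enqueues work for its own channel. A minimal sketch follows; all names
are illustrative, not part of the driver.

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/cpuset.h>
    #include <sys/malloc.h>
    #include <sys/priority.h>
    #include <sys/taskqueue.h>

    struct ch_sketch {
            struct taskqueue *cleanup_tq;   /* one taskqueue per channel */
            struct task       cleanup_task;
    };

    static void
    ch_cleanup(void *arg, int pending)
    {
            /* Drain Rx/Tx confirmation work for this channel only. */
    }

    static void
    ch_sketch_init(struct ch_sketch *ch, cpuset_t *cpus, int chan_id)
    {
            TASK_INIT(&ch->cleanup_task, 0, ch_cleanup, ch);
            ch->cleanup_tq = taskqueue_create("ch cleanup", M_WAITOK,
                taskqueue_thread_enqueue, &ch->cleanup_tq);
            /* Pin the worker thread to the CPU(s) servicing the DPIO. */
            taskqueue_start_threads_cpuset(&ch->cleanup_tq, 1, PI_NET,
                cpus, "ch%d cleanup", chan_id);
    }

    /* Interrupt side: hand work off instead of processing it inline. */
    static void
    ch_notify(struct ch_sketch *ch)
    {
            taskqueue_enqueue(ch->cleanup_tq, &ch->cleanup_task);
    }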
-rw-r--r--  sys/conf/files.arm64           |    3
-rw-r--r--  sys/dev/dpaa2/dpaa2_buf.c      |  246
-rw-r--r--  sys/dev/dpaa2/dpaa2_buf.h      |  173
-rw-r--r--  sys/dev/dpaa2/dpaa2_channel.c  |  557
-rw-r--r--  sys/dev/dpaa2/dpaa2_channel.h  |   95
-rw-r--r--  sys/dev/dpaa2/dpaa2_io.c       |   23
-rw-r--r--  sys/dev/dpaa2/dpaa2_io.h       |    8
-rw-r--r--  sys/dev/dpaa2/dpaa2_mc.c       |   64
-rw-r--r--  sys/dev/dpaa2/dpaa2_mcp.c      |    1
-rw-r--r--  sys/dev/dpaa2/dpaa2_mcp.h      |    1
-rw-r--r--  sys/dev/dpaa2/dpaa2_ni.c       | 1483
-rw-r--r--  sys/dev/dpaa2/dpaa2_ni.h       |  160
-rw-r--r--  sys/dev/dpaa2/dpaa2_swp.c      |   15
-rw-r--r--  sys/dev/dpaa2/dpaa2_swp.h      |    5
-rw-r--r--  sys/dev/dpaa2/dpaa2_types.c    |  114
-rw-r--r--  sys/dev/dpaa2/dpaa2_types.h    |  113
-rw-r--r--  sys/modules/dpaa2/Makefile     |   14
17 files changed, 1833 insertions, 1242 deletions
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
index 61f1cbf75982..2036bbe918ba 100644
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -202,6 +202,8 @@ dev/axgbe/xgbe-phy-v1.c optional axa fdt
dev/cpufreq/cpufreq_dt.c optional cpufreq fdt
dev/dpaa2/dpaa2_bp.c optional soc_nxp_ls dpaa2
+dev/dpaa2/dpaa2_buf.c optional soc_nxp_ls dpaa2
+dev/dpaa2/dpaa2_channel.c optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_cmd_if.m optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_con.c optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_console.c optional soc_nxp_ls dpaa2 fdt
@@ -216,6 +218,7 @@ dev/dpaa2/dpaa2_ni.c optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_rc.c optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_swp.c optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_swp_if.m optional soc_nxp_ls dpaa2
+dev/dpaa2/dpaa2_types.c optional soc_nxp_ls dpaa2
dev/dpaa2/memac_mdio_acpi.c optional soc_nxp_ls dpaa2 acpi
dev/dpaa2/memac_mdio_common.c optional soc_nxp_ls dpaa2 acpi | soc_nxp_ls dpaa2 fdt
dev/dpaa2/memac_mdio_fdt.c optional soc_nxp_ls dpaa2 fdt
diff --git a/sys/dev/dpaa2/dpaa2_buf.c b/sys/dev/dpaa2/dpaa2_buf.c
new file mode 100644
index 000000000000..7739eda5d8de
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_buf.c
@@ -0,0 +1,246 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2023 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+
+#include <machine/bus.h>
+
+#include "dpaa2_types.h"
+#include "dpaa2_buf.h"
+#include "dpaa2_bp.h"
+#include "dpaa2_channel.h"
+#include "dpaa2_swp.h"
+#include "dpaa2_swp_if.h"
+#include "dpaa2_ni.h"
+
+MALLOC_DEFINE(M_DPAA2_RXB, "dpaa2_rxb", "DPAA2 DMA-mapped buffer (Rx)");
+
+/**
+ * @brief Allocate Rx buffers visible to QBMan and release them to the
+ * buffer pool.
+ */
+int
+dpaa2_buf_seed_pool(device_t dev, device_t bpdev, void *arg, uint32_t count,
+ int size, struct mtx *dma_mtx)
+{
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct dpaa2_bp_softc *bpsc = device_get_softc(bpdev);
+ struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
+ struct dpaa2_buf *buf;
+ const int alloc = DPAA2_ATOMIC_READ(&sc->buf_num);
+ const uint16_t bpid = bpsc->attr.bpid;
+ bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD];
+ int error, bufn = 0;
+
+#if defined(INVARIANTS)
+ KASSERT(ch->rx_dmat != NULL, ("%s: no DMA tag?", __func__));
+ if (dma_mtx != NULL) {
+ mtx_assert(dma_mtx, MA_OWNED);
+ }
+#endif /* INVARIANTS */
+
+#ifdef _notyet_
+ /* Limit the number of buffers released to the pool */
+ count = (alloc + count > DPAA2_NI_BUFS_MAX)
+ ? DPAA2_NI_BUFS_MAX - alloc : count;
+#endif
+
+ /* Release "count" buffers to the pool */
+ for (int i = alloc; i < alloc + count; i++) {
+ /* Enough buffers were allocated for a single command */
+ if (bufn == DPAA2_SWP_BUFS_PER_CMD) {
+ error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpid, paddr,
+ bufn);
+ if (error) {
+ device_printf(sc->dev, "%s: failed to release "
+ "buffers to the pool (1)\n", __func__);
+ return (error);
+ }
+ DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
+ bufn = 0;
+ }
+
+ buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_RXB, M_NOWAIT);
+ if (buf == NULL) {
+ device_printf(dev, "%s: malloc() failed\n", __func__);
+ return (ENOMEM);
+ }
+ DPAA2_BUF_INIT_TAGOPT(buf, ch->rx_dmat, ch);
+
+ error = dpaa2_buf_seed_rxb(dev, buf, size, dma_mtx);
+ if (error != 0) {
+ device_printf(dev, "%s: dpaa2_buf_seed_rxb() failed: "
+ "error=%d/n", __func__, error);
+ break;
+ }
+ paddr[bufn] = buf->paddr;
+ bufn++;
+ }
+
+ /* Release the remainder of the buffers to the pool */
+ if (bufn > 0) {
+ error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpid, paddr, bufn);
+ if (error) {
+ device_printf(sc->dev, "%s: failed to release "
+ "buffers to the pool (2)\n", __func__);
+ return (error);
+ }
+ DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Prepare Rx buffer to be released to the buffer pool.
+ */
+int
+dpaa2_buf_seed_rxb(device_t dev, struct dpaa2_buf *buf, int size,
+ struct mtx *dma_mtx)
+{
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct dpaa2_fa *fa;
+ bool map_created = false;
+ bool mbuf_alloc = false;
+ int error;
+
+#if defined(INVARIANTS)
+ DPAA2_BUF_ASSERT_RXPREP(buf);
+ if (dma_mtx != NULL) {
+ mtx_assert(dma_mtx, MA_OWNED);
+ }
+#endif /* INVARIANTS */
+
+ if (__predict_false(buf->dmap == NULL)) {
+ error = bus_dmamap_create(buf->dmat, 0, &buf->dmap);
+ if (error != 0) {
+ device_printf(dev, "%s: failed to create DMA map: "
+ "error=%d\n", __func__, error);
+ goto fail_map_create;
+ }
+ map_created = true;
+ }
+
+ if (__predict_true(buf->m == NULL)) {
+ buf->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
+ if (__predict_false(buf->m == NULL)) {
+ device_printf(dev, "%s: m_getjcl() failed\n", __func__);
+ error = ENOMEM;
+ goto fail_mbuf_alloc;
+ }
+ buf->m->m_len = buf->m->m_ext.ext_size;
+ buf->m->m_pkthdr.len = buf->m->m_ext.ext_size;
+ mbuf_alloc = true;
+ }
+
+ error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, buf->m, &buf->seg,
+ &buf->nseg, BUS_DMA_NOWAIT);
+ KASSERT(buf->nseg == 1, ("%s: one segment expected: nseg=%d", __func__,
+ buf->nseg));
+ KASSERT(error == 0, ("%s: bus_dmamap_load_mbuf_sg() failed: error=%d",
+ __func__, error));
+ if (__predict_false(error != 0 || buf->nseg != 1)) {
+ device_printf(sc->dev, "%s: bus_dmamap_load_mbuf_sg() failed: "
+ "error=%d, nsegs=%d\n", __func__, error, buf->nseg);
+ goto fail_mbuf_map;
+ }
+ buf->paddr = buf->seg.ds_addr;
+ buf->vaddr = buf->m->m_data;
+
+ /* Populate frame annotation for future use */
+ fa = (struct dpaa2_fa *)buf->vaddr;
+ fa->magic = DPAA2_MAGIC;
+ fa->buf = buf;
+
+ bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREREAD);
+
+ DPAA2_BUF_ASSERT_RXREADY(buf);
+
+ return (0);
+
+fail_mbuf_map:
+ if (mbuf_alloc) {
+ m_freem(buf->m);
+ buf->m = NULL;
+ }
+fail_mbuf_alloc:
+ if (map_created) {
+ (void)bus_dmamap_destroy(buf->dmat, buf->dmap);
+ }
+fail_map_create:
+ return (error);
+}
+
+/**
+ * @brief Prepare Tx buffer to be added to the Tx ring.
+ */
+int
+dpaa2_buf_seed_txb(device_t dev, struct dpaa2_buf *buf)
+{
+ struct dpaa2_buf *sgt = buf->sgt;
+ bool map_created = false;
+ int error;
+
+ DPAA2_BUF_ASSERT_TXPREP(buf);
+
+ if (buf->dmap == NULL) {
+ error = bus_dmamap_create(buf->dmat, 0, &buf->dmap);
+ if (error != 0) {
+ device_printf(dev, "%s: bus_dmamap_create() failed: "
+ "error=%d\n", __func__, error);
+ goto fail_map_create;
+ }
+ map_created = true;
+ }
+
+ if (sgt->vaddr == NULL) {
+ error = bus_dmamem_alloc(sgt->dmat, (void **)&sgt->vaddr,
+ BUS_DMA_ZERO | BUS_DMA_COHERENT, &sgt->dmap);
+ if (error != 0) {
+ device_printf(dev, "%s: bus_dmamem_alloc() failed: "
+ "error=%d\n", __func__, error);
+ goto fail_mem_alloc;
+ }
+ }
+
+ DPAA2_BUF_ASSERT_TXREADY(buf);
+
+ return (0);
+
+fail_mem_alloc:
+ if (map_created) {
+ (void)bus_dmamap_destroy(buf->dmat, buf->dmap);
+ }
+fail_map_create:
+ return (error);
+}
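
One detail of dpaa2_buf_seed_rxb() above deserves a note: the first bytes of
every Rx buffer hold a frame annotation with a magic value and a back-pointer
to the owning dpaa2_buf, so the software descriptor can be recovered from
nothing but the buffer address once the frame comes back from hardware. A
minimal sketch of that recovery, with a stand-in magic value:

    #include <stdint.h>

    #define FA_MAGIC 0x46414141u            /* stand-in for DPAA2_MAGIC */

    struct fa_sketch {
            uint32_t  magic;                /* sanity check */
            void     *buf;                  /* back-pointer to descriptor */
    };

    /* Recover the software descriptor from the buffer's address only. */
    static void *
    fa_recover(void *vaddr)
    {
            struct fa_sketch *fa = vaddr;

            return (fa->magic == FA_MAGIC ? fa->buf : NULL);
    }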
diff --git a/sys/dev/dpaa2/dpaa2_buf.h b/sys/dev/dpaa2/dpaa2_buf.h
new file mode 100644
index 000000000000..853a4fa78d3a
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_buf.h
@@ -0,0 +1,173 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2023 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_BUF_H
+#define _DPAA2_BUF_H
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+
+#include <machine/bus.h>
+
+#define DPAA2_RX_BUF_SIZE (MJUM9BYTES)
+
+struct dpaa2_buf {
+ bus_addr_t paddr;
+ caddr_t vaddr;
+ bus_dma_tag_t dmat;
+ bus_dmamap_t dmap;
+ bus_dma_segment_t seg;
+ int nseg;
+ struct mbuf *m;
+ struct dpaa2_buf *sgt;
+ void *opt;
+};
+
+#define DPAA2_BUF_INIT_TAGOPT(__buf, __tag, __opt) do { \
+ KASSERT((__buf) != NULL, ("%s: buf is NULL", __func__)); \
+ \
+ (__buf)->paddr = 0; \
+ (__buf)->vaddr = NULL; \
+ (__buf)->dmat = (__tag); \
+ (__buf)->dmap = NULL; \
+ (__buf)->seg.ds_addr = 0; \
+ (__buf)->seg.ds_len = 0; \
+ (__buf)->nseg = 0; \
+ (__buf)->m = NULL; \
+ (__buf)->sgt = NULL; \
+ (__buf)->opt = (__opt); \
+} while(0)
+#define DPAA2_BUF_INIT(__buf) DPAA2_BUF_INIT_TAGOPT((__buf), NULL, NULL)
+
+#if defined(INVARIANTS)
+/*
+ * TXPREP/TXREADY macros verify whether a Tx buffer is prepared to be
+ * seeded and/or is ready to be used for transmission.
+ *
+ * NOTE: Any modification should be carefully analyzed and justified.
+ */
+#define DPAA2_BUF_ASSERT_TXPREP(__buf) do { \
+ struct dpaa2_buf *__sgt = (__buf)->sgt; \
+ KASSERT((__sgt) != NULL, ("%s: no S/G table?", __func__)); \
+ \
+ KASSERT((__buf)->paddr == 0, ("%s: paddr set?", __func__)); \
+ KASSERT((__buf)->vaddr == NULL, ("%s: vaddr set?", __func__)); \
+ KASSERT((__buf)->dmat != NULL, ("%s: no DMA tag?", __func__)); \
+ KASSERT((__buf)->dmap == NULL, ("%s: DMA map set?", __func__)); \
+ KASSERT((__buf)->seg.ds_addr == 0, ("%s: already mapped?", __func__)); \
+ KASSERT((__buf)->seg.ds_len == 0, ("%s: already mapped?", __func__)); \
+ KASSERT((__buf)->nseg == 0, ("%s: nseg > 0?", __func__)); \
+ KASSERT((__buf)->m == NULL, ("%s: mbuf set?", __func__)); \
+ KASSERT((__buf)->opt != NULL, ("%s: no Tx ring?", __func__)); \
+ \
+ KASSERT((__sgt)->paddr == 0, ("%s: S/G paddr set?", __func__)); \
+ KASSERT((__sgt)->vaddr == NULL, ("%s: S/G vaddr set?", __func__)); \
+ KASSERT((__sgt)->dmat != NULL, ("%s: no S/G DMA tag?", __func__)); \
+ KASSERT((__sgt)->dmap == NULL, ("%s: S/G DMA map set?", __func__)); \
+ KASSERT((__sgt)->seg.ds_addr == 0, ("%s: S/G mapped?", __func__)); \
+ KASSERT((__sgt)->seg.ds_len == 0, ("%s: S/G mapped?", __func__)); \
+ KASSERT((__sgt)->nseg == 0, ("%s: S/G nseg > 0?", __func__)); \
+ KASSERT((__sgt)->m == NULL, ("%s: S/G mbuf set?", __func__)); \
+ KASSERT((__sgt)->opt == (__buf),("%s: buf not linked?", __func__)); \
+} while(0)
+#define DPAA2_BUF_ASSERT_TXREADY(__buf) do { \
+ struct dpaa2_buf *__sgt = (__buf)->sgt; \
+ KASSERT((__sgt) != NULL, ("%s: no S/G table?", __func__)); \
+ \
+ KASSERT((__buf)->paddr == 0, ("%s: paddr set?", __func__)); \
+ KASSERT((__buf)->vaddr == NULL, ("%s: vaddr set?", __func__)); \
+ KASSERT((__buf)->dmat != NULL, ("%s: no DMA tag?", __func__)); \
+ KASSERT((__buf)->dmap != NULL, ("%s: no DMA map?", __func__)); \
+ KASSERT((__buf)->seg.ds_addr == 0, ("%s: already mapped?", __func__)); \
+ KASSERT((__buf)->seg.ds_len == 0, ("%s: already mapped?", __func__)); \
+ KASSERT((__buf)->nseg == 0, ("%s: nseg > 0?", __func__)); \
+ KASSERT((__buf)->m == NULL, ("%s: mbuf set?", __func__)); \
+ KASSERT((__buf)->opt != NULL, ("%s: no Tx ring?", __func__)); \
+ \
+ KASSERT((__sgt)->paddr == 0, ("%s: S/G paddr set?", __func__)); \
+ KASSERT((__sgt)->vaddr != NULL, ("%s: no S/G vaddr?", __func__)); \
+ KASSERT((__sgt)->dmat != NULL, ("%s: no S/G DMA tag?", __func__)); \
+ KASSERT((__sgt)->dmap != NULL, ("%s: no S/G DMA map?", __func__)); \
+ KASSERT((__sgt)->seg.ds_addr == 0, ("%s: S/G mapped?", __func__)); \
+ KASSERT((__sgt)->seg.ds_len == 0, ("%s: S/G mapped?", __func__)); \
+ KASSERT((__sgt)->nseg == 0, ("%s: S/G nseg > 0?", __func__)); \
+ KASSERT((__sgt)->m == NULL, ("%s: S/G mbuf set?", __func__)); \
+ KASSERT((__sgt)->opt == (__buf),("%s: buf not linked?", __func__)); \
+} while(0)
+#else /* !INVARIANTS */
+#define DPAA2_BUF_ASSERT_TXPREP(__buf) do { \
+} while(0)
+#define DPAA2_BUF_ASSERT_TXREADY(__buf) do { \
+} while(0)
+#endif /* INVARIANTS */
+
+#if defined(INVARIANTS)
+/*
+ * RXPREP/RXREADY macros verify whether an Rx buffer is prepared to be
+ * seeded and/or is ready to be used for reception.
+ *
+ * NOTE: Any modification should be carefully analyzed and justified.
+ */
+#define DPAA2_BUF_ASSERT_RXPREP(__buf) do { \
+ KASSERT((__buf)->paddr == 0, ("%s: paddr set?", __func__)); \
+ KASSERT((__buf)->vaddr == NULL, ("%s: vaddr set?", __func__)); \
+ KASSERT((__buf)->dmat != NULL, ("%s: no DMA tag?", __func__)); \
+ /* KASSERT((__buf)->dmap == NULL, ("%s: DMA map set?", __func__)); */ \
+ KASSERT((__buf)->seg.ds_addr == 0, ("%s: already mapped?", __func__)); \
+ KASSERT((__buf)->seg.ds_len == 0, ("%s: already mapped?", __func__)); \
+ KASSERT((__buf)->nseg == 0, ("%s: nseg > 0?", __func__)); \
+ KASSERT((__buf)->m == NULL, ("%s: mbuf set?", __func__)); \
+ KASSERT((__buf)->sgt == NULL, ("%s: S/G table set?", __func__)); \
+ KASSERT((__buf)->opt != NULL, ("%s: no channel?", __func__)); \
+} while(0)
+#define DPAA2_BUF_ASSERT_RXREADY(__buf) do { \
+ KASSERT((__buf)->paddr != 0, ("%s: paddr not set?", __func__)); \
+ KASSERT((__buf)->vaddr != NULL, ("%s: vaddr not set?", __func__)); \
+ KASSERT((__buf)->dmat != NULL, ("%s: no DMA tag?", __func__)); \
+ KASSERT((__buf)->dmap != NULL, ("%s: no DMA map?", __func__)); \
+ KASSERT((__buf)->seg.ds_addr != 0, ("%s: not mapped?", __func__)); \
+ KASSERT((__buf)->seg.ds_len != 0, ("%s: not mapped?", __func__)); \
+ KASSERT((__buf)->nseg == 1, ("%s: nseg != 1?", __func__)); \
+ KASSERT((__buf)->m != NULL, ("%s: no mbuf?", __func__)); \
+ KASSERT((__buf)->sgt == NULL, ("%s: S/G table set?", __func__)); \
+ KASSERT((__buf)->opt != NULL, ("%s: no channel?", __func__)); \
+} while(0)
+#else /* !INVARIANTS */
+#define DPAA2_BUF_ASSERT_RXPREP(__buf) do { \
+} while(0)
+#define DPAA2_BUF_ASSERT_RXREADY(__buf) do { \
+} while(0)
+#endif /* INVARIANTS */
+
+int dpaa2_buf_seed_pool(device_t, device_t, void *, uint32_t, int, struct mtx *);
+int dpaa2_buf_seed_rxb(device_t, struct dpaa2_buf *, int, struct mtx *);
+int dpaa2_buf_seed_txb(device_t, struct dpaa2_buf *);
+
+#endif /* _DPAA2_BUF_H */
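
A hypothetical caller of the API declared above, following the shape of
dpaa2_buf_seed_pool() and dpaa2_chan_bp_task() from this commit (ch, dev and
the error handling are illustrative):

    struct dpaa2_buf buf;
    int error;

    DPAA2_BUF_INIT_TAGOPT(&buf, ch->rx_dmat, ch);   /* RXPREP state */

    mtx_lock(&ch->dma_mtx);
    error = dpaa2_buf_seed_rxb(dev, &buf, DPAA2_RX_BUF_SIZE, &ch->dma_mtx);
    mtx_unlock(&ch->dma_mtx);
    /* On success the buffer now satisfies DPAA2_BUF_ASSERT_RXREADY(). */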
diff --git a/sys/dev/dpaa2/dpaa2_channel.c b/sys/dev/dpaa2/dpaa2_channel.c
new file mode 100644
index 000000000000..87b76923a16d
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_channel.c
@@ -0,0 +1,557 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2023 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * QBMan channel to process ingress traffic (Rx, Tx confirmation, Rx error).
+ *
+ * NOTE: Several WQs are organized into a single channel.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/mbuf.h>
+#include <sys/taskqueue.h>
+#include <sys/sysctl.h>
+#include <sys/buf_ring.h>
+#include <sys/smp.h>
+#include <sys/proc.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/atomic.h>
+#include <machine/vmparam.h>
+
+#include <net/ethernet.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+#include <net/if_var.h>
+
+#include "dpaa2_types.h"
+#include "dpaa2_channel.h"
+#include "dpaa2_ni.h"
+#include "dpaa2_mc.h"
+#include "dpaa2_mc_if.h"
+#include "dpaa2_mcp.h"
+#include "dpaa2_io.h"
+#include "dpaa2_con.h"
+#include "dpaa2_buf.h"
+#include "dpaa2_swp.h"
+#include "dpaa2_swp_if.h"
+#include "dpaa2_bp.h"
+#include "dpaa2_cmd_if.h"
+
+MALLOC_DEFINE(M_DPAA2_CH, "dpaa2_ch", "DPAA2 QBMan Channel");
+
+#define RX_SEG_N (1u)
+#define RX_SEG_SZ (((MJUM9BYTES - 1) / PAGE_SIZE + 1) * PAGE_SIZE)
+#define RX_SEG_MAXSZ (((MJUM9BYTES - 1) / PAGE_SIZE + 1) * PAGE_SIZE)
+CTASSERT(RX_SEG_SZ % PAGE_SIZE == 0);
+CTASSERT(RX_SEG_MAXSZ % PAGE_SIZE == 0);
+
+#define TX_SEG_N (16u) /* XXX-DSL: does DPAA2 limit exist? */
+#define TX_SEG_SZ (PAGE_SIZE)
+#define TX_SEG_MAXSZ (TX_SEG_N * TX_SEG_SZ)
+CTASSERT(TX_SEG_SZ % PAGE_SIZE == 0);
+CTASSERT(TX_SEG_MAXSZ % PAGE_SIZE == 0);
+
+#define SGT_SEG_N (1u)
+#define SGT_SEG_SZ (PAGE_SIZE)
+#define SGT_SEG_MAXSZ (PAGE_SIZE)
+CTASSERT(SGT_SEG_SZ % PAGE_SIZE == 0);
+CTASSERT(SGT_SEG_MAXSZ % PAGE_SIZE == 0);
+
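A worked instance of the page rounding above, assuming the common 4 KB page
size (PAGE_SIZE = 4096) and MJUM9BYTES = 9216:

    /*
     * RX_SEG_SZ = ((9216 - 1) / 4096 + 1) * 4096 = 3 * 4096 = 12288,
     * i.e. a 9 KB jumbo cluster is rounded up to three full pages,
     * which is exactly what the CTASSERTs check for.
     */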
+static int dpaa2_chan_setup_dma(device_t, struct dpaa2_channel *, bus_size_t);
+static int dpaa2_chan_alloc_storage(device_t, struct dpaa2_channel *, bus_size_t,
+ int, bus_size_t);
+static void dpaa2_chan_bp_task(void *, int);
+
+/**
+ * @brief Configures a QBMan channel and registers data availability notifications.
+ */
+int
+dpaa2_chan_setup(device_t dev, device_t iodev, device_t condev, device_t bpdev,
+ struct dpaa2_channel **channel, uint32_t flowid, task_fn_t cleanup_task_fn)
+{
+ device_t pdev = device_get_parent(dev);
+ device_t child = dev;
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct dpaa2_io_softc *iosc = device_get_softc(iodev);
+ struct dpaa2_con_softc *consc = device_get_softc(condev);
+ struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
+ struct dpaa2_devinfo *ioinfo = device_get_ivars(iodev);
+ struct dpaa2_devinfo *coninfo = device_get_ivars(condev);
+ struct dpaa2_con_notif_cfg notif_cfg;
+ struct dpaa2_io_notif_ctx *ctx;
+ struct dpaa2_channel *ch = NULL;
+ struct dpaa2_cmd cmd;
+ uint16_t rctk, contk;
+ int error;
+
+ DPAA2_CMD_INIT(&cmd);
+
+ error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rctk);
+ if (error) {
+ device_printf(dev, "%s: failed to open DPRC: id=%d, error=%d\n",
+ __func__, rcinfo->id, error);
+ goto fail_rc_open;
+ }
+ error = DPAA2_CMD_CON_OPEN(dev, child, &cmd, coninfo->id, &contk);
+ if (error) {
+ device_printf(dev, "%s: failed to open DPCON: id=%d, error=%d\n",
+ __func__, coninfo->id, error);
+ goto fail_con_open;
+ }
+
+ error = DPAA2_CMD_CON_ENABLE(dev, child, &cmd);
+ if (error) {
+ device_printf(dev, "%s: failed to enable channel: dpcon_id=%d, "
+ "chan_id=%d\n", __func__, coninfo->id, consc->attr.chan_id);
+ goto fail_con_enable;
+ }
+
+ ch = malloc(sizeof(struct dpaa2_channel), M_DPAA2_CH, M_WAITOK | M_ZERO);
+ if (ch == NULL) {
+ device_printf(dev, "%s: malloc() failed\n", __func__);
+ error = ENOMEM;
+ goto fail_malloc;
+ }
+
+ ch->ni_dev = dev;
+ ch->io_dev = iodev;
+ ch->con_dev = condev;
+ ch->id = consc->attr.chan_id;
+ ch->flowid = flowid;
+ ch->tx_frames = 0; /* for debug purposes */
+ ch->tx_dropped = 0; /* for debug purposes */
+ ch->store_sz = 0;
+ ch->store_idx = 0;
+ ch->recycled_n = 0;
+ ch->rxq_n = 0;
+
+ NET_TASK_INIT(&ch->cleanup_task, 0, cleanup_task_fn, ch);
+ NET_TASK_INIT(&ch->bp_task, 0, dpaa2_chan_bp_task, ch);
+
+ ch->cleanup_tq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK,
+ taskqueue_thread_enqueue, &ch->cleanup_tq);
+ taskqueue_start_threads_cpuset(&ch->cleanup_tq, 1, PI_NET,
+ &iosc->cpu_mask, "dpaa2_ch%d cleanup", ch->id);
+
+ error = dpaa2_chan_setup_dma(dev, ch, sc->buf_align);
+ if (error != 0) {
+ device_printf(dev, "%s: failed to setup DMA\n", __func__);
+ goto fail_dma_setup;
+ }
+
+ mtx_init(&ch->xmit_mtx, "dpaa2_ch_xmit", NULL, MTX_DEF);
+
+ ch->xmit_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
+ &ch->xmit_mtx);
+ if (ch->xmit_br == NULL) {
+ device_printf(dev, "%s: buf_ring_alloc() failed\n", __func__);
+ error = ENOMEM;
+ goto fail_buf_ring;
+ }
+
+ DPAA2_BUF_INIT(&ch->store);
+
+ /* Register the new notification context */
+ ctx = &ch->ctx;
+ ctx->qman_ctx = (uint64_t)ctx;
+ ctx->cdan_en = true;
+ ctx->fq_chan_id = ch->id;
+ ctx->io_dev = ch->io_dev;
+ ctx->channel = ch;
+ error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, ctx);
+ if (error) {
+ device_printf(dev, "%s: failed to register CDAN context\n",
+ __func__);
+ goto fail_dpcon_notif;
+ }
+
+ /* Register DPCON notification within Management Complex */
+ notif_cfg.dpio_id = ioinfo->id;
+ notif_cfg.prior = 0;
+ notif_cfg.qman_ctx = ctx->qman_ctx;
+ error = DPAA2_CMD_CON_SET_NOTIF(dev, child, &cmd, &notif_cfg);
+ if (error) {
+ device_printf(dev, "%s: failed to register DPCON "
+ "notifications: dpcon_id=%d, chan_id=%d\n", __func__,
+ coninfo->id, consc->attr.chan_id);
+ goto fail_dpcon_notif;
+ }
+
+ /* Allocate an initial number of Rx buffers and the channel storage */
+ error = dpaa2_buf_seed_pool(dev, bpdev, ch, DPAA2_NI_BUFS_INIT,
+ DPAA2_RX_BUF_SIZE, NULL);
+ if (error) {
+ device_printf(dev, "%s: failed to seed buffer pool\n",
+ __func__);
+ goto fail_dpcon_notif;
+ }
+ error = dpaa2_chan_alloc_storage(dev, ch, DPAA2_ETH_STORE_SIZE,
+ BUS_DMA_NOWAIT, sc->buf_align);
+ if (error != 0) {
+ device_printf(dev, "%s: failed to allocate channel storage\n",
+ __func__);
+ goto fail_dpcon_notif;
+ } else {
+ ch->store_sz = DPAA2_ETH_STORE_FRAMES;
+ }
+
+ /* Prepare queues for the channel */
+ error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_TX_CONF);
+ if (error) {
+ device_printf(dev, "%s: failed to prepare TxConf queue: "
+ "error=%d\n", __func__, error);
+ goto fail_fq_setup;
+ }
+ error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_RX);
+ if (error) {
+ device_printf(dev, "%s: failed to prepare Rx queue: error=%d\n",
+ __func__, error);
+ goto fail_fq_setup;
+ }
+
+ if (bootverbose) {
+ device_printf(dev, "channel: dpio_id=%d dpcon_id=%d chan_id=%d, "
+ "priorities=%d\n", ioinfo->id, coninfo->id, ch->id,
+ consc->attr.prior_num);
+ }
+
+ *channel = ch;
+
+ (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
+ (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rctk));
+
+ return (0);
+
+fail_fq_setup:
+ if (ch->store.vaddr != NULL) {
+ bus_dmamem_free(ch->store.dmat, ch->store.vaddr, ch->store.dmap);
+ }
+ if (ch->store.dmat != NULL) {
+ bus_dma_tag_destroy(ch->store.dmat);
+ }
+ ch->store.dmat = NULL;
+ ch->store.vaddr = NULL;
+ ch->store.paddr = 0;
+ ch->store.nseg = 0;
+fail_dpcon_notif:
+ buf_ring_free(ch->xmit_br, M_DEVBUF);
+fail_buf_ring:
+ mtx_destroy(&ch->xmit_mtx);
+fail_dma_setup:
+ /* while (taskqueue_cancel(ch->cleanup_tq, &ch->cleanup_task, NULL)) { */
+ /* taskqueue_drain(ch->cleanup_tq, &ch->cleanup_task); */
+ /* } */
+ /* taskqueue_free(ch->cleanup_tq); */
+fail_malloc:
+ (void)DPAA2_CMD_CON_DISABLE(dev, child, DPAA2_CMD_TK(&cmd, contk));
+fail_con_enable:
+ (void)DPAA2_CMD_CON_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, contk));
+fail_con_open:
+ (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rctk));
+fail_rc_open:
+ return (error);
+}
+
+/**
+ * @brief Performs an initial configuration of the frame queue.
+ */
+int
+dpaa2_chan_setup_fq(device_t dev, struct dpaa2_channel *ch,
+ enum dpaa2_ni_queue_type queue_type)
+{
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct dpaa2_ni_fq *fq;
+
+ switch (queue_type) {
+ case DPAA2_NI_QUEUE_TX_CONF:
+ /* One queue per channel */
+ fq = &ch->txc_queue;
+ fq->chan = ch;
+ fq->flowid = ch->flowid;
+ fq->tc = 0; /* ignored */
+ fq->type = queue_type;
+ break;
+ case DPAA2_NI_QUEUE_RX:
+ KASSERT(sc->attr.num.rx_tcs <= DPAA2_MAX_TCS,
+ ("too many Rx traffic classes: rx_tcs=%d\n",
+ sc->attr.num.rx_tcs));
+
+ /* One queue per Rx traffic class within a channel */
+ for (int i = 0; i < sc->attr.num.rx_tcs; i++) {
+ fq = &ch->rx_queues[i];
+ fq->chan = ch;
+ fq->flowid = ch->flowid;
+ fq->tc = (uint8_t) i;
+ fq->type = queue_type;
+
+ ch->rxq_n++;
+ }
+ break;
+ case DPAA2_NI_QUEUE_RX_ERR:
+ /* One queue per network interface */
+ fq = &sc->rxe_queue;
+ fq->chan = ch;
+ fq->flowid = 0; /* ignored */
+ fq->tc = 0; /* ignored */
+ fq->type = queue_type;
+ break;
+ default:
+ device_printf(dev, "%s: unexpected frame queue type: %d\n",
+ __func__, queue_type);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Obtain the next dequeue response from the channel storage.
+ */
+int
+dpaa2_chan_next_frame(struct dpaa2_channel *ch, struct dpaa2_dq **dq)
+{
+ struct dpaa2_buf *buf = &ch->store;
+ struct dpaa2_dq *msgs = (struct dpaa2_dq *)buf->vaddr;
+ struct dpaa2_dq *msg = &msgs[ch->store_idx];
+ int rc = EINPROGRESS;
+
+ ch->store_idx++;
+
+ if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) {
+ rc = EALREADY; /* VDQ command has expired */
+ ch->store_idx = 0;
+ if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME)) {
+ msg = NULL; /* Null response, FD is invalid */
+ }
+ }
+ if (msg != NULL && (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY)) {
+ rc = ENOENT; /* FQ is empty */
+ ch->store_idx = 0;
+ }
+
+ if (dq != NULL) {
+ *dq = msg;
+ }
+
+ return (rc);
+}
+
+static int
+dpaa2_chan_setup_dma(device_t dev, struct dpaa2_channel *ch,
+ bus_size_t alignment)
+{
+ int error;
+
+ mtx_init(&ch->dma_mtx, "dpaa2_ch_dma_mtx", NULL, MTX_DEF);
+
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(dev), /* parent */
+ alignment, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* low restricted addr */
+ BUS_SPACE_MAXADDR, /* high restricted addr */
+ NULL, NULL, /* filter, filterarg */
+ RX_SEG_MAXSZ, /* maxsize */
+ RX_SEG_N, /* nsegments */
+ RX_SEG_SZ, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockarg */
+ &ch->rx_dmat);
+ if (error) {
+ device_printf(dev, "%s: failed to create rx_dmat\n", __func__);
+ goto fail_rx_tag;
+ }
+
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(dev), /* parent */
+ alignment, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* low restricted addr */
+ BUS_SPACE_MAXADDR, /* high restricted addr */
+ NULL, NULL, /* filter, filterarg */
+ TX_SEG_MAXSZ, /* maxsize */
+ TX_SEG_N, /* nsegments */
+ TX_SEG_SZ, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockarg */
+ &ch->tx_dmat);
+ if (error) {
+ device_printf(dev, "%s: failed to create tx_dmat\n", __func__);
+ goto fail_tx_tag;
+ }
+
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(dev), /* parent */
+ alignment, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* low restricted addr */
+ BUS_SPACE_MAXADDR, /* high restricted addr */
+ NULL, NULL, /* filter, filterarg */
+ SGT_SEG_MAXSZ, /* maxsize */
+ SGT_SEG_N, /* nsegments */
+ SGT_SEG_SZ, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockarg */
+ &ch->sgt_dmat);
+ if (error) {
+ device_printf(dev, "%s: failed to create sgt_dmat\n", __func__);
+ goto fail_sgt_tag;
+ }
+
+ return (0);
+
+fail_sgt_tag:
+ bus_dma_tag_destroy(ch->tx_dmat);
+fail_tx_tag:
+ bus_dma_tag_destroy(ch->rx_dmat);
+fail_rx_tag:
+ mtx_destroy(&ch->dma_mtx);
+ ch->rx_dmat = NULL;
+ ch->tx_dmat = NULL;
+ ch->sgt_dmat = NULL;
+
+ return (error);
+}
+
+/**
+ * @brief Allocate DMA-mapped storage to keep responses from the VDQ command.
+ */
+static int
+dpaa2_chan_alloc_storage(device_t dev, struct dpaa2_channel *ch, bus_size_t size,
+ int mapflags, bus_size_t alignment)
+{
+ struct dpaa2_buf *buf = &ch->store;
+ uint32_t maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
+ int error;
+
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(dev), /* parent */
+ alignment, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* low restricted addr */
+ BUS_SPACE_MAXADDR, /* high restricted addr */
+ NULL, NULL, /* filter, filterarg */
+ maxsize, /* maxsize */
+ 1, /* nsegments */
+ maxsize, /* maxsegsize */
+ BUS_DMA_ALLOCNOW, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockarg */
+ &buf->dmat);
+ if (error != 0) {
+ device_printf(dev, "%s: failed to create DMA tag\n", __func__);
+ goto fail_tag;
+ }
+
+ error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
+ BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
+ if (error != 0) {
+ device_printf(dev, "%s: failed to allocate storage memory\n",
+ __func__);
+ goto fail_map_create;
+ }
+
+ buf->paddr = 0;
+ error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr, size,
+ dpaa2_dmamap_oneseg_cb, &buf->paddr, mapflags);
+ if (error != 0) {
+ device_printf(dev, "%s: failed to map storage memory\n",
+ __func__);
+ goto fail_map_load;
+ }
+
+ bus_dmamap_sync(buf->dmat, buf->dmap,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ buf->nseg = 1;
+
+ return (0);
+
+fail_map_load:
+ bus_dmamem_free(buf->dmat, buf->vaddr, buf->dmap);
+fail_map_create:
+ bus_dma_tag_destroy(buf->dmat);
+fail_tag:
+ buf->dmat = NULL;
+ buf->vaddr = NULL;
+ buf->paddr = 0;
+ buf->nseg = 0;
+
+ return (error);
+}
+
+/**
+ * @brief Release new buffers to the buffer pool if necessary.
+ */
+static void
+dpaa2_chan_bp_task(void *arg, int count)
+{
+ struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
+ struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
+ struct dpaa2_bp_softc *bpsc;
+ struct dpaa2_bp_conf bpconf;
+ const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
+ device_t bpdev;
+ int error;
+
+ /* There's only one buffer pool for now */
+ bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
+ bpsc = device_get_softc(bpdev);
+
+ /* Get state of the buffer pool */
+ error = DPAA2_SWP_QUERY_BP(ch->io_dev, bpsc->attr.bpid, &bpconf);
+ if (error) {
+ device_printf(sc->dev, "%s: DPAA2_SWP_QUERY_BP() failed: "
+ "error=%d\n", __func__, error);
+ return;
+ }
+
+ /* Double the allocated Rx buffers if fewer than 25% of them are free */
+ if (bpconf.free_bufn < (buf_num >> 2)) {
+ mtx_assert(&ch->dma_mtx, MA_NOTOWNED);
+ mtx_lock(&ch->dma_mtx);
+ (void)dpaa2_buf_seed_pool(ch->ni_dev, bpdev, ch, buf_num,
+ DPAA2_RX_BUF_SIZE, &ch->dma_mtx);
+ mtx_unlock(&ch->dma_mtx);
+
+ DPAA2_ATOMIC_XCHG(&sc->buf_free, bpconf.free_bufn);
+ }
+}
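
The return codes of dpaa2_chan_next_frame() above form a small protocol:
EINPROGRESS means more responses may follow, EALREADY means the VDQ command
expired (the returned message may be NULL), and ENOENT means the frame queue
is empty. An illustrative consumer loop built on that protocol:

    struct dpaa2_dq *dq;
    int error;

    do {
            error = dpaa2_chan_next_frame(ch, &dq);
            if (dq != NULL) {
                    /* Process the dequeued frame descriptor here. */
            }
    } while (error == EINPROGRESS);

    /* EALREADY or ENOENT: storage is drained; re-issue VDQ if needed. */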
diff --git a/sys/dev/dpaa2/dpaa2_channel.h b/sys/dev/dpaa2/dpaa2_channel.h
new file mode 100644
index 000000000000..67b0285b7db6
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_channel.h
@@ -0,0 +1,95 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2023 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_CHANNEL_H
+#define _DPAA2_CHANNEL_H
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+#include <sys/buf_ring.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+
+#include "dpaa2_types.h"
+#include "dpaa2_io.h"
+#include "dpaa2_ni.h"
+
+#define DPAA2_TX_BUFRING_SZ (4096u)
+
+/**
+ * @brief QBMan channel to process ingress traffic.
+ *
+ * NOTE: Several WQs are organized into a single channel.
+ */
+struct dpaa2_channel {
+ device_t ni_dev;
+ device_t io_dev;
+ device_t con_dev;
+ uint16_t id;
+ uint16_t flowid;
+
+ uint64_t tx_frames;
+ uint64_t tx_dropped;
+
+ struct mtx dma_mtx;
+ bus_dma_tag_t rx_dmat;
+ bus_dma_tag_t tx_dmat;
+ bus_dma_tag_t sgt_dmat;
+
+ struct dpaa2_io_notif_ctx ctx; /* to configure CDANs */
+
+ struct dpaa2_buf store; /* to keep VDQ responses */
+ uint32_t store_sz; /* in frames */
+ uint32_t store_idx; /* frame index */
+
+ uint32_t recycled_n;
+ struct dpaa2_buf *recycled[DPAA2_SWP_BUFS_PER_CMD];
+
+ uint32_t rxq_n;
+ struct dpaa2_ni_fq rx_queues[DPAA2_MAX_TCS];
+ struct dpaa2_ni_fq txc_queue;
+
+ struct taskqueue *cleanup_tq;
+ struct task cleanup_task;
+ struct task bp_task;
+
+ struct mtx xmit_mtx;
+ struct buf_ring *xmit_br;
+} __aligned(CACHE_LINE_SIZE);
+
+int dpaa2_chan_setup(device_t, device_t, device_t, device_t,
+ struct dpaa2_channel **, uint32_t, task_fn_t);
+int dpaa2_chan_setup_fq(device_t, struct dpaa2_channel *,
+ enum dpaa2_ni_queue_type);
+int dpaa2_chan_next_frame(struct dpaa2_channel *, struct dpaa2_dq **);
+
+#endif /* _DPAA2_CHANNEL_H */
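
A hypothetical call site for dpaa2_chan_setup(), matching the reworked
dpaa2_ni_setup_channels() further down in this commit (iodev, condev, bpdev
and flowid stand for resources already resolved by the caller):

    struct dpaa2_channel *ch = NULL;
    int error;

    error = dpaa2_chan_setup(dev, iodev, condev, bpdev, &ch, flowid,
        dpaa2_ni_cleanup_task);
    if (error != 0) {
            device_printf(dev, "failed to set up channel: error=%d\n",
                error);
            return (error);
    }
    /* ch->cleanup_tq is now running on the CPU bound to the DPIO. */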
diff --git a/sys/dev/dpaa2/dpaa2_io.c b/sys/dev/dpaa2/dpaa2_io.c
index 069dd00e849e..ef82a726ad82 100644
--- a/sys/dev/dpaa2/dpaa2_io.c
+++ b/sys/dev/dpaa2/dpaa2_io.c
@@ -38,6 +38,8 @@
* using QBMan.
*/
+#include "opt_rss.h"
+
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
@@ -58,6 +60,10 @@
#include <dev/pci/pcivar.h>
+#ifdef RSS
+#include <net/rss_config.h>
+#endif
+
#include "pcib_if.h"
#include "pci_if.h"
@@ -68,6 +74,7 @@
#include "dpaa2_cmd_if.h"
#include "dpaa2_io.h"
#include "dpaa2_ni.h"
+#include "dpaa2_channel.h"
#define DPIO_IRQ_INDEX 0 /* index of the only DPIO IRQ */
#define DPIO_POLL_MAX 32
@@ -436,8 +443,14 @@ dpaa2_io_setup_irqs(device_t dev)
return (ENXIO);
}
- /* Wrap DPIO ID around number of CPUs. */
- bus_bind_intr(dev, sc->irq_resource, sc->attr.id % mp_ncpus);
+ /* Wrap DPIO ID around number of CPUs/RSS buckets */
+#ifdef RSS
+ sc->cpu = rss_getcpu(sc->attr.id % rss_getnumbuckets());
+#else
+ sc->cpu = sc->attr.id % mp_ncpus;
+#endif
+ CPU_SETOF(sc->cpu, &sc->cpu_mask);
+ bus_bind_intr(dev, sc->irq_resource, sc->cpu);
/*
* Setup and enable Static Dequeue Command to receive CDANs from
@@ -519,7 +532,9 @@ static void
dpaa2_io_intr(void *arg)
{
struct dpaa2_io_softc *sc = (struct dpaa2_io_softc *) arg;
+ /* struct dpaa2_ni_softc *nisc = NULL; */
struct dpaa2_io_notif_ctx *ctx[DPIO_POLL_MAX];
+ struct dpaa2_channel *chan;
struct dpaa2_dq dq;
uint32_t idx, status;
uint16_t flags;
@@ -554,7 +569,9 @@ dpaa2_io_intr(void *arg)
DPAA2_SWP_UNLOCK(sc->swp);
for (int i = 0; i < cdan_n; i++) {
- ctx[i]->poll(ctx[i]->channel);
+ chan = (struct dpaa2_channel *)ctx[i]->channel;
+ /* nisc = device_get_softc(chan->ni_dev); */
+ taskqueue_enqueue(chan->cleanup_tq, &chan->cleanup_task);
}
/* Enable software portal interrupts back */
diff --git a/sys/dev/dpaa2/dpaa2_io.h b/sys/dev/dpaa2/dpaa2_io.h
index 13def050fffb..cc3156fcc595 100644
--- a/sys/dev/dpaa2/dpaa2_io.h
+++ b/sys/dev/dpaa2/dpaa2_io.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright © 2021-2022 Dmitry Salychev
+ * Copyright © 2021-2023 Dmitry Salychev
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,6 +34,7 @@
#include "dpaa2_types.h"
#include "dpaa2_mcp.h"
+#include "dpaa2_swp.h"
/* Maximum resources per DPIO: 3 SYS_MEM + 1 DPMCP. */
#define DPAA2_IO_MAX_RESOURCES 4
@@ -74,8 +75,6 @@ struct dpaa2_io_attr {
* (CDAN) on a particular WQ channel.
*/
struct dpaa2_io_notif_ctx {
- void (*poll)(void *);
-
device_t io_dev;
void *channel;
uint64_t qman_ctx;
@@ -98,6 +97,9 @@ struct dpaa2_io_softc {
int irq_rid[DPAA2_IO_MSI_COUNT];
struct resource *irq_resource;
void *intr; /* interrupt handle */
+
+ int cpu;
+ cpuset_t cpu_mask;
};
extern struct resource_spec dpaa2_io_spec[];
diff --git a/sys/dev/dpaa2/dpaa2_mc.c b/sys/dev/dpaa2/dpaa2_mc.c
index 10f142b0b97d..0deebf7e8b24 100644
--- a/sys/dev/dpaa2/dpaa2_mc.c
+++ b/sys/dev/dpaa2/dpaa2_mc.c
@@ -73,8 +73,6 @@
#define mcreg_read_4(_sc, _r) bus_read_4(&(_sc)->map[1], (_r))
#define mcreg_write_4(_sc, _r, _v) bus_write_4(&(_sc)->map[1], (_r), (_v))
-#define COMPARE_TYPE(t, v) (strncmp((v), (t), strlen((v))) == 0)
-
#define IORT_DEVICE_NAME "MCE"
/* MC Registers */
@@ -672,68 +670,6 @@ dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev,
}
/**
- * @brief Convert DPAA2 device type to string.
- */
-const char *
-dpaa2_ttos(enum dpaa2_dev_type type)
-{
- switch (type) {
- case DPAA2_DEV_MC:
- return ("mc"); /* NOTE: to print as information only. */
- case DPAA2_DEV_RC:
- return ("dprc");
- case DPAA2_DEV_IO:
- return ("dpio");
- case DPAA2_DEV_NI:
- return ("dpni");
- case DPAA2_DEV_MCP:
- return ("dpmcp");
- case DPAA2_DEV_BP:
- return ("dpbp");
- case DPAA2_DEV_CON:
- return ("dpcon");
- case DPAA2_DEV_MAC:
- return ("dpmac");
- case DPAA2_DEV_MUX:
- return ("dpdmux");
- case DPAA2_DEV_SW:
- return ("dpsw");
- default:
- break;
- }
- return ("notype");
-}
-
-/**
- * @brief Convert string to DPAA2 device type.
- */
-enum dpaa2_dev_type
-dpaa2_stot(const char *str)
-{
- if (COMPARE_TYPE(str, "dprc")) {
- return (DPAA2_DEV_RC);
- } else if (COMPARE_TYPE(str, "dpio")) {
- return (DPAA2_DEV_IO);
- } else if (COMPARE_TYPE(str, "dpni")) {
- return (DPAA2_DEV_NI);
- } else if (COMPARE_TYPE(str, "dpmcp")) {
- return (DPAA2_DEV_MCP);
- } else if (COMPARE_TYPE(str, "dpbp")) {
- return (DPAA2_DEV_BP);
- } else if (COMPARE_TYPE(str, "dpcon")) {
- return (DPAA2_DEV_CON);
- } else if (COMPARE_TYPE(str, "dpmac")) {
- return (DPAA2_DEV_MAC);
- } else if (COMPARE_TYPE(str, "dpdmux")) {
- return (DPAA2_DEV_MUX);
- } else if (COMPARE_TYPE(str, "dpsw")) {
- return (DPAA2_DEV_SW);
- }
-
- return (DPAA2_DEV_NOTYPE);
-}
-
-/**
* @internal
*/
static u_int
diff --git a/sys/dev/dpaa2/dpaa2_mcp.c b/sys/dev/dpaa2/dpaa2_mcp.c
index bebd7deced7e..1ddf0e33d8c9 100644
--- a/sys/dev/dpaa2/dpaa2_mcp.c
+++ b/sys/dev/dpaa2/dpaa2_mcp.c
@@ -40,7 +40,6 @@
#include <sys/time.h>
#include <sys/types.h>
#include <sys/systm.h>
-#include <sys/condvar.h>
#include <sys/lock.h>
#include <machine/bus.h>
diff --git a/sys/dev/dpaa2/dpaa2_mcp.h b/sys/dev/dpaa2/dpaa2_mcp.h
index 5e1926308b53..8a9f942c6f71 100644
--- a/sys/dev/dpaa2/dpaa2_mcp.h
+++ b/sys/dev/dpaa2/dpaa2_mcp.h
@@ -29,7 +29,6 @@
#define _DPAA2_MCP_H
#include <sys/rman.h>
-#include <sys/condvar.h>
#include <sys/mutex.h>
#include "dpaa2_types.h"
diff --git a/sys/dev/dpaa2/dpaa2_ni.c b/sys/dev/dpaa2/dpaa2_ni.c
index b0d7441a55e0..7cb472f45ee4 100644
--- a/sys/dev/dpaa2/dpaa2_ni.c
+++ b/sys/dev/dpaa2/dpaa2_ni.c
@@ -94,6 +94,8 @@
#include "dpaa2_swp_if.h"
#include "dpaa2_cmd_if.h"
#include "dpaa2_ni.h"
+#include "dpaa2_channel.h"
+#include "dpaa2_buf.h"
#define BIT(x) (1ul << (x))
#define WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
@@ -115,18 +117,20 @@
mtx_unlock(&(__sc)->lock); \
} while (0)
-#define TX_LOCK(__tx) do { \
- mtx_assert(&(__tx)->lock, MA_NOTOWNED); \
- mtx_lock(&(__tx)->lock); \
-} while (0)
-#define TX_UNLOCK(__tx) do { \
- mtx_assert(&(__tx)->lock, MA_OWNED); \
- mtx_unlock(&(__tx)->lock); \
-} while (0)
-
-#define DPAA2_TX_RING(sc, chan, tc) \
+#define DPAA2_TX_RING(sc, chan, tc) \
(&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])
+MALLOC_DEFINE(M_DPAA2_TXB, "dpaa2_txb", "DPAA2 DMA-mapped buffer (Tx)");
+
+/*
+ * How many times the channel cleanup routine will be repeated if the RX or
+ * TX budget was depleted.
+ */
+#define DPAA2_CLEAN_BUDGET 64 /* sysctl(9)? */
+/* TX/RX budget for the channel cleanup task */
+#define DPAA2_TX_BUDGET 128 /* sysctl(9)? */
+#define DPAA2_RX_BUDGET 256 /* sysctl(9)? */
+
#define DPNI_IRQ_INDEX 0 /* Index of the only DPNI IRQ. */
#define DPNI_IRQ_LINK_CHANGED 1 /* Link state changed */
#define DPNI_IRQ_EP_CHANGED 2 /* DPAA2 endpoint dis/connected */
@@ -144,24 +148,20 @@
#define BUF_SWA_SIZE 64 /* SW annotation size */
#define BUF_RX_HWA_SIZE 64 /* HW annotation size */
#define BUF_TX_HWA_SIZE 128 /* HW annotation size */
-#define BUF_SIZE (MJUM9BYTES)
-#define DPAA2_TX_BUFRING_SZ (4096u)
+#define DPAA2_RX_BUFRING_SZ (4096u)
+#define DPAA2_RXE_BUFRING_SZ (1024u)
+#define DPAA2_TXC_BUFRING_SZ (4096u)
#define DPAA2_TX_SEGLIMIT (16u) /* arbitrary number */
-#define DPAA2_TX_SEG_SZ (4096u)
+#define DPAA2_TX_SEG_SZ (PAGE_SIZE)
#define DPAA2_TX_SEGS_MAXSZ (DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
#define DPAA2_TX_SGT_SZ (PAGE_SIZE) /* bytes */
/* Size of a buffer to keep a QoS table key configuration. */
-#define ETH_QOS_KCFG_BUF_SIZE 256
+#define ETH_QOS_KCFG_BUF_SIZE (PAGE_SIZE)
/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
-#define DPAA2_CLASSIFIER_DMA_SIZE 256
-
-/* Channel storage buffer configuration. */
-#define ETH_STORE_FRAMES 16u
-#define ETH_STORE_SIZE ((ETH_STORE_FRAMES + 1) * sizeof(struct dpaa2_dq))
-#define ETH_STORE_ALIGN 64u
+#define DPAA2_CLASSIFIER_DMA_SIZE (PAGE_SIZE)
/* Buffers layout options. */
#define BUF_LOPT_TIMESTAMP 0x1
@@ -221,7 +221,12 @@
MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");
-/* DPAA2 Network Interface resource specification. */
+/*
+ * DPAA2 Network Interface resource specification.
+ *
+ * NOTE: Don't forget to update macros in dpaa2_ni.h in case of any changes in
+ * the specification!
+ */
struct resource_spec dpaa2_ni_spec[] = {
/*
* DPMCP resources.
@@ -229,11 +234,7 @@ struct resource_spec dpaa2_ni_spec[] = {
* NOTE: MC command portals (MCPs) are used to send commands to, and
* receive responses from, the MC firmware. One portal per DPNI.
*/
-#define MCP_RES_NUM (1u)
-#define MCP_RID_OFF (0u)
-#define MCP_RID(rid) ((rid) + MCP_RID_OFF)
- /* --- */
- { DPAA2_DEV_MCP, MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_MCP, DPAA2_NI_MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
/*
* DPIO resources (software portals).
*
@@ -241,36 +242,28 @@ struct resource_spec dpaa2_ni_spec[] = {
* availability interrupts, the DPCONs are used to identify the
* network interface that has produced ingress data to that core.
*/
-#define IO_RES_NUM (16u)
-#define IO_RID_OFF (MCP_RID_OFF + MCP_RES_NUM)
-#define IO_RID(rid) ((rid) + IO_RID_OFF)
- /* --- */
- { DPAA2_DEV_IO, IO_RID(0), RF_ACTIVE | RF_SHAREABLE },
- { DPAA2_DEV_IO, IO_RID(1), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
- { DPAA2_DEV_IO, IO_RID(2), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
- { DPAA2_DEV_IO, IO_RID(3), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
- { DPAA2_DEV_IO, IO_RID(4), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
- { DPAA2_DEV_IO, IO_RID(5), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
- { DPAA2_DEV_IO, IO_RID(6), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
- { DPAA2_DEV_IO, IO_RID(7), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
- { DPAA2_DEV_IO, IO_RID(8), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
- { DPAA2_DEV_IO, IO_RID(9), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
- { DPAA2_DEV_IO, IO_RID(10), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
- { DPAA2_DEV_IO, IO_RID(11), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
- { DPAA2_DEV_IO, IO_RID(12), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
- { DPAA2_DEV_IO, IO_RID(13), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
- { DPAA2_DEV_IO, IO_RID(14), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
- { DPAA2_DEV_IO, IO_RID(15), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, DPAA2_NI_IO_RID(0), RF_ACTIVE | RF_SHAREABLE },
+ { DPAA2_DEV_IO, DPAA2_NI_IO_RID(1), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, DPAA2_NI_IO_RID(2), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, DPAA2_NI_IO_RID(3), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, DPAA2_NI_IO_RID(4), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, DPAA2_NI_IO_RID(5), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, DPAA2_NI_IO_RID(6), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, DPAA2_NI_IO_RID(7), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, DPAA2_NI_IO_RID(8), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, DPAA2_NI_IO_RID(9), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, DPAA2_NI_IO_RID(10), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, DPAA2_NI_IO_RID(11), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, DPAA2_NI_IO_RID(12), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, DPAA2_NI_IO_RID(13), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, DPAA2_NI_IO_RID(14), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, DPAA2_NI_IO_RID(15), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
/*
* DPBP resources (buffer pools).
*
* NOTE: One per network interface.
*/
-#define BP_RES_NUM (1u)
-#define BP_RID_OFF (IO_RID_OFF + IO_RES_NUM)
-#define BP_RID(rid) ((rid) + BP_RID_OFF)
- /* --- */
- { DPAA2_DEV_BP, BP_RID(0), RF_ACTIVE },
+ { DPAA2_DEV_BP, DPAA2_NI_BP_RID(0), RF_ACTIVE },
/*
* DPCON resources (channels).
*
@@ -280,27 +273,23 @@ struct resource_spec dpaa2_ni_spec[] = {
* different network interfaces arriving on the same core, the
* DPCONs must be private to the DPNIs.
*/
-#define CON_RES_NUM (16u)
-#define CON_RID_OFF (BP_RID_OFF + BP_RES_NUM)
-#define CON_RID(rid) ((rid) + CON_RID_OFF)
- /* --- */
- { DPAA2_DEV_CON, CON_RID(0), RF_ACTIVE },
- { DPAA2_DEV_CON, CON_RID(1), RF_ACTIVE | RF_OPTIONAL },
- { DPAA2_DEV_CON, CON_RID(2), RF_ACTIVE | RF_OPTIONAL },
- { DPAA2_DEV_CON, CON_RID(3), RF_ACTIVE | RF_OPTIONAL },
- { DPAA2_DEV_CON, CON_RID(4), RF_ACTIVE | RF_OPTIONAL },
- { DPAA2_DEV_CON, CON_RID(5), RF_ACTIVE | RF_OPTIONAL },
- { DPAA2_DEV_CON, CON_RID(6), RF_ACTIVE | RF_OPTIONAL },
- { DPAA2_DEV_CON, CON_RID(7), RF_ACTIVE | RF_OPTIONAL },
- { DPAA2_DEV_CON, CON_RID(8), RF_ACTIVE | RF_OPTIONAL },
- { DPAA2_DEV_CON, CON_RID(9), RF_ACTIVE | RF_OPTIONAL },
- { DPAA2_DEV_CON, CON_RID(10), RF_ACTIVE | RF_OPTIONAL },
- { DPAA2_DEV_CON, CON_RID(11), RF_ACTIVE | RF_OPTIONAL },
- { DPAA2_DEV_CON, CON_RID(12), RF_ACTIVE | RF_OPTIONAL },
- { DPAA2_DEV_CON, CON_RID(13), RF_ACTIVE | RF_OPTIONAL },
- { DPAA2_DEV_CON, CON_RID(14), RF_ACTIVE | RF_OPTIONAL },
- { DPAA2_DEV_CON, CON_RID(15), RF_ACTIVE | RF_OPTIONAL },
- /* --- */
+ { DPAA2_DEV_CON, DPAA2_NI_CON_RID(0), RF_ACTIVE },
+ { DPAA2_DEV_CON, DPAA2_NI_CON_RID(1), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, DPAA2_NI_CON_RID(2), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, DPAA2_NI_CON_RID(3), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, DPAA2_NI_CON_RID(4), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, DPAA2_NI_CON_RID(5), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, DPAA2_NI_CON_RID(6), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, DPAA2_NI_CON_RID(7), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, DPAA2_NI_CON_RID(8), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, DPAA2_NI_CON_RID(9), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, DPAA2_NI_CON_RID(10), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, DPAA2_NI_CON_RID(11), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, DPAA2_NI_CON_RID(12), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, DPAA2_NI_CON_RID(13), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, DPAA2_NI_CON_RID(14), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, DPAA2_NI_CON_RID(15), RF_ACTIVE | RF_OPTIONAL },
+
RESOURCE_SPEC_END
};
@@ -391,6 +380,13 @@ static struct dpni_stat {
"depletion in DPNI buffer pools" },
};
+struct dpaa2_ni_rx_ctx {
+ struct mbuf *head;
+ struct mbuf *tail;
+ int cnt;
+ bool last;
+};
+
/* Device interface */
static int dpaa2_ni_probe(device_t);
static int dpaa2_ni_attach(device_t);
@@ -399,8 +395,6 @@ static int dpaa2_ni_detach(device_t);
/* DPAA2 network interface setup and configuration */
static int dpaa2_ni_setup(device_t);
static int dpaa2_ni_setup_channels(device_t);
-static int dpaa2_ni_setup_fq(device_t, struct dpaa2_ni_channel *,
- enum dpaa2_ni_queue_type);
static int dpaa2_ni_bind(device_t);
static int dpaa2_ni_setup_rx_dist(device_t);
static int dpaa2_ni_setup_irqs(device_t);
@@ -423,13 +417,6 @@ static int dpaa2_ni_set_mac_addr(device_t);
static int dpaa2_ni_set_hash(device_t, uint64_t);
static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);
-/* Buffers and buffer pools */
-static int dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *, uint32_t);
-static int dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *);
-static int dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *);
-static int dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *,
- struct dpaa2_ni_channel *);
-
/* Frame descriptor routines */
static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
@@ -442,8 +429,6 @@ static int dpaa2_ni_fd_offset(struct dpaa2_fd *);
/* Various subroutines */
static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);
-static int dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *,
- struct dpaa2_dq **);
/* Network interface routines */
static void dpaa2_ni_init(void *);
@@ -462,23 +447,21 @@ static int dpaa2_ni_media_change(if_t );
static void dpaa2_ni_media_status(if_t , struct ifmediareq *);
static void dpaa2_ni_media_tick(void *);
-/* DMA mapping callback */
-static void dpaa2_ni_dmamap_cb(void *, bus_dma_segment_t *, int, int);
-
/* Tx/Rx routines. */
-static void dpaa2_ni_poll(void *);
-static void dpaa2_ni_tx_locked(struct dpaa2_ni_softc *,
+static int dpaa2_ni_rx_cleanup(struct dpaa2_channel *);
+static int dpaa2_ni_tx_cleanup(struct dpaa2_channel *);
+static void dpaa2_ni_tx(struct dpaa2_ni_softc *, struct dpaa2_channel *,
struct dpaa2_ni_tx_ring *, struct mbuf *);
-static void dpaa2_ni_bp_task(void *, int);
+static void dpaa2_ni_cleanup_task(void *, int);
/* Tx/Rx subroutines */
-static int dpaa2_ni_consume_frames(struct dpaa2_ni_channel *,
- struct dpaa2_ni_fq **, uint32_t *);
-static int dpaa2_ni_rx(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
- struct dpaa2_fd *);
-static int dpaa2_ni_rx_err(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
+static int dpaa2_ni_consume_frames(struct dpaa2_channel *, struct dpaa2_ni_fq **,
+ uint32_t *);
+static int dpaa2_ni_rx(struct dpaa2_channel *, struct dpaa2_ni_fq *,
+ struct dpaa2_fd *, struct dpaa2_ni_rx_ctx *);
+static int dpaa2_ni_rx_err(struct dpaa2_channel *, struct dpaa2_ni_fq *,
struct dpaa2_fd *);
-static int dpaa2_ni_tx_conf(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
+static int dpaa2_ni_tx_conf(struct dpaa2_channel *, struct dpaa2_ni_fq *,
struct dpaa2_fd *);
/* sysctl(9) */
@@ -531,20 +514,16 @@ dpaa2_ni_attach(device_t dev)
DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);
- sc->bp_dmat = NULL;
- sc->st_dmat = NULL;
sc->rxd_dmat = NULL;
sc->qos_dmat = NULL;
- sc->qos_kcfg.type = DPAA2_BUF_STORE;
- sc->qos_kcfg.store.dmap = NULL;
- sc->qos_kcfg.store.paddr = 0;
- sc->qos_kcfg.store.vaddr = NULL;
+ sc->qos_kcfg.dmap = NULL;
+ sc->qos_kcfg.paddr = 0;
+ sc->qos_kcfg.vaddr = NULL;
- sc->rxd_kcfg.type = DPAA2_BUF_STORE;
- sc->rxd_kcfg.store.dmap = NULL;
- sc->rxd_kcfg.store.paddr = 0;
- sc->rxd_kcfg.store.vaddr = NULL;
+ sc->rxd_kcfg.dmap = NULL;
+ sc->rxd_kcfg.paddr = 0;
+ sc->rxd_kcfg.vaddr = NULL;
sc->mac.dpmac_id = 0;
sc->mac.phy_dev = NULL;
@@ -558,7 +537,7 @@ dpaa2_ni_attach(device_t dev)
}
/* Obtain MC portal. */
- mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]);
+ mcp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_MCP_RID(0)]);
mcp_dinfo = device_get_ivars(mcp_dev);
dinfo->portal = mcp_dinfo->portal;
@@ -600,16 +579,15 @@ dpaa2_ni_attach(device_t dev)
goto close_rc;
}
+ bzero(tq_name, sizeof(tq_name));
+ snprintf(tq_name, sizeof(tq_name), "%s_tqbp", device_get_nameunit(dev));
+
/*
* XXX-DSL: Release new buffers on Buffer Pool State Change Notification
* (BPSCN) returned as a result to the VDQ command instead.
* It is similar to CDAN processed in dpaa2_io_intr().
*/
/* Create a taskqueue thread to release new buffers to the pool. */
- TASK_INIT(&sc->bp_task, 0, dpaa2_ni_bp_task, sc);
- bzero(tq_name, sizeof (tq_name));
- snprintf(tq_name, sizeof (tq_name), "%s_tqbp",
- device_get_nameunit(dev));
sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
taskqueue_thread_enqueue, &sc->bp_taskq);
if (sc->bp_taskq == NULL) {
@@ -619,6 +597,11 @@ dpaa2_ni_attach(device_t dev)
}
taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);
+ /* sc->cleanup_taskq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK, */
+ /* taskqueue_thread_enqueue, &sc->cleanup_taskq); */
+ /* taskqueue_start_threads(&sc->cleanup_taskq, 1, PI_NET, */
+ /* "dpaa2_ch cleanup"); */
+
error = dpaa2_ni_setup(dev);
if (error) {
device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
@@ -999,212 +982,52 @@ err_exit:
static int
dpaa2_ni_setup_channels(device_t dev)
{
- device_t pdev = device_get_parent(dev);
- device_t child = dev;
- device_t io_dev, con_dev;
+ device_t iodev, condev, bpdev;
struct dpaa2_ni_softc *sc = device_get_softc(dev);
- struct dpaa2_ni_channel *channel;
- struct dpaa2_con_softc *consc;
- struct dpaa2_con_notif_cfg notif_cfg;
- struct dpaa2_devinfo *rc_info = device_get_ivars(pdev);
- struct dpaa2_devinfo *io_info;
- struct dpaa2_devinfo *con_info;
- struct dpaa2_io_notif_ctx *ctx;
- struct dpaa2_buf *buf;
- struct dpaa2_cmd cmd;
- struct sysctl_ctx_list *sysctl_ctx;
- struct sysctl_oid *node;
- struct sysctl_oid_list *parent;
uint32_t i, num_chan;
- uint16_t rc_token, con_token;
int error;
- /* Calculate number of the channels based on the allocated resources. */
- for (i = 0; i < IO_RES_NUM; i++) {
- if (!sc->res[IO_RID(i)]) {
+ /* Calculate number of the channels based on the allocated resources */
+ for (i = 0; i < DPAA2_NI_IO_RES_NUM; i++) {
+ if (!sc->res[DPAA2_NI_IO_RID(i)]) {
break;
}
}
num_chan = i;
- for (i = 0; i < CON_RES_NUM; i++) {
- if (!sc->res[CON_RID(i)]) {
+ for (i = 0; i < DPAA2_NI_CON_RES_NUM; i++) {
+ if (!sc->res[DPAA2_NI_CON_RID(i)]) {
break;
}
}
num_chan = i < num_chan ? i : num_chan;
- sc->chan_n = num_chan > DPAA2_NI_MAX_CHANNELS
- ? DPAA2_NI_MAX_CHANNELS : num_chan;
+ sc->chan_n = num_chan > DPAA2_MAX_CHANNELS
+ ? DPAA2_MAX_CHANNELS : num_chan;
sc->chan_n = sc->chan_n > sc->attr.num.queues
? sc->attr.num.queues : sc->chan_n;
- device_printf(dev, "channels=%d\n", sc->chan_n);
-
- sysctl_ctx = device_get_sysctl_ctx(sc->dev);
- parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
- node = SYSCTL_ADD_NODE(sysctl_ctx, parent, OID_AUTO, "channels",
- CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
- parent = SYSCTL_CHILDREN(node);
-
- /* Setup channels for the portal. */
- for (uint32_t i = 0; i < sc->chan_n; i++) {
- io_dev = (device_t) rman_get_start(sc->res[IO_RID(i)]);
- io_info = device_get_ivars(io_dev);
-
- con_dev = (device_t) rman_get_start(sc->res[CON_RID(i)]);
- consc = device_get_softc(con_dev);
- con_info = device_get_ivars(con_dev);
-
- DPAA2_CMD_INIT(&cmd);
+ KASSERT(sc->chan_n > 0u, ("%s: positive number of channels expected: "
+ "chan_n=%d", __func__, sc->chan_n));
- error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rc_info->id,
- &rc_token);
- if (error) {
- device_printf(dev, "%s: failed to open resource "
- "container: id=%d, error=%d\n", __func__,
- rc_info->id, error);
- return (error);
- }
- error = DPAA2_CMD_CON_OPEN(dev, child, &cmd, con_info->id,
- &con_token);
- if (error) {
- device_printf(dev, "%s: failed to open DPCON: id=%d, "
- "error=%d\n", __func__, con_info->id, error);
- (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
- rc_token));
- return (error);
- }
-
- error = DPAA2_CMD_CON_ENABLE(dev, child, &cmd);
- if (error) {
- device_printf(dev, "%s: failed to enable channel: "
- "dpcon_id=%d, chan_id=%d\n", __func__, con_info->id,
- consc->attr.chan_id);
- (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
- (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
- rc_token));
- return (error);
- }
-
- channel = malloc(sizeof(struct dpaa2_ni_channel), M_DPAA2_NI,
- M_WAITOK | M_ZERO);
- if (!channel) {
- device_printf(dev, "%s: failed to allocate a channel\n",
- __func__);
- (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
- (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
- rc_token));
- return (ENOMEM);
- }
-
- sc->channels[i] = channel;
-
- channel->id = consc->attr.chan_id;
- channel->flowid = i;
- channel->ni_dev = dev;
- channel->io_dev = io_dev;
- channel->con_dev = con_dev;
- channel->recycled_n = 0;
- channel->tx_frames = 0; /* for debug purposes */
- channel->tx_dropped = 0; /* for debug purposes */
- channel->rxq_n = 0;
-
- buf = &channel->store;
- buf->type = DPAA2_BUF_STORE;
- buf->store.dmat = NULL;
- buf->store.dmap = NULL;
- buf->store.paddr = 0;
- buf->store.vaddr = NULL;
-
- /* Setup WQ channel notification context. */
- ctx = &channel->ctx;
- ctx->qman_ctx = (uint64_t) ctx;
- ctx->cdan_en = true;
- ctx->fq_chan_id = channel->id;
- ctx->io_dev = channel->io_dev;
- ctx->channel = channel;
- ctx->poll = dpaa2_ni_poll;
-
- /* Register the new notification context. */
- error = DPAA2_SWP_CONF_WQ_CHANNEL(channel->io_dev, ctx);
- if (error) {
- device_printf(dev, "%s: failed to register notification "
- "context\n", __func__);
- (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
- (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
- rc_token));
- return (error);
- }
-
- /* Register DPCON notification with Management Complex. */
- notif_cfg.dpio_id = io_info->id;
- notif_cfg.prior = 0;
- notif_cfg.qman_ctx = ctx->qman_ctx;
- error = DPAA2_CMD_CON_SET_NOTIF(dev, child, &cmd, &notif_cfg);
- if (error) {
- device_printf(dev, "%s: failed to set DPCON "
- "notification: dpcon_id=%d, chan_id=%d\n", __func__,
- con_info->id, consc->attr.chan_id);
- (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
- (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
- rc_token));
- return (error);
- }
+ device_printf(dev, "channels=%d\n", sc->chan_n);
- /* Allocate initial # of Rx buffers and a channel storage. */
- error = dpaa2_ni_seed_buf_pool(sc, DPAA2_NI_BUFS_INIT);
- if (error) {
- device_printf(dev, "%s: failed to seed buffer pool\n",
- __func__);
- (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
- (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
- rc_token));
- return (error);
- }
- error = dpaa2_ni_seed_chan_storage(sc, channel);
- if (error) {
- device_printf(dev, "%s: failed to seed channel "
- "storage\n", __func__);
- (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
- (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
- rc_token));
- return (error);
- }
+ for (i = 0; i < sc->chan_n; i++) {
+ iodev = (device_t)rman_get_start(sc->res[DPAA2_NI_IO_RID(i)]);
+ condev = (device_t)rman_get_start(sc->res[DPAA2_NI_CON_RID(i)]);
+ /* Only one buffer pool available at the moment */
+ bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
- /* Prepare queues for this channel. */
- error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_TX_CONF);
- if (error) {
- device_printf(dev, "%s: failed to prepare TxConf "
- "queue: error=%d\n", __func__, error);
- (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
- (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
- rc_token));
- return (error);
- }
- error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_RX);
- if (error) {
- device_printf(dev, "%s: failed to prepare Rx queue: "
- "error=%d\n", __func__, error);
- (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
- (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
- rc_token));
+ error = dpaa2_chan_setup(dev, iodev, condev, bpdev,
+ &sc->channels[i], i, dpaa2_ni_cleanup_task);
+ if (error != 0) {
+ device_printf(dev, "%s: dpaa2_chan_setup() failed: "
+ "error=%d, chan_id=%d\n", __func__, error, i);
return (error);
}
-
- if (bootverbose) {
- device_printf(dev, "channel: dpio_id=%d "
- "dpcon_id=%d chan_id=%d, priorities=%d\n",
- io_info->id, con_info->id, channel->id,
- consc->attr.prior_num);
- }
-
- (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
- (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
- rc_token));
}
- /* There is exactly one Rx error queue per DPNI. */
- error = dpaa2_ni_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
- if (error) {
+ /* There is exactly one Rx error queue per network interface */
+ error = dpaa2_chan_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
+ if (error != 0) {
device_printf(dev, "%s: failed to prepare RxError queue: "
"error=%d\n", __func__, error);
return (error);
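The rewritten dpaa2_ni_setup_channels() now does two things: derive the number of usable channels from the allocated resources and delegate the per-channel wiring to dpaa2_chan_setup(). A minimal sketch of the count derivation, with illustrative names (usable_channels() is not a driver function):

    /* Channels cannot outnumber the allocated DPIOs or DPCONs, the
     * driver limit (DPAA2_MAX_CHANNELS) or the DPNI's queues. */
    static unsigned int
    usable_channels(unsigned int io_n, unsigned int con_n,
        unsigned int max_chan, unsigned int queue_n)
    {
            unsigned int n = (io_n < con_n) ? io_n : con_n;

            n = (n > max_chan) ? max_chan : n;
            n = (n > queue_n) ? queue_n : n;
            return (n);
    }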
@@ -1214,65 +1037,6 @@ dpaa2_ni_setup_channels(device_t dev)
}
/**
- * @brief Performs an initial configuration of the frame queues.
- */
-static int
-dpaa2_ni_setup_fq(device_t dev, struct dpaa2_ni_channel *chan,
- enum dpaa2_ni_queue_type queue_type)
-{
- struct dpaa2_ni_softc *sc = device_get_softc(dev);
- struct dpaa2_ni_fq *fq;
-
- switch (queue_type) {
- case DPAA2_NI_QUEUE_TX_CONF:
- /* One queue per channel. */
- fq = &chan->txc_queue;
-
- fq->consume = dpaa2_ni_tx_conf;
- fq->chan = chan;
- fq->flowid = chan->flowid;
- fq->tc = 0; /* ignored */
- fq->type = queue_type;
-
- break;
- case DPAA2_NI_QUEUE_RX:
- KASSERT(sc->attr.num.rx_tcs <= DPAA2_NI_MAX_TCS,
- ("too many Rx traffic classes: rx_tcs=%d\n",
- sc->attr.num.rx_tcs));
-
- /* One queue per Rx traffic class within a channel. */
- for (int i = 0; i < sc->attr.num.rx_tcs; i++) {
- fq = &chan->rx_queues[i];
-
- fq->consume = dpaa2_ni_rx;
- fq->chan = chan;
- fq->flowid = chan->flowid;
- fq->tc = (uint8_t) i;
- fq->type = queue_type;
-
- chan->rxq_n++;
- }
- break;
- case DPAA2_NI_QUEUE_RX_ERR:
- /* One queue per network interface. */
- fq = &sc->rxe_queue;
-
- fq->consume = dpaa2_ni_rx_err;
- fq->chan = chan;
- fq->flowid = 0; /* ignored */
- fq->tc = 0; /* ignored */
- fq->type = queue_type;
- break;
- default:
- device_printf(dev, "%s: unexpected frame queue type: %d\n",
- __func__, queue_type);
- return (EINVAL);
- }
-
- return (0);
-}
-
-/**
* @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
*/
static int
@@ -1288,7 +1052,7 @@ dpaa2_ni_bind(device_t dev)
struct dpaa2_cmd cmd;
struct dpaa2_ni_pools_cfg pools_cfg;
struct dpaa2_ni_err_cfg err_cfg;
- struct dpaa2_ni_channel *chan;
+ struct dpaa2_channel *chan;
uint16_t rc_token, ni_token;
int error;
@@ -1308,7 +1072,7 @@ dpaa2_ni_bind(device_t dev)
}
/* Select buffer pool (only one available at the moment). */
- bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
+ bp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
bp_info = device_get_ivars(bp_dev);
/* Configure buffers pool. */
@@ -1499,6 +1263,7 @@ dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
device_t pdev = device_get_parent(dev);
device_t child = dev;
struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct dpaa2_channel *ch = fq->chan;
struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
struct dpaa2_devinfo *con_info;
@@ -1528,7 +1293,7 @@ dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
/* Obtain DPCON associated with the FQ's channel. */
con_info = device_get_ivars(fq->chan->con_dev);
- KASSERT(sc->attr.num.tx_tcs <= DPAA2_NI_MAX_TCS,
+ KASSERT(sc->attr.num.tx_tcs <= DPAA2_MAX_TCS,
("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
sc->attr.num.tx_tcs));
KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
@@ -1564,29 +1329,40 @@ dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);
/* Allocate Tx ring buffer. */
- tx->idx_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF,
- M_NOWAIT, &tx->lock);
- if (tx->idx_br == NULL) {
+ tx->br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
+ &tx->lock);
+ if (tx->br == NULL) {
device_printf(dev, "%s: failed to setup Tx ring buffer"
" (2) fqid=%d\n", __func__, tx->fqid);
goto close_ni;
}
- /* Configure Tx buffers. */
+ /* Configure Tx buffers */
for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
- buf = &tx->buf[j];
- buf->type = DPAA2_BUF_TX;
- buf->tx.dmat = buf->tx.sgt_dmat = NULL;
- buf->tx.dmap = buf->tx.sgt_dmap = NULL;
- buf->tx.paddr = buf->tx.sgt_paddr = 0;
- buf->tx.vaddr = buf->tx.sgt_vaddr = NULL;
- buf->tx.m = NULL;
- buf->tx.idx = j;
-
- error = dpaa2_ni_seed_txbuf(sc, buf);
-
- /* Add index of the Tx buffer to the ring. */
- buf_ring_enqueue(tx->idx_br, (void *) j);
+ buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
+ M_WAITOK);
+ if (buf == NULL) {
+ device_printf(dev, "%s: malloc() failed (buf)\n",
+ __func__);
+ return (ENOMEM);
+ }
+ /* Keep DMA tag and Tx ring linked to the buffer */
+ DPAA2_BUF_INIT_TAGOPT(buf, ch->tx_dmat, tx);
+
+ buf->sgt = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
+ M_WAITOK);
+ if (buf->sgt == NULL) {
+ device_printf(dev, "%s: malloc() failed (sgt)\n",
+ __func__);
+ return (ENOMEM);
+ }
+ /* Link SGT to DMA tag and back to its Tx buffer */
+ DPAA2_BUF_INIT_TAGOPT(buf->sgt, ch->sgt_dmat, buf);
+
+ error = dpaa2_buf_seed_txb(dev, buf);
+
+ /* Add Tx buffer to the ring */
+ buf_ring_enqueue(tx->br, buf);
}
tx_rings_n++;
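Each Tx buffer is now paired with its own S/G table buffer, and the generic opt pointer records ownership in both directions: the data buffer's opt points at its Tx ring, while the S/G buffer's opt points back at the data buffer, which is how the confirmation path later finds the ring to return the buffer to. A simplified userspace model of the linkage established by DPAA2_BUF_INIT_TAGOPT() (struct buf and make_txbuf() are illustrative):

    #include <stdlib.h>

    struct buf {
            void       *dmat;   /* DMA tag the buffer is mapped with */
            void       *opt;    /* owner: Tx ring for a data buffer,
                                 * the data buffer for its S/G table */
            struct buf *sgt;    /* S/G table buffer (data buffers only) */
    };

    static struct buf *
    make_txbuf(void *tx_dmat, void *sgt_dmat, void *tx_ring)
    {
            struct buf *b = calloc(1, sizeof(*b));

            if (b == NULL)
                    return (NULL);
            b->dmat = tx_dmat;
            b->opt = tx_ring;                 /* buf -> Tx ring */
            b->sgt = calloc(1, sizeof(*b));
            if (b->sgt == NULL) {
                    free(b);
                    return (NULL);
            }
            b->sgt->dmat = sgt_dmat;
            b->sgt->opt = b;                  /* S/G table -> buf */
            return (b);
    }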
@@ -2021,58 +1797,6 @@ dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
("unexpected buffer alignment: %d\n", sc->buf_align));
- /* DMA tag to allocate buffers for Rx buffer pool. */
- error = bus_dma_tag_create(
- bus_get_dma_tag(dev),
- sc->buf_align, 0, /* alignment, boundary */
- BUS_SPACE_MAXADDR, /* low restricted addr */
- BUS_SPACE_MAXADDR, /* high restricted addr */
- NULL, NULL, /* filter, filterarg */
- BUF_SIZE, 1, /* maxsize, nsegments */
- BUF_SIZE, 0, /* maxsegsize, flags */
- NULL, NULL, /* lockfunc, lockarg */
- &sc->bp_dmat);
- if (error) {
- device_printf(dev, "%s: failed to create DMA tag for buffer "
- "pool\n", __func__);
- return (error);
- }
-
- /* DMA tag to map Tx mbufs. */
- error = bus_dma_tag_create(
- bus_get_dma_tag(dev),
- sc->buf_align, 0, /* alignment, boundary */
- BUS_SPACE_MAXADDR, /* low restricted addr */
- BUS_SPACE_MAXADDR, /* high restricted addr */
- NULL, NULL, /* filter, filterarg */
- DPAA2_TX_SEGS_MAXSZ, /* maxsize */
- DPAA2_TX_SEGLIMIT, /* nsegments */
- DPAA2_TX_SEG_SZ, 0, /* maxsegsize, flags */
- NULL, NULL, /* lockfunc, lockarg */
- &sc->tx_dmat);
- if (error) {
- device_printf(dev, "%s: failed to create DMA tag for Tx "
- "buffers\n", __func__);
- return (error);
- }
-
- /* DMA tag to allocate channel storage. */
- error = bus_dma_tag_create(
- bus_get_dma_tag(dev),
- ETH_STORE_ALIGN, 0, /* alignment, boundary */
- BUS_SPACE_MAXADDR, /* low restricted addr */
- BUS_SPACE_MAXADDR, /* high restricted addr */
- NULL, NULL, /* filter, filterarg */
- ETH_STORE_SIZE, 1, /* maxsize, nsegments */
- ETH_STORE_SIZE, 0, /* maxsegsize, flags */
- NULL, NULL, /* lockfunc, lockarg */
- &sc->st_dmat);
- if (error) {
- device_printf(dev, "%s: failed to create DMA tag for channel "
- "storage\n", __func__);
- return (error);
- }
-
/* DMA tag for Rx distribution key. */
error = bus_dma_tag_create(
bus_get_dma_tag(dev),
@@ -2106,22 +1830,6 @@ dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
return (error);
}
- error = bus_dma_tag_create(
- bus_get_dma_tag(dev),
- PAGE_SIZE, 0, /* alignment, boundary */
- BUS_SPACE_MAXADDR, /* low restricted addr */
- BUS_SPACE_MAXADDR, /* high restricted addr */
- NULL, NULL, /* filter, filterarg */
- DPAA2_TX_SGT_SZ, 1, /* maxsize, nsegments */
- DPAA2_TX_SGT_SZ, 0, /* maxsegsize, flags */
- NULL, NULL, /* lockfunc, lockarg */
- &sc->sgt_dmat);
- if (error) {
- device_printf(dev, "%s: failed to create DMA tag for S/G "
- "tables\n", __func__);
- return (error);
- }
-
return (0);
}
@@ -2169,7 +1877,7 @@ dpaa2_ni_set_buf_layout(device_t dev)
* We need to ensure that the buffer size seen by WRIOP is a multiple
* of 64 or 256 bytes depending on the WRIOP version.
*/
- sc->buf_sz = ALIGN_DOWN(BUF_SIZE, sc->buf_align);
+ sc->buf_sz = ALIGN_DOWN(DPAA2_RX_BUF_SIZE, sc->buf_align);
if (bootverbose) {
device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
@@ -2389,23 +2097,22 @@ dpaa2_ni_set_qos_table(device_t dev)
* Allocate a buffer visible to the device to hold the QoS table key
* configuration.
*/
- KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer",
- __func__));
- if (__predict_true(buf->store.dmat == NULL)) {
- buf->store.dmat = sc->qos_dmat;
+
+ if (__predict_true(buf->dmat == NULL)) {
+ buf->dmat = sc->qos_dmat;
}
- error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
- BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
+ error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
+ BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
if (error) {
device_printf(dev, "%s: failed to allocate a buffer for QoS key "
"configuration\n", __func__);
goto err_exit;
}
- error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
- buf->store.vaddr, ETH_QOS_KCFG_BUF_SIZE, dpaa2_ni_dmamap_cb,
- &buf->store.paddr, BUS_DMA_NOWAIT);
+ error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
+ ETH_QOS_KCFG_BUF_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
+ BUS_DMA_NOWAIT);
if (error) {
device_printf(dev, "%s: failed to map QoS key configuration "
"buffer into bus space\n", __func__);
@@ -2430,7 +2137,7 @@ dpaa2_ni_set_qos_table(device_t dev)
tbl.default_tc = 0;
tbl.discard_on_miss = false;
tbl.keep_entries = false;
- tbl.kcfg_busaddr = buf->store.paddr;
+ tbl.kcfg_busaddr = buf->paddr;
error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl);
if (error) {
device_printf(dev, "%s: failed to set QoS table\n", __func__);
@@ -2761,22 +2468,22 @@ static int
dpaa2_ni_transmit(if_t ifp, struct mbuf *m)
{
struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
- struct dpaa2_ni_channel *chan;
- struct dpaa2_ni_tx_ring *tx;
+ struct dpaa2_channel *ch;
uint32_t fqid;
bool found = false;
- int chan_n = 0;
+ int chidx = 0, error;
- if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)))
+ if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
return (0);
+ }
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
fqid = m->m_pkthdr.flowid;
for (int i = 0; i < sc->chan_n; i++) {
- chan = sc->channels[i];
- for (int j = 0; j < chan->rxq_n; j++) {
- if (fqid == chan->rx_queues[j].fqid) {
- chan_n = chan->flowid;
+ ch = sc->channels[i];
+ for (int j = 0; j < ch->rxq_n; j++) {
+ if (fqid == ch->rx_queues[j].fqid) {
+ chidx = ch->flowid;
found = true;
break;
}
@@ -2786,13 +2493,16 @@ dpaa2_ni_transmit(if_t ifp, struct mbuf *m)
}
}
}
- tx = DPAA2_TX_RING(sc, chan_n, 0);
- TX_LOCK(tx);
- dpaa2_ni_tx_locked(sc, tx, m);
- TX_UNLOCK(tx);
+ ch = sc->channels[chidx];
+ error = buf_ring_enqueue(ch->xmit_br, m);
+ if (__predict_false(error != 0)) {
+ m_freem(m);
+ } else {
+ taskqueue_enqueue(ch->cleanup_tq, &ch->cleanup_task);
+ }
- return (0);
+ return (error);
}
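The transmit path is now asymmetric: dpaa2_ni_transmit() only picks a channel, enqueues the mbuf on that channel's software ring and kicks the cleanup task, while the actual DMA mapping and hardware enqueue happen in the task's thread. The channel selection above, restated as a standalone helper (flowid_to_channel() is illustrative; the fields are the driver's):

    /* Map a packet's flowid (an Rx FQ id) back to the channel owning
     * that Rx queue, so Tx follows the same flow affinity as Rx. */
    static int
    flowid_to_channel(struct dpaa2_ni_softc *sc, uint32_t fqid)
    {
            for (int i = 0; i < sc->chan_n; i++) {
                    struct dpaa2_channel *ch = sc->channels[i];

                    for (int j = 0; j < ch->rxq_n; j++) {
                            if (ch->rx_queues[j].fqid == fqid)
                                    return (ch->flowid);
                    }
            }
            return (0);   /* no match: fall back to channel 0 */
    }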
static void
@@ -3092,175 +2802,172 @@ err_exit:
}
/**
- * @brief Callback to obtain a physical address of the only DMA segment mapped.
+ * @brief Execute channel's Rx/Tx routines.
+ *
+ * NOTE: Must not be re-entered for the same channel. This is ensured by
+ * enqueuing the cleanup routine on a single-threaded taskqueue.
*/
static void
-dpaa2_ni_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+dpaa2_ni_cleanup_task(void *arg, int count)
{
- if (error == 0) {
- KASSERT(nseg == 1, ("too many segments: nseg=%d\n", nseg));
- *(bus_addr_t *) arg = segs[0].ds_addr;
- }
-}
+ struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
+ struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
+ int error, rxc, txc;
-/**
- * @brief Release new buffers to the buffer pool if necessary.
- */
-static void
-dpaa2_ni_bp_task(void *arg, int count)
-{
- device_t bp_dev;
- struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
- struct dpaa2_bp_softc *bpsc;
- struct dpaa2_bp_conf bp_conf;
- const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
- int error;
+ for (int i = 0; i < DPAA2_CLEAN_BUDGET; i++) {
+ rxc = dpaa2_ni_rx_cleanup(ch);
+ txc = dpaa2_ni_tx_cleanup(ch);
- /* There's only one buffer pool for now. */
- bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
- bpsc = device_get_softc(bp_dev);
+ if (__predict_false((if_getdrvflags(sc->ifp) &
+ IFF_DRV_RUNNING) == 0)) {
+ return;
+ }
- /* Get state of the buffer pool. */
- error = DPAA2_SWP_QUERY_BP(sc->channels[0]->io_dev, bpsc->attr.bpid,
- &bp_conf);
- if (error) {
- device_printf(sc->dev, "%s: failed to query buffer pool "
- "configuration: error=%d\n", __func__, error);
- return;
+ if ((txc != DPAA2_TX_BUDGET) && (rxc != DPAA2_RX_BUDGET)) {
+ break;
+ }
}
- /* Double allocated buffers number if free buffers < 25%. */
- if (bp_conf.free_bufn < (buf_num >> 2)) {
- (void)dpaa2_ni_seed_buf_pool(sc, buf_num);
- DPAA2_ATOMIC_XCHG(&sc->buf_free, bp_conf.free_bufn);
+ /* Re-arm channel to generate CDAN */
+ error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, &ch->ctx);
+ if (error != 0) {
+ panic("%s: failed to rearm channel: chan_id=%d, error=%d\n",
+ __func__, ch->id, error);
}
}
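The non-reentrancy relied upon above comes from the taskqueue itself: each channel's cleanup_task is served by a taskqueue with exactly one thread, so two runs for the same channel can never overlap. A sketch of that setup, following the commented-out fragment in dpaa2_ni_attach(); the per-channel creation presumably lives in dpaa2_chan_setup(), which is not shown in this part of the diff:

    /* one service thread => tasks on this queue are serialized */
    ch->cleanup_tq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK,
        taskqueue_thread_enqueue, &ch->cleanup_tq);
    taskqueue_start_threads(&ch->cleanup_tq, 1, PI_NET, "dpaa2_ch cleanup");
    TASK_INIT(&ch->cleanup_task, 0, dpaa2_ni_cleanup_task, ch);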
/**
* @brief Poll frames from a specific channel when CDAN is received.
- *
- * NOTE: To be called from the DPIO interrupt handler.
*/
-static void
-dpaa2_ni_poll(void *arg)
+static int
+dpaa2_ni_rx_cleanup(struct dpaa2_channel *ch)
{
- struct dpaa2_ni_channel *chan = (struct dpaa2_ni_channel *) arg;
- struct dpaa2_io_softc *iosc;
- struct dpaa2_swp *swp;
+ struct dpaa2_io_softc *iosc = device_get_softc(ch->io_dev);
+ struct dpaa2_swp *swp = iosc->swp;
struct dpaa2_ni_fq *fq;
+ struct dpaa2_buf *buf = &ch->store;
+ int budget = DPAA2_RX_BUDGET;
int error, consumed = 0;
- KASSERT(chan != NULL, ("%s: channel is NULL", __func__));
-
- iosc = device_get_softc(chan->io_dev);
- swp = iosc->swp;
-
do {
- error = dpaa2_swp_pull(swp, chan->id, &chan->store,
- ETH_STORE_FRAMES);
+ error = dpaa2_swp_pull(swp, ch->id, buf, DPAA2_ETH_STORE_FRAMES);
if (error) {
- device_printf(chan->ni_dev, "%s: failed to pull frames: "
- "chan_id=%d, error=%d\n", __func__, chan->id, error);
+ device_printf(ch->ni_dev, "%s: failed to pull frames: "
+ "chan_id=%d, error=%d\n", __func__, ch->id, error);
break;
}
-
- /*
- * TODO: Combine frames from the same Rx queue returned as
- * a result to the current VDQ command into a chain (linked
- * with m_nextpkt) to ammortize the FQ lock.
- */
- error = dpaa2_ni_consume_frames(chan, &fq, &consumed);
- if (error == ENOENT) {
+ error = dpaa2_ni_consume_frames(ch, &fq, &consumed);
+ if (error == ENOENT || error == EALREADY) {
break;
}
if (error == ETIMEDOUT) {
- device_printf(chan->ni_dev, "%s: timeout to consume "
- "frames: chan_id=%d\n", __func__, chan->id);
+ device_printf(ch->ni_dev, "%s: timeout to consume "
+ "frames: chan_id=%d\n", __func__, ch->id);
}
- } while (true);
+ } while (--budget);
- /* Re-arm channel to generate CDAN. */
- error = DPAA2_SWP_CONF_WQ_CHANNEL(chan->io_dev, &chan->ctx);
- if (error) {
- device_printf(chan->ni_dev, "%s: failed to rearm: chan_id=%d, "
- "error=%d\n", __func__, chan->id, error);
- }
+ return (DPAA2_RX_BUDGET - budget);
+}
+
+static int
+dpaa2_ni_tx_cleanup(struct dpaa2_channel *ch)
+{
+ struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
+ struct dpaa2_ni_tx_ring *tx = &ch->txc_queue.tx_rings[0];
+ struct mbuf *m = NULL;
+ int budget = DPAA2_TX_BUDGET;
+
+ do {
+ mtx_assert(&ch->xmit_mtx, MA_NOTOWNED);
+ mtx_lock(&ch->xmit_mtx);
+ m = buf_ring_dequeue_sc(ch->xmit_br);
+ mtx_unlock(&ch->xmit_mtx);
+
+ if (__predict_false(m == NULL)) {
+ /* TODO: Do not give up easily */
+ break;
+ } else {
+ dpaa2_ni_tx(sc, ch, tx, m);
+ }
+ } while (--budget);
+
+ return (DPAA2_TX_BUDGET - budget);
}
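Both cleanup routines use the same budget idiom: do at most N units of work, report how much was actually done, and let dpaa2_ni_cleanup_task() keep iterating only while either side still exhausts its budget. Reduced to a skeleton (drain_budgeted() and pull_one() are illustrative):

    /* Returns the amount of work done, in [0, max]. */
    static int
    drain_budgeted(int max, int (*pull_one)(void *), void *arg)
    {
            int budget = max;

            do {
                    if (pull_one(arg) != 0)
                            break;          /* source drained or error */
            } while (--budget);

            return (max - budget);
    }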
-/**
- * @brief Transmit mbufs.
- */
static void
-dpaa2_ni_tx_locked(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
- struct mbuf *m)
+dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch,
+ struct dpaa2_ni_tx_ring *tx, struct mbuf *m)
{
+ device_t dev = sc->dev;
struct dpaa2_ni_fq *fq = tx->fq;
- struct dpaa2_buf *buf;
+ struct dpaa2_buf *buf, *sgt;
struct dpaa2_fd fd;
- struct mbuf *m_d;
- bus_dma_segment_t txsegs[DPAA2_TX_SEGLIMIT];
- uint64_t idx;
- void *pidx;
- int error, rc, txnsegs;
-
- /* Obtain an index of a Tx buffer. */
- pidx = buf_ring_dequeue_sc(tx->idx_br);
- if (__predict_false(pidx == NULL)) {
- /* TODO: Do not give up easily. */
+ struct mbuf *md;
+ bus_dma_segment_t segs[DPAA2_TX_SEGLIMIT];
+ int rc, nsegs;
+ int error;
+
+ mtx_assert(&tx->lock, MA_NOTOWNED);
+ mtx_lock(&tx->lock);
+ buf = buf_ring_dequeue_sc(tx->br);
+ mtx_unlock(&tx->lock);
+ if (__predict_false(buf == NULL)) {
+ /* TODO: Do not give up easily */
m_freem(m);
return;
} else {
- idx = (uint64_t) pidx;
- buf = &tx->buf[idx];
- buf->tx.m = m;
- buf->tx.sgt_paddr = 0;
+ DPAA2_BUF_ASSERT_TXREADY(buf);
+ buf->m = m;
+ sgt = buf->sgt;
}
- /* Load mbuf to transmit. */
- error = bus_dmamap_load_mbuf_sg(buf->tx.dmat, buf->tx.dmap, m,
- txsegs, &txnsegs, BUS_DMA_NOWAIT);
+#if defined(INVARIANTS)
+ struct dpaa2_ni_tx_ring *btx = (struct dpaa2_ni_tx_ring *)buf->opt;
+ KASSERT(buf->opt == tx, ("%s: unexpected Tx ring", __func__));
+ KASSERT(btx->fq->chan == ch, ("%s: unexpected channel", __func__));
+#endif /* INVARIANTS */
+
+ error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs, &nsegs,
+ BUS_DMA_NOWAIT);
if (__predict_false(error != 0)) {
/* Too many fragments, trying to defragment... */
- m_d = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
- if (m_d == NULL) {
- device_printf(sc->dev, "%s: mbuf "
- "defragmentation failed\n", __func__);
+ md = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
+ if (md == NULL) {
+ device_printf(dev, "%s: m_collapse() failed\n", __func__);
fq->chan->tx_dropped++;
goto err;
}
- buf->tx.m = m = m_d;
- error = bus_dmamap_load_mbuf_sg(buf->tx.dmat,
- buf->tx.dmap, m, txsegs, &txnsegs, BUS_DMA_NOWAIT);
+ buf->m = m = md;
+ error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs,
+ &nsegs, BUS_DMA_NOWAIT);
if (__predict_false(error != 0)) {
- device_printf(sc->dev, "%s: failed to load "
- "mbuf: error=%d\n", __func__, error);
+ device_printf(dev, "%s: bus_dmamap_load_mbuf_sg() "
+ "failed: error=%d\n", __func__, error);
fq->chan->tx_dropped++;
goto err;
}
}
- /* Build frame descriptor. */
- error = dpaa2_ni_build_fd(sc, tx, buf, txsegs, txnsegs, &fd);
+ error = dpaa2_ni_build_fd(sc, tx, buf, segs, nsegs, &fd);
if (__predict_false(error != 0)) {
- device_printf(sc->dev, "%s: failed to build frame "
- "descriptor: error=%d\n", __func__, error);
+ device_printf(dev, "%s: failed to build frame descriptor: "
+ "error=%d\n", __func__, error);
fq->chan->tx_dropped++;
goto err_unload;
}
- /* TODO: Enqueue several frames in a single command. */
+ /* TODO: Enqueue several frames in a single command */
for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
- /* TODO: Return error codes instead of # of frames. */
- rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid,
- &fd, 1);
+ /* TODO: Return error codes instead of # of frames */
+ rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid, &fd, 1);
if (rc == 1) {
break;
}
}
- bus_dmamap_sync(buf->tx.dmat, buf->tx.dmap, BUS_DMASYNC_PREWRITE);
- bus_dmamap_sync(buf->tx.sgt_dmat, buf->tx.sgt_dmap, BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_PREWRITE);
if (rc != 1) {
fq->chan->tx_dropped++;
@@ -3271,58 +2978,98 @@ dpaa2_ni_tx_locked(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
return;
err_unload:
- bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap);
- if (buf->tx.sgt_paddr != 0) {
- bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
+ bus_dmamap_unload(buf->dmat, buf->dmap);
+ if (sgt->paddr != 0) {
+ bus_dmamap_unload(sgt->dmat, sgt->dmap);
}
err:
- m_freem(buf->tx.m);
- buf_ring_enqueue(tx->idx_br, pidx);
+ m_freem(buf->m);
+ buf_ring_enqueue(tx->br, buf);
}
static int
-dpaa2_ni_consume_frames(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq **src,
+dpaa2_ni_consume_frames(struct dpaa2_channel *chan, struct dpaa2_ni_fq **src,
uint32_t *consumed)
{
struct dpaa2_ni_fq *fq = NULL;
struct dpaa2_dq *dq;
struct dpaa2_fd *fd;
+ struct dpaa2_ni_rx_ctx ctx = {
+ .head = NULL,
+ .tail = NULL,
+ .cnt = 0,
+ .last = false
+ };
int rc, frames = 0;
do {
- rc = dpaa2_ni_chan_storage_next(chan, &dq);
+ rc = dpaa2_chan_next_frame(chan, &dq);
if (rc == EINPROGRESS) {
if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
fd = &dq->fdr.fd;
fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
- fq->consume(chan, fq, fd);
+
+ switch (fq->type) {
+ case DPAA2_NI_QUEUE_RX:
+ (void)dpaa2_ni_rx(chan, fq, fd, &ctx);
+ break;
+ case DPAA2_NI_QUEUE_RX_ERR:
+ (void)dpaa2_ni_rx_err(chan, fq, fd);
+ break;
+ case DPAA2_NI_QUEUE_TX_CONF:
+ (void)dpaa2_ni_tx_conf(chan, fq, fd);
+ break;
+ default:
+ panic("%s: unknown queue type (1)",
+ __func__);
+ }
frames++;
}
} else if (rc == EALREADY || rc == ENOENT) {
if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
fd = &dq->fdr.fd;
fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
- fq->consume(chan, fq, fd);
+
+ switch (fq->type) {
+ case DPAA2_NI_QUEUE_RX:
+ /*
+ * Last VDQ response (mbuf) in a chain
+ * obtained from the Rx queue.
+ */
+ ctx.last = true;
+ (void)dpaa2_ni_rx(chan, fq, fd, &ctx);
+ break;
+ case DPAA2_NI_QUEUE_RX_ERR:
+ (void)dpaa2_ni_rx_err(chan, fq, fd);
+ break;
+ case DPAA2_NI_QUEUE_TX_CONF:
+ (void)dpaa2_ni_tx_conf(chan, fq, fd);
+ break;
+ default:
+ panic("%s: unknown queue type (2)",
+ __func__);
+ }
frames++;
}
break;
} else {
- KASSERT(1 == 0, ("%s: should not reach here", __func__));
+ panic("%s: should not reach here: rc=%d", __func__, rc);
}
} while (true);
- KASSERT(chan->store_idx < chan->store_sz,
- ("channel store idx >= size: store_idx=%d, store_sz=%d",
- chan->store_idx, chan->store_sz));
+ KASSERT(chan->store_idx < chan->store_sz, ("%s: store_idx(%d) >= "
+ "store_sz(%d)", __func__, chan->store_idx, chan->store_sz));
/*
- * A dequeue operation pulls frames from a single queue into the store.
+ * VDQ operation pulls frames from a single queue into the store.
* Return the frame queue and a number of consumed frames as an output.
*/
- if (src != NULL)
+ if (src != NULL) {
*src = fq;
- if (consumed != NULL)
+ }
+ if (consumed != NULL) {
*consumed = frames;
+ }
return (rc);
}
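The loop above is paced by the return code of dpaa2_chan_next_frame(), which keeps the protocol of the removed dpaa2_ni_chan_storage_next() (see the deletion near the end of this file):

    /*
     * rc = dpaa2_chan_next_frame(chan, &dq);
     *
     *   EINPROGRESS  a frame was returned and the VDQ command is
     *                still active -- keep consuming
     *   EALREADY     the VDQ command expired; dq (unless it is a
     *                NULL response) holds the last frame -- consume
     *                it, mark the Rx chain complete and stop
     *   ENOENT       the frame queue is empty -- stop
     */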
@@ -3331,32 +3078,33 @@ dpaa2_ni_consume_frames(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq **src,
* @brief Receive frames.
*/
static int
-dpaa2_ni_rx(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
- struct dpaa2_fd *fd)
+dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd,
+ struct dpaa2_ni_rx_ctx *ctx)
{
- struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
+ bus_addr_t paddr = (bus_addr_t)fd->addr;
+ struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
+ struct dpaa2_buf *buf = fa->buf;
+ struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
+ struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
struct dpaa2_bp_softc *bpsc;
- struct dpaa2_buf *buf;
- struct dpaa2_fa *fa;
- if_t ifp = sc->ifp;
struct mbuf *m;
- device_t bp_dev;
- bus_addr_t paddr = (bus_addr_t) fd->addr;
+ device_t bpdev;
bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
void *buf_data;
int buf_len, error, released_n = 0;
- fa = (struct dpaa2_fa *) PHYS_TO_DMAP(paddr);
- buf = fa->buf;
-
KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
- KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
- if (__predict_false(paddr != buf->rx.paddr)) {
+ /*
+ * NOTE: Current channel might not be the same as the "buffer" channel
+ * and it's fine. It must not be NULL though.
+ */
+ KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
+
+ if (__predict_false(paddr != buf->paddr)) {
panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
- __func__, paddr, buf->rx.paddr);
+ __func__, paddr, buf->paddr);
}
- /* Update statistics. */
switch (dpaa2_ni_fd_err(fd)) {
case 1: /* Enqueue rejected by QMan */
sc->rx_enq_rej_frames++;
@@ -3378,70 +3126,88 @@ dpaa2_ni_rx(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
break;
}
- m = buf->rx.m;
- buf->rx.m = NULL;
- bus_dmamap_sync(buf->rx.dmat, buf->rx.dmap, BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap);
+ mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
+ mtx_lock(&bch->dma_mtx);
+ bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(buf->dmat, buf->dmap);
+ m = buf->m;
buf_len = dpaa2_ni_fd_data_len(fd);
- buf_data = (uint8_t *)buf->rx.vaddr + dpaa2_ni_fd_offset(fd);
+ buf_data = (uint8_t *)buf->vaddr + dpaa2_ni_fd_offset(fd);
+ /* Prepare buffer to be re-cycled */
+ buf->m = NULL;
+ buf->paddr = 0;
+ buf->vaddr = NULL;
+ buf->seg.ds_addr = 0;
+ buf->seg.ds_len = 0;
+ buf->nseg = 0;
- /* Prefetch mbuf data. */
- __builtin_prefetch(buf_data);
+ mtx_unlock(&bch->dma_mtx);
- /* Write value to mbuf (avoid reading). */
m->m_flags |= M_PKTHDR;
m->m_data = buf_data;
m->m_len = buf_len;
m->m_pkthdr.len = buf_len;
- m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.rcvif = sc->ifp;
m->m_pkthdr.flowid = fq->fqid;
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
- if_input(ifp, m);
+ if (ctx->head == NULL) {
+ KASSERT(ctx->tail == NULL, ("%s: tail already given?", __func__));
+ ctx->head = m;
+ ctx->tail = m;
+ } else {
+ KASSERT(ctx->head != NULL, ("%s: head is NULL", __func__));
+ ctx->tail->m_nextpkt = m;
+ ctx->tail = m;
+ }
+ ctx->cnt++;
+
+ if (ctx->last) {
+ ctx->tail->m_nextpkt = NULL;
+ if_input(sc->ifp, ctx->head);
+ }
+
+ /* Keep the buffer to be recycled */
+ ch->recycled[ch->recycled_n++] = buf;
- /* Keep the buffer to be recycled. */
- chan->recycled[chan->recycled_n++] = buf;
- KASSERT(chan->recycled_n <= DPAA2_SWP_BUFS_PER_CMD,
- ("%s: too many buffers to recycle", __func__));
+ /* Re-seed and release recycled buffers back to the pool */
+ if (ch->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
+ /* Release new buffers to the pool if needed */
+ taskqueue_enqueue(sc->bp_taskq, &ch->bp_task);
- /* Re-seed and release recycled buffers back to the pool. */
- if (chan->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
- /* Release new buffers to the pool if needed. */
- taskqueue_enqueue(sc->bp_taskq, &sc->bp_task);
+ for (int i = 0; i < ch->recycled_n; i++) {
+ buf = ch->recycled[i];
+ bch = (struct dpaa2_channel *)buf->opt;
- for (int i = 0; i < chan->recycled_n; i++) {
- buf = chan->recycled[i];
+ mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
+ mtx_lock(&bch->dma_mtx);
+ error = dpaa2_buf_seed_rxb(sc->dev, buf,
+ DPAA2_RX_BUF_SIZE, &bch->dma_mtx);
+ mtx_unlock(&bch->dma_mtx);
- /* Seed recycled buffer. */
- error = dpaa2_ni_seed_rxbuf(sc, buf);
- KASSERT(error == 0, ("%s: failed to seed recycled "
- "buffer: error=%d", __func__, error));
if (__predict_false(error != 0)) {
- device_printf(sc->dev, "%s: failed to seed "
- "recycled buffer: error=%d\n", __func__,
- error);
- continue;
+ /* TODO: What else to do with the buffer? */
+ panic("%s: failed to recycle buffer: error=%d",
+ __func__, error);
}
- /* Prepare buffer to be released in a single command. */
- released[released_n++] = buf->rx.paddr;
+ /* Prepare buffer to be released in a single command */
+ released[released_n++] = buf->paddr;
}
- /* There's only one buffer pool for now. */
- bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
- bpsc = device_get_softc(bp_dev);
+ /* There's only one buffer pool for now */
+ bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
+ bpsc = device_get_softc(bpdev);
- error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid,
+ error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid,
released, released_n);
if (__predict_false(error != 0)) {
device_printf(sc->dev, "%s: failed to release buffers "
"to the pool: error=%d\n", __func__, error);
return (error);
}
-
- /* Be ready to recycle the next portion of the buffers. */
- chan->recycled_n = 0;
+ ch->recycled_n = 0;
}
return (0);
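The rx_ctx threading above implements what the removed TODO in dpaa2_ni_poll() asked for: frames obtained by one VDQ pull are linked through m_nextpkt and handed to if_input() once, when the last response is seen, amortizing the per-packet input cost. A self-contained model of the chain building (struct pkt stands in for struct mbuf, deliver() for if_input()):

    #include <stdbool.h>
    #include <stddef.h>

    struct pkt { struct pkt *nextpkt; };
    struct rx_ctx { struct pkt *head, *tail; int cnt; bool last; };

    static void deliver(struct pkt *chain);   /* if_input() stand-in */

    static void
    rx_chain(struct rx_ctx *ctx, struct pkt *p)
    {
            if (ctx->head == NULL)
                    ctx->head = p;
            else
                    ctx->tail->nextpkt = p;
            ctx->tail = p;
            ctx->cnt++;

            if (ctx->last) {          /* final response of this pull */
                    ctx->tail->nextpkt = NULL;
                    deliver(ctx->head);
            }
    }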
@@ -3451,33 +3217,36 @@ dpaa2_ni_rx(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
* @brief Receive Rx error frames.
*/
static int
-dpaa2_ni_rx_err(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
+dpaa2_ni_rx_err(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
struct dpaa2_fd *fd)
{
- device_t bp_dev;
- struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
+ bus_addr_t paddr = (bus_addr_t)fd->addr;
+ struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
+ struct dpaa2_buf *buf = fa->buf;
+ struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
+ struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
+ device_t bpdev;
struct dpaa2_bp_softc *bpsc;
- struct dpaa2_buf *buf;
- struct dpaa2_fa *fa;
- bus_addr_t paddr = (bus_addr_t) fd->addr;
int error;
- fa = (struct dpaa2_fa *) PHYS_TO_DMAP(paddr);
- buf = fa->buf;
-
KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
- KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
- if (__predict_false(paddr != buf->rx.paddr)) {
+ /*
+ * NOTE: Current channel might not be the same as the "buffer" channel
+ * and it's fine. It must not be NULL though.
+ */
+ KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
+
+ if (__predict_false(paddr != buf->paddr)) {
panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
- __func__, paddr, buf->rx.paddr);
+ __func__, paddr, buf->paddr);
}
- /* There's only one buffer pool for now. */
- bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
- bpsc = device_get_softc(bp_dev);
+ /* There's only one buffer pool for now */
+ bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
+ bpsc = device_get_softc(bpdev);
- /* Release buffer to QBMan buffer pool. */
- error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid, &paddr, 1);
+ /* Release buffer to QBMan buffer pool */
+ error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid, &paddr, 1);
if (error != 0) {
device_printf(sc->dev, "%s: failed to release frame buffer to "
"the pool: error=%d\n", __func__, error);
@@ -3491,33 +3260,47 @@ dpaa2_ni_rx_err(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
* @brief Receive Tx confirmation frames.
*/
static int
-dpaa2_ni_tx_conf(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
+dpaa2_ni_tx_conf(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
struct dpaa2_fd *fd)
{
- struct dpaa2_ni_tx_ring *tx;
- struct dpaa2_buf *buf;
- struct dpaa2_fa *fa;
- bus_addr_t paddr = (bus_addr_t) fd->addr;
-
- fa = (struct dpaa2_fa *) PHYS_TO_DMAP(paddr);
- buf = fa->buf;
- tx = fa->tx;
+ bus_addr_t paddr = (bus_addr_t)fd->addr;
+ struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
+ struct dpaa2_buf *buf = fa->buf;
+ struct dpaa2_buf *sgt = buf->sgt;
+ struct dpaa2_ni_tx_ring *tx = (struct dpaa2_ni_tx_ring *)buf->opt;
+ struct dpaa2_channel *bch = tx->fq->chan;
KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
- KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
- if (paddr != buf->tx.paddr) {
+ KASSERT(tx != NULL, ("%s: Tx ring is NULL", __func__));
+ KASSERT(sgt != NULL, ("%s: S/G table is NULL", __func__));
+ /*
+ * NOTE: Current channel might not be the same as the "buffer" channel
+ * and it's fine. It must not be NULL though.
+ */
+ KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
+
+ if (paddr != buf->paddr) {
panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
- __func__, paddr, buf->tx.paddr);
+ __func__, paddr, buf->paddr);
}
- bus_dmamap_sync(buf->tx.dmat, buf->tx.dmap, BUS_DMASYNC_POSTWRITE);
- bus_dmamap_sync(buf->tx.sgt_dmat, buf->tx.sgt_dmap, BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap);
- bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
- m_freem(buf->tx.m);
+ mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
+ mtx_lock(&bch->dma_mtx);
- /* Return Tx buffer index back to the ring. */
- buf_ring_enqueue(tx->idx_br, (void *) buf->tx.idx);
+ bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(buf->dmat, buf->dmap);
+ bus_dmamap_unload(sgt->dmat, sgt->dmap);
+ m_freem(buf->m);
+ buf->m = NULL;
+ buf->paddr = 0;
+ buf->vaddr = NULL;
+ sgt->paddr = 0;
+
+ mtx_unlock(&bch->dma_mtx);
+
+ /* Return Tx buffer back to the ring */
+ buf_ring_enqueue(tx->br, buf);
return (0);
}
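All three completion paths (Rx, Rx error, Tx confirmation) recover driver state the same way: the frame descriptor carries only a physical address, and the frame annotation written at the front of the buffer during seeding holds a magic value plus a back-pointer to the owning dpaa2_buf. A simplified model (MAGIC, phys_to_virt() and the struct names are stand-ins for DPAA2_MAGIC, PHYS_TO_DMAP() and the driver's types):

    #include <assert.h>
    #include <stdint.h>

    #define MAGIC 0xdeadbeefULL           /* illustrative value */

    struct buffer;                        /* dpaa2_buf stand-in */
    struct frame_anno {                   /* dpaa2_fa stand-in */
            uint64_t       magic;
            struct buffer *buf;
    };

    void *phys_to_virt(uint64_t paddr);   /* PHYS_TO_DMAP() stand-in */

    static struct buffer *
    buf_from_fd(uint64_t fd_addr)
    {
            struct frame_anno *fa = phys_to_virt(fd_addr);

            assert(fa->magic == MAGIC);   /* catches buffer corruption */
            return (fa->buf);
    }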
@@ -3529,240 +3312,10 @@ static int
dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
uint16_t minor)
{
- if (sc->api_major == major)
+ if (sc->api_major == major) {
return sc->api_minor - minor;
- return sc->api_major - major;
-}
-
-/**
- * @brief Allocate Rx buffers visible to QBMan and release them to the pool.
- */
-static int
-dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *sc, uint32_t seedn)
-{
- device_t bp_dev;
- struct dpaa2_bp_softc *bpsc;
- struct dpaa2_buf *buf;
- bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD];
- const int allocated = DPAA2_ATOMIC_READ(&sc->buf_num);
- int i, error, bufn = 0;
-
- KASSERT(sc->bp_dmat != NULL, ("%s: DMA tag for buffer pool not "
- "created?", __func__));
-
- /* There's only one buffer pool for now. */
- bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
- bpsc = device_get_softc(bp_dev);
-
- /* Limit # of buffers released to the pool. */
- if (allocated + seedn > DPAA2_NI_BUFS_MAX)
- seedn = DPAA2_NI_BUFS_MAX - allocated;
-
- /* Release "seedn" buffers to the pool. */
- for (i = allocated; i < (allocated + seedn); i++) {
- /* Enough buffers were allocated for a single command. */
- if (bufn == DPAA2_SWP_BUFS_PER_CMD) {
- error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev,
- bpsc->attr.bpid, paddr, bufn);
- if (error) {
- device_printf(sc->dev, "%s: failed to release "
- "buffers to the pool (1)\n", __func__);
- return (error);
- }
- DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
- bufn = 0;
- }
-
- buf = &sc->buf[i];
- buf->type = DPAA2_BUF_RX;
- buf->rx.m = NULL;
- buf->rx.dmap = NULL;
- buf->rx.paddr = 0;
- buf->rx.vaddr = NULL;
- error = dpaa2_ni_seed_rxbuf(sc, buf);
- if (error != 0) {
- break;
- }
- paddr[bufn] = buf->rx.paddr;
- bufn++;
- }
-
- /* Release if there are buffers left. */
- if (bufn > 0) {
- error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev,
- bpsc->attr.bpid, paddr, bufn);
- if (error) {
- device_printf(sc->dev, "%s: failed to release "
- "buffers to the pool (2)\n", __func__);
- return (error);
- }
- DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
- }
-
- return (0);
-}
-
-/**
- * @brief Prepare Rx buffer to be released to the buffer pool.
- */
-static int
-dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf)
-{
- struct mbuf *m;
- struct dpaa2_fa *fa;
- bus_dmamap_t dmap;
- bus_dma_segment_t segs;
- int error, nsegs;
-
- KASSERT(sc->bp_dmat != NULL, ("%s: Buffer pool DMA tag is not "
- "allocated?", __func__));
- KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
-
- /* Keep DMA tag for this buffer. */
- if (__predict_false(buf->rx.dmat == NULL))
- buf->rx.dmat = sc->bp_dmat;
-
- /* Create a DMA map for the giving buffer if it doesn't exist yet. */
- if (__predict_false(buf->rx.dmap == NULL)) {
- error = bus_dmamap_create(buf->rx.dmat, 0, &dmap);
- if (error) {
- device_printf(sc->dev, "%s: failed to create DMA map "
- "for buffer: error=%d\n", __func__, error);
- return (error);
- }
- buf->rx.dmap = dmap;
- }
-
- /* Allocate mbuf if needed. */
- if (__predict_false(buf->rx.m == NULL)) {
- m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, BUF_SIZE);
- if (__predict_false(m == NULL)) {
- device_printf(sc->dev, "%s: failed to allocate mbuf for "
- "buffer\n", __func__);
- return (ENOMEM);
- }
- m->m_len = m->m_ext.ext_size;
- m->m_pkthdr.len = m->m_ext.ext_size;
- buf->rx.m = m;
- } else
- m = buf->rx.m;
-
- error = bus_dmamap_load_mbuf_sg(buf->rx.dmat, buf->rx.dmap,
- m, &segs, &nsegs, BUS_DMA_NOWAIT);
- KASSERT(nsegs == 1, ("one segment expected: nsegs=%d", nsegs));
- KASSERT(error == 0, ("failed to map mbuf: error=%d", error));
- if (__predict_false(error != 0 || nsegs != 1)) {
- device_printf(sc->dev, "%s: failed to map mbuf: error=%d, "
- "nsegs=%d\n", __func__, error, nsegs);
- bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap);
- m_freem(m);
- return (error);
- }
- buf->rx.paddr = segs.ds_addr;
- buf->rx.vaddr = m->m_data;
-
- /* Populate frame annotation for future use. */
- fa = (struct dpaa2_fa *) m->m_data;
- fa->magic = DPAA2_MAGIC;
- fa->buf = buf;
-
- bus_dmamap_sync(buf->rx.dmat, buf->rx.dmap, BUS_DMASYNC_PREREAD);
-
- return (0);
-}
-
-/**
- * @brief Prepare Tx buffer to be added to the Tx ring.
- */
-static int
-dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf)
-{
- bus_dmamap_t dmap;
- int error;
-
- KASSERT(sc->tx_dmat != NULL, ("%s: Tx DMA tag is not allocated?",
- __func__));
- KASSERT(sc->sgt_dmat != NULL, ("%s: S/G DMA tag not allocated?",
- __func__));
- KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
-
- /* Keep DMA tags for this buffer. */
- if (__predict_true(buf->tx.dmat == NULL))
- buf->tx.dmat = sc->tx_dmat;
- if (__predict_true(buf->tx.sgt_dmat == NULL))
- buf->tx.sgt_dmat = sc->sgt_dmat;
-
- /* Create a DMA map for the giving buffer if it doesn't exist yet. */
- if (__predict_true(buf->tx.dmap == NULL)) {
- error = bus_dmamap_create(buf->tx.dmat, 0, &dmap);
- if (error != 0) {
- device_printf(sc->dev, "%s: failed to create "
- "Tx DMA map: error=%d\n", __func__, error);
- return (error);
- }
- buf->tx.dmap = dmap;
- }
-
- /* Allocate a buffer to store scatter/gather table. */
- if (__predict_true(buf->tx.sgt_vaddr == NULL)) {
- error = bus_dmamem_alloc(buf->tx.sgt_dmat,
- &buf->tx.sgt_vaddr, BUS_DMA_ZERO | BUS_DMA_COHERENT,
- &buf->tx.sgt_dmap);
- if (error != 0) {
- device_printf(sc->dev, "%s: failed to allocate "
- "S/G table: error=%d\n", __func__, error);
- return (error);
- }
- }
-
- return (0);
-}
-
-/**
- * @brief Allocate channel storage visible to QBMan.
- */
-static int
-dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *sc,
- struct dpaa2_ni_channel *chan)
-{
- struct dpaa2_buf *buf = &chan->store;
- int error;
-
- KASSERT(sc->st_dmat != NULL, ("%s: channel storage DMA tag is not "
- "allocated?", __func__));
- KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not channel storage buffer",
- __func__));
-
- /* Keep DMA tag for this buffer. */
- if (__predict_false(buf->store.dmat == NULL)) {
- buf->store.dmat = sc->st_dmat;
- }
-
- if (__predict_false(buf->store.vaddr == NULL)) {
- error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
- BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
- if (error) {
- device_printf(sc->dev, "%s: failed to allocate channel "
- "storage\n", __func__);
- return (error);
- }
- }
-
- if (__predict_false(buf->store.paddr == 0)) {
- error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
- buf->store.vaddr, ETH_STORE_SIZE, dpaa2_ni_dmamap_cb,
- &buf->store.paddr, BUS_DMA_NOWAIT);
- if (error) {
- device_printf(sc->dev, "%s: failed to map channel "
- "storage\n", __func__);
- return (error);
- }
}
-
- chan->store_sz = ETH_STORE_FRAMES;
- chan->store_idx = 0;
-
- return (0);
+ return sc->api_major - major;
}
/**
@@ -3770,60 +3323,55 @@ dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *sc,
*/
static int
dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
- struct dpaa2_buf *buf, bus_dma_segment_t *txsegs, int txnsegs,
- struct dpaa2_fd *fd)
+ struct dpaa2_buf *buf, bus_dma_segment_t *segs, int nsegs, struct dpaa2_fd *fd)
{
- struct dpaa2_sg_entry *sgt;
+ struct dpaa2_buf *sgt = buf->sgt;
+ struct dpaa2_sg_entry *sge;
struct dpaa2_fa *fa;
int i, error;
- KASSERT(txnsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments, "
- "txnsegs (%d) > %d", __func__, txnsegs, DPAA2_TX_SEGLIMIT));
- KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
- KASSERT(buf->tx.sgt_vaddr != NULL, ("%s: S/G table not allocated?",
- __func__));
+ KASSERT(nsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments", __func__));
+ KASSERT(buf->opt != NULL, ("%s: no Tx ring?", __func__));
+ KASSERT(sgt != NULL, ("%s: no S/G table?", __func__));
+ KASSERT(sgt->vaddr != NULL, ("%s: no S/G vaddr?", __func__));
- /* Reset frame descriptor fields. */
memset(fd, 0, sizeof(*fd));
- if (__predict_true(txnsegs <= DPAA2_TX_SEGLIMIT)) {
- /* Populate S/G table. */
- sgt = (struct dpaa2_sg_entry *) buf->tx.sgt_vaddr +
- sc->tx_data_off;
- for (i = 0; i < txnsegs; i++) {
- sgt[i].addr = (uint64_t) txsegs[i].ds_addr;
- sgt[i].len = (uint32_t) txsegs[i].ds_len;
- sgt[i].offset_fmt = 0u;
+ /* Populate and map S/G table */
+ if (__predict_true(nsegs <= DPAA2_TX_SEGLIMIT)) {
+ sge = (struct dpaa2_sg_entry *)sgt->vaddr + sc->tx_data_off;
+ for (i = 0; i < nsegs; i++) {
+ sge[i].addr = (uint64_t)segs[i].ds_addr;
+ sge[i].len = (uint32_t)segs[i].ds_len;
+ sge[i].offset_fmt = 0u;
}
- sgt[i-1].offset_fmt |= 0x8000u; /* set final entry flag */
+ sge[i-1].offset_fmt |= 0x8000u; /* set final entry flag */
- KASSERT(buf->tx.sgt_paddr == 0, ("%s: sgt_paddr(%#jx) != 0",
- __func__, buf->tx.sgt_paddr));
+ KASSERT(sgt->paddr == 0, ("%s: paddr(%#jx) != 0", __func__,
+ sgt->paddr));
- /* Load S/G table. */
- error = bus_dmamap_load(buf->tx.sgt_dmat, buf->tx.sgt_dmap,
- buf->tx.sgt_vaddr, DPAA2_TX_SGT_SZ, dpaa2_ni_dmamap_cb,
- &buf->tx.sgt_paddr, BUS_DMA_NOWAIT);
+ error = bus_dmamap_load(sgt->dmat, sgt->dmap, sgt->vaddr,
+ DPAA2_TX_SGT_SZ, dpaa2_dmamap_oneseg_cb, &sgt->paddr,
+ BUS_DMA_NOWAIT);
if (__predict_false(error != 0)) {
- device_printf(sc->dev, "%s: failed to map S/G table: "
+ device_printf(sc->dev, "%s: bus_dmamap_load() failed: "
"error=%d\n", __func__, error);
return (error);
}
- buf->tx.paddr = buf->tx.sgt_paddr;
- buf->tx.vaddr = buf->tx.sgt_vaddr;
+ buf->paddr = sgt->paddr;
+ buf->vaddr = sgt->vaddr;
sc->tx_sg_frames++; /* for sysctl(9) */
} else {
return (EINVAL);
}
- fa = (struct dpaa2_fa *) buf->tx.sgt_vaddr;
+ fa = (struct dpaa2_fa *)sgt->vaddr;
fa->magic = DPAA2_MAGIC;
fa->buf = buf;
- fa->tx = tx;
- fd->addr = buf->tx.paddr;
- fd->data_length = (uint32_t) buf->tx.m->m_pkthdr.len;
+ fd->addr = buf->paddr;
+ fd->data_length = (uint32_t)buf->m->m_pkthdr.len;
fd->bpid_ivp_bmt = 0;
fd->offset_fmt_sl = 0x2000u | sc->tx_data_off;
fd->ctrl = 0x00800000u;
@@ -3840,9 +3388,9 @@ dpaa2_ni_fd_err(struct dpaa2_fd *fd)
static uint32_t
dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
{
- if (dpaa2_ni_fd_short_len(fd))
+ if (dpaa2_ni_fd_short_len(fd)) {
return (fd->data_length & DPAA2_NI_FD_LEN_MASK);
-
+ }
return (fd->data_length);
}
@@ -3969,10 +3517,8 @@ dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags
uint16_t rc_token, ni_token;
int i, error = 0;
- KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer",
- __func__));
- if (__predict_true(buf->store.dmat == NULL)) {
- buf->store.dmat = sc->rxd_dmat;
+ if (__predict_true(buf->dmat == NULL)) {
+ buf->dmat = sc->rxd_dmat;
}
memset(&cls_cfg, 0, sizeof(cls_cfg));
@@ -3998,15 +3544,15 @@ dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags
cls_cfg.num_extracts++;
}
- error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
- BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
+ error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
+ BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
if (error != 0) {
device_printf(dev, "%s: failed to allocate a buffer for Rx "
"traffic distribution key configuration\n", __func__);
return (error);
}
- error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *) buf->store.vaddr);
+ error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *)buf->vaddr);
if (error != 0) {
device_printf(dev, "%s: failed to prepare key configuration: "
"error=%d\n", __func__, error);
@@ -4014,9 +3560,9 @@ dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags
}
/* Prepare for setting the Rx dist. */
- error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
- buf->store.vaddr, DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_ni_dmamap_cb,
- &buf->store.paddr, BUS_DMA_NOWAIT);
+ error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
+ DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
+ BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc->dev, "%s: failed to map a buffer for Rx "
"traffic distribution key configuration\n", __func__);
@@ -4044,8 +3590,7 @@ dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags
}
error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd,
- sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH,
- buf->store.paddr);
+ sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH, buf->paddr);
if (error != 0) {
device_printf(dev, "%s: failed to set distribution mode "
"and size for the traffic class\n", __func__);
@@ -4122,36 +3667,6 @@ dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
return (0);
}
-/**
- * @brief Obtain the next dequeue response from the channel storage.
- */
-static int
-dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *chan, struct dpaa2_dq **dq)
-{
- struct dpaa2_buf *buf = &chan->store;
- struct dpaa2_dq *msgs = buf->store.vaddr;
- struct dpaa2_dq *msg = &msgs[chan->store_idx];
- int rc = EINPROGRESS;
-
- chan->store_idx++;
-
- if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) {
- rc = EALREADY; /* VDQ command is expired */
- chan->store_idx = 0;
- if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME))
- msg = NULL; /* Null response, FD is invalid */
- }
- if (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY) {
- rc = ENOENT; /* FQ is empty */
- chan->store_idx = 0;
- }
-
- if (dq != NULL)
- *dq = msg;
-
- return (rc);
-}
-
static device_method_t dpaa2_ni_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, dpaa2_ni_probe),
diff --git a/sys/dev/dpaa2/dpaa2_ni.h b/sys/dev/dpaa2/dpaa2_ni.h
index 523775a9365c..6fb0673fac09 100644
--- a/sys/dev/dpaa2/dpaa2_ni.h
+++ b/sys/dev/dpaa2/dpaa2_ni.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright © 2021-2022 Dmitry Salychev
+ * Copyright © 2021-2023 Dmitry Salychev
* Copyright © 2022 Mathew McBride
*
* Redistribution and use in source and binary forms, with or without
@@ -38,6 +38,7 @@
#include <sys/socket.h>
#include <sys/buf_ring.h>
#include <sys/proc.h>
+#include <sys/lock.h>
#include <sys/mutex.h>
#include <net/if.h>
@@ -50,6 +51,7 @@
#include "dpaa2_io.h"
#include "dpaa2_mac.h"
#include "dpaa2_ni_dpkg.h"
+#include "dpaa2_channel.h"
/* Name of the DPAA2 network interface. */
#define DPAA2_NI_IFNAME "dpni"
@@ -58,17 +60,13 @@
#define DPAA2_NI_MAX_RESOURCES 34
#define DPAA2_NI_MSI_COUNT 1 /* MSIs per DPNI */
-#define DPAA2_NI_MAX_CHANNELS 16 /* to distribute ingress traffic to cores */
-#define DPAA2_NI_MAX_TCS 8 /* traffic classes per DPNI */
#define DPAA2_NI_MAX_POOLS 8 /* buffer pools per DPNI */
-/* Maximum number of Rx buffers. */
-#define DPAA2_NI_BUFS_INIT (50u * DPAA2_SWP_BUFS_PER_CMD)
-#define DPAA2_NI_BUFS_MAX (1 << 15) /* 15 bits for buffer index max. */
+#define DPAA2_NI_BUFS_INIT (5u * DPAA2_SWP_BUFS_PER_CMD)
/* Maximum number of buffers allocated per Tx ring. */
#define DPAA2_NI_BUFS_PER_TX (1 << 7)
-#define DPAA2_NI_MAX_BPTX (1 << 8) /* 8 bits for buffer index max. */
+#define DPAA2_NI_MAX_BPTX (1 << 8)
/* Number of the DPNI statistics counters. */
#define DPAA2_NI_STAT_COUNTERS 7u
@@ -133,12 +131,30 @@
*/
#define DPAA2_NI_ENQUEUE_RETRIES 10
-enum dpaa2_ni_queue_type {
- DPAA2_NI_QUEUE_RX = 0,
- DPAA2_NI_QUEUE_TX,
- DPAA2_NI_QUEUE_TX_CONF,
- DPAA2_NI_QUEUE_RX_ERR
-};
+/* Channel storage buffer configuration */
+#define DPAA2_ETH_STORE_FRAMES 16u
+#define DPAA2_ETH_STORE_SIZE \
+ ((DPAA2_ETH_STORE_FRAMES + 1) * sizeof(struct dpaa2_dq))
+
+/*
+ * NOTE: Don't forget to update dpaa2_ni_spec in case of any changes in macros!
+ */
+/* DPMCP resources */
+#define DPAA2_NI_MCP_RES_NUM (1u)
+#define DPAA2_NI_MCP_RID_OFF (0u)
+#define DPAA2_NI_MCP_RID(rid) ((rid) + DPAA2_NI_MCP_RID_OFF)
+/* DPIO resources (software portals) */
+#define DPAA2_NI_IO_RES_NUM (16u)
+#define DPAA2_NI_IO_RID_OFF (DPAA2_NI_MCP_RID_OFF + DPAA2_NI_MCP_RES_NUM)
+#define DPAA2_NI_IO_RID(rid) ((rid) + DPAA2_NI_IO_RID_OFF)
+/* DPBP resources (buffer pools) */
+#define DPAA2_NI_BP_RES_NUM (1u)
+#define DPAA2_NI_BP_RID_OFF (DPAA2_NI_IO_RID_OFF + DPAA2_NI_IO_RES_NUM)
+#define DPAA2_NI_BP_RID(rid) ((rid) + DPAA2_NI_BP_RID_OFF)
+/* DPCON resources (channels) */
+#define DPAA2_NI_CON_RES_NUM (16u)
+#define DPAA2_NI_CON_RID_OFF (DPAA2_NI_BP_RID_OFF + DPAA2_NI_BP_RES_NUM)
+#define DPAA2_NI_CON_RID(rid) ((rid) + DPAA2_NI_CON_RID_OFF)
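Expanding the offsets gives the rman resource ID layout that dpaa2_ni_spec must match, 34 entries in total (DPAA2_NI_MAX_RESOURCES above):

    /*
     * rid  0        DPMCP        DPAA2_NI_MCP_RID(0) == 0
     * rid  1 .. 16  DPIO  (x16)  DPAA2_NI_IO_RID(i)  == 1 + i
     * rid 17        DPBP         DPAA2_NI_BP_RID(0)  == 17
     * rid 18 .. 33  DPCON (x16)  DPAA2_NI_CON_RID(i) == 18 + i
     *
     * total: 1 + 16 + 1 + 16 == 34
     */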
enum dpaa2_ni_dest_type {
DPAA2_NI_DEST_NONE = 0,
@@ -172,7 +188,7 @@ enum dpaa2_ni_err_action {
DPAA2_NI_ERR_SEND_TO_ERROR_QUEUE
};
-struct dpaa2_ni_channel;
+struct dpaa2_channel;
struct dpaa2_ni_fq;
/**
@@ -204,95 +220,6 @@ struct dpaa2_ni_attr {
};
/**
- * @brief Tx ring.
- *
- * fq: Parent (TxConf) frame queue.
- * fqid: ID of the logical Tx queue.
- * mbuf_br: Ring buffer for mbufs to transmit.
- * mbuf_lock: Lock for the ring buffer.
- */
-struct dpaa2_ni_tx_ring {
- struct dpaa2_ni_fq *fq;
- uint32_t fqid;
- uint32_t txid; /* Tx ring index */
-
- /* Ring buffer for indexes in "buf" array. */
- struct buf_ring *idx_br;
- struct mtx lock;
-
- /* Buffers to DMA load/unload Tx mbufs. */
- struct dpaa2_buf buf[DPAA2_NI_BUFS_PER_TX];
-};
-
-/**
- * @brief A Frame Queue is the basic queuing structure used by the QMan.
- *
- * It comprises a list of frame descriptors (FDs), so it can be thought of
- * as a queue of frames.
- *
- * NOTE: When frames on a FQ are ready to be processed, the FQ is enqueued
- * onto a work queue (WQ).
- *
- * fqid: Frame queue ID, can be used to enqueue/dequeue or execute other
- * commands on the queue through DPIO.
- * txq_n: Number of configured Tx queues.
- * tx_fqid: Frame queue IDs of the Tx queues which belong to the same flowid.
- * Note that Tx queues are logical queues and not all management
- * commands are available on these queue types.
- * qdbin: Queue destination bin. Can be used with the DPIO enqueue
- * operation based on QDID, QDBIN and QPRI. Note that all Tx queues
- * with the same flowid have the same destination bin.
- */
-struct dpaa2_ni_fq {
- int (*consume)(struct dpaa2_ni_channel *,
- struct dpaa2_ni_fq *, struct dpaa2_fd *);
-
- struct dpaa2_ni_channel *chan;
- uint32_t fqid;
- uint16_t flowid;
- uint8_t tc;
- enum dpaa2_ni_queue_type type;
-
- /* Optional fields (for TxConf queue). */
- struct dpaa2_ni_tx_ring tx_rings[DPAA2_NI_MAX_TCS];
- uint32_t tx_qdbin;
-} __aligned(CACHE_LINE_SIZE);
-
-/**
- * @brief QBMan channel to process ingress traffic (Rx, Tx conf).
- *
- * NOTE: Several WQs are organized into a single WQ Channel.
- */
-struct dpaa2_ni_channel {
- device_t ni_dev;
- device_t io_dev;
- device_t con_dev;
- uint16_t id;
- uint16_t flowid;
-
- /* For debug purposes only! */
- uint64_t tx_frames;
- uint64_t tx_dropped;
-
- /* Context to configure CDAN. */
- struct dpaa2_io_notif_ctx ctx;
-
- /* Channel storage (to keep responses from VDQ command). */
- struct dpaa2_buf store;
- uint32_t store_sz; /* in frames */
- uint32_t store_idx; /* frame index */
-
- /* Recycled buffers to release back to the pool. */
- uint32_t recycled_n;
- struct dpaa2_buf *recycled[DPAA2_SWP_BUFS_PER_CMD];
-
- /* Frame queues */
- uint32_t rxq_n;
- struct dpaa2_ni_fq rx_queues[DPAA2_NI_MAX_TCS];
- struct dpaa2_ni_fq txc_queue;
-};
-
-/**
* @brief Configuration of the network interface queue.
*
* NOTE: This configuration is used to obtain information of a queue by
@@ -534,7 +461,6 @@ struct dpaa2_ni_softc {
uint16_t buf_align;
uint16_t buf_sz;
- /* For debug purposes only! */
uint64_t rx_anomaly_frames;
uint64_t rx_single_buf_frames;
uint64_t rx_sg_buf_frames;
@@ -543,10 +469,8 @@ struct dpaa2_ni_softc {
uint64_t tx_single_buf_frames;
uint64_t tx_sg_frames;
- /* Attributes of the DPAA2 network interface. */
struct dpaa2_ni_attr attr;
- /* For network interface and miibus. */
struct ifnet *ifp;
uint32_t if_flags;
struct mtx lock;
@@ -556,37 +480,25 @@ struct dpaa2_ni_softc {
struct ifmedia fixed_ifmedia;
int media_status;
- /* DMA resources */
- bus_dma_tag_t bp_dmat; /* for buffer pool */
- bus_dma_tag_t tx_dmat; /* for Tx buffers */
- bus_dma_tag_t st_dmat; /* for channel storage */
bus_dma_tag_t rxd_dmat; /* for Rx distribution key */
bus_dma_tag_t qos_dmat; /* for QoS table key */
- bus_dma_tag_t sgt_dmat; /* for scatter/gather tables */
- struct dpaa2_buf qos_kcfg; /* QoS table key config. */
- struct dpaa2_buf rxd_kcfg; /* Rx distribution key config. */
+ struct dpaa2_buf qos_kcfg; /* QoS table key config */
+ struct dpaa2_buf rxd_kcfg; /* Rx distribution key config */
- /* Channels and RxError frame queue */
uint32_t chan_n;
- struct dpaa2_ni_channel *channels[DPAA2_NI_MAX_CHANNELS];
- struct dpaa2_ni_fq rxe_queue; /* one per network interface */
+ struct dpaa2_channel *channels[DPAA2_MAX_CHANNELS];
+ struct dpaa2_ni_fq rxe_queue; /* one per DPNI */
- /* Rx buffers for buffer pool. */
struct dpaa2_atomic buf_num;
struct dpaa2_atomic buf_free; /* for sysctl(9) only */
- struct dpaa2_buf buf[DPAA2_NI_BUFS_MAX];
- /* Interrupts */
int irq_rid[DPAA2_NI_MSI_COUNT];
struct resource *irq_res;
- void *intr; /* interrupt handle */
+ void *intr;
- /* Tasks */
struct taskqueue *bp_taskq;
- struct task bp_task;
- /* Callouts */
struct callout mii_callout;
struct {
@@ -594,7 +506,7 @@ struct dpaa2_ni_softc {
uint8_t addr[ETHER_ADDR_LEN];
device_t phy_dev;
int phy_loc;
- } mac; /* Info about connected DPMAC (if exists). */
+ } mac; /* Info about connected DPMAC (if exists) */
};
extern struct resource_spec dpaa2_ni_spec[];
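The RID macros above carve the DPNI's resource IDs into contiguous ranges: rid 0 for the DPMCP, rids 1..16 for the DPIOs, rid 17 for the DPBP and rids 18..33 for the DPCONs, i.e. DPAA2_NI_MAX_RESOURCES (34) in total. A purely illustrative compile-time check of that layout (not part of this change) could be written as:

	CTASSERT(DPAA2_NI_MCP_RID(0) == 0);
	CTASSERT(DPAA2_NI_IO_RID(0) == 1);
	CTASSERT(DPAA2_NI_BP_RID(0) == 17);
	CTASSERT(DPAA2_NI_CON_RID(0) == 18);
	CTASSERT(DPAA2_NI_CON_RID_OFF + DPAA2_NI_CON_RES_NUM ==
	    DPAA2_NI_MAX_RESOURCES);

This is also why the NOTE above asks to keep dpaa2_ni_spec in sync: the resource_spec table has to list the same number of resources in the same rid order.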
diff --git a/sys/dev/dpaa2/dpaa2_swp.c b/sys/dev/dpaa2/dpaa2_swp.c
index 3fa66ca8880f..5800d7fedd82 100644
--- a/sys/dev/dpaa2/dpaa2_swp.c
+++ b/sys/dev/dpaa2/dpaa2_swp.c
@@ -78,7 +78,6 @@
#include <sys/time.h>
#include <sys/types.h>
#include <sys/systm.h>
-#include <sys/condvar.h>
#include <sys/lock.h>
#include <machine/bus.h>
@@ -298,8 +297,9 @@ dpaa2_swp_init_portal(struct dpaa2_swp **swp, struct dpaa2_swp_desc *desc,
& p->eqcr.pi_ci_mask;
p->eqcr.available = p->eqcr.pi_ring_size;
- /* Initialize the portal with an IRQ threshold and timeout of 0us. */
- dpaa2_swp_set_irq_coalescing(p, p->dqrr.ring_size - 1, 0);
+ /* TODO: sysctl(9) for the IRQ timeout? */
+ /* Initialize the portal with an IRQ threshold and timeout of 120us. */
+ dpaa2_swp_set_irq_coalescing(p, p->dqrr.ring_size - 1, 120);
*swp = p;
@@ -724,16 +724,13 @@ dpaa2_swp_pull(struct dpaa2_swp *swp, uint16_t chan_id, struct dpaa2_buf *buf,
uint16_t flags;
int i, error;
- KASSERT(swp != NULL, ("%s: swp is NULL", __func__));
KASSERT(frames_n != 0u, ("%s: cannot pull zero frames", __func__));
	KASSERT(frames_n <= 16u, ("%s: too many frames to pull", __func__));
- KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not channel storage "
- "buffer", __func__));
cmd.numf = frames_n - 1;
cmd.tok = DPAA2_SWP_VDQCR_TOKEN;
cmd.dq_src = chan_id;
- cmd.rsp_addr = (uint64_t) buf->store.paddr;
+ cmd.rsp_addr = (uint64_t)buf->paddr;
/* Dequeue command type */
cmd.verb &= ~(1 << QB_VDQCR_VERB_DCT0_SHIFT);
@@ -763,10 +760,10 @@ dpaa2_swp_pull(struct dpaa2_swp *swp, uint16_t chan_id, struct dpaa2_buf *buf,
}
/* Let's sync before reading VDQ response from QBMan. */
- bus_dmamap_sync(buf->store.dmat, buf->store.dmap, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD);
/* Read VDQ response from QBMan. */
- msg = (struct dpaa2_dq *) buf->store.vaddr;
+ msg = (struct dpaa2_dq *)buf->vaddr;
for (i = 1; i <= CMD_SPIN_ATTEMPTS; i++) {
if ((msg->fdr.desc.stat & DPAA2_DQ_STAT_VOLATILE) &&
(msg->fdr.desc.tok == DPAA2_SWP_VDQCR_TOKEN)) {
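With dpaa2_buf flattened, dpaa2_swp_pull() addresses the channel storage through buf->paddr and buf->vaddr directly instead of the old buf->store.* union members. A hedged sketch of a caller (hypothetical "swp" and "chan" variables, assuming the new dpaa2_channel keeps a per-channel "store" buffer and "id" field like the old dpaa2_ni_channel did):

	struct dpaa2_buf *store = &chan->store;
	struct dpaa2_dq *dq;
	int error;

	/* Ask QBMan to write up to 16 dequeue responses to store->paddr. */
	error = dpaa2_swp_pull(swp, chan->id, store, DPAA2_ETH_STORE_FRAMES);
	if (error == 0) {
		/* Responses are then read back via the virtual mapping. */
		dq = (struct dpaa2_dq *)store->vaddr;
	}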
diff --git a/sys/dev/dpaa2/dpaa2_swp.h b/sys/dev/dpaa2/dpaa2_swp.h
index 986ade601149..1b1383b4241f 100644
--- a/sys/dev/dpaa2/dpaa2_swp.h
+++ b/sys/dev/dpaa2/dpaa2_swp.h
@@ -33,6 +33,7 @@
#include <sys/bus.h>
#include "dpaa2_types.h"
+#include "dpaa2_buf.h"
#include "dpaa2_bp.h"
/*
@@ -318,16 +319,16 @@ CTASSERT(sizeof(struct dpaa2_fd) == DPAA2_FD_SIZE);
struct dpaa2_fa {
uint32_t magic;
struct dpaa2_buf *buf;
+#ifdef __notyet__
union {
struct { /* Tx frame annotation */
struct dpaa2_ni_tx_ring *tx;
};
-#ifdef __notyet__
struct { /* Rx frame annotation */
uint64_t _notused;
};
-#endif
};
+#endif
} __packed;
CTASSERT(sizeof(struct dpaa2_fa) <= DPAA2_FA_SIZE);
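The trimmed dpaa2_fa keeps only the magic word and a pointer back to the owning dpaa2_buf; the Tx/Rx-specific members are fenced off under __notyet__ until they are needed. A hedged sketch of the intended pattern, assuming the annotation is placed at the start of the buffer's software annotation area (buf->vaddr):

	struct dpaa2_fa *fa = (struct dpaa2_fa *)buf->vaddr;

	fa->magic = DPAA2_MAGIC;	/* stamped when the frame is built */
	fa->buf = buf;

	/* ...later, on the confirmation/cleanup path: */
	KASSERT(fa->magic == DPAA2_MAGIC,
	    ("%s: bad frame annotation", __func__));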
diff --git a/sys/dev/dpaa2/dpaa2_types.c b/sys/dev/dpaa2/dpaa2_types.c
new file mode 100644
index 000000000000..c2fac0ea426d
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_types.c
@@ -0,0 +1,114 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2023 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+
+#include "dpaa2_types.h"
+
+#define COMPARE_TYPE(t, v) (strncmp((v), (t), strlen((v))) == 0)
+
+/**
+ * @brief Convert DPAA2 device type to string.
+ */
+const char *
+dpaa2_ttos(enum dpaa2_dev_type type)
+{
+ switch (type) {
+ case DPAA2_DEV_MC:
+ return ("mc"); /* NOTE: to print as information only. */
+ case DPAA2_DEV_RC:
+ return ("dprc");
+ case DPAA2_DEV_IO:
+ return ("dpio");
+ case DPAA2_DEV_NI:
+ return ("dpni");
+ case DPAA2_DEV_MCP:
+ return ("dpmcp");
+ case DPAA2_DEV_BP:
+ return ("dpbp");
+ case DPAA2_DEV_CON:
+ return ("dpcon");
+ case DPAA2_DEV_MAC:
+ return ("dpmac");
+ case DPAA2_DEV_MUX:
+ return ("dpdmux");
+ case DPAA2_DEV_SW:
+ return ("dpsw");
+ default:
+ break;
+ }
+
+ return ("notype");
+}
+
+/**
+ * @brief Convert string to DPAA2 device type.
+ */
+enum dpaa2_dev_type
+dpaa2_stot(const char *str)
+{
+ if (COMPARE_TYPE(str, "dprc")) {
+ return (DPAA2_DEV_RC);
+ } else if (COMPARE_TYPE(str, "dpio")) {
+ return (DPAA2_DEV_IO);
+ } else if (COMPARE_TYPE(str, "dpni")) {
+ return (DPAA2_DEV_NI);
+ } else if (COMPARE_TYPE(str, "dpmcp")) {
+ return (DPAA2_DEV_MCP);
+ } else if (COMPARE_TYPE(str, "dpbp")) {
+ return (DPAA2_DEV_BP);
+ } else if (COMPARE_TYPE(str, "dpcon")) {
+ return (DPAA2_DEV_CON);
+ } else if (COMPARE_TYPE(str, "dpmac")) {
+ return (DPAA2_DEV_MAC);
+ } else if (COMPARE_TYPE(str, "dpdmux")) {
+ return (DPAA2_DEV_MUX);
+ } else if (COMPARE_TYPE(str, "dpsw")) {
+ return (DPAA2_DEV_SW);
+ }
+
+ return (DPAA2_DEV_NOTYPE);
+}
+
+/**
+ * @brief Callback to obtain the physical address of the single mapped DMA segment.
+ */
+void
+dpaa2_dmamap_oneseg_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ if (error == 0) {
+ KASSERT(nseg == 1, ("%s: too many segments: nseg=%d\n",
+ __func__, nseg));
+ *(bus_addr_t *)arg = segs[0].ds_addr;
+ } else {
+ panic("%s: error=%d\n", __func__, error);
+ }
+}
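dpaa2_dmamap_oneseg_cb() is meant to be passed to bus_dmamap_load(9) for single-segment mappings: it stores the segment's address through the opaque argument and panics on a mapping error. A usage sketch with hypothetical tag/map/buffer names:

	bus_addr_t paddr = 0;
	int error;

	error = bus_dmamap_load(dmat, dmap, vaddr, size,
	    dpaa2_dmamap_oneseg_cb, &paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		/* handle the failed mapping */
	}
	/* On success "paddr" holds the physical address of the segment. */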
diff --git a/sys/dev/dpaa2/dpaa2_types.h b/sys/dev/dpaa2/dpaa2_types.h
index 5e7ccad15e1c..dbfac9ce0a40 100644
--- a/sys/dev/dpaa2/dpaa2_types.h
+++ b/sys/dev/dpaa2/dpaa2_types.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright © 2021-2022 Dmitry Salychev
+ * Copyright © 2021-2023 Dmitry Salychev
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,10 +28,18 @@
#ifndef _DPAA2_TYPES_H
#define _DPAA2_TYPES_H
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+
#include <machine/atomic.h>
+#include <machine/bus.h>
#define DPAA2_MAGIC ((uint32_t) 0xD4AA2C0Du)
+#define DPAA2_MAX_CHANNELS 16 /* CPU cores */
+#define DPAA2_MAX_TCS 8 /* Traffic classes */
+
/**
* @brief Types of the DPAA2 devices.
*/
@@ -51,56 +59,67 @@ enum dpaa2_dev_type {
};
/**
- * @brief Types of the DPAA2 buffers.
+ * @brief Types of the DPNI queues.
*/
-enum dpaa2_buf_type {
- DPAA2_BUF_RX = 75, /* Rx buffer */
- DPAA2_BUF_TX, /* Tx buffer */
- DPAA2_BUF_STORE /* Channel storage, key configuration */
-};
-
-/**
- * @brief DMA-mapped buffer (for Rx/Tx buffers, channel storage, etc.).
- */
-struct dpaa2_buf {
- enum dpaa2_buf_type type;
- union {
- struct {
- bus_dma_tag_t dmat; /* DMA tag for this buffer */
- bus_dmamap_t dmap;
- bus_addr_t paddr;
- void *vaddr;
-
- struct mbuf *m; /* associated mbuf */
- } rx;
- struct {
- bus_dma_tag_t dmat; /* DMA tag for this buffer */
- bus_dmamap_t dmap;
- bus_addr_t paddr;
- void *vaddr;
-
- struct mbuf *m; /* associated mbuf */
- uint64_t idx;
-
- /* for scatter/gather table */
- bus_dma_tag_t sgt_dmat;
- bus_dmamap_t sgt_dmap;
- bus_addr_t sgt_paddr;
- void *sgt_vaddr;
- } tx;
- struct {
- bus_dma_tag_t dmat; /* DMA tag for this buffer */
- bus_dmamap_t dmap;
- bus_addr_t paddr;
- void *vaddr;
- } store;
- };
+enum dpaa2_ni_queue_type {
+ DPAA2_NI_QUEUE_RX = 0,
+ DPAA2_NI_QUEUE_TX,
+ DPAA2_NI_QUEUE_TX_CONF,
+ DPAA2_NI_QUEUE_RX_ERR
};
struct dpaa2_atomic {
volatile int counter;
};
+/**
+ * @brief Tx ring.
+ *
+ * fq: Parent (TxConf) frame queue.
+ * fqid: ID of the logical Tx queue.
+ * br: Ring buffer for mbufs to transmit.
+ * lock: Lock for the ring buffer.
+ */
+struct dpaa2_ni_tx_ring {
+ struct dpaa2_ni_fq *fq;
+ uint32_t fqid;
+ uint32_t txid; /* Tx ring index */
+
+ struct buf_ring *br;
+ struct mtx lock;
+} __aligned(CACHE_LINE_SIZE);
+
+/**
+ * @brief A Frame Queue is the basic queuing structure used by the QMan.
+ *
+ * It comprises a list of frame descriptors (FDs), so it can be thought of
+ * as a queue of frames.
+ *
+ * NOTE: When frames on a FQ are ready to be processed, the FQ is enqueued
+ * onto a work queue (WQ).
+ *
+ * fqid: Frame queue ID, can be used to enqueue/dequeue or execute other
+ *       commands on the queue through DPIO.
+ * tx_rings: Tx rings of the TxConf queue, one per traffic class. Note
+ *       that Tx queues are logical queues and not all management
+ *       commands are available on these queue types.
+ * tx_qdbin: Queue destination bin of the TxConf queue. Can be used with
+ *       the DPIO enqueue operation based on QDID, QDBIN and QPRI. Note
+ *       that all Tx queues with the same flowid have the same
+ *       destination bin.
+ */
+struct dpaa2_ni_fq {
+ struct dpaa2_channel *chan;
+ uint32_t fqid;
+ uint16_t flowid;
+ uint8_t tc;
+ enum dpaa2_ni_queue_type type;
+
+ /* Optional fields (for TxConf queue). */
+ struct dpaa2_ni_tx_ring tx_rings[DPAA2_MAX_TCS];
+ uint32_t tx_qdbin;
+} __aligned(CACHE_LINE_SIZE);
+
/* Handy wrappers over atomic operations. */
#define DPAA2_ATOMIC_XCHG(a, val) \
(atomic_swap_int(&(a)->counter, (val)))
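The Tx ring above pairs a buf_ring(9) with a mutex that serializes access to it. A hedged sketch of the producer/consumer pattern this enables (hypothetical "tx" ring variable; mbufs are what the ring carries, per the comment above):

	struct mbuf *m;

	/* Producer: queue an mbuf for transmission. */
	if (buf_ring_enqueue(tx->br, m) == ENOBUFS) {
		m_freem(m);		/* ring full: drop the frame */
	}

	/* Consumer (e.g. a cleanup task): drain the ring. */
	mtx_lock(&tx->lock);
	while ((m = buf_ring_dequeue_sc(tx->br)) != NULL) {
		/* DMA-load the mbuf and enqueue it to QBMan */
	}
	mtx_unlock(&tx->lock);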
@@ -109,8 +128,8 @@ struct dpaa2_atomic {
#define DPAA2_ATOMIC_ADD(a, val) \
(atomic_add_acq_int(&(a)->counter, (val)))
-/* Convert DPAA2 type to/from string. */
-const char *dpaa2_ttos(enum dpaa2_dev_type type);
-enum dpaa2_dev_type dpaa2_stot(const char *str);
+const char *dpaa2_ttos(enum dpaa2_dev_type);
+enum dpaa2_dev_type dpaa2_stot(const char *);
+void dpaa2_dmamap_oneseg_cb(void *, bus_dma_segment_t *, int, int);
#endif /* _DPAA2_TYPES_H */
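The dpaa2_atomic wrappers back lightweight counters such as buf_num/buf_free in the DPNI softc. A small illustration using only the wrappers shown above:

	struct dpaa2_atomic buf_free = { .counter = 0 };

	DPAA2_ATOMIC_ADD(&buf_free, 16);	/* 16 buffers released */
	(void)DPAA2_ATOMIC_XCHG(&buf_free, 0);	/* drain and reset */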
diff --git a/sys/modules/dpaa2/Makefile b/sys/modules/dpaa2/Makefile
index 64b73cb20723..816d6fa5cf4a 100644
--- a/sys/modules/dpaa2/Makefile
+++ b/sys/modules/dpaa2/Makefile
@@ -11,6 +11,9 @@ SRCS+= dpaa2_mcp.c
SRCS+= dpaa2_swp.c
SRCS+= dpaa2_mac.c
SRCS+= dpaa2_con.c
+SRCS+= dpaa2_buf.c
+SRCS+= dpaa2_channel.c
+SRCS+= dpaa2_types.c
SRCS+= dpaa2_cmd_if.c dpaa2_cmd_if.h
SRCS+= dpaa2_swp_if.c dpaa2_swp_if.h
@@ -21,12 +24,15 @@ SRCS+= dpaa2_console.c
SRCS+= bus_if.h device_if.h miibus_if.h
SRCS+= pcib_if.h pci_if.h
-SRCS+= opt_acpi.h opt_platform.h
+
+SRCS+= opt_acpi.h
+SRCS+= opt_platform.h
+SRCS+= opt_rss.h
SRCS.DEV_ACPI= dpaa2_mc_acpi.c \
- memac_mdio_acpi.c \
- acpi_if.h \
- acpi_bus_if.h
+ memac_mdio_acpi.c \
+ acpi_if.h \
+ acpi_bus_if.h
.if !empty(OPT_FDT)
SRCS+= dpaa2_mc_fdt.c \