Diffstat (limited to 'sys/dev/dpaa2')
-rw-r--r--  sys/dev/dpaa2/dpaa2_bp.c           |  205
-rw-r--r--  sys/dev/dpaa2/dpaa2_bp.h           |   74
-rw-r--r--  sys/dev/dpaa2/dpaa2_cmd_if.m       | 1583
-rw-r--r--  sys/dev/dpaa2/dpaa2_con.c          |  213
-rw-r--r--  sys/dev/dpaa2/dpaa2_con.h          |   70
-rw-r--r--  sys/dev/dpaa2/dpaa2_io.c           |  570
-rw-r--r--  sys/dev/dpaa2/dpaa2_io.h           |  110
-rw-r--r--  sys/dev/dpaa2/dpaa2_mac.c          |  376
-rw-r--r--  sys/dev/dpaa2/dpaa2_mac.h          |  124
-rw-r--r--  sys/dev/dpaa2/dpaa2_mc.c           |  973
-rw-r--r--  sys/dev/dpaa2/dpaa2_mc.h           |  218
-rw-r--r--  sys/dev/dpaa2/dpaa2_mc_acpi.c      |  393
-rw-r--r--  sys/dev/dpaa2/dpaa2_mc_fdt.c       |  399
-rw-r--r--  sys/dev/dpaa2/dpaa2_mc_if.m        |  152
-rw-r--r--  sys/dev/dpaa2/dpaa2_mcp.c          |  318
-rw-r--r--  sys/dev/dpaa2/dpaa2_mcp.h          |  449
-rw-r--r--  sys/dev/dpaa2/dpaa2_ni.c           | 3670
-rw-r--r--  sys/dev/dpaa2/dpaa2_ni.h           |  607
-rw-r--r--  sys/dev/dpaa2/dpaa2_ni_dpkg.h      |  536
-rw-r--r--  sys/dev/dpaa2/dpaa2_rc.c           | 3585
-rw-r--r--  sys/dev/dpaa2/dpaa2_swp.c          | 1169
-rw-r--r--  sys/dev/dpaa2/dpaa2_swp.h          |  504
-rw-r--r--  sys/dev/dpaa2/dpaa2_swp_if.m       |   96
-rw-r--r--  sys/dev/dpaa2/dpaa2_types.h        |  114
-rw-r--r--  sys/dev/dpaa2/memac_mdio.h         |   64
-rw-r--r--  sys/dev/dpaa2/memac_mdio_acpi.c    |  310
-rw-r--r--  sys/dev/dpaa2/memac_mdio_common.c  |  306
-rw-r--r--  sys/dev/dpaa2/memac_mdio_fdt.c     |  308
-rw-r--r--  sys/dev/dpaa2/memac_mdio_if.m      |   42
29 files changed, 17538 insertions(+), 0 deletions(-)
diff --git a/sys/dev/dpaa2/dpaa2_bp.c b/sys/dev/dpaa2/dpaa2_bp.c
new file mode 100644
index 000000000000..78e1ca68cdb1
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_bp.c
@@ -0,0 +1,205 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * The DPAA2 Buffer Pool (DPBP) driver.
+ *
+ * The DPBP configures a buffer pool that can be associated with DPAA2 network
+ * and accelerator interfaces.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+
+#include <vm/vm.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/pci/pcivar.h>
+
+#include "pcib_if.h"
+#include "pci_if.h"
+
+#include "dpaa2_mc.h"
+#include "dpaa2_mcp.h"
+#include "dpaa2_swp.h"
+#include "dpaa2_swp_if.h"
+#include "dpaa2_cmd_if.h"
+
+/* DPAA2 Buffer Pool resource specification. */
+struct resource_spec dpaa2_bp_spec[] = {
+ /*
+ * DPMCP resources.
+ *
+ * NOTE: MC command portals (MCPs) are used to send commands to, and
+ * receive responses from, the MC firmware. One portal per DPBP.
+ */
+#define MCP_RES_NUM (1u)
+#define MCP_RID_OFF (0u)
+#define MCP_RID(rid) ((rid) + MCP_RID_OFF)
+ /* --- */
+ { DPAA2_DEV_MCP, MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ /* --- */
+ RESOURCE_SPEC_END
+};
+
+static int
+dpaa2_bp_probe(device_t dev)
+{
+ /* DPBP device will be added by the parent resource container. */
+ device_set_desc(dev, "DPAA2 Buffer Pool");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+dpaa2_bp_detach(device_t dev)
+{
+ device_t child = dev;
+ struct dpaa2_bp_softc *sc = device_get_softc(dev);
+ struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
+
+ if (sc->cmd != NULL) {
+ (void)DPAA2_CMD_BP_DISABLE(dev, child, sc->cmd);
+ (void)DPAA2_CMD_BP_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd,
+ sc->bp_token));
+ (void)DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd,
+ sc->rc_token));
+
+ dpaa2_mcp_free_command(sc->cmd);
+ sc->cmd = NULL;
+ }
+
+ dinfo->portal = NULL;
+ bus_release_resources(sc->dev, dpaa2_bp_spec, sc->res);
+
+ return (0);
+}
+
+static int
+dpaa2_bp_attach(device_t dev)
+{
+ device_t pdev = device_get_parent(dev);
+ device_t child = dev;
+ device_t mcp_dev;
+ struct dpaa2_bp_softc *sc = device_get_softc(dev);
+ struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
+ struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
+ struct dpaa2_devinfo *mcp_dinfo;
+ int error;
+
+ sc->dev = dev;
+ sc->cmd = NULL;
+
+ error = bus_alloc_resources(sc->dev, dpaa2_bp_spec, sc->res);
+ if (error) {
+ device_printf(dev, "%s: failed to allocate resources: "
+ "error=%d\n", __func__, error);
+ return (ENXIO);
+ }
+
+ /* Send commands to MC via allocated portal. */
+ mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]);
+ mcp_dinfo = device_get_ivars(mcp_dev);
+ dinfo->portal = mcp_dinfo->portal;
+
+ /* Allocate a command to send to MC hardware. */
+ error = dpaa2_mcp_init_command(&sc->cmd, DPAA2_CMD_DEF);
+ if (error) {
+ device_printf(dev, "%s: failed to allocate dpaa2_cmd: "
+ "error=%d\n", __func__, error);
+ dpaa2_bp_detach(dev);
+ return (ENXIO);
+ }
+
+ /* Open resource container and DPBP object. */
+ error = DPAA2_CMD_RC_OPEN(dev, child, sc->cmd, rcinfo->id,
+ &sc->rc_token);
+ if (error) {
+ device_printf(dev, "%s: failed to open DPRC: error=%d\n",
+ __func__, error);
+ dpaa2_bp_detach(dev);
+ return (ENXIO);
+ }
+ error = DPAA2_CMD_BP_OPEN(dev, child, sc->cmd, dinfo->id, &sc->bp_token);
+ if (error) {
+ device_printf(dev, "%s: failed to open DPBP: id=%d, error=%d\n",
+ __func__, dinfo->id, error);
+ dpaa2_bp_detach(dev);
+ return (ENXIO);
+ }
+
+ /* Prepare DPBP object. */
+ error = DPAA2_CMD_BP_RESET(dev, child, sc->cmd);
+ if (error) {
+ device_printf(dev, "%s: failed to reset DPBP: id=%d, error=%d\n",
+ __func__, dinfo->id, error);
+ dpaa2_bp_detach(dev);
+ return (ENXIO);
+ }
+ error = DPAA2_CMD_BP_ENABLE(dev, child, sc->cmd);
+ if (error) {
+ device_printf(dev, "%s: failed to enable DPBP: id=%d, "
+ "error=%d\n", __func__, dinfo->id, error);
+ dpaa2_bp_detach(dev);
+ return (ENXIO);
+ }
+ error = DPAA2_CMD_BP_GET_ATTRIBUTES(dev, child, sc->cmd, &sc->attr);
+ if (error) {
+ device_printf(dev, "%s: failed to get DPBP attributes: id=%d, "
+ "error=%d\n", __func__, dinfo->id, error);
+ dpaa2_bp_detach(dev);
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static device_method_t dpaa2_bp_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, dpaa2_bp_probe),
+ DEVMETHOD(device_attach, dpaa2_bp_attach),
+ DEVMETHOD(device_detach, dpaa2_bp_detach),
+
+ DEVMETHOD_END
+};
+
+static driver_t dpaa2_bp_driver = {
+ "dpaa2_bp",
+ dpaa2_bp_methods,
+ sizeof(struct dpaa2_bp_softc),
+};
+
+DRIVER_MODULE(dpaa2_bp, dpaa2_rc, dpaa2_bp_driver, 0, 0);
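
For context: once dpaa2_bp_attach() completes, a consumer such as the network
interface driver is expected to pull the hardware buffer pool ID out of the
softc and pass it to QBMan buffer acquire/release operations. A minimal sketch
of that consumer side (illustrative only; the lookup of the attached DPBP
device is assumed to have happened elsewhere):

	/*
	 * Illustrative sketch. Returns the hardware buffer pool ID (bpid)
	 * that DPAA2_CMD_BP_GET_ATTRIBUTES() stored in the softc during
	 * attach; this is the ID QBMan expects for acquire/release.
	 */
	static uint16_t
	example_bp_get_bpid(device_t bp_dev)
	{
		struct dpaa2_bp_softc *bpsc = device_get_softc(bp_dev);

		return (bpsc->attr.bpid);
	}
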
diff --git a/sys/dev/dpaa2/dpaa2_bp.h b/sys/dev/dpaa2/dpaa2_bp.h
new file mode 100644
index 000000000000..3ba7196eb030
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_bp.h
@@ -0,0 +1,74 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_BP_H
+#define _DPAA2_BP_H
+
+#include <sys/bus.h>
+
+/* Maximum resources per DPBP: 1 DPMCP. */
+#define DPAA2_BP_MAX_RESOURCES 1
+
+/**
+ * @brief Attributes of the DPBP object.
+ *
+ * id: DPBP object ID.
+ * bpid: Hardware buffer pool ID; should be used as an argument in
+ * acquire/release operations on buffers.
+ */
+struct dpaa2_bp_attr {
+ uint32_t id;
+ uint16_t bpid;
+};
+
+/**
+ * @brief Configuration/state of the buffer pool.
+ *
+ * bdi:       Bypass datapath isolation (BDI) flag.
+ * state:     State of the buffer pool (bitmask).
+ * free_bufn: Number of free buffers in the pool.
+ */
+struct dpaa2_bp_conf {
+	uint8_t		 bdi;
+	uint8_t		 state;		/* bitmask */
+	uint32_t	 free_bufn;
+};
+
+/**
+ * @brief Software context for the DPAA2 Buffer Pool driver.
+ */
+struct dpaa2_bp_softc {
+ device_t dev;
+ struct dpaa2_bp_attr attr;
+
+ /* Help to send commands to MC. */
+ struct dpaa2_cmd *cmd;
+ uint16_t rc_token;
+ uint16_t bp_token;
+
+ struct resource *res[DPAA2_BP_MAX_RESOURCES];
+};
+
+extern struct resource_spec dpaa2_bp_spec[];
+
+#endif /* _DPAA2_BP_H */
diff --git a/sys/dev/dpaa2/dpaa2_cmd_if.m b/sys/dev/dpaa2/dpaa2_cmd_if.m
new file mode 100644
index 000000000000..96031d4ae8c5
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_cmd_if.m
@@ -0,0 +1,1583 @@
+#-
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright © 2021-2022 Dmitry Salychev
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+
+#include <machine/bus.h>
+#include <dev/dpaa2/dpaa2_types.h>
+#include <dev/dpaa2/dpaa2_mc.h>
+#include <dev/dpaa2/dpaa2_mcp.h>
+
+/**
+ * @brief DPAA2 MC command interface.
+ *
+ * The primary purpose of the MC provided DPAA2 objects is to simplify DPAA2
+ * hardware block usage through abstraction and encapsulation.
+ */
+INTERFACE dpaa2_cmd;
+
+#
+# Default implementation of the commands.
+#
+CODE {
+ static void
+ panic_on_mc(device_t dev)
+ {
+ if (strcmp(device_get_name(dev), "dpaa2_mc") == 0)
+ panic("No one can handle a command above DPAA2 MC");
+ }
+
+ static int
+ bypass_mng_get_version(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t *major, uint32_t *minor, uint32_t *rev)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MNG_GET_VERSION(device_get_parent(dev), child,
+ cmd, major, minor, rev));
+ return (ENXIO);
+ }
+ static int
+ bypass_mng_get_soc_version(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t *pvr, uint32_t *svr)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MNG_GET_SOC_VERSION(
+ device_get_parent(dev), child, cmd, pvr, svr));
+ return (ENXIO);
+ }
+ static int
+ bypass_mng_get_container_id(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t *cont_id)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MNG_GET_CONTAINER_ID(
+ device_get_parent(dev), child, cmd, cont_id));
+ return (ENXIO);
+ }
+ static int
+ bypass_rc_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t cont_id,
+ uint16_t *token)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_RC_OPEN(
+ device_get_parent(dev), child, cmd, cont_id, token));
+ return (ENXIO);
+ }
+ static int
+ bypass_rc_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_RC_CLOSE(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_rc_get_obj_count(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t *obj_count)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_RC_GET_OBJ_COUNT(
+ device_get_parent(dev), child, cmd, obj_count));
+ return (ENXIO);
+ }
+ static int
+ bypass_rc_get_obj(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t obj_idx,
+ struct dpaa2_obj *obj)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_RC_GET_OBJ(
+ device_get_parent(dev), child, cmd, obj_idx, obj));
+ return (ENXIO);
+ }
+ static int
+ bypass_rc_get_obj_descriptor(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t obj_id, enum dpaa2_dev_type type, struct dpaa2_obj *obj)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_RC_GET_OBJ_DESCRIPTOR(
+ device_get_parent(dev), child, cmd, obj_id, type, obj));
+ return (ENXIO);
+ }
+ static int
+ bypass_rc_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_rc_attr *attr)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_RC_GET_ATTRIBUTES(
+ device_get_parent(dev), child, cmd, attr));
+ return (ENXIO);
+ }
+ static int
+ bypass_rc_get_obj_region(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t obj_id, uint8_t reg_idx, enum dpaa2_dev_type type,
+ struct dpaa2_rc_obj_region *reg)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_RC_GET_OBJ_REGION(
+ device_get_parent(dev), child, cmd, obj_id, reg_idx,
+ type, reg));
+ return (ENXIO);
+ }
+ static int
+ bypass_rc_get_api_version(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint16_t *major, uint16_t *minor)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_RC_GET_API_VERSION(
+ device_get_parent(dev), child, cmd, major, minor));
+ return (ENXIO);
+ }
+ static int
+ bypass_rc_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, uint8_t enable)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_RC_SET_IRQ_ENABLE(
+ device_get_parent(dev), child, cmd, irq_idx, enable));
+ return (ENXIO);
+ }
+ static int
+ bypass_rc_set_obj_irq(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, uint64_t addr, uint32_t data, uint32_t irq_usr,
+ uint32_t obj_id, enum dpaa2_dev_type type)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_RC_SET_OBJ_IRQ(
+ device_get_parent(dev), child, cmd, irq_idx, addr, data,
+ irq_usr, obj_id, type));
+ return (ENXIO);
+ }
+ static int
+ bypass_rc_get_conn(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ep_desc *ep1_desc, struct dpaa2_ep_desc *ep2_desc,
+ uint32_t *link_stat)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_RC_GET_CONN(
+ device_get_parent(dev), child, cmd, ep1_desc, ep2_desc,
+ link_stat));
+ return (ENXIO);
+ }
+
+ static int
+ bypass_ni_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpni_id,
+ uint16_t *token)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_OPEN(
+ device_get_parent(dev), child, cmd, dpni_id, token));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_CLOSE(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_ENABLE(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_DISABLE(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_get_api_version(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint16_t *major, uint16_t *minor)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_GET_API_VERSION(
+ device_get_parent(dev), child, cmd, major, minor));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_RESET(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_attr *attr)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_GET_ATTRIBUTES(
+ device_get_parent(dev), child, cmd, attr));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_set_buf_layout(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_buf_layout *bl)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_SET_BUF_LAYOUT(
+ device_get_parent(dev), child, cmd, bl));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_get_tx_data_off(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint16_t *offset)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_GET_TX_DATA_OFF(
+ device_get_parent(dev), child, cmd, offset));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_set_link_cfg(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_link_cfg *cfg)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_SET_LINK_CFG(
+ device_get_parent(dev), child, cmd, cfg));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_get_link_cfg(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_link_cfg *cfg)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_GET_LINK_CFG(
+ device_get_parent(dev), child, cmd, cfg));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_get_link_state(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_link_state *state)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_GET_LINK_STATE(
+ device_get_parent(dev), child, cmd, state));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_get_port_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t *mac)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_GET_PORT_MAC_ADDR(
+ device_get_parent(dev), child, cmd, mac));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_set_prim_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t *mac)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(
+ device_get_parent(dev), child, cmd, mac));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_get_prim_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t *mac)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(
+ device_get_parent(dev), child, cmd, mac));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_set_qos_table(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_qos_table *tbl)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_SET_QOS_TABLE(
+ device_get_parent(dev), child, cmd, tbl));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_clear_qos_table(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_CLEAR_QOS_TABLE(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_set_pools(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_pools_cfg *cfg)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_SET_POOLS(
+ device_get_parent(dev), child, cmd, cfg));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_set_err_behavior(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_err_cfg *cfg)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_SET_ERR_BEHAVIOR(
+ device_get_parent(dev), child, cmd, cfg));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_get_queue(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_queue_cfg *cfg)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_GET_QUEUE(
+ device_get_parent(dev), child, cmd, cfg));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_set_queue(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_queue_cfg *cfg)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_SET_QUEUE(
+ device_get_parent(dev), child, cmd, cfg));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_get_qdid(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ enum dpaa2_ni_queue_type type, uint16_t *qdid)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_GET_QDID(
+ device_get_parent(dev), child, cmd, type, qdid));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_add_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t *mac)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_ADD_MAC_ADDR(
+ device_get_parent(dev), child, cmd, mac));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_remove_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t *mac)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_REMOVE_MAC_ADDR(
+ device_get_parent(dev), child, cmd, mac));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_clear_mac_filters(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ bool rm_uni, bool rm_multi)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_CLEAR_MAC_FILTERS(
+ device_get_parent(dev), child, cmd, rm_uni, rm_multi));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_set_mfl(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint16_t length)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_SET_MFL(
+ device_get_parent(dev), child, cmd, length));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_set_offload(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ enum dpaa2_ni_ofl_type ofl_type, bool en)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_SET_OFFLOAD(
+ device_get_parent(dev), child, cmd, ofl_type, en));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_set_irq_mask(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, uint32_t mask)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_SET_IRQ_MASK(
+ device_get_parent(dev), child, cmd, irq_idx, mask));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, bool en)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_SET_IRQ_ENABLE(
+ device_get_parent(dev), child, cmd, irq_idx, en));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_get_irq_status(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, uint32_t *status)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_GET_IRQ_STATUS(
+ device_get_parent(dev), child, cmd, irq_idx, status));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_set_uni_promisc(device_t dev, device_t child, struct dpaa2_cmd *cmd, bool en)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_SET_UNI_PROMISC(
+ device_get_parent(dev), child, cmd, en));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_set_multi_promisc(device_t dev, device_t child, struct dpaa2_cmd *cmd, bool en)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_SET_MULTI_PROMISC(
+ device_get_parent(dev), child, cmd, en));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_get_statistics(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t page, uint16_t param, uint64_t *cnt)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_GET_STATISTICS(
+ device_get_parent(dev), child, cmd, page, param, cnt));
+ return (ENXIO);
+ }
+ static int
+ bypass_ni_set_rx_tc_dist(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint16_t dist_size, uint8_t tc, enum dpaa2_ni_dist_mode dist_mode,
+ bus_addr_t key_cfg_buf)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_NI_SET_RX_TC_DIST(
+ device_get_parent(dev), child, cmd, dist_size, tc,
+ dist_mode, key_cfg_buf));
+ return (ENXIO);
+ }
+
+ static int
+ bypass_io_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpio_id,
+ uint16_t *token)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_IO_OPEN(
+ device_get_parent(dev), child, cmd, dpio_id, token));
+ return (ENXIO);
+ }
+ static int
+ bypass_io_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_IO_CLOSE(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_io_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_IO_ENABLE(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_io_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_IO_DISABLE(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_io_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_IO_RESET(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_io_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_io_attr *attr)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_IO_GET_ATTRIBUTES(
+ device_get_parent(dev), child, cmd, attr));
+ return (ENXIO);
+ }
+ static int
+ bypass_io_set_irq_mask(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, uint32_t mask)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_IO_SET_IRQ_MASK(
+ device_get_parent(dev), child, cmd, irq_idx, mask));
+ return (ENXIO);
+ }
+ static int
+ bypass_io_get_irq_status(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, uint32_t *status)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_IO_GET_IRQ_STATUS(
+ device_get_parent(dev), child, cmd, irq_idx, status));
+ return (ENXIO);
+ }
+ static int
+ bypass_io_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, bool en)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_IO_SET_IRQ_ENABLE(
+ device_get_parent(dev), child, cmd, irq_idx, en));
+ return (ENXIO);
+ }
+ static int
+ bypass_io_add_static_dq_chan(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t dpcon_id, uint8_t *chan_idx)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_IO_ADD_STATIC_DQ_CHAN(
+ device_get_parent(dev), child, cmd, dpcon_id, chan_idx));
+ return (ENXIO);
+ }
+
+ static int
+ bypass_bp_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpbp_id,
+ uint16_t *token)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_BP_OPEN(
+ device_get_parent(dev), child, cmd, dpbp_id, token));
+ return (ENXIO);
+ }
+ static int
+ bypass_bp_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_BP_CLOSE(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_bp_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_BP_ENABLE(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_bp_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_BP_DISABLE(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_bp_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_BP_RESET(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_bp_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_bp_attr *attr)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_BP_GET_ATTRIBUTES(
+ device_get_parent(dev), child, cmd, attr));
+ return (ENXIO);
+ }
+
+ static int
+ bypass_mac_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpmac_id,
+ uint16_t *token)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MAC_OPEN(
+ device_get_parent(dev), child, cmd, dpmac_id, token));
+ return (ENXIO);
+ }
+ static int
+ bypass_mac_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MAC_CLOSE(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_mac_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MAC_RESET(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_mac_mdio_read(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t phy,
+ uint16_t reg, uint16_t *val)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MAC_MDIO_READ(
+ device_get_parent(dev), child, cmd, phy, reg, val));
+ return (ENXIO);
+ }
+ static int
+ bypass_mac_mdio_write(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t phy,
+ uint16_t reg, uint16_t val)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MAC_MDIO_WRITE(
+ device_get_parent(dev), child, cmd, phy, reg, val));
+ return (ENXIO);
+ }
+ static int
+ bypass_mac_get_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t *mac)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MAC_GET_ADDR(
+ device_get_parent(dev), child, cmd, mac));
+ return (ENXIO);
+ }
+ static int
+ bypass_mac_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_mac_attr *attr)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MAC_GET_ATTRIBUTES(
+ device_get_parent(dev), child, cmd, attr));
+ return (ENXIO);
+ }
+ static int
+ bypass_mac_set_link_state(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_mac_link_state *state)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MAC_SET_LINK_STATE(
+ device_get_parent(dev), child, cmd, state));
+ return (ENXIO);
+ }
+ static int
+ bypass_mac_set_irq_mask(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, uint32_t mask)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MAC_SET_IRQ_MASK(
+ device_get_parent(dev), child, cmd, irq_idx, mask));
+ return (ENXIO);
+ }
+ static int
+ bypass_mac_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, bool en)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MAC_SET_IRQ_ENABLE(
+ device_get_parent(dev), child, cmd, irq_idx, en));
+ return (ENXIO);
+ }
+ static int
+ bypass_mac_get_irq_status(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, uint32_t *status)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MAC_GET_IRQ_STATUS(
+ device_get_parent(dev), child, cmd, irq_idx, status));
+ return (ENXIO);
+ }
+
+ static int
+ bypass_con_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpcon_id,
+ uint16_t *token)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_CON_OPEN(
+ device_get_parent(dev), child, cmd, dpcon_id, token));
+ return (ENXIO);
+ }
+ static int
+ bypass_con_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_CON_CLOSE(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_con_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_CON_RESET(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_con_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_CON_ENABLE(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_con_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_CON_DISABLE(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_con_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_con_attr *attr)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_CON_GET_ATTRIBUTES(
+ device_get_parent(dev), child, cmd, attr));
+ return (ENXIO);
+ }
+ static int
+ bypass_con_set_notif(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_con_notif_cfg *cfg)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_CON_SET_NOTIF(
+ device_get_parent(dev), child, cmd, cfg));
+ return (ENXIO);
+ }
+
+ /* Data Path MC Portal (DPMCP) commands. */
+
+ static int
+ bypass_mcp_create(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t portal_id, uint32_t options, uint32_t *dpmcp_id)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MCP_CREATE(
+ device_get_parent(dev), child, cmd, portal_id,
+ options, dpmcp_id));
+ return (ENXIO);
+ }
+ static int
+ bypass_mcp_destroy(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t dpmcp_id)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MCP_DESTROY(
+ device_get_parent(dev), child, cmd, dpmcp_id));
+ return (ENXIO);
+ }
+ static int
+ bypass_mcp_open(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t dpmcp_id, uint16_t *token)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MCP_OPEN(
+ device_get_parent(dev), child, cmd, dpmcp_id,
+ token));
+ return (ENXIO);
+ }
+ static int
+ bypass_mcp_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MCP_CLOSE(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+ static int
+ bypass_mcp_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+ {
+ panic_on_mc(dev);
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_CMD_MCP_RESET(
+ device_get_parent(dev), child, cmd));
+ return (ENXIO);
+ }
+};
+
+/**
+ * @brief Data Path Management (DPMNG) commands.
+ */
+
+METHOD int mng_get_version {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t *major;
+ uint32_t *minor;
+ uint32_t *rev;
+} DEFAULT bypass_mng_get_version;
+
+METHOD int mng_get_soc_version {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t *pvr;
+ uint32_t *svr;
+} DEFAULT bypass_mng_get_soc_version;
+
+METHOD int mng_get_container_id {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t *cont_id;
+} DEFAULT bypass_mng_get_container_id;
+
+/**
+ * @brief Data Path Resource Container (DPRC) commands.
+ */
+
+METHOD int rc_open {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t cont_id;
+ uint16_t *token;
+} DEFAULT bypass_rc_open;
+
+METHOD int rc_close {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_rc_close;
+
+METHOD int rc_get_obj_count {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t *obj_count;
+} DEFAULT bypass_rc_get_obj_count;
+
+METHOD int rc_get_obj {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t obj_idx;
+ struct dpaa2_obj *obj;
+} DEFAULT bypass_rc_get_obj;
+
+METHOD int rc_get_obj_descriptor {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t obj_id;
+ enum dpaa2_dev_type type;
+ struct dpaa2_obj *obj;
+} DEFAULT bypass_rc_get_obj_descriptor;
+
+METHOD int rc_get_attributes {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_rc_attr *attr;
+} DEFAULT bypass_rc_get_attributes;
+
+METHOD int rc_get_obj_region {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t obj_id;
+ uint8_t reg_idx;
+ enum dpaa2_dev_type type;
+ struct dpaa2_rc_obj_region *reg;
+} DEFAULT bypass_rc_get_obj_region;
+
+METHOD int rc_get_api_version {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint16_t *major;
+ uint16_t *minor;
+} DEFAULT bypass_rc_get_api_version;
+
+METHOD int rc_set_irq_enable {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t irq_idx;
+ uint8_t enable;
+} DEFAULT bypass_rc_set_irq_enable;
+
+METHOD int rc_set_obj_irq {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t irq_idx;
+ uint64_t addr;
+ uint32_t data;
+ uint32_t irq_usr;
+ uint32_t obj_id;
+ enum dpaa2_dev_type type;
+} DEFAULT bypass_rc_set_obj_irq;
+
+METHOD int rc_get_conn {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_ep_desc *ep1_desc;
+ struct dpaa2_ep_desc *ep2_desc;
+ uint32_t *link_stat;
+} DEFAULT bypass_rc_get_conn;
+
+/**
+ * @brief Data Path Network Interface (DPNI) commands.
+ */
+
+METHOD int ni_open {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t dpni_id;
+ uint16_t *token;
+} DEFAULT bypass_ni_open;
+
+METHOD int ni_close {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_ni_close;
+
+METHOD int ni_enable {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_ni_enable;
+
+METHOD int ni_disable {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_ni_disable;
+
+METHOD int ni_get_api_version {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint16_t *major;
+ uint16_t *minor;
+} DEFAULT bypass_ni_get_api_version;
+
+METHOD int ni_reset {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_ni_reset;
+
+METHOD int ni_get_attributes {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_ni_attr *attr;
+} DEFAULT bypass_ni_get_attributes;
+
+METHOD int ni_set_buf_layout {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_ni_buf_layout *bl;
+} DEFAULT bypass_ni_set_buf_layout;
+
+METHOD int ni_get_tx_data_off {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint16_t *offset;
+} DEFAULT bypass_ni_get_tx_data_off;
+
+METHOD int ni_set_link_cfg {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_ni_link_cfg *cfg;
+} DEFAULT bypass_ni_set_link_cfg;
+
+METHOD int ni_get_link_cfg {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_ni_link_cfg *cfg;
+} DEFAULT bypass_ni_get_link_cfg;
+
+METHOD int ni_get_link_state {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_ni_link_state *state;
+} DEFAULT bypass_ni_get_link_state;
+
+METHOD int ni_get_port_mac_addr {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t *mac;
+} DEFAULT bypass_ni_get_port_mac_addr;
+
+METHOD int ni_set_prim_mac_addr {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t *mac;
+} DEFAULT bypass_ni_set_prim_mac_addr;
+
+METHOD int ni_get_prim_mac_addr {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t *mac;
+} DEFAULT bypass_ni_get_prim_mac_addr;
+
+METHOD int ni_set_qos_table {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_ni_qos_table *tbl;
+} DEFAULT bypass_ni_set_qos_table;
+
+METHOD int ni_clear_qos_table {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_ni_clear_qos_table;
+
+METHOD int ni_set_pools {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_ni_pools_cfg *cfg;
+} DEFAULT bypass_ni_set_pools;
+
+METHOD int ni_set_err_behavior {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_ni_err_cfg *cfg;
+} DEFAULT bypass_ni_set_err_behavior;
+
+METHOD int ni_get_queue {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_ni_queue_cfg *cfg;
+} DEFAULT bypass_ni_get_queue;
+
+METHOD int ni_set_queue {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_ni_queue_cfg *cfg;
+} DEFAULT bypass_ni_set_queue;
+
+METHOD int ni_get_qdid {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ enum dpaa2_ni_queue_type type;
+ uint16_t *qdid;
+} DEFAULT bypass_ni_get_qdid;
+
+METHOD int ni_add_mac_addr {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t *mac;
+} DEFAULT bypass_ni_add_mac_addr;
+
+METHOD int ni_remove_mac_addr {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t *mac;
+} DEFAULT bypass_ni_remove_mac_addr;
+
+METHOD int ni_clear_mac_filters {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ bool rm_uni;
+ bool rm_multi;
+} DEFAULT bypass_ni_clear_mac_filters;
+
+METHOD int ni_set_mfl {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint16_t length;
+} DEFAULT bypass_ni_set_mfl;
+
+METHOD int ni_set_offload {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ enum dpaa2_ni_ofl_type ofl_type;
+ bool en;
+} DEFAULT bypass_ni_set_offload;
+
+METHOD int ni_set_irq_mask {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t irq_idx;
+ uint32_t mask;
+} DEFAULT bypass_ni_set_irq_mask;
+
+METHOD int ni_set_irq_enable {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t irq_idx;
+ bool en;
+} DEFAULT bypass_ni_set_irq_enable;
+
+METHOD int ni_get_irq_status {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t irq_idx;
+ uint32_t *status;
+} DEFAULT bypass_ni_get_irq_status;
+
+METHOD int ni_set_uni_promisc {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ bool en;
+} DEFAULT bypass_ni_set_uni_promisc;
+
+METHOD int ni_set_multi_promisc {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ bool en;
+} DEFAULT bypass_ni_set_multi_promisc;
+
+METHOD int ni_get_statistics {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t page;
+ uint16_t param;
+ uint64_t *cnt;
+} DEFAULT bypass_ni_get_statistics;
+
+METHOD int ni_set_rx_tc_dist {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint16_t dist_size;
+ uint8_t tc;
+ enum dpaa2_ni_dist_mode dist_mode;
+ bus_addr_t key_cfg_buf;
+} DEFAULT bypass_ni_set_rx_tc_dist;
+
+/**
+ * @brief Data Path I/O (DPIO) commands.
+ */
+
+METHOD int io_open {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t dpio_id;
+ uint16_t *token;
+} DEFAULT bypass_io_open;
+
+METHOD int io_close {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_io_close;
+
+METHOD int io_enable {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_io_enable;
+
+METHOD int io_disable {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_io_disable;
+
+METHOD int io_reset {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_io_reset;
+
+METHOD int io_get_attributes {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_io_attr *attr;
+} DEFAULT bypass_io_get_attributes;
+
+METHOD int io_set_irq_mask {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t irq_idx;
+ uint32_t mask;
+} DEFAULT bypass_io_set_irq_mask;
+
+METHOD int io_get_irq_status {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t irq_idx;
+ uint32_t *status;
+} DEFAULT bypass_io_get_irq_status;
+
+METHOD int io_set_irq_enable {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t irq_idx;
+ bool en;
+} DEFAULT bypass_io_set_irq_enable;
+
+METHOD int io_add_static_dq_chan {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t dpcon_id;
+ uint8_t *chan_idx;
+} DEFAULT bypass_io_add_static_dq_chan;
+
+/**
+ * @brief Data Path Buffer Pool (DPBP) commands.
+ */
+
+METHOD int bp_open {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t dpbp_id;
+ uint16_t *token;
+} DEFAULT bypass_bp_open;
+
+METHOD int bp_close {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_bp_close;
+
+METHOD int bp_enable {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_bp_enable;
+
+METHOD int bp_disable {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_bp_disable;
+
+METHOD int bp_reset {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_bp_reset;
+
+METHOD int bp_get_attributes {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_bp_attr *attr;
+} DEFAULT bypass_bp_get_attributes;
+
+/**
+ * @brief Data Path MAC (DPMAC) commands.
+ */
+
+METHOD int mac_open {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t dpmac_id;
+ uint16_t *token;
+} DEFAULT bypass_mac_open;
+
+METHOD int mac_close {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_mac_close;
+
+METHOD int mac_reset {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_mac_reset;
+
+METHOD int mac_mdio_read {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t phy;
+ uint16_t reg;
+ uint16_t *val;
+} DEFAULT bypass_mac_mdio_read;
+
+METHOD int mac_mdio_write {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t phy;
+ uint16_t reg;
+ uint16_t val;
+} DEFAULT bypass_mac_mdio_write;
+
+METHOD int mac_get_addr {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t *mac;
+} DEFAULT bypass_mac_get_addr;
+
+METHOD int mac_get_attributes {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_mac_attr *attr;
+} DEFAULT bypass_mac_get_attributes;
+
+METHOD int mac_set_link_state {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_mac_link_state *state;
+} DEFAULT bypass_mac_set_link_state;
+
+METHOD int mac_set_irq_mask {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t irq_idx;
+ uint32_t mask;
+} DEFAULT bypass_mac_set_irq_mask;
+
+METHOD int mac_set_irq_enable {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t irq_idx;
+ bool en;
+} DEFAULT bypass_mac_set_irq_enable;
+
+METHOD int mac_get_irq_status {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint8_t irq_idx;
+ uint32_t *status;
+} DEFAULT bypass_mac_get_irq_status;
+
+/**
+ * @brief Data Path Concentrator (DPCON) commands.
+ */
+
+METHOD int con_open {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t dpcon_id;
+ uint16_t *token;
+} DEFAULT bypass_con_open;
+
+METHOD int con_close {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_con_close;
+
+METHOD int con_reset {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_con_reset;
+
+METHOD int con_enable {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_con_enable;
+
+METHOD int con_disable {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_con_disable;
+
+METHOD int con_get_attributes {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_con_attr *attr;
+} DEFAULT bypass_con_get_attributes;
+
+METHOD int con_set_notif {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_con_notif_cfg *cfg;
+} DEFAULT bypass_con_set_notif;
+
+/**
+ * @brief Data Path MC Portal (DPMCP) commands.
+ */
+
+METHOD int mcp_create {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t portal_id;
+ uint32_t options;
+ uint32_t *dpmcp_id;
+} DEFAULT bypass_mcp_create;
+
+METHOD int mcp_destroy {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t dpmcp_id;
+} DEFAULT bypass_mcp_destroy;
+
+METHOD int mcp_open {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+ uint32_t dpmcp_id;
+ uint16_t *token;
+} DEFAULT bypass_mcp_open;
+
+METHOD int mcp_close {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_mcp_close;
+
+METHOD int mcp_reset {
+ device_t dev;
+ device_t child;
+ struct dpaa2_cmd *cmd;
+} DEFAULT bypass_mcp_reset;
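
The DEFAULT implementations above all follow one delegation pattern: each
bypass_*() forwards the call to the device's newbus parent, so a DPAA2_CMD_*
invocation made by an object driver climbs the device tree until a driver that
actually implements the method (normally dpaa2_rc, the resource container)
handles it, and panic_on_mc() stops the walk from climbing past dpaa2_mc. A
minimal sketch of the caller's side of this pattern, mirroring
dpaa2_bp_attach() above:

	/*
	 * Illustrative sketch. The caller does not know which ancestor
	 * implements rc_open; the DEFAULT bypass chain forwards the call
	 * up the device tree until dpaa2_rc handles it.
	 */
	uint16_t rc_token;
	int error;

	error = DPAA2_CMD_RC_OPEN(dev, child, sc->cmd, rcinfo->id, &rc_token);
	if (error != 0)
		device_printf(dev, "failed to open DPRC: error=%d\n", error);
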
diff --git a/sys/dev/dpaa2/dpaa2_con.c b/sys/dev/dpaa2/dpaa2_con.c
new file mode 100644
index 000000000000..602497c2c8de
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_con.c
@@ -0,0 +1,213 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * The DPAA2 Concentrator (DPCON) driver.
+ *
+ * Supports configuration of QBMan channels for advanced scheduling of ingress
+ * packets from one or more network interfaces.
+ *
+ * DPCONs are used to distribute Rx or Tx confirmation traffic to different
+ * cores, via affine DPIO objects. The implication is that one DPCON must be
+ * available for each core to which Rx or Tx confirmation traffic should be
+ * distributed.
+ *
+ * A QBMan channel contains several work queues (WQs). The WQs within a
+ * channel have a priority relative to each other. Each channel consists of
+ * either two or eight WQs, so a channel provides either two or eight
+ * priority levels.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+
+#include <vm/vm.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/pci/pcivar.h>
+
+#include "pcib_if.h"
+#include "pci_if.h"
+
+#include "dpaa2_mcp.h"
+#include "dpaa2_swp.h"
+#include "dpaa2_mc.h"
+#include "dpaa2_cmd_if.h"
+
+/* DPAA2 Concentrator resource specification. */
+struct resource_spec dpaa2_con_spec[] = {
+ /*
+ * DPMCP resources.
+ *
+ * NOTE: MC command portals (MCPs) are used to send commands to, and
+ * receive responses from, the MC firmware. One portal per DPCON.
+ */
+#define MCP_RES_NUM (1u)
+#define MCP_RID_OFF (0u)
+#define MCP_RID(rid) ((rid) + MCP_RID_OFF)
+ /* --- */
+ { DPAA2_DEV_MCP, MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ /* --- */
+ RESOURCE_SPEC_END
+};
+
+static int dpaa2_con_detach(device_t dev);
+
+/*
+ * Device interface.
+ */
+
+static int
+dpaa2_con_probe(device_t dev)
+{
+	/* DPCON device will be added by the parent resource container. */
+ device_set_desc(dev, "DPAA2 Concentrator");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+dpaa2_con_detach(device_t dev)
+{
+ device_t child = dev;
+ struct dpaa2_con_softc *sc = device_get_softc(dev);
+
+ DPAA2_CMD_CON_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->con_token));
+ DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token));
+ dpaa2_mcp_free_command(sc->cmd);
+
+ sc->cmd = NULL;
+ sc->con_token = 0;
+ sc->rc_token = 0;
+
+ return (0);
+}
+
+static int
+dpaa2_con_attach(device_t dev)
+{
+ device_t pdev = device_get_parent(dev);
+ device_t child = dev;
+ device_t mcp_dev;
+ struct dpaa2_con_softc *sc = device_get_softc(dev);
+ struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
+ struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
+ struct dpaa2_devinfo *mcp_dinfo;
+ int error;
+
+ sc->dev = dev;
+
+ error = bus_alloc_resources(sc->dev, dpaa2_con_spec, sc->res);
+ if (error) {
+ device_printf(dev, "%s: failed to allocate resources: "
+ "error=%d\n", __func__, error);
+ return (ENXIO);
+ }
+
+ /* Obtain MC portal. */
+ mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]);
+ mcp_dinfo = device_get_ivars(mcp_dev);
+ dinfo->portal = mcp_dinfo->portal;
+
+ /* Allocate a command to send to MC hardware. */
+ error = dpaa2_mcp_init_command(&sc->cmd, DPAA2_CMD_DEF);
+ if (error) {
+ device_printf(dev, "Failed to allocate dpaa2_cmd: error=%d\n",
+ error);
+ goto err_exit;
+ }
+
+ /* Open resource container and DPCON object. */
+ error = DPAA2_CMD_RC_OPEN(dev, child, sc->cmd, rcinfo->id,
+ &sc->rc_token);
+ if (error) {
+ device_printf(dev, "Failed to open DPRC: error=%d\n", error);
+ goto err_free_cmd;
+ }
+ error = DPAA2_CMD_CON_OPEN(dev, child, sc->cmd, dinfo->id,
+ &sc->con_token);
+ if (error) {
+ device_printf(dev, "Failed to open DPCON: id=%d, error=%d\n",
+ dinfo->id, error);
+ goto err_close_rc;
+ }
+
+ /* Prepare DPCON object. */
+ error = DPAA2_CMD_CON_RESET(dev, child, sc->cmd);
+ if (error) {
+ device_printf(dev, "Failed to reset DPCON: id=%d, error=%d\n",
+ dinfo->id, error);
+ goto err_close_con;
+ }
+ error = DPAA2_CMD_CON_GET_ATTRIBUTES(dev, child, sc->cmd, &sc->attr);
+ if (error) {
+ device_printf(dev, "Failed to get DPCON attributes: id=%d, "
+ "error=%d\n", dinfo->id, error);
+ goto err_close_con;
+ }
+
+ /* TODO: Enable debug output via sysctl (to reduce output). */
+ if (bootverbose)
+ device_printf(dev, "chan_id=%d, priorities=%d\n",
+ sc->attr.chan_id, sc->attr.prior_num);
+
+ return (0);
+
+ err_close_con:
+ DPAA2_CMD_CON_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->con_token));
+ err_close_rc:
+ DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token));
+ err_free_cmd:
+ dpaa2_mcp_free_command(sc->cmd);
+ err_exit:
+ return (ENXIO);
+}
+
+static device_method_t dpaa2_con_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, dpaa2_con_probe),
+ DEVMETHOD(device_attach, dpaa2_con_attach),
+ DEVMETHOD(device_detach, dpaa2_con_detach),
+
+ DEVMETHOD_END
+};
+
+static driver_t dpaa2_con_driver = {
+ "dpaa2_con",
+ dpaa2_con_methods,
+ sizeof(struct dpaa2_con_softc),
+};
+
+DRIVER_MODULE(dpaa2_con, dpaa2_rc, dpaa2_con_driver, 0, 0);
diff --git a/sys/dev/dpaa2/dpaa2_con.h b/sys/dev/dpaa2/dpaa2_con.h
new file mode 100644
index 000000000000..82fd50f4eaed
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_con.h
@@ -0,0 +1,70 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_CON_H
+#define _DPAA2_CON_H
+
+#include <sys/rman.h>
+#include <sys/bus.h>
+#include <sys/queue.h>
+
+#include "dpaa2_types.h"
+#include "dpaa2_mcp.h"
+
+/* Maximum resources per DPCON: 1 DPMCP. */
+#define DPAA2_CON_MAX_RESOURCES 1
+
+/**
+ * @brief Attributes of the DPCON object.
+ *
+ * id: DPCON object ID.
+ * chan_id: QBMan channel ID to be used for dequeue operations.
+ * prior_num: Number of priorities for the DPCON channel (1-8).
+ */
+struct dpaa2_con_attr {
+ uint32_t id;
+ uint16_t chan_id;
+ uint8_t prior_num;
+};
+
+/**
+ * @brief Software context for the DPAA2 Concentrator driver.
+ */
+struct dpaa2_con_softc {
+ device_t dev;
+ struct resource *res[DPAA2_CON_MAX_RESOURCES];
+ struct dpaa2_con_attr attr;
+
+ /* Help to send commands to MC. */
+ struct dpaa2_cmd *cmd;
+ uint16_t rc_token;
+ uint16_t con_token;
+};
+
+extern struct resource_spec dpaa2_con_spec[];
+
+#endif /* _DPAA2_CON_H */
diff --git a/sys/dev/dpaa2/dpaa2_io.c b/sys/dev/dpaa2/dpaa2_io.c
new file mode 100644
index 000000000000..e2b7992bfdb6
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_io.c
@@ -0,0 +1,570 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * QBMan command interface and the DPAA2 I/O (DPIO) driver.
+ *
+ * The DPIO object allows configuration of the QBMan software portal with
+ * optional notification capabilities.
+ *
+ * Software portals are used by the driver to communicate with the QBMan. The
+ * DPIO object's main purpose is to enable the driver to perform I/O (enqueue
+ * and dequeue operations, as well as buffer release and acquire operations)
+ * using QBMan.
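+ *
+ * A minimal sketch of the enqueue path through a software portal; "swp",
+ * "fqid", a frame descriptor "fd" and "flags" are assumed to be prepared
+ * by the caller (the network interface driver, for example):
+ *
+ *	struct dpaa2_eq_desc ed;
+ *
+ *	memset(&ed, 0, sizeof(ed));
+ *	dpaa2_swp_set_ed_norp(&ed, false);	(no order restoration point)
+ *	dpaa2_swp_set_ed_fq(&ed, fqid);		(enqueue using a single FQID)
+ *	error = dpaa2_swp_enq_mult(swp, &ed, &fd, &flags, 1);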
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/_cpuset.h>
+#include <sys/cpuset.h>
+#include <sys/taskqueue.h>
+#include <sys/smp.h>
+
+#include <vm/vm.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/pci/pcivar.h>
+
+#include "pcib_if.h"
+#include "pci_if.h"
+
+#include "dpaa2_mc.h"
+#include "dpaa2_mcp.h"
+#include "dpaa2_swp.h"
+#include "dpaa2_swp_if.h"
+#include "dpaa2_cmd_if.h"
+#include "dpaa2_io.h"
+#include "dpaa2_ni.h"
+
+#define DPIO_IRQ_INDEX 0 /* index of the only DPIO IRQ */
+#define DPIO_POLL_MAX 32
+
+/*
+ * Memory:
+ * 0: cache-enabled part of the QBMan software portal.
+ * 1: cache-inhibited part of the QBMan software portal.
+ * 2: control registers of the QBMan software portal?
+ *
+ * Note that MSIs should be allocated separately using the pseudo-PCI interface.
+ */
+struct resource_spec dpaa2_io_spec[] = {
+ /*
+ * System Memory resources.
+ */
+#define MEM_RES_NUM (3u)
+#define MEM_RID_OFF (0u)
+#define MEM_RID(rid) ((rid) + MEM_RID_OFF)
+ { SYS_RES_MEMORY, MEM_RID(0), RF_ACTIVE | RF_UNMAPPED },
+ { SYS_RES_MEMORY, MEM_RID(1), RF_ACTIVE | RF_UNMAPPED },
+ { SYS_RES_MEMORY, MEM_RID(2), RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL },
+ /*
+ * DPMCP resources.
+ *
+ * NOTE: MC command portals (MCPs) are used to send commands to, and
+ * receive responses from, the MC firmware. One portal per DPIO.
+ */
+#define MCP_RES_NUM (1u)
+#define MCP_RID_OFF (MEM_RID_OFF + MEM_RES_NUM)
+#define MCP_RID(rid) ((rid) + MCP_RID_OFF)
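+	/* E.g. MCP_RID(0) == 3: the DPMCP rid follows the three memory rids. */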
+ /* --- */
+ { DPAA2_DEV_MCP, MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ /* --- */
+ RESOURCE_SPEC_END
+};
+
+/* Configuration routines. */
+static int dpaa2_io_setup_irqs(device_t dev);
+static int dpaa2_io_release_irqs(device_t dev);
+static int dpaa2_io_setup_msi(struct dpaa2_io_softc *sc);
+static int dpaa2_io_release_msi(struct dpaa2_io_softc *sc);
+
+/* Interrupt handlers */
+static void dpaa2_io_intr(void *arg);
+
+static int
+dpaa2_io_probe(device_t dev)
+{
+	/* The DPIO device is added by the parent resource container. */
+ device_set_desc(dev, "DPAA2 I/O");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+dpaa2_io_detach(device_t dev)
+{
+ device_t child = dev;
+ struct dpaa2_io_softc *sc = device_get_softc(dev);
+ struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
+ int error;
+
+ /* Tear down interrupt handler and release IRQ resources. */
+ dpaa2_io_release_irqs(dev);
+
+ /* Free software portal helper object. */
+ dpaa2_swp_free_portal(sc->swp);
+
+ /* Disable DPIO object. */
+ error = DPAA2_CMD_IO_DISABLE(dev, child, dpaa2_mcp_tk(sc->cmd,
+ sc->io_token));
+ if (error && bootverbose)
+ device_printf(dev, "%s: failed to disable DPIO: id=%d, "
+ "error=%d\n", __func__, dinfo->id, error);
+
+ /* Close control sessions with the DPAA2 objects. */
+ DPAA2_CMD_IO_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->io_token));
+ DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token));
+
+ /* Free pre-allocated MC command. */
+ dpaa2_mcp_free_command(sc->cmd);
+ sc->cmd = NULL;
+ sc->io_token = 0;
+ sc->rc_token = 0;
+
+ /* Unmap memory resources of the portal. */
+ for (int i = 0; i < MEM_RES_NUM; i++) {
+ if (sc->res[MEM_RID(i)] == NULL)
+ continue;
+ error = bus_unmap_resource(sc->dev, SYS_RES_MEMORY,
+ sc->res[MEM_RID(i)], &sc->map[MEM_RID(i)]);
+ if (error && bootverbose)
+ device_printf(dev, "%s: failed to unmap memory "
+ "resource: rid=%d, error=%d\n", __func__, MEM_RID(i),
+ error);
+ }
+
+ /* Release allocated resources. */
+ bus_release_resources(dev, dpaa2_io_spec, sc->res);
+
+ return (0);
+}
+
+static int
+dpaa2_io_attach(device_t dev)
+{
+ device_t pdev = device_get_parent(dev);
+ device_t child = dev;
+ device_t mcp_dev;
+ struct dpaa2_io_softc *sc = device_get_softc(dev);
+ struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
+ struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
+ struct dpaa2_devinfo *mcp_dinfo;
+ struct resource_map_request req;
+ struct {
+ vm_memattr_t memattr;
+ char *label;
+ } map_args[MEM_RES_NUM] = {
+ { VM_MEMATTR_WRITE_BACK, "cache-enabled part" },
+ { VM_MEMATTR_DEVICE, "cache-inhibited part" },
+ { VM_MEMATTR_DEVICE, "control registers" }
+ };
+ int error;
+
+ sc->dev = dev;
+ sc->swp = NULL;
+ sc->cmd = NULL;
+ sc->intr = NULL;
+ sc->irq_resource = NULL;
+
+ /* Allocate resources. */
+ error = bus_alloc_resources(sc->dev, dpaa2_io_spec, sc->res);
+ if (error) {
+ device_printf(dev, "%s: failed to allocate resources: "
+ "error=%d\n", __func__, error);
+ return (ENXIO);
+ }
+
+ /* Set allocated MC portal up. */
+ mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]);
+ mcp_dinfo = device_get_ivars(mcp_dev);
+ dinfo->portal = mcp_dinfo->portal;
+
+ /* Map memory resources of the portal. */
+ for (int i = 0; i < MEM_RES_NUM; i++) {
+ if (sc->res[MEM_RID(i)] == NULL)
+ continue;
+
+ resource_init_map_request(&req);
+ req.memattr = map_args[i].memattr;
+ error = bus_map_resource(sc->dev, SYS_RES_MEMORY,
+ sc->res[MEM_RID(i)], &req, &sc->map[MEM_RID(i)]);
+ if (error) {
+ device_printf(dev, "%s: failed to map %s: error=%d\n",
+ __func__, map_args[i].label, error);
+ goto err_exit;
+ }
+ }
+
+ /* Allocate a command to send to the MC hardware. */
+ error = dpaa2_mcp_init_command(&sc->cmd, DPAA2_CMD_DEF);
+ if (error) {
+ device_printf(dev, "%s: failed to allocate dpaa2_cmd: "
+ "error=%d\n", __func__, error);
+ goto err_exit;
+ }
+
+ /* Prepare DPIO object. */
+ error = DPAA2_CMD_RC_OPEN(dev, child, sc->cmd, rcinfo->id,
+ &sc->rc_token);
+ if (error) {
+ device_printf(dev, "%s: failed to open DPRC: error=%d\n",
+ __func__, error);
+ goto err_exit;
+ }
+ error = DPAA2_CMD_IO_OPEN(dev, child, sc->cmd, dinfo->id, &sc->io_token);
+ if (error) {
+ device_printf(dev, "%s: failed to open DPIO: id=%d, error=%d\n",
+ __func__, dinfo->id, error);
+ goto err_exit;
+ }
+ error = DPAA2_CMD_IO_RESET(dev, child, sc->cmd);
+ if (error) {
+ device_printf(dev, "%s: failed to reset DPIO: id=%d, error=%d\n",
+ __func__, dinfo->id, error);
+ goto err_exit;
+ }
+ error = DPAA2_CMD_IO_GET_ATTRIBUTES(dev, child, sc->cmd, &sc->attr);
+ if (error) {
+ device_printf(dev, "%s: failed to get DPIO attributes: id=%d, "
+ "error=%d\n", __func__, dinfo->id, error);
+ goto err_exit;
+ }
+ error = DPAA2_CMD_IO_ENABLE(dev, child, sc->cmd);
+ if (error) {
+ device_printf(dev, "%s: failed to enable DPIO: id=%d, "
+ "error=%d\n", __func__, dinfo->id, error);
+ goto err_exit;
+ }
+
+ /* Prepare descriptor of the QBMan software portal. */
+ sc->swp_desc.dpio_dev = dev;
+ sc->swp_desc.swp_version = sc->attr.swp_version;
+ sc->swp_desc.swp_clk = sc->attr.swp_clk;
+ sc->swp_desc.swp_id = sc->attr.swp_id;
+ sc->swp_desc.has_notif = sc->attr.priors_num ? true : false;
+ sc->swp_desc.has_8prio = sc->attr.priors_num == 8u ? true : false;
+
+ sc->swp_desc.cena_res = sc->res[0];
+ sc->swp_desc.cena_map = &sc->map[0];
+ sc->swp_desc.cinh_res = sc->res[1];
+ sc->swp_desc.cinh_map = &sc->map[1];
+
+	/*
+	 * Compute how many nanoseconds it takes for 256 QBMan clock cycles to
+	 * elapse: the interrupt timeout period register has to be specified in
+	 * QBMan clock cycles in increments of 256.
+	 */
+ sc->swp_desc.swp_cycles_ratio = 256000 /
+ (sc->swp_desc.swp_clk / 1000000);
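+	/*
+	 * For example, with a (hypothetical) 700 MHz QBMan clock: swp_clk /
+	 * 1000000 = 700 cycles per us, so swp_cycles_ratio = 256000 / 700 ~
+	 * 365 ns per 256-cycle increment.
+	 */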
+
+ /* Initialize QBMan software portal. */
+ error = dpaa2_swp_init_portal(&sc->swp, &sc->swp_desc, DPAA2_SWP_DEF);
+ if (error) {
+ device_printf(dev, "%s: failed to initialize dpaa2_swp: "
+ "error=%d\n", __func__, error);
+ goto err_exit;
+ }
+
+ error = dpaa2_io_setup_irqs(dev);
+ if (error) {
+ device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
+ __func__, error);
+ goto err_exit;
+ }
+
+#if 0
+ /* TODO: Enable debug output via sysctl (to reduce output). */
+ if (bootverbose)
+ device_printf(dev, "dpio_id=%d, swp_id=%d, chan_mode=%s, "
+ "notif_priors=%d, swp_version=0x%x\n",
+ sc->attr.id, sc->attr.swp_id,
+ sc->attr.chan_mode == DPAA2_IO_LOCAL_CHANNEL
+ ? "local_channel" : "no_channel", sc->attr.priors_num,
+ sc->attr.swp_version);
+#endif
+ return (0);
+
+err_exit:
+ dpaa2_io_detach(dev);
+ return (ENXIO);
+}
+
+/**
+ * @brief Enqueue multiple frames to a frame queue using one FQID.
+ */
+static int
+dpaa2_io_enq_multiple_fq(device_t iodev, uint32_t fqid,
+ struct dpaa2_fd *fd, int frames_n)
+{
+ struct dpaa2_io_softc *sc = device_get_softc(iodev);
+ struct dpaa2_swp *swp = sc->swp;
+ struct dpaa2_eq_desc ed;
+ uint32_t flags = 0;
+
+ memset(&ed, 0, sizeof(ed));
+
+ /* Setup enqueue descriptor. */
+ dpaa2_swp_set_ed_norp(&ed, false);
+ dpaa2_swp_set_ed_fq(&ed, fqid);
+
+ return (dpaa2_swp_enq_mult(swp, &ed, fd, &flags, frames_n));
+}
+
+/**
+ * @brief Configure the channel data availability notification (CDAN)
+ * in a particular WQ channel paired with DPIO.
+ */
+static int
+dpaa2_io_conf_wq_channel(device_t iodev, struct dpaa2_io_notif_ctx *ctx)
+{
+ struct dpaa2_io_softc *sc = device_get_softc(iodev);
+
+ /* Enable generation of the CDAN notifications. */
+ if (ctx->cdan_en)
+ return (dpaa2_swp_conf_wq_channel(sc->swp, ctx->fq_chan_id,
+ DPAA2_WQCHAN_WE_EN | DPAA2_WQCHAN_WE_CTX, ctx->cdan_en,
+ ctx->qman_ctx));
+
+ return (0);
+}
+
+/**
+ * @brief Query current configuration/state of the buffer pool.
+ */
+static int
+dpaa2_io_query_bp(device_t iodev, uint16_t bpid, struct dpaa2_bp_conf *conf)
+{
+ struct dpaa2_io_softc *sc = device_get_softc(iodev);
+
+ return (dpaa2_swp_query_bp(sc->swp, bpid, conf));
+}
+
+/**
+ * @brief Release one or more buffer pointers to the QBMan buffer pool.
+ */
+static int
+dpaa2_io_release_bufs(device_t iodev, uint16_t bpid, bus_addr_t *buf,
+ uint32_t buf_num)
+{
+ struct dpaa2_io_softc *sc = device_get_softc(iodev);
+
+ return (dpaa2_swp_release_bufs(sc->swp, bpid, buf, buf_num));
+}
+
+/**
+ * @brief Configure the DPIO object to generate interrupts.
+ */
+static int
+dpaa2_io_setup_irqs(device_t dev)
+{
+ struct dpaa2_io_softc *sc = device_get_softc(dev);
+ int error;
+
+ /*
+ * Setup interrupts generated by the software portal.
+ */
+ dpaa2_swp_set_intr_trigger(sc->swp, DPAA2_SWP_INTR_DQRI);
+ dpaa2_swp_clear_intr_status(sc->swp, 0xFFFFFFFFu);
+
+ /* Configure IRQs. */
+ error = dpaa2_io_setup_msi(sc);
+ if (error) {
+ device_printf(dev, "%s: failed to allocate MSI: error=%d\n",
+ __func__, error);
+ return (error);
+ }
+ if ((sc->irq_resource = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+ &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
+ device_printf(dev, "%s: failed to allocate IRQ resource\n",
+ __func__);
+ return (ENXIO);
+ }
+ if (bus_setup_intr(dev, sc->irq_resource, INTR_TYPE_NET | INTR_MPSAFE |
+ INTR_ENTROPY, NULL, dpaa2_io_intr, sc, &sc->intr)) {
+ device_printf(dev, "%s: failed to setup IRQ resource\n",
+ __func__);
+ return (ENXIO);
+ }
+
+ /* Wrap DPIO ID around number of CPUs. */
+ bus_bind_intr(dev, sc->irq_resource, sc->attr.id % mp_ncpus);
+
+ /*
+ * Setup and enable Static Dequeue Command to receive CDANs from
+ * channel 0.
+ */
+ if (sc->swp_desc.has_notif)
+ dpaa2_swp_set_push_dequeue(sc->swp, 0, true);
+
+ return (0);
+}
+
+static int
+dpaa2_io_release_irqs(device_t dev)
+{
+ struct dpaa2_io_softc *sc = device_get_softc(dev);
+
+ /* Disable receiving CDANs from channel 0. */
+ if (sc->swp_desc.has_notif)
+ dpaa2_swp_set_push_dequeue(sc->swp, 0, false);
+
+ /* Release IRQ resources. */
+ if (sc->intr != NULL)
+ bus_teardown_intr(dev, sc->irq_resource, &sc->intr);
+ if (sc->irq_resource != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid[0],
+ sc->irq_resource);
+
+	(void)dpaa2_io_release_msi(sc);
+
+ /* Configure software portal to stop generating interrupts. */
+ dpaa2_swp_set_intr_trigger(sc->swp, 0);
+ dpaa2_swp_clear_intr_status(sc->swp, 0xFFFFFFFFu);
+
+ return (0);
+}
+
+/**
+ * @brief Allocate MSI interrupts for this DPAA2 I/O object.
+ */
+static int
+dpaa2_io_setup_msi(struct dpaa2_io_softc *sc)
+{
+ int val;
+
+ val = pci_msi_count(sc->dev);
+ if (val < DPAA2_IO_MSI_COUNT)
+ device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
+ DPAA2_IO_MSI_COUNT);
+ val = MIN(val, DPAA2_IO_MSI_COUNT);
+
+ if (pci_alloc_msi(sc->dev, &val) != 0)
+ return (EINVAL);
+
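+	/* MSI vectors are assigned rids starting at 1 (rid 0 is INTx). */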
+ for (int i = 0; i < val; i++)
+ sc->irq_rid[i] = i + 1;
+
+ return (0);
+}
+
+static int
+dpaa2_io_release_msi(struct dpaa2_io_softc *sc)
+{
+ int error;
+
+ error = pci_release_msi(sc->dev);
+ if (error) {
+		device_printf(sc->dev, "%s: failed to release MSI: error=%d\n",
+ __func__, error);
+ return (error);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief DPAA2 I/O interrupt handler.
+ */
+static void
+dpaa2_io_intr(void *arg)
+{
+ struct dpaa2_io_softc *sc = (struct dpaa2_io_softc *) arg;
+ struct dpaa2_io_notif_ctx *ctx[DPIO_POLL_MAX];
+ struct dpaa2_dq dq;
+ uint32_t idx, status;
+ uint16_t flags;
+ int rc, cdan_n = 0;
+
+ status = dpaa2_swp_read_intr_status(sc->swp);
+ if (status == 0) {
+ return;
+ }
+
+ DPAA2_SWP_LOCK(sc->swp, &flags);
+ if (flags & DPAA2_SWP_DESTROYED) {
+ /* Terminate operation if portal is destroyed. */
+ DPAA2_SWP_UNLOCK(sc->swp);
+ return;
+ }
+
+ for (int i = 0; i < DPIO_POLL_MAX; i++) {
+ rc = dpaa2_swp_dqrr_next_locked(sc->swp, &dq, &idx);
+ if (rc) {
+ break;
+ }
+
+ if ((dq.common.verb & DPAA2_DQRR_RESULT_MASK) ==
+ DPAA2_DQRR_RESULT_CDAN) {
+ ctx[cdan_n++] = (struct dpaa2_io_notif_ctx *) dq.scn.ctx;
+ } else {
+ /* TODO: Report unknown DQRR entry. */
+ }
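+		/*
+		 * Writing the ring index to the DCAP register consumes the
+		 * DQRR entry and returns it to the hardware.
+		 */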
+ dpaa2_swp_write_reg(sc->swp, DPAA2_SWP_CINH_DCAP, idx);
+ }
+ DPAA2_SWP_UNLOCK(sc->swp);
+
+ for (int i = 0; i < cdan_n; i++) {
+ ctx[i]->poll(ctx[i]->channel);
+ }
+
+	/* Re-enable software portal interrupts. */
+ dpaa2_swp_clear_intr_status(sc->swp, status);
+ dpaa2_swp_write_reg(sc->swp, DPAA2_SWP_CINH_IIR, 0);
+}
+
+static device_method_t dpaa2_io_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, dpaa2_io_probe),
+ DEVMETHOD(device_attach, dpaa2_io_attach),
+ DEVMETHOD(device_detach, dpaa2_io_detach),
+
+ /* QBMan software portal interface */
+ DEVMETHOD(dpaa2_swp_enq_multiple_fq, dpaa2_io_enq_multiple_fq),
+ DEVMETHOD(dpaa2_swp_conf_wq_channel, dpaa2_io_conf_wq_channel),
+ DEVMETHOD(dpaa2_swp_query_bp, dpaa2_io_query_bp),
+ DEVMETHOD(dpaa2_swp_release_bufs, dpaa2_io_release_bufs),
+
+ DEVMETHOD_END
+};
+
+static driver_t dpaa2_io_driver = {
+ "dpaa2_io",
+ dpaa2_io_methods,
+ sizeof(struct dpaa2_io_softc),
+};
+
+DRIVER_MODULE(dpaa2_io, dpaa2_rc, dpaa2_io_driver, 0, 0);
diff --git a/sys/dev/dpaa2/dpaa2_io.h b/sys/dev/dpaa2/dpaa2_io.h
new file mode 100644
index 000000000000..d02dab8144df
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_io.h
@@ -0,0 +1,110 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_IO_H
+#define _DPAA2_IO_H
+
+#include <sys/rman.h>
+#include <sys/bus.h>
+#include <sys/queue.h>
+
+#include "dpaa2_types.h"
+#include "dpaa2_mcp.h"
+
+/* Maximum resources per DPIO: 3 SYS_MEM + 1 DPMCP. */
+#define DPAA2_IO_MAX_RESOURCES 4
+/* Maximum number of MSIs supported by the DPIO objects. */
+#define DPAA2_IO_MSI_COUNT 1
+
+enum dpaa2_io_chan_mode {
+ DPAA2_IO_NO_CHANNEL,
+ DPAA2_IO_LOCAL_CHANNEL
+};
+
+/**
+ * @brief Attributes of the DPIO object.
+ *
+ * swp_ce_paddr: Physical address of the cache-enabled area.
+ * swp_ci_paddr: Physical address of the cache-inhibited area.
+ * swp_version: Hardware IP version of the software portal.
+ * swp_clk: QBMAN clock frequency value in Hz.
+ * id: DPIO object ID.
+ * swp_id: Software portal ID.
+ * priors_num: Number of priorities for the notification channel (1-8);
+ * relevant only if channel mode is "local channel".
+ * chan_mode: Notification channel mode.
+ */
+struct dpaa2_io_attr {
+ uint64_t swp_ce_paddr;
+ uint64_t swp_ci_paddr;
+ uint32_t swp_version;
+ uint32_t swp_clk;
+ uint32_t id;
+ uint16_t swp_id;
+ uint8_t priors_num;
+ enum dpaa2_io_chan_mode chan_mode;
+};
+
+/**
+ * @brief Context used by DPIO to configure data availability notifications
+ * (CDAN) on a particular WQ channel.
+ */
+struct dpaa2_io_notif_ctx {
+ void (*poll)(void *);
+
+ device_t io_dev;
+ void *channel;
+ uint64_t qman_ctx;
+ uint16_t fq_chan_id;
+ bool cdan_en;
+};
+
+/**
+ * @brief Software context for the DPAA2 I/O driver.
+ */
+struct dpaa2_io_softc {
+ device_t dev;
+ struct dpaa2_swp_desc swp_desc;
+ struct dpaa2_swp *swp;
+ struct dpaa2_io_attr attr;
+
+ /* Help to send commands to MC. */
+ struct dpaa2_cmd *cmd;
+ uint16_t rc_token;
+ uint16_t io_token;
+
+ struct resource *res[DPAA2_IO_MAX_RESOURCES];
+ struct resource_map map[DPAA2_IO_MAX_RESOURCES];
+
+ int irq_rid[DPAA2_IO_MSI_COUNT];
+ struct resource *irq_resource;
+ void *intr; /* interrupt handle */
+};
+
+extern struct resource_spec dpaa2_io_spec[];
+
+#endif /* _DPAA2_IO_H */
diff --git a/sys/dev/dpaa2/dpaa2_mac.c b/sys/dev/dpaa2/dpaa2_mac.c
new file mode 100644
index 000000000000..d6e381c0dd15
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_mac.c
@@ -0,0 +1,376 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * The DPAA2 MAC driver.
+ *
+ * For every DPAA2 MAC there is an MC object named DPMAC, which handles MDIO
+ * and link state updates. The DPMAC virtualizes the MDIO interface, so each
+ * PHY driver may see a private interface (removing the need for
+ * synchronization in GPP on the multiplexed MDIO hardware).
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+
+#include <vm/vm.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/pci/pcivar.h>
+
+#include "pcib_if.h"
+#include "pci_if.h"
+
+#include "dpaa2_mc.h"
+#include "dpaa2_ni.h"
+#include "dpaa2_mcp.h"
+#include "dpaa2_swp.h"
+#include "dpaa2_swp_if.h"
+#include "dpaa2_cmd_if.h"
+
+/* Index of the only DPMAC IRQ. */
+#define DPMAC_IRQ_INDEX 0
+
+/* DPMAC IRQ statuses. */
+#define DPMAC_IRQ_LINK_CFG_REQ 0x00000001 /* change in requested link config. */
+#define DPMAC_IRQ_LINK_CHANGED 0x00000002 /* link state changed */
+#define DPMAC_IRQ_LINK_UP_REQ 0x00000004 /* link up request */
+#define DPMAC_IRQ_LINK_DOWN_REQ 0x00000008 /* link down request */
+#define DPMAC_IRQ_EP_CHANGED 0x00000010 /* DPAA2 endpoint dis/connected */
+
+/* DPAA2 MAC resource specification. */
+struct resource_spec dpaa2_mac_spec[] = {
+ /*
+ * DPMCP resources.
+ *
+ * NOTE: MC command portals (MCPs) are used to send commands to, and
+ * receive responses from, the MC firmware. One portal per DPMAC.
+ */
+#define MCP_RES_NUM (1u)
+#define MCP_RID_OFF (0u)
+#define MCP_RID(rid) ((rid) + MCP_RID_OFF)
+ /* --- */
+ { DPAA2_DEV_MCP, MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ /* --- */
+ RESOURCE_SPEC_END
+};
+
+/* Interrupt configuration routines. */
+static int dpaa2_mac_setup_irq(device_t);
+static int dpaa2_mac_setup_msi(struct dpaa2_mac_softc *);
+
+/* Subroutines to get text representation. */
+static const char *dpaa2_mac_ethif_to_str(enum dpaa2_mac_eth_if);
+static const char *dpaa2_mac_link_type_to_str(enum dpaa2_mac_link_type);
+
+/* Interrupt handlers */
+static void dpaa2_mac_intr(void *arg);
+
+static int
+dpaa2_mac_probe(device_t dev)
+{
+	/* The DPMAC device is added by the parent resource container. */
+ device_set_desc(dev, "DPAA2 MAC");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+dpaa2_mac_attach(device_t dev)
+{
+ device_t pdev = device_get_parent(dev);
+ device_t child = dev;
+ device_t mcp_dev;
+ struct dpaa2_mac_softc *sc = device_get_softc(dev);
+ struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
+ struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
+ struct dpaa2_devinfo *mcp_dinfo;
+ int error;
+
+ sc->dev = dev;
+
+ memset(sc->addr, 0, ETHER_ADDR_LEN);
+
+ error = bus_alloc_resources(sc->dev, dpaa2_mac_spec, sc->res);
+ if (error) {
+ device_printf(dev, "%s: failed to allocate resources: "
+ "error=%d\n", __func__, error);
+ return (ENXIO);
+ }
+
+ /* Obtain MC portal. */
+ mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]);
+ mcp_dinfo = device_get_ivars(mcp_dev);
+ dinfo->portal = mcp_dinfo->portal;
+
+ /* Allocate a command to send to MC hardware. */
+ error = dpaa2_mcp_init_command(&sc->cmd, DPAA2_CMD_DEF);
+ if (error) {
+ device_printf(dev, "Failed to allocate dpaa2_cmd: error=%d\n",
+ error);
+ goto err_exit;
+ }
+
+ /* Open resource container and DPMAC object. */
+ error = DPAA2_CMD_RC_OPEN(dev, child, sc->cmd, rcinfo->id,
+ &sc->rc_token);
+ if (error) {
+ device_printf(dev, "Failed to open DPRC: error=%d\n", error);
+ goto err_free_cmd;
+ }
+ error = DPAA2_CMD_MAC_OPEN(dev, child, sc->cmd, dinfo->id,
+ &sc->mac_token);
+ if (error) {
+ device_printf(dev, "Failed to open DPMAC: id=%d, error=%d\n",
+ dinfo->id, error);
+ goto err_close_rc;
+ }
+
+ error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child, sc->cmd, &sc->attr);
+ if (error) {
+ device_printf(dev, "Failed to get DPMAC attributes: id=%d, "
+ "error=%d\n", dinfo->id, error);
+ goto err_close_mac;
+ }
+ error = DPAA2_CMD_MAC_GET_ADDR(dev, child, sc->cmd, sc->addr);
+ if (error)
+ device_printf(dev, "Failed to get physical address: error=%d\n",
+ error);
+ /*
+ * TODO: Enable debug output via sysctl.
+ */
+ if (bootverbose) {
+ device_printf(dev, "ether %6D\n", sc->addr, ":");
+ device_printf(dev, "max_rate=%d, eth_if=%s, link_type=%s\n",
+ sc->attr.max_rate,
+ dpaa2_mac_ethif_to_str(sc->attr.eth_if),
+ dpaa2_mac_link_type_to_str(sc->attr.link_type));
+ }
+
+ error = dpaa2_mac_setup_irq(dev);
+ if (error) {
+ device_printf(dev, "Failed to setup IRQs: error=%d\n", error);
+ goto err_close_mac;
+ }
+
+ return (0);
+
+err_close_mac:
+ DPAA2_CMD_MAC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->mac_token));
+err_close_rc:
+ DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token));
+err_free_cmd:
+ dpaa2_mcp_free_command(sc->cmd);
+err_exit:
+ return (ENXIO);
+}
+
+static int
+dpaa2_mac_detach(device_t dev)
+{
+ device_t child = dev;
+ struct dpaa2_mac_softc *sc = device_get_softc(dev);
+
+ DPAA2_CMD_MAC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->mac_token));
+ DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token));
+ dpaa2_mcp_free_command(sc->cmd);
+
+ sc->cmd = NULL;
+ sc->rc_token = 0;
+ sc->mac_token = 0;
+
+ return (0);
+}
+
+/**
+ * @brief Configure DPMAC object to generate interrupts.
+ */
+static int
+dpaa2_mac_setup_irq(device_t dev)
+{
+ device_t child = dev;
+ struct dpaa2_mac_softc *sc = device_get_softc(dev);
+ struct dpaa2_cmd *cmd = sc->cmd;
+ uint16_t mac_token = sc->mac_token;
+ uint32_t irq_mask;
+ int error;
+
+ /* Configure IRQs. */
+ error = dpaa2_mac_setup_msi(sc);
+ if (error) {
+ device_printf(dev, "Failed to allocate MSI\n");
+ return (error);
+ }
+ if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+ &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
+ device_printf(dev, "Failed to allocate IRQ resource\n");
+ return (ENXIO);
+ }
+ if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
+ NULL, dpaa2_mac_intr, sc, &sc->intr)) {
+ device_printf(dev, "Failed to setup IRQ resource\n");
+ return (ENXIO);
+ }
+
+	/* Configure DPMAC to generate interrupts. */
+ irq_mask =
+ DPMAC_IRQ_LINK_CFG_REQ |
+ DPMAC_IRQ_LINK_CHANGED |
+ DPMAC_IRQ_LINK_UP_REQ |
+ DPMAC_IRQ_LINK_DOWN_REQ |
+ DPMAC_IRQ_EP_CHANGED;
+ error = DPAA2_CMD_MAC_SET_IRQ_MASK(dev, child, dpaa2_mcp_tk(cmd,
+ mac_token), DPMAC_IRQ_INDEX, irq_mask);
+ if (error) {
+ device_printf(dev, "Failed to set IRQ mask\n");
+ return (error);
+ }
+
+ /* Enable IRQ. */
+ error = DPAA2_CMD_MAC_SET_IRQ_ENABLE(dev, child, cmd, DPMAC_IRQ_INDEX,
+ true);
+ if (error) {
+ device_printf(dev, "Failed to enable IRQ\n");
+ return (error);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Allocate MSI interrupts for DPMAC.
+ */
+static int
+dpaa2_mac_setup_msi(struct dpaa2_mac_softc *sc)
+{
+ int val;
+
+ val = pci_msi_count(sc->dev);
+ if (val < DPAA2_MAC_MSI_COUNT)
+ device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
+ DPAA2_MAC_MSI_COUNT);
+ val = MIN(val, DPAA2_MAC_MSI_COUNT);
+
+ if (pci_alloc_msi(sc->dev, &val) != 0)
+ return (EINVAL);
+
+ for (int i = 0; i < val; i++)
+ sc->irq_rid[i] = i + 1;
+
+ return (0);
+}
+
+static void
+dpaa2_mac_intr(void *arg)
+{
+ struct dpaa2_mac_softc *sc = (struct dpaa2_mac_softc *) arg;
+ device_t child = sc->dev;
+ uint32_t status = ~0u; /* clear all IRQ status bits */
+ int error;
+
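+	/*
+	 * The IRQ causes are fetched (and, per the ~0u mask above, cleared),
+	 * but not otherwise handled here yet.
+	 */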
+ error = DPAA2_CMD_MAC_GET_IRQ_STATUS(sc->dev, child,
+ dpaa2_mcp_tk(sc->cmd, sc->mac_token), DPMAC_IRQ_INDEX, &status);
+ if (error)
+ device_printf(sc->dev, "%s: failed to obtain IRQ status: "
+ "error=%d\n", __func__, error);
+}
+
+static const char *
+dpaa2_mac_ethif_to_str(enum dpaa2_mac_eth_if eth_if)
+{
+ switch (eth_if) {
+ case DPAA2_MAC_ETH_IF_MII:
+ return ("MII");
+ case DPAA2_MAC_ETH_IF_RMII:
+ return ("RMII");
+ case DPAA2_MAC_ETH_IF_SMII:
+ return ("SMII");
+ case DPAA2_MAC_ETH_IF_GMII:
+ return ("GMII");
+ case DPAA2_MAC_ETH_IF_RGMII:
+ return ("RGMII");
+ case DPAA2_MAC_ETH_IF_SGMII:
+ return ("SGMII");
+ case DPAA2_MAC_ETH_IF_QSGMII:
+ return ("QSGMII");
+ case DPAA2_MAC_ETH_IF_XAUI:
+ return ("XAUI");
+ case DPAA2_MAC_ETH_IF_XFI:
+ return ("XFI");
+ case DPAA2_MAC_ETH_IF_CAUI:
+ return ("CAUI");
+ case DPAA2_MAC_ETH_IF_1000BASEX:
+ return ("1000BASE-X");
+ case DPAA2_MAC_ETH_IF_USXGMII:
+ return ("USXGMII");
+ default:
+ return ("unknown");
+ }
+}
+
+static const char *
+dpaa2_mac_link_type_to_str(enum dpaa2_mac_link_type link_type)
+{
+ switch (link_type) {
+ case DPAA2_MAC_LINK_TYPE_NONE:
+ return ("NONE");
+ case DPAA2_MAC_LINK_TYPE_FIXED:
+ return ("FIXED");
+ case DPAA2_MAC_LINK_TYPE_PHY:
+ return ("PHY");
+ case DPAA2_MAC_LINK_TYPE_BACKPLANE:
+ return ("BACKPLANE");
+ default:
+ return ("unknown");
+ }
+}
+
+static device_method_t dpaa2_mac_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, dpaa2_mac_probe),
+ DEVMETHOD(device_attach, dpaa2_mac_attach),
+ DEVMETHOD(device_detach, dpaa2_mac_detach),
+
+ DEVMETHOD_END
+};
+
+static driver_t dpaa2_mac_driver = {
+ "dpaa2_mac",
+ dpaa2_mac_methods,
+ sizeof(struct dpaa2_mac_softc),
+};
+
+DRIVER_MODULE(dpaa2_mac, dpaa2_rc, dpaa2_mac_driver, 0, 0);
diff --git a/sys/dev/dpaa2/dpaa2_mac.h b/sys/dev/dpaa2/dpaa2_mac.h
new file mode 100644
index 000000000000..cbdf2d824045
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_mac.h
@@ -0,0 +1,124 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_MAC_H
+#define _DPAA2_MAC_H
+
+#include <sys/rman.h>
+#include <sys/bus.h>
+#include <sys/queue.h>
+
+#include <net/ethernet.h>
+
+#include "dpaa2_types.h"
+#include "dpaa2_mcp.h"
+
+#define DPAA2_MAC_MAX_RESOURCES 1 /* Maximum resources per DPMAC: 1 DPMCP. */
+#define DPAA2_MAC_MSI_COUNT 1 /* MSIs per DPMAC */
+
+/* DPMAC link configuration options. */
+#define DPAA2_MAC_LINK_OPT_AUTONEG ((uint64_t) 0x01u)
+#define DPAA2_MAC_LINK_OPT_HALF_DUPLEX ((uint64_t) 0x02u)
+#define DPAA2_MAC_LINK_OPT_PAUSE ((uint64_t) 0x04u)
+#define DPAA2_MAC_LINK_OPT_ASYM_PAUSE ((uint64_t) 0x08u)
+
+enum dpaa2_mac_eth_if {
+ DPAA2_MAC_ETH_IF_MII,
+ DPAA2_MAC_ETH_IF_RMII,
+ DPAA2_MAC_ETH_IF_SMII,
+ DPAA2_MAC_ETH_IF_GMII,
+ DPAA2_MAC_ETH_IF_RGMII,
+ DPAA2_MAC_ETH_IF_SGMII,
+ DPAA2_MAC_ETH_IF_QSGMII,
+ DPAA2_MAC_ETH_IF_XAUI,
+ DPAA2_MAC_ETH_IF_XFI,
+ DPAA2_MAC_ETH_IF_CAUI,
+ DPAA2_MAC_ETH_IF_1000BASEX,
+ DPAA2_MAC_ETH_IF_USXGMII
+};
+
+enum dpaa2_mac_link_type {
+ DPAA2_MAC_LINK_TYPE_NONE,
+ DPAA2_MAC_LINK_TYPE_FIXED,
+ DPAA2_MAC_LINK_TYPE_PHY,
+ DPAA2_MAC_LINK_TYPE_BACKPLANE
+};
+
+/**
+ * @brief Attributes of the DPMAC object.
+ *
+ * id: DPMAC object ID.
+ * max_rate: Maximum supported rate (in Mbps).
+ * eth_if: Type of the Ethernet interface.
+ * link_type: Type of the link.
+ */
+struct dpaa2_mac_attr {
+ uint32_t id;
+ uint32_t max_rate;
+ enum dpaa2_mac_eth_if eth_if;
+ enum dpaa2_mac_link_type link_type;
+};
+
+/**
+ * @brief Link state of the DPMAC object.
+ */
+struct dpaa2_mac_link_state {
+ uint64_t options;
+ uint64_t supported;
+ uint64_t advert;
+ uint32_t rate;
+ bool up;
+ bool state_valid;
+};
+
+/**
+ * @brief Software context for the DPAA2 MAC driver.
+ *
+ * dev: Device associated with this software context.
+ * addr: Physical address assigned to the DPMAC object.
+ * attr: Attributes of the DPMAC object.
+ */
+struct dpaa2_mac_softc {
+ device_t dev;
+ uint8_t addr[ETHER_ADDR_LEN];
+ struct resource *res[DPAA2_MAC_MAX_RESOURCES];
+ struct dpaa2_mac_attr attr;
+
+ /* Help to send commands to MC. */
+ struct dpaa2_cmd *cmd;
+ uint16_t rc_token;
+ uint16_t mac_token;
+
+ /* Interrupts. */
+ int irq_rid[DPAA2_MAC_MSI_COUNT];
+ struct resource *irq_res;
+ void *intr; /* interrupt handle */
+};
+
+extern struct resource_spec dpaa2_mac_spec[];
+
+#endif /* _DPAA2_MAC_H */
diff --git a/sys/dev/dpaa2/dpaa2_mc.c b/sys/dev/dpaa2/dpaa2_mc.c
new file mode 100644
index 000000000000..e895d2585a3a
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_mc.c
@@ -0,0 +1,973 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * The DPAA2 Management Complex (MC) bus driver.
+ *
+ * MC is a hardware resource manager which can be found in several NXP
+ * SoCs (LX2160A, for example) and provides an access to the specialized
+ * hardware objects used in network-oriented packet processing applications.
+ */
+
+#include "opt_acpi.h"
+#include "opt_platform.h"
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/queue.h>
+
+#include <vm/vm.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#ifdef DEV_ACPI
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+#endif
+
+#ifdef FDT
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/ofw_pci.h>
+#endif
+
+#include "pcib_if.h"
+#include "pci_if.h"
+
+#include "dpaa2_mc.h"
+
+/* Macros to read/write MC registers */
+#define mcreg_read_4(_sc, _r) bus_read_4(&(_sc)->map[1], (_r))
+#define mcreg_write_4(_sc, _r, _v) bus_write_4(&(_sc)->map[1], (_r), (_v))
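+/*
+ * NOTE: map[1] is the optional window of MC control registers; the macros
+ * above may only be used once it has been mapped in dpaa2_mc_attach().
+ */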
+
+#define COMPARE_TYPE(t, v) (strncmp((v), (t), strlen((v))) == 0)
+
+#define IORT_DEVICE_NAME "MCE"
+
+/* MC Registers */
+#define MC_REG_GCR1 0x0000u
+#define MC_REG_GCR2 0x0004u /* TODO: Does it exist? */
+#define MC_REG_GSR 0x0008u
+#define MC_REG_FAPR 0x0028u
+
+/* General Control Register 1 (GCR1) */
+#define GCR1_P1_STOP 0x80000000u
+#define GCR1_P2_STOP 0x40000000u
+
+/* General Status Register (GSR) */
+#define GSR_HW_ERR(v) (((v) & 0x80000000u) >> 31)
+#define GSR_CAT_ERR(v) (((v) & 0x40000000u) >> 30)
+#define GSR_DPL_OFFSET(v) (((v) & 0x3FFFFF00u) >> 8)
+#define GSR_MCS(v) (((v) & 0xFFu) >> 0)
+
+/* Timeouts to wait for the MC status. */
+#define MC_STAT_TIMEOUT 1000u /* us */
+#define MC_STAT_ATTEMPTS 100u
+
+/**
+ * @brief Structure to describe a DPAA2 device as a managed resource.
+ */
+struct dpaa2_mc_devinfo {
+ STAILQ_ENTRY(dpaa2_mc_devinfo) link;
+ device_t dpaa2_dev;
+ uint32_t flags;
+ uint32_t owners;
+};
+
+MALLOC_DEFINE(M_DPAA2_MC, "dpaa2_mc", "DPAA2 Management Complex");
+
+static struct resource_spec dpaa2_mc_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED },
+ { SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL },
+ RESOURCE_SPEC_END
+};
+
+static u_int dpaa2_mc_get_xref(device_t, device_t);
+static u_int dpaa2_mc_map_id(device_t, device_t, uintptr_t *);
+static struct rman *dpaa2_mc_rman(device_t, int);
+
+static int dpaa2_mc_alloc_msi_impl(device_t, device_t, int, int, int *);
+static int dpaa2_mc_release_msi_impl(device_t, device_t, int, int *);
+static int dpaa2_mc_map_msi_impl(device_t, device_t, int, uint64_t *,
+ uint32_t *);
+
+/*
+ * For device interface.
+ */
+
+int
+dpaa2_mc_attach(device_t dev)
+{
+ struct dpaa2_mc_softc *sc;
+ struct resource_map_request req;
+ uint32_t val;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ sc->msi_allocated = false;
+ sc->msi_owner = NULL;
+
+ error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res);
+ if (error) {
+ device_printf(dev, "%s: failed to allocate resources\n",
+ __func__);
+ return (ENXIO);
+ }
+
+ if (sc->res[1]) {
+ resource_init_map_request(&req);
+ req.memattr = VM_MEMATTR_DEVICE;
+ error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1],
+ &req, &sc->map[1]);
+ if (error) {
+ device_printf(dev, "%s: failed to map control "
+ "registers\n", __func__);
+ dpaa2_mc_detach(dev);
+ return (ENXIO);
+ }
+
+ if (bootverbose)
+ device_printf(dev,
+ "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n",
+ mcreg_read_4(sc, MC_REG_GCR1),
+ mcreg_read_4(sc, MC_REG_GCR2),
+ mcreg_read_4(sc, MC_REG_GSR),
+ mcreg_read_4(sc, MC_REG_FAPR));
+
+ /* Reset P1_STOP and P2_STOP bits to resume MC processor. */
+ val = mcreg_read_4(sc, MC_REG_GCR1) &
+ ~(GCR1_P1_STOP | GCR1_P2_STOP);
+ mcreg_write_4(sc, MC_REG_GCR1, val);
+
+ /* Poll MC status. */
+ if (bootverbose)
+ device_printf(dev, "polling MC status...\n");
+ for (int i = 0; i < MC_STAT_ATTEMPTS; i++) {
+ val = mcreg_read_4(sc, MC_REG_GSR);
+ if (GSR_MCS(val) != 0u)
+ break;
+ DELAY(MC_STAT_TIMEOUT);
+ }
+
+ if (bootverbose)
+ device_printf(dev,
+ "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n",
+ mcreg_read_4(sc, MC_REG_GCR1),
+ mcreg_read_4(sc, MC_REG_GCR2),
+ mcreg_read_4(sc, MC_REG_GSR),
+ mcreg_read_4(sc, MC_REG_FAPR));
+ }
+
+ /* At least 64 bytes of the command portal should be available. */
+ if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) {
+ device_printf(dev, "%s: MC portal memory region too small: "
+ "%jd\n", __func__, rman_get_size(sc->res[0]));
+ dpaa2_mc_detach(dev);
+ return (ENXIO);
+ }
+
+ /* Map MC portal memory resource. */
+ resource_init_map_request(&req);
+ req.memattr = VM_MEMATTR_DEVICE;
+ error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0],
+ &req, &sc->map[0]);
+ if (error) {
+ device_printf(dev, "Failed to map MC portal memory\n");
+ dpaa2_mc_detach(dev);
+ return (ENXIO);
+ }
+
+ /* Initialize a resource manager for the DPAA2 I/O objects. */
+ sc->dpio_rman.rm_type = RMAN_ARRAY;
+ sc->dpio_rman.rm_descr = "DPAA2 DPIO objects";
+ error = rman_init(&sc->dpio_rman);
+ if (error) {
+ device_printf(dev, "Failed to initialize a resource manager for "
+ "the DPAA2 I/O objects: error=%d\n", error);
+ dpaa2_mc_detach(dev);
+ return (ENXIO);
+ }
+
+ /* Initialize a resource manager for the DPAA2 buffer pools. */
+ sc->dpbp_rman.rm_type = RMAN_ARRAY;
+ sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects";
+ error = rman_init(&sc->dpbp_rman);
+ if (error) {
+ device_printf(dev, "Failed to initialize a resource manager for "
+ "the DPAA2 buffer pools: error=%d\n", error);
+ dpaa2_mc_detach(dev);
+ return (ENXIO);
+ }
+
+ /* Initialize a resource manager for the DPAA2 concentrators. */
+ sc->dpcon_rman.rm_type = RMAN_ARRAY;
+ sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects";
+ error = rman_init(&sc->dpcon_rman);
+ if (error) {
+ device_printf(dev, "Failed to initialize a resource manager for "
+ "the DPAA2 concentrators: error=%d\n", error);
+ dpaa2_mc_detach(dev);
+ return (ENXIO);
+ }
+
+ /* Initialize a resource manager for the DPAA2 MC portals. */
+ sc->dpmcp_rman.rm_type = RMAN_ARRAY;
+ sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects";
+ error = rman_init(&sc->dpmcp_rman);
+ if (error) {
+ device_printf(dev, "Failed to initialize a resource manager for "
+ "the DPAA2 MC portals: error=%d\n", error);
+ dpaa2_mc_detach(dev);
+ return (ENXIO);
+ }
+
+ /* Initialize a list of non-allocatable DPAA2 devices. */
+ mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF);
+ STAILQ_INIT(&sc->mdev_list);
+
+ mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF);
+
+ /*
+ * Add a root resource container as the only child of the bus. All of
+ * the direct descendant containers will be attached to the root one
+ * instead of the MC device.
+ */
+ sc->rcdev = device_add_child(dev, "dpaa2_rc", 0);
+ if (sc->rcdev == NULL) {
+ dpaa2_mc_detach(dev);
+ return (ENXIO);
+ }
+ bus_generic_probe(dev);
+ bus_generic_attach(dev);
+
+ return (0);
+}
+
+int
+dpaa2_mc_detach(device_t dev)
+{
+ struct dpaa2_mc_softc *sc;
+ struct dpaa2_devinfo *dinfo = NULL;
+ int error;
+
+	error = bus_generic_detach(dev);
+	if (error != 0)
+		return (error);
+
+ sc = device_get_softc(dev);
+ if (sc->rcdev)
+ device_delete_child(dev, sc->rcdev);
+ bus_release_resources(dev, dpaa2_mc_spec, sc->res);
+
+ dinfo = device_get_ivars(dev);
+ if (dinfo)
+ free(dinfo, M_DPAA2_MC);
+
+ return (device_delete_children(dev));
+}
+
+/*
+ * For bus interface.
+ */
+
+struct resource *
+dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid,
+ rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
+{
+ struct resource *res;
+ struct rman *rm;
+ int error;
+
+ rm = dpaa2_mc_rman(mcdev, type);
+ if (!rm)
+ return (BUS_ALLOC_RESOURCE(device_get_parent(mcdev), child,
+ type, rid, start, end, count, flags));
+
+ /*
+ * Skip managing DPAA2-specific resource. It must be provided to MC by
+ * calling DPAA2_MC_MANAGE_DEV() beforehand.
+ */
+ if (type <= DPAA2_DEV_MC) {
+ error = rman_manage_region(rm, start, end);
+ if (error) {
+ device_printf(mcdev, "rman_manage_region() failed: "
+ "start=%#jx, end=%#jx, error=%d\n", start, end,
+ error);
+ goto fail;
+ }
+ }
+
+ res = rman_reserve_resource(rm, start, end, count, flags, child);
+ if (!res) {
+ device_printf(mcdev, "rman_reserve_resource() failed: "
+ "start=%#jx, end=%#jx, count=%#jx\n", start, end, count);
+ goto fail;
+ }
+
+ rman_set_rid(res, *rid);
+
+ if (flags & RF_ACTIVE) {
+ if (bus_activate_resource(child, type, *rid, res)) {
+ device_printf(mcdev, "bus_activate_resource() failed: "
+ "rid=%d, res=%#jx\n", *rid, (uintmax_t) res);
+ rman_release_resource(res);
+ goto fail;
+ }
+ }
+
+ return (res);
+ fail:
+ device_printf(mcdev, "%s() failed: type=%d, rid=%d, start=%#jx, "
+ "end=%#jx, count=%#jx, flags=%x\n", __func__, type, *rid, start, end,
+ count, flags);
+ return (NULL);
+}
+
+int
+dpaa2_mc_adjust_resource(device_t mcdev, device_t child, int type,
+ struct resource *r, rman_res_t start, rman_res_t end)
+{
+ struct rman *rm;
+
+ rm = dpaa2_mc_rman(mcdev, type);
+ if (rm)
+ return (rman_adjust_resource(r, start, end));
+ return (bus_generic_adjust_resource(mcdev, child, type, r, start, end));
+}
+
+int
+dpaa2_mc_release_resource(device_t mcdev, device_t child, int type, int rid,
+ struct resource *r)
+{
+ struct rman *rm;
+
+ rm = dpaa2_mc_rman(mcdev, type);
+ if (rm) {
+ KASSERT(rman_is_region_manager(r, rm), ("rman mismatch"));
+ rman_release_resource(r);
+ }
+
+ return (bus_generic_release_resource(mcdev, child, type, rid, r));
+}
+
+int
+dpaa2_mc_activate_resource(device_t mcdev, device_t child, int type, int rid,
+ struct resource *r)
+{
+ int rc;
+
+ if ((rc = rman_activate_resource(r)) != 0)
+ return (rc);
+
+ return (BUS_ACTIVATE_RESOURCE(device_get_parent(mcdev), child, type,
+ rid, r));
+}
+
+int
+dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, int type, int rid,
+ struct resource *r)
+{
+ int rc;
+
+ if ((rc = rman_deactivate_resource(r)) != 0)
+ return (rc);
+
+ return (BUS_DEACTIVATE_RESOURCE(device_get_parent(mcdev), child, type,
+ rid, r));
+}
+
+/*
+ * For pseudo-pcib interface.
+ */
+
+int
+dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount,
+ int *irqs)
+{
+#if defined(INTRNG)
+ return (dpaa2_mc_alloc_msi_impl(mcdev, child, count, maxcount, irqs));
+#else
+ return (ENXIO);
+#endif
+}
+
+int
+dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs)
+{
+#if defined(INTRNG)
+ return (dpaa2_mc_release_msi_impl(mcdev, child, count, irqs));
+#else
+ return (ENXIO);
+#endif
+}
+
+int
+dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr,
+ uint32_t *data)
+{
+#if defined(INTRNG)
+ return (dpaa2_mc_map_msi_impl(mcdev, child, irq, addr, data));
+#else
+ return (ENXIO);
+#endif
+}
+
+int
+dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type,
+ uintptr_t *id)
+{
+ struct dpaa2_devinfo *dinfo;
+
+ dinfo = device_get_ivars(child);
+
+ if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
+ return (ENXIO);
+
+ if (type == PCI_ID_MSI)
+ return (dpaa2_mc_map_id(mcdev, child, id));
+
+ *id = dinfo->icid;
+ return (0);
+}
+
+/*
+ * For DPAA2 Management Complex bus driver interface.
+ */
+
+int
+dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags)
+{
+ struct dpaa2_mc_softc *sc;
+ struct dpaa2_devinfo *dinfo;
+ struct dpaa2_mc_devinfo *di;
+ struct rman *rm;
+ int error;
+
+ sc = device_get_softc(mcdev);
+ dinfo = device_get_ivars(dpaa2_dev);
+
+ if (!sc || !dinfo || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
+ return (EINVAL);
+
+	di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO);
+ di->dpaa2_dev = dpaa2_dev;
+ di->flags = flags;
+ di->owners = 0;
+
+ /* Append a new managed DPAA2 device to the queue. */
+ mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
+ mtx_lock(&sc->mdev_lock);
+ STAILQ_INSERT_TAIL(&sc->mdev_list, di, link);
+ mtx_unlock(&sc->mdev_lock);
+
+ if (flags & DPAA2_MC_DEV_ALLOCATABLE) {
+ /* Select rman based on a type of the DPAA2 device. */
+ rm = dpaa2_mc_rman(mcdev, dinfo->dtype);
+ if (!rm)
+ return (ENOENT);
+ /* Manage DPAA2 device as an allocatable resource. */
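+		/*
+		 * The device_t pointer is used as both start and end of a
+		 * one-unit region, so the device can later be looked up and
+		 * handed out via rman like any other resource (see
+		 * dpaa2_mc_get_free_dev() and its KASSERT).
+		 */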
+ error = rman_manage_region(rm, (rman_res_t) dpaa2_dev,
+ (rman_res_t) dpaa2_dev);
+ if (error)
+ return (error);
+ }
+
+ return (0);
+}
+
+int
+dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev,
+ enum dpaa2_dev_type devtype)
+{
+ struct rman *rm;
+ rman_res_t start, end;
+ int error;
+
+ if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
+ return (EINVAL);
+
+ /* Select resource manager based on a type of the DPAA2 device. */
+ rm = dpaa2_mc_rman(mcdev, devtype);
+ if (!rm)
+ return (ENOENT);
+ /* Find first free DPAA2 device of the given type. */
+ error = rman_first_free_region(rm, &start, &end);
+ if (error)
+ return (error);
+
+ KASSERT(start == end, ("start != end, but should be the same pointer "
+ "to the DPAA2 device: start=%jx, end=%jx", start, end));
+
+ *dpaa2_dev = (device_t) start;
+
+ return (0);
+}
+
+int
+dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev,
+ enum dpaa2_dev_type devtype, uint32_t obj_id)
+{
+ struct dpaa2_mc_softc *sc;
+ struct dpaa2_devinfo *dinfo;
+ struct dpaa2_mc_devinfo *di;
+ int error = ENOENT;
+
+ sc = device_get_softc(mcdev);
+
+ if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
+ return (EINVAL);
+
+ mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
+ mtx_lock(&sc->mdev_lock);
+
+ STAILQ_FOREACH(di, &sc->mdev_list, link) {
+ dinfo = device_get_ivars(di->dpaa2_dev);
+ if (dinfo->dtype == devtype && dinfo->id == obj_id) {
+ *dpaa2_dev = di->dpaa2_dev;
+ error = 0;
+ break;
+ }
+ }
+
+ mtx_unlock(&sc->mdev_lock);
+
+ return (error);
+}
+
+int
+dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev,
+ enum dpaa2_dev_type devtype)
+{
+ struct dpaa2_mc_softc *sc;
+ struct dpaa2_devinfo *dinfo;
+ struct dpaa2_mc_devinfo *di;
+ device_t dev = NULL;
+ uint32_t owners = UINT32_MAX;
+ int error = ENOENT;
+
+ sc = device_get_softc(mcdev);
+
+ if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
+ return (EINVAL);
+
+ mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
+ mtx_lock(&sc->mdev_lock);
+
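+	/* Prefer the shareable device of the given type with fewest owners. */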
+ STAILQ_FOREACH(di, &sc->mdev_list, link) {
+ dinfo = device_get_ivars(di->dpaa2_dev);
+
+ if ((dinfo->dtype == devtype) &&
+ (di->flags & DPAA2_MC_DEV_SHAREABLE) &&
+ (di->owners < owners)) {
+ dev = di->dpaa2_dev;
+ owners = di->owners;
+ }
+ }
+ if (dev) {
+ *dpaa2_dev = dev;
+ error = 0;
+ }
+
+ mtx_unlock(&sc->mdev_lock);
+
+ return (error);
+}
+
+int
+dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev,
+ enum dpaa2_dev_type devtype)
+{
+ struct dpaa2_mc_softc *sc;
+ struct dpaa2_mc_devinfo *di;
+ int error = ENOENT;
+
+ sc = device_get_softc(mcdev);
+
+ if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
+ return (EINVAL);
+
+ mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
+ mtx_lock(&sc->mdev_lock);
+
+ STAILQ_FOREACH(di, &sc->mdev_list, link) {
+ if (di->dpaa2_dev == dpaa2_dev &&
+ (di->flags & DPAA2_MC_DEV_SHAREABLE)) {
+ di->owners++;
+ error = 0;
+ break;
+ }
+ }
+
+ mtx_unlock(&sc->mdev_lock);
+
+ return (error);
+}
+
+int
+dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev,
+ enum dpaa2_dev_type devtype)
+{
+ struct dpaa2_mc_softc *sc;
+ struct dpaa2_mc_devinfo *di;
+ int error = ENOENT;
+
+ sc = device_get_softc(mcdev);
+
+ if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
+ return (EINVAL);
+
+ mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
+ mtx_lock(&sc->mdev_lock);
+
+ STAILQ_FOREACH(di, &sc->mdev_list, link) {
+ if (di->dpaa2_dev == dpaa2_dev &&
+ (di->flags & DPAA2_MC_DEV_SHAREABLE)) {
+ di->owners -= di->owners > 0 ? 1 : 0;
+ error = 0;
+ break;
+ }
+ }
+
+ mtx_unlock(&sc->mdev_lock);
+
+ return (error);
+}
+
+/**
+ * @brief Convert DPAA2 device type to string.
+ */
+const char *
+dpaa2_ttos(enum dpaa2_dev_type type)
+{
+ switch (type) {
+ case DPAA2_DEV_MC:
+		return ("mc"); /* NOTE: for informational output only. */
+ case DPAA2_DEV_RC:
+ return ("dprc");
+ case DPAA2_DEV_IO:
+ return ("dpio");
+ case DPAA2_DEV_NI:
+ return ("dpni");
+ case DPAA2_DEV_MCP:
+ return ("dpmcp");
+ case DPAA2_DEV_BP:
+ return ("dpbp");
+ case DPAA2_DEV_CON:
+ return ("dpcon");
+ case DPAA2_DEV_MAC:
+ return ("dpmac");
+ case DPAA2_DEV_MUX:
+ return ("dpdmux");
+ case DPAA2_DEV_SW:
+ return ("dpsw");
+ default:
+ break;
+ }
+ return ("notype");
+}
+
+/**
+ * @brief Convert string to DPAA2 device type.
+ */
+enum dpaa2_dev_type
+dpaa2_stot(const char *str)
+{
+ if (COMPARE_TYPE(str, "dprc")) {
+ return (DPAA2_DEV_RC);
+ } else if (COMPARE_TYPE(str, "dpio")) {
+ return (DPAA2_DEV_IO);
+ } else if (COMPARE_TYPE(str, "dpni")) {
+ return (DPAA2_DEV_NI);
+ } else if (COMPARE_TYPE(str, "dpmcp")) {
+ return (DPAA2_DEV_MCP);
+ } else if (COMPARE_TYPE(str, "dpbp")) {
+ return (DPAA2_DEV_BP);
+ } else if (COMPARE_TYPE(str, "dpcon")) {
+ return (DPAA2_DEV_CON);
+ } else if (COMPARE_TYPE(str, "dpmac")) {
+ return (DPAA2_DEV_MAC);
+ } else if (COMPARE_TYPE(str, "dpdmux")) {
+ return (DPAA2_DEV_MUX);
+ } else if (COMPARE_TYPE(str, "dpsw")) {
+ return (DPAA2_DEV_SW);
+ }
+
+ return (DPAA2_DEV_NOTYPE);
+}
+
+/**
+ * @internal
+ */
+static u_int
+dpaa2_mc_get_xref(device_t mcdev, device_t child)
+{
+ struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
+ struct dpaa2_devinfo *dinfo = device_get_ivars(child);
+#ifdef DEV_ACPI
+ u_int xref, devid;
+#endif
+#ifdef FDT
+ phandle_t msi_parent;
+#endif
+ int error;
+
+ if (sc && dinfo) {
+#ifdef DEV_ACPI
+ if (sc->acpi_based) {
+ /*
+ * NOTE: The first named component from the IORT table
+ * with the given name (as a substring) will be used.
+ */
+ error = acpi_iort_map_named_msi(IORT_DEVICE_NAME,
+ dinfo->icid, &xref, &devid);
+ if (error)
+ return (0);
+ return (xref);
+ }
+#endif
+#ifdef FDT
+ if (!sc->acpi_based) {
+ /* FDT-based driver. */
+ error = ofw_bus_msimap(sc->ofw_node, dinfo->icid,
+ &msi_parent, NULL);
+ if (error)
+ return (0);
+ return ((u_int) msi_parent);
+ }
+#endif
+ }
+ return (0);
+}
+
+/**
+ * @internal
+ */
+static u_int
+dpaa2_mc_map_id(device_t mcdev, device_t child, uintptr_t *id)
+{
+ struct dpaa2_devinfo *dinfo;
+#ifdef DEV_ACPI
+ u_int xref, devid;
+ int error;
+#endif
+
+ dinfo = device_get_ivars(child);
+ if (dinfo) {
+ /*
+	 * The first named component from the IORT table with the given
+ * name (as a substring) will be used.
+ */
+#ifdef DEV_ACPI
+ error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid,
+ &xref, &devid);
+ if (error == 0)
+ *id = devid;
+ else
+#endif
+ *id = dinfo->icid; /* RID not in IORT, likely FW bug */
+
+ return (0);
+ }
+ return (ENXIO);
+}
+
+/**
+ * @internal
+ * @brief Obtain a resource manager based on the given type of the resource.
+ */
+static struct rman *
+dpaa2_mc_rman(device_t mcdev, int type)
+{
+ struct dpaa2_mc_softc *sc;
+
+ sc = device_get_softc(mcdev);
+
+ switch (type) {
+ case DPAA2_DEV_IO:
+ return (&sc->dpio_rman);
+ case DPAA2_DEV_BP:
+ return (&sc->dpbp_rman);
+ case DPAA2_DEV_CON:
+ return (&sc->dpcon_rman);
+ case DPAA2_DEV_MCP:
+ return (&sc->dpmcp_rman);
+ default:
+ break;
+ }
+
+ return (NULL);
+}
+
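+/*
+ * Example (a sketch with illustrative parameters): child drivers allocate
+ * managed DPAA2 objects as ordinary bus resources, with the DPAA2 device
+ * type standing in for a SYS_RES_* resource type:
+ *
+ *	struct resource *res;
+ *	int rid = 0;
+ *
+ *	res = bus_alloc_resource(dev, DPAA2_DEV_BP, &rid, 0, RM_MAX_END, 1,
+ *	    RF_ACTIVE | RF_SHAREABLE);
+ */
+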
+#if defined(INTRNG) && !defined(IOMMU)
+
+/**
+ * @internal
+ * @brief Allocates the requested number of MSIs.
+ *
+ * NOTE: This function is part of the fallback solution used when no IOMMU
+ *       is available. The total number of IRQs is limited to 32.
+ */
+static int
+dpaa2_mc_alloc_msi_impl(device_t mcdev, device_t child, int count, int maxcount,
+ int *irqs)
+{
+ struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
+ int msi_irqs[DPAA2_MC_MSI_COUNT];
+ int error;
+
+ /* Pre-allocate a bunch of MSIs for MC to be used by its children. */
+ if (!sc->msi_allocated) {
+ error = intr_alloc_msi(mcdev, child, dpaa2_mc_get_xref(mcdev,
+ child), DPAA2_MC_MSI_COUNT, DPAA2_MC_MSI_COUNT, msi_irqs);
+ if (error) {
+ device_printf(mcdev, "failed to pre-allocate %d MSIs: "
+ "error=%d\n", DPAA2_MC_MSI_COUNT, error);
+ return (error);
+ }
+
+ mtx_assert(&sc->msi_lock, MA_NOTOWNED);
+ mtx_lock(&sc->msi_lock);
+ for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
+ sc->msi[i].child = NULL;
+ sc->msi[i].irq = msi_irqs[i];
+ }
+ sc->msi_owner = child;
+ sc->msi_allocated = true;
+ mtx_unlock(&sc->msi_lock);
+ }
+
+ error = ENOENT;
+
+ /* Find the first free MSIs from the pre-allocated pool. */
+ mtx_assert(&sc->msi_lock, MA_NOTOWNED);
+ mtx_lock(&sc->msi_lock);
+ for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
+ if (sc->msi[i].child != NULL)
+ continue;
+ error = 0;
+ for (int j = 0; j < count; j++) {
+ if (i + j >= DPAA2_MC_MSI_COUNT) {
+ device_printf(mcdev, "requested %d MSIs exceed "
+ "limit of %d available\n", count,
+ DPAA2_MC_MSI_COUNT);
+ error = E2BIG;
+ break;
+ }
+ sc->msi[i + j].child = child;
+ irqs[j] = sc->msi[i + j].irq;
+ }
+ break;
+ }
+ mtx_unlock(&sc->msi_lock);
+
+ return (error);
+}
+
+/**
+ * @internal
+ * @brief Marks IRQs as free in the pre-allocated pool of MSIs.
+ *
+ * NOTE: This function is part of the fallback solution used when no IOMMU
+ *       is available. The total number of IRQs is limited to 32.
+ * NOTE: MSIs are kept allocated in the kernel as a part of the pool.
+ */
+static int
+dpaa2_mc_release_msi_impl(device_t mcdev, device_t child, int count, int *irqs)
+{
+ struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
+
+ mtx_assert(&sc->msi_lock, MA_NOTOWNED);
+ mtx_lock(&sc->msi_lock);
+ for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
+ if (sc->msi[i].child != child)
+ continue;
+ for (int j = 0; j < count; j++) {
+ if (sc->msi[i].irq == irqs[j]) {
+ sc->msi[i].child = NULL;
+ break;
+ }
+ }
+ }
+ mtx_unlock(&sc->msi_lock);
+
+ return (0);
+}
+
+/**
+ * @internal
+ * @brief Provides the address to write to and the data to write for the
+ * given MSI from the pre-allocated pool.
+ *
+ * NOTE: This function is part of the fallback solution used when no IOMMU
+ *       is available. The total number of IRQs is limited to 32.
+ */
+static int
+dpaa2_mc_map_msi_impl(device_t mcdev, device_t child, int irq, uint64_t *addr,
+ uint32_t *data)
+{
+ struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
+ int error = EINVAL;
+
+ mtx_assert(&sc->msi_lock, MA_NOTOWNED);
+ mtx_lock(&sc->msi_lock);
+ for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
+ if (sc->msi[i].child == child && sc->msi[i].irq == irq) {
+ error = 0;
+ break;
+ }
+ }
+ mtx_unlock(&sc->msi_lock);
+ if (error)
+ return (error);
+
+ return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev,
+ sc->msi_owner), irq, addr, data));
+}
+
+#endif /* defined(INTRNG) && !defined(IOMMU) */
+
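+/*
+ * Example (sketch): the pre-allocated pool above is consumed through the
+ * pseudo-PCIB interface; "mcdev", "child" and error handling are assumed:
+ *
+ *	uint64_t addr;
+ *	uint32_t data;
+ *	int irqs[1];
+ *
+ *	error = PCIB_ALLOC_MSI(mcdev, child, 1, 1, irqs);
+ *	if (error == 0)
+ *		error = PCIB_MAP_MSI(mcdev, child, irqs[0], &addr, &data);
+ */
+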
+static device_method_t dpaa2_mc_methods[] = {
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(dpaa2_mc, dpaa2_mc_driver, dpaa2_mc_methods,
+ sizeof(struct dpaa2_mc_softc));
diff --git a/sys/dev/dpaa2/dpaa2_mc.h b/sys/dev/dpaa2/dpaa2_mc.h
new file mode 100644
index 000000000000..9a21c9724b82
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_mc.h
@@ -0,0 +1,218 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_MC_H
+#define _DPAA2_MC_H
+
+#include <sys/rman.h>
+#include <sys/bus.h>
+#include <sys/queue.h>
+
+#include <net/ethernet.h>
+
+#include <dev/ofw/openfirm.h>
+
+#include "pci_if.h"
+
+#include "dpaa2_types.h"
+#include "dpaa2_mcp.h"
+#include "dpaa2_swp.h"
+#include "dpaa2_ni.h"
+#include "dpaa2_io.h"
+#include "dpaa2_mac.h"
+#include "dpaa2_con.h"
+#include "dpaa2_bp.h"
+
+/*
+ * Maximum number of MSIs supported by the MC for its children without IOMMU.
+ *
+ * TODO: Should be much more with IOMMU translation.
+ */
+#define DPAA2_MC_MSI_COUNT 32
+
+/* Flags for DPAA2 devices as resources. */
+#define DPAA2_MC_DEV_ALLOCATABLE 0x01u /* to be managed by DPAA2-specific rman */
+#define DPAA2_MC_DEV_ASSOCIATED 0x02u /* to obtain info about DPAA2 device */
+#define DPAA2_MC_DEV_SHAREABLE 0x04u /* to be shared among DPAA2 devices */
+
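+/*
+ * Example (sketch): a resource container would typically register a DPCON
+ * object as both allocatable and shareable; "mcdev" and "condev" assumed:
+ *
+ *	error = DPAA2_MC_MANAGE_DEV(mcdev, condev,
+ *	    DPAA2_MC_DEV_ALLOCATABLE | DPAA2_MC_DEV_SHAREABLE);
+ */
+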
+struct dpaa2_mc_devinfo; /* about managed DPAA2 devices */
+
+/**
+ * @brief Software context for the DPAA2 Management Complex (MC) driver.
+ *
+ * dev: Device associated with this software context.
+ * rcdev: Child device associated with the root resource container.
+ * acpi_based: Attached using ACPI (true) or FDT (false).
+ * ofw_node: FDT node of the Management Complex (acpi_based == false).
+ *
+ * res: Unmapped MC command portal and control registers resources.
+ * map: Mapped MC command portal and control registers resources.
+ *
+ * dpio_rman: I/O objects resource manager.
+ * dpbp_rman: Buffer Pools resource manager.
+ * dpcon_rman: Concentrators resource manager.
+ * dpmcp_rman: MC portals resource manager.
+ */
+struct dpaa2_mc_softc {
+ device_t dev;
+ device_t rcdev;
+ bool acpi_based;
+ phandle_t ofw_node;
+
+ struct resource *res[2];
+ struct resource_map map[2];
+
+ /* For allocatable managed DPAA2 objects. */
+ struct rman dpio_rman;
+ struct rman dpbp_rman;
+ struct rman dpcon_rman;
+ struct rman dpmcp_rman;
+
+ /* For managed DPAA2 objects. */
+ struct mtx mdev_lock;
+ STAILQ_HEAD(, dpaa2_mc_devinfo) mdev_list;
+
+	/* NOTE: Workaround for the case when no IOMMU is available. */
+#ifndef IOMMU
+ device_t msi_owner;
+ bool msi_allocated;
+ struct mtx msi_lock;
+ struct {
+ device_t child;
+ int irq;
+ } msi[DPAA2_MC_MSI_COUNT];
+#endif
+};
+
+/**
+ * @brief Software context for the DPAA2 Resource Container (RC) driver.
+ *
+ * dev: Device associated with this software context.
+ * portal: Helper object to send commands to the MC portal.
+ * unit:	Helps to distinguish between root (0) and child DPRCs.
+ * cont_id: Container ID.
+ */
+struct dpaa2_rc_softc {
+ device_t dev;
+ int unit;
+ uint32_t cont_id;
+};
+
+/**
+ * @brief Information about MSI messages supported by the DPAA2 object.
+ *
+ * msi_msgnum: Number of MSI messages supported by the DPAA2 object.
+ * msi_alloc: Number of MSI messages allocated for the DPAA2 object.
+ * msi_handlers: Number of MSI message handlers configured.
+ */
+struct dpaa2_msinfo {
+ uint8_t msi_msgnum;
+ uint8_t msi_alloc;
+ uint32_t msi_handlers;
+};
+
+/**
+ * @brief Information about DPAA2 device.
+ *
+ * pdev: Parent device.
+ * dev: Device this devinfo is associated with.
+ *
+ * id: ID of a logical DPAA2 object resource.
+ * portal_id: ID of the MC portal which belongs to the object's container.
+ * icid: Isolation context ID of the DPAA2 object. It is shared
+ * between a resource container and all of its children.
+ *
+ * dtype: Type of the DPAA2 object.
+ * resources: Resources available for this DPAA2 device.
+ * msi: Information about MSI messages supported by the DPAA2 object.
+ */
+struct dpaa2_devinfo {
+ device_t pdev;
+ device_t dev;
+
+ uint32_t id;
+ uint32_t portal_id;
+ uint32_t icid;
+
+ enum dpaa2_dev_type dtype;
+ struct resource_list resources;
+ struct dpaa2_msinfo msi;
+
+ /*
+ * DPAA2 object might or might not have its own portal allocated to
+ * execute MC commands. If the portal has been allocated, it takes
+ * precedence over the portal owned by the resource container.
+ */
+ struct dpaa2_mcp *portal;
+};
+
+DECLARE_CLASS(dpaa2_mc_driver);
+
+/* For device interface. */
+
+int dpaa2_mc_attach(device_t dev);
+int dpaa2_mc_detach(device_t dev);
+
+/* For bus interface. */
+
+struct resource * dpaa2_mc_alloc_resource(device_t mcdev, device_t child,
+ int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count,
+ u_int flags);
+int dpaa2_mc_adjust_resource(device_t mcdev, device_t child, int type,
+ struct resource *r, rman_res_t start, rman_res_t end);
+int dpaa2_mc_release_resource(device_t mcdev, device_t child, int type,
+ int rid, struct resource *r);
+int dpaa2_mc_activate_resource(device_t mcdev, device_t child, int type,
+ int rid, struct resource *r);
+int dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, int type,
+ int rid, struct resource *r);
+
+/* For pseudo-pcib interface. */
+
+int dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount,
+ int *irqs);
+int dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs);
+int dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr,
+ uint32_t *data);
+int dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type,
+ uintptr_t *id);
+
+/* For DPAA2 MC bus interface. */
+
+int dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags);
+int dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev,
+ enum dpaa2_dev_type devtype);
+int dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev,
+ enum dpaa2_dev_type devtype, uint32_t obj_id);
+int dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev,
+ enum dpaa2_dev_type devtype);
+int dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev,
+ enum dpaa2_dev_type devtype);
+int dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev,
+ enum dpaa2_dev_type devtype);
+
+#endif /* _DPAA2_MC_H */
diff --git a/sys/dev/dpaa2/dpaa2_mc_acpi.c b/sys/dev/dpaa2/dpaa2_mc_acpi.c
new file mode 100644
index 000000000000..fb0b467b5009
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_mc_acpi.c
@@ -0,0 +1,393 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ * Copyright © 2021 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * The DPAA2 Management Complex (MC) Bus Driver (ACPI-based).
+ *
+ * MC is a hardware resource manager which can be found in several NXP
+ * SoCs (LX2160A, for example) and provides an access to the specialized
+ * hardware objects used in network-oriented packet processing applications.
+ */
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+
+#include "acpi_bus_if.h"
+#include "pcib_if.h"
+#include "pci_if.h"
+
+#include "dpaa2_mcp.h"
+#include "dpaa2_mc.h"
+#include "dpaa2_mc_if.h"
+
+struct dpaa2_mac_dev_softc {
+ int uid;
+ uint64_t reg;
+ char managed[64];
+ char phy_conn_type[64];
+ char phy_mode[64];
+ ACPI_HANDLE phy_channel;
+};
+
+static int
+dpaa2_mac_dev_probe(device_t dev)
+{
+ uint64_t reg;
+ ssize_t s;
+
+ s = device_get_property(dev, "reg", &reg, sizeof(reg),
+ DEVICE_PROP_UINT64);
+ if (s == -1)
+ return (ENXIO);
+
+ device_set_desc(dev, "DPAA2 MAC DEV");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+dpaa2_mac_dev_attach(device_t dev)
+{
+ struct dpaa2_mac_dev_softc *sc;
+ ACPI_HANDLE h;
+ ssize_t s;
+
+ sc = device_get_softc(dev);
+ h = acpi_get_handle(dev);
+ if (h == NULL)
+ return (ENXIO);
+
+ s = acpi_GetInteger(h, "_UID", &sc->uid);
+ if (ACPI_FAILURE(s)) {
+ device_printf(dev, "Cannot find '_UID' property: %zd\n", s);
+ return (ENXIO);
+ }
+
+ s = device_get_property(dev, "reg", &sc->reg, sizeof(sc->reg),
+ DEVICE_PROP_UINT64);
+ if (s == -1) {
+ device_printf(dev, "Cannot find 'reg' property: %zd\n", s);
+ return (ENXIO);
+ }
+
+ s = device_get_property(dev, "managed", sc->managed,
+ sizeof(sc->managed), DEVICE_PROP_ANY);
+ s = device_get_property(dev, "phy-connection-type", sc->phy_conn_type,
+ sizeof(sc->phy_conn_type), DEVICE_PROP_ANY);
+ s = device_get_property(dev, "phy-mode", sc->phy_mode,
+ sizeof(sc->phy_mode), DEVICE_PROP_ANY);
+ s = device_get_property(dev, "phy-handle", &sc->phy_channel,
+ sizeof(sc->phy_channel), DEVICE_PROP_HANDLE);
+
+ if (bootverbose)
+ device_printf(dev, "UID %#04x reg %#04jx managed '%s' "
+ "phy-connection-type '%s' phy-mode '%s' phy-handle '%s'\n",
+ sc->uid, sc->reg, sc->managed[0] != '\0' ? sc->managed : "",
+ sc->phy_conn_type[0] != '\0' ? sc->phy_conn_type : "",
+ sc->phy_mode[0] != '\0' ? sc->phy_mode : "",
+ sc->phy_channel != NULL ? acpi_name(sc->phy_channel) : "");
+
+ return (0);
+}
+
+static bool
+dpaa2_mac_dev_match_id(device_t dev, uint32_t id)
+{
+ struct dpaa2_mac_dev_softc *sc;
+
+ if (dev == NULL)
+ return (false);
+
+ sc = device_get_softc(dev);
+ if (sc->uid == id)
+ return (true);
+
+ return (false);
+}
+
+static device_t
+dpaa2_mac_dev_get_phy_dev(device_t dev)
+{
+ struct dpaa2_mac_dev_softc *sc;
+
+ if (dev == NULL)
+ return (NULL);
+
+ sc = device_get_softc(dev);
+ if (sc->phy_channel == NULL)
+ return (NULL);
+
+ return (acpi_get_device(sc->phy_channel));
+}
+
+static device_method_t dpaa2_mac_dev_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, dpaa2_mac_dev_probe),
+ DEVMETHOD(device_attach, dpaa2_mac_dev_attach),
+ DEVMETHOD(device_detach, bus_generic_detach),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(dpaa2_mac_dev, dpaa2_mac_dev_driver, dpaa2_mac_dev_methods,
+ sizeof(struct dpaa2_mac_dev_softc));
+
+DRIVER_MODULE(dpaa2_mac_dev, dpaa2_mc, dpaa2_mac_dev_driver, 0, 0);
+
+MODULE_DEPEND(dpaa2_mac_dev, memac_mdio_acpi, 1, 1, 1);
+
+/*
+ * Device interface.
+ */
+
+static int
+dpaa2_mc_acpi_probe(device_t dev)
+{
+ static char *dpaa2_mc_ids[] = { "NXP0008", NULL };
+ int rc;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);
+
+ rc = ACPI_ID_PROBE(device_get_parent(dev), dev, dpaa2_mc_ids, NULL);
+ if (rc <= 0)
+ device_set_desc(dev, "DPAA2 Management Complex");
+
+ return (rc);
+}
+
+/* Context for walking PRxx child devices. */
+struct dpaa2_mc_acpi_prxx_walk_ctx {
+ device_t dev;
+ int count;
+ int countok;
+};
+
+static ACPI_STATUS
+dpaa2_mc_acpi_probe_child(ACPI_HANDLE h, device_t *dev, int level, void *arg)
+{
+ struct dpaa2_mc_acpi_prxx_walk_ctx *ctx;
+ struct acpi_device *ad;
+ device_t child;
+ uint32_t uid;
+
+ ctx = (struct dpaa2_mc_acpi_prxx_walk_ctx *)arg;
+ ctx->count++;
+
+#if 0
+ device_printf(ctx->dev, "%s: %s level %d count %d\n", __func__,
+ acpi_name(h), level, ctx->count);
+#endif
+
+ if (ACPI_FAILURE(acpi_GetInteger(h, "_UID", &uid)))
+ return (AE_OK);
+#if 0
+ if (bootverbose)
+ device_printf(ctx->dev, "%s: Found child Ports _UID %u\n",
+ __func__, uid);
+#endif
+
+ /* Technically M_ACPIDEV */
+ if ((ad = malloc(sizeof(*ad), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL)
+ return (AE_OK);
+
+ child = device_add_child(ctx->dev, "dpaa2_mac_dev", -1);
+ if (child == NULL) {
+ free(ad, M_DEVBUF);
+ return (AE_OK);
+ }
+ ad->ad_handle = h;
+ ad->ad_cls_class = 0xffffff;
+ resource_list_init(&ad->ad_rl);
+ device_set_ivars(child, ad);
+ *dev = child;
+
+ ctx->countok++;
+ return (AE_OK);
+}
+
+static int
+dpaa2_mc_acpi_attach(device_t dev)
+{
+ struct dpaa2_mc_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->acpi_based = true;
+
+ struct dpaa2_mc_acpi_prxx_walk_ctx ctx;
+ ctx.dev = dev;
+ ctx.count = 0;
+ ctx.countok = 0;
+ ACPI_SCAN_CHILDREN(device_get_parent(dev), dev, 2,
+ dpaa2_mc_acpi_probe_child, &ctx);
+
+#if 0
+ device_printf(dev, "Found %d child Ports in ASL, %d ok\n",
+ ctx.count, ctx.countok);
+#endif
+
+ return (dpaa2_mc_attach(dev));
+}
+
+/*
+ * ACPI compat layer.
+ */
+
+static device_t
+dpaa2_mc_acpi_find_dpaa2_mac_dev(device_t dev, uint32_t id)
+{
+ int devcount, error, i, len;
+ device_t *devlist, mdev;
+ const char *mdevname;
+
+ error = device_get_children(dev, &devlist, &devcount);
+ if (error != 0)
+ return (NULL);
+
+ for (i = 0; i < devcount; i++) {
+ mdev = devlist[i];
+ mdevname = device_get_name(mdev);
+ if (mdevname != NULL) {
+ len = strlen(mdevname);
+ if (strncmp("dpaa2_mac_dev", mdevname, len) != 0)
+ continue;
+ } else {
+ continue;
+ }
+ if (!device_is_attached(mdev))
+ continue;
+
+ if (dpaa2_mac_dev_match_id(mdev, id))
+ return (mdev);
+ }
+
+ return (NULL);
+}
+
+static int
+dpaa2_mc_acpi_get_phy_dev(device_t dev, device_t *phy_dev, uint32_t id)
+{
+ device_t mdev, pdev;
+
+ mdev = dpaa2_mc_acpi_find_dpaa2_mac_dev(dev, id);
+ if (mdev == NULL) {
+ device_printf(dev, "%s: error finding dpmac device with id=%u\n",
+ __func__, id);
+ return (ENXIO);
+ }
+
+ pdev = dpaa2_mac_dev_get_phy_dev(mdev);
+ if (pdev == NULL) {
+		device_printf(dev, "%s: error getting MDIO device for dpmac %s "
+ "(id=%u)\n", __func__, device_get_nameunit(mdev), id);
+ return (ENXIO);
+ }
+
+ if (phy_dev != NULL)
+ *phy_dev = pdev;
+
+ return (0);
+}
+
+static ssize_t
+dpaa2_mc_acpi_get_property(device_t dev, device_t child, const char *propname,
+ void *propvalue, size_t size, device_property_type_t type)
+{
+ return (bus_generic_get_property(dev, child, propname, propvalue, size,
+ type));
+}
+
+static int
+dpaa2_mc_acpi_read_ivar(device_t dev, device_t child, int index,
+ uintptr_t *result)
+{
+ /*
+	 * This method is special in that it passes "child" (rather than
+	 * "dev") as the second argument. acpi_get_handle() in
+	 * dpaa2_mac_dev_attach() issues the ivar read on its parent, i.e.
+	 * this bus rather than ACPI, which is how we end up here. Hence we
+	 * need to keep "child" as-is and forward the request to our own
+	 * parent, which is ACPI; only that gives the desired result.
+ */
+ return (BUS_READ_IVAR(device_get_parent(dev), child, index, result));
+}
+
+static device_method_t dpaa2_mc_acpi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, dpaa2_mc_acpi_probe),
+ DEVMETHOD(device_attach, dpaa2_mc_acpi_attach),
+ DEVMETHOD(device_detach, dpaa2_mc_detach),
+
+ /* Bus interface */
+ DEVMETHOD(bus_alloc_resource, dpaa2_mc_alloc_resource),
+ DEVMETHOD(bus_adjust_resource, dpaa2_mc_adjust_resource),
+ DEVMETHOD(bus_release_resource, dpaa2_mc_release_resource),
+ DEVMETHOD(bus_activate_resource, dpaa2_mc_activate_resource),
+ DEVMETHOD(bus_deactivate_resource, dpaa2_mc_deactivate_resource),
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
+ /* Pseudo-PCIB interface */
+ DEVMETHOD(pcib_alloc_msi, dpaa2_mc_alloc_msi),
+ DEVMETHOD(pcib_release_msi, dpaa2_mc_release_msi),
+ DEVMETHOD(pcib_map_msi, dpaa2_mc_map_msi),
+ DEVMETHOD(pcib_get_id, dpaa2_mc_get_id),
+
+ /* DPAA2 MC bus interface */
+ DEVMETHOD(dpaa2_mc_manage_dev, dpaa2_mc_manage_dev),
+ DEVMETHOD(dpaa2_mc_get_free_dev,dpaa2_mc_get_free_dev),
+ DEVMETHOD(dpaa2_mc_get_dev, dpaa2_mc_get_dev),
+ DEVMETHOD(dpaa2_mc_get_shared_dev, dpaa2_mc_get_shared_dev),
+ DEVMETHOD(dpaa2_mc_reserve_dev, dpaa2_mc_reserve_dev),
+ DEVMETHOD(dpaa2_mc_release_dev, dpaa2_mc_release_dev),
+ DEVMETHOD(dpaa2_mc_get_phy_dev, dpaa2_mc_acpi_get_phy_dev),
+
+	/* ACPI compat layer. */
+ DEVMETHOD(bus_read_ivar, dpaa2_mc_acpi_read_ivar),
+ DEVMETHOD(bus_get_property, dpaa2_mc_acpi_get_property),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(dpaa2_mc, dpaa2_mc_acpi_driver, dpaa2_mc_acpi_methods,
+ sizeof(struct dpaa2_mc_softc), dpaa2_mc_driver);
+
+/* Make sure miibus gets processed first. */
+DRIVER_MODULE_ORDERED(dpaa2_mc, acpi, dpaa2_mc_acpi_driver, NULL, NULL,
+ SI_ORDER_ANY);
+MODULE_DEPEND(dpaa2_mc, memac_mdio_acpi, 1, 1, 1);
diff --git a/sys/dev/dpaa2/dpaa2_mc_fdt.c b/sys/dev/dpaa2/dpaa2_mc_fdt.c
new file mode 100644
index 000000000000..ef3ff14c2dd6
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_mc_fdt.c
@@ -0,0 +1,399 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ * Copyright © 2022 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * The DPAA2 Management Complex (MC) Bus Driver (FDT-based).
+ *
+ * MC is a hardware resource manager which can be found in several NXP
+ * SoCs (LX2160A, for example) and provides an access to the specialized
+ * hardware objects used in network-oriented packet processing applications.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/fdt/simplebus.h>
+
+#include "pcib_if.h"
+#include "pci_if.h"
+#include "ofw_bus_if.h"
+
+#include "dpaa2_mcp.h"
+#include "dpaa2_mc.h"
+#include "dpaa2_mc_if.h"
+
+struct dpaa2_mac_fdt_softc {
+ uint32_t reg;
+ phandle_t sfp;
+ phandle_t pcs_handle;
+ phandle_t phy_handle;
+ char managed[64];
+ char phy_conn_type[64];
+};
+
+#if 0
+ ethernet@1 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x1>;
+ sfp = <0x14>;
+ pcs-handle = <0x15>;
+ phy-connection-type = "10gbase-r";
+ managed = "in-band-status";
+ };
+ ethernet@3 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x3>;
+ phy-handle = <0x18>;
+ phy-connection-type = "qsgmii";
+ managed = "in-band-status";
+ pcs-handle = <0x19>;
+ };
+#endif
+
+static int
+dpaa2_mac_dev_probe(device_t dev)
+{
+ phandle_t node;
+ uint64_t reg;
+ ssize_t s;
+
+ node = ofw_bus_get_node(dev);
+ if (!ofw_bus_node_is_compatible(node, "fsl,qoriq-mc-dpmac")) {
+ device_printf(dev, "'%s' not fsl,qoriq-mc-dpmac compatible\n",
+ ofw_bus_get_name(dev));
+ return (ENXIO);
+ }
+
+ s = device_get_property(dev, "reg", &reg, sizeof(reg),
+ DEVICE_PROP_UINT32);
+ if (s == -1) {
+ device_printf(dev, "%s: '%s' has no 'reg' property, s %zd\n",
+ __func__, ofw_bus_get_name(dev), s);
+ return (ENXIO);
+ }
+
+ device_set_desc(dev, "DPAA2 MAC DEV");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+dpaa2_mac_fdt_attach(device_t dev)
+{
+ struct dpaa2_mac_fdt_softc *sc;
+ phandle_t node;
+ ssize_t s;
+
+ sc = device_get_softc(dev);
+ node = ofw_bus_get_node(dev);
+
+ s = device_get_property(dev, "reg", &sc->reg, sizeof(sc->reg),
+ DEVICE_PROP_UINT32);
+ if (s == -1) {
+ device_printf(dev, "Cannot find 'reg' property: %zd\n", s);
+ return (ENXIO);
+ }
+
+ s = device_get_property(dev, "managed", sc->managed,
+ sizeof(sc->managed), DEVICE_PROP_ANY);
+ s = device_get_property(dev, "phy-connection-type", sc->phy_conn_type,
+ sizeof(sc->phy_conn_type), DEVICE_PROP_ANY);
+ s = device_get_property(dev, "pcs-handle", &sc->pcs_handle,
+ sizeof(sc->pcs_handle), DEVICE_PROP_HANDLE);
+
+ /* 'sfp' and 'phy-handle' are optional but we need one or the other. */
+ s = device_get_property(dev, "sfp", &sc->sfp, sizeof(sc->sfp),
+ DEVICE_PROP_HANDLE);
+ s = device_get_property(dev, "phy-handle", &sc->phy_handle,
+ sizeof(sc->phy_handle), DEVICE_PROP_HANDLE);
+
+ if (bootverbose)
+ device_printf(dev, "node %#x '%s': reg %#x sfp %#x pcs-handle "
+ "%#x phy-handle %#x managed '%s' phy-conn-type '%s'\n",
+ node, ofw_bus_get_name(dev),
+ sc->reg, sc->sfp, sc->pcs_handle, sc->phy_handle,
+ sc->managed, sc->phy_conn_type);
+
+ return (0);
+}
+
+static bool
+dpaa2_mac_fdt_match_id(device_t dev, uint32_t id)
+{
+ struct dpaa2_mac_fdt_softc *sc;
+
+ if (dev == NULL)
+ return (false);
+
+ sc = device_get_softc(dev);
+ if (sc->reg == id)
+ return (true);
+
+ return (false);
+}
+
+static device_t
+dpaa2_mac_fdt_get_phy_dev(device_t dev)
+{
+ struct dpaa2_mac_fdt_softc *sc;
+
+ if (dev == NULL)
+ return (NULL);
+
+ sc = device_get_softc(dev);
+ if (sc->phy_handle == 0 && sc->sfp == 0)
+ return (NULL);
+
+#ifdef __not_yet__ /* No sff,sfp support yet. */
+ if (sc->sfp != 0) {
+ device_t xdev;
+
+ xdev = OF_device_from_xref(OF_xref_from_node(sc->sfp));
+ if (xdev != NULL)
+ return (xdev);
+ }
+#endif
+ return (OF_device_from_xref(OF_xref_from_node(sc->phy_handle)));
+}
+
+static device_method_t dpaa2_mac_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, dpaa2_mac_dev_probe),
+ DEVMETHOD(device_attach, dpaa2_mac_fdt_attach),
+ DEVMETHOD(device_detach, bus_generic_detach),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(dpaa2_mac_fdt, dpaa2_mac_fdt_driver, dpaa2_mac_fdt_methods,
+ sizeof(struct dpaa2_mac_fdt_softc));
+DRIVER_MODULE(dpaa2_mac_fdt, dpaa2_mc, dpaa2_mac_fdt_driver, 0, 0);
+MODULE_DEPEND(dpaa2_mac_fdt, memac_mdio_fdt, 1, 1, 1);
+
+/*
+ * Device interface.
+ */
+
+static int
+dpaa2_mc_fdt_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_is_compatible(dev, "fsl,qoriq-mc"))
+ return (ENXIO);
+
+ device_set_desc(dev, "DPAA2 Management Complex");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+dpaa2_mc_fdt_probe_child(device_t bus, phandle_t child)
+{
+ device_t childdev;
+
+	/* Make sure we do not already have a device. */
+ childdev = ofw_bus_find_child_device_by_phandle(bus, child);
+ if (childdev != NULL)
+ return (0);
+
+ childdev = simplebus_add_device(bus, child, 0, "dpaa2_mac_fdt", -1,
+ NULL);
+ if (childdev == NULL)
+ return (ENXIO);
+
+ return (device_probe_and_attach(childdev));
+}
+
+static int
+dpaa2_mc_fdt_attach(device_t dev)
+{
+ struct dpaa2_mc_softc *sc;
+ phandle_t node;
+ phandle_t child;
+
+ sc = device_get_softc(dev);
+ sc->acpi_based = false;
+ sc->ofw_node = ofw_bus_get_node(dev);
+
+	bus_generic_probe(dev);
+	bus_enumerate_hinted_children(dev);
+
+	/*
+	 * Attach the children represented in the device tree
+	 * (fsl-mc -> dpmacs).
+	 */
+	node = OF_child(sc->ofw_node);
+	simplebus_init(dev, node);
+
+ /* Attach the dpmac children represented in the device tree. */
+ child = ofw_bus_find_compatible(node, "fsl,qoriq-mc-dpmac");
+ for (; child > 0; child = OF_peer(child)) {
+ if (!ofw_bus_node_is_compatible(child, "fsl,qoriq-mc-dpmac"))
+ continue;
+ if (!OF_hasprop(child, "reg"))
+ continue;
+ if (!OF_hasprop(child, "pcs-handle"))
+ continue;
+ if (dpaa2_mc_fdt_probe_child(dev, child) != 0)
+ continue;
+ }
+
+ return (dpaa2_mc_attach(dev));
+}
+
+/*
+ * FDT compat layer.
+ */
+static device_t
+dpaa2_mc_fdt_find_dpaa2_mac_dev(device_t dev, uint32_t id)
+{
+ int devcount, error, i, len;
+ device_t *devlist, mdev;
+ const char *mdevname;
+
+ error = device_get_children(dev, &devlist, &devcount);
+ if (error != 0)
+ return (NULL);
+
+ for (i = 0; i < devcount; i++) {
+ mdev = devlist[i];
+ mdevname = device_get_name(mdev);
+ if (mdevname == NULL)
+ continue;
+ len = strlen(mdevname);
+ if (strncmp("dpaa2_mac_fdt", mdevname, len) != 0)
+ continue;
+ if (!device_is_attached(mdev))
+ continue;
+
+ if (dpaa2_mac_fdt_match_id(mdev, id))
+ return (mdev);
+ }
+
+ return (NULL);
+}
+
+static int
+dpaa2_mc_fdt_get_phy_dev(device_t dev, device_t *phy_dev, uint32_t id)
+{
+ device_t mdev, pdev;
+
+ mdev = dpaa2_mc_fdt_find_dpaa2_mac_dev(dev, id);
+ if (mdev == NULL) {
+ device_printf(dev, "%s: error finding dpmac device with id=%u\n",
+ __func__, id);
+ return (ENXIO);
+ }
+
+ pdev = dpaa2_mac_fdt_get_phy_dev(mdev);
+ if (pdev == NULL) {
+		device_printf(dev, "%s: error getting MDIO device for dpmac %s "
+ "(id=%u)\n", __func__, device_get_nameunit(mdev), id);
+ return (ENXIO);
+ }
+
+ if (phy_dev != NULL)
+ *phy_dev = pdev;
+
+ if (bootverbose)
+ device_printf(dev, "dpmac_id %u mdev %p (%s) pdev %p (%s)\n",
+ id, mdev, device_get_nameunit(mdev),
+ pdev, device_get_nameunit(pdev));
+
+ return (0);
+}
+
+static const struct ofw_bus_devinfo *
+dpaa2_mc_simplebus_get_devinfo(device_t bus, device_t child)
+{
+
+ return (OFW_BUS_GET_DEVINFO(device_get_parent(bus), child));
+}
+
+static device_method_t dpaa2_mc_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, dpaa2_mc_fdt_probe),
+ DEVMETHOD(device_attach, dpaa2_mc_fdt_attach),
+ DEVMETHOD(device_detach, dpaa2_mc_detach),
+
+ /* Bus interface */
+ DEVMETHOD(bus_alloc_resource, dpaa2_mc_alloc_resource),
+ DEVMETHOD(bus_adjust_resource, dpaa2_mc_adjust_resource),
+ DEVMETHOD(bus_release_resource, dpaa2_mc_release_resource),
+ DEVMETHOD(bus_activate_resource, dpaa2_mc_activate_resource),
+ DEVMETHOD(bus_deactivate_resource, dpaa2_mc_deactivate_resource),
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
+ /* Pseudo-PCIB interface */
+ DEVMETHOD(pcib_alloc_msi, dpaa2_mc_alloc_msi),
+ DEVMETHOD(pcib_release_msi, dpaa2_mc_release_msi),
+ DEVMETHOD(pcib_map_msi, dpaa2_mc_map_msi),
+ DEVMETHOD(pcib_get_id, dpaa2_mc_get_id),
+
+ /* DPAA2 MC bus interface */
+ DEVMETHOD(dpaa2_mc_manage_dev, dpaa2_mc_manage_dev),
+ DEVMETHOD(dpaa2_mc_get_free_dev,dpaa2_mc_get_free_dev),
+ DEVMETHOD(dpaa2_mc_get_dev, dpaa2_mc_get_dev),
+ DEVMETHOD(dpaa2_mc_get_shared_dev, dpaa2_mc_get_shared_dev),
+ DEVMETHOD(dpaa2_mc_reserve_dev, dpaa2_mc_reserve_dev),
+ DEVMETHOD(dpaa2_mc_release_dev, dpaa2_mc_release_dev),
+ DEVMETHOD(dpaa2_mc_get_phy_dev, dpaa2_mc_fdt_get_phy_dev),
+
+ /* OFW/simplebus */
+ DEVMETHOD(ofw_bus_get_devinfo, dpaa2_mc_simplebus_get_devinfo),
+ DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat),
+ DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model),
+ DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name),
+ DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node),
+ DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(dpaa2_mc, dpaa2_mc_fdt_driver, dpaa2_mc_fdt_methods,
+ sizeof(struct dpaa2_mc_softc), dpaa2_mc_driver);
+
+DRIVER_MODULE(dpaa2_mc, simplebus, dpaa2_mc_fdt_driver, 0, 0);
diff --git a/sys/dev/dpaa2/dpaa2_mc_if.m b/sys/dev/dpaa2/dpaa2_mc_if.m
new file mode 100644
index 000000000000..f81d96b5b32f
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_mc_if.m
@@ -0,0 +1,152 @@
+#-
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright © 2021-2022 Dmitry Salychev
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+
+#include <machine/bus.h>
+#include <dev/dpaa2/dpaa2_mc.h>
+
+/**
+ * @brief Interface of the DPAA2 Management Complex (MC) bus driver.
+ *
+ * It helps to manipulate DPAA2-specific resources (DPIOs, DPBPs, etc.)
+ */
+INTERFACE dpaa2_mc;
+
+#
+# Default implementation of the commands.
+#
+CODE {
+ static int
+ bypass_manage_dev(device_t dev, device_t dpaa2_dev, uint32_t flags)
+ {
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_MC_MANAGE_DEV(device_get_parent(dev),
+ dpaa2_dev, flags));
+ return (ENXIO);
+ }
+
+ static int
+ bypass_get_free_dev(device_t dev, device_t *dpaa2_dev,
+ enum dpaa2_dev_type devtype)
+ {
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_MC_GET_FREE_DEV(device_get_parent(dev),
+ dpaa2_dev, devtype));
+ return (ENXIO);
+ }
+
+ static int
+ bypass_get_dev(device_t dev, device_t *dpaa2_dev,
+ enum dpaa2_dev_type devtype, uint32_t obj_id)
+ {
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_MC_GET_DEV(device_get_parent(dev),
+ dpaa2_dev, devtype, obj_id));
+ return (ENXIO);
+ }
+
+ static int
+ bypass_get_shared_dev(device_t dev, device_t *dpaa2_dev,
+ enum dpaa2_dev_type devtype)
+ {
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_MC_GET_SHARED_DEV(device_get_parent(dev),
+ dpaa2_dev, devtype));
+ return (ENXIO);
+ }
+
+ static int
+ bypass_reserve_dev(device_t dev, device_t dpaa2_dev,
+ enum dpaa2_dev_type devtype)
+ {
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_MC_RESERVE_DEV(device_get_parent(dev),
+ dpaa2_dev, devtype));
+ return (ENXIO);
+ }
+
+ static int
+ bypass_release_dev(device_t dev, device_t dpaa2_dev,
+ enum dpaa2_dev_type devtype)
+ {
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_MC_RELEASE_DEV(device_get_parent(dev),
+ dpaa2_dev, devtype));
+ return (ENXIO);
+ }
+
+ static int
+ bypass_get_phy_dev(device_t dev, device_t *phy_dev, uint32_t id)
+ {
+ if (device_get_parent(dev) != NULL)
+ return (DPAA2_MC_GET_PHY_DEV(device_get_parent(dev),
+ phy_dev, id));
+ return (ENXIO);
+ }
+}
+
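+#
+# Example: a driver anywhere below the MC may simply call, e.g.,
+# DPAA2_MC_GET_SHARED_DEV(dev, &iodev, DPAA2_DEV_IO); the bypass stubs
+# above forward the request up the device tree until a bus driver which
+# implements the method (the MC) is reached, or return ENXIO if none does.
+#
+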
+METHOD int manage_dev {
+ device_t dev;
+ device_t dpaa2_dev;
+ uint32_t flags;
+} DEFAULT bypass_manage_dev;
+
+METHOD int get_free_dev {
+ device_t dev;
+ device_t *dpaa2_dev;
+ enum dpaa2_dev_type devtype;
+} DEFAULT bypass_get_free_dev;
+
+METHOD int get_dev {
+ device_t dev;
+ device_t *dpaa2_dev;
+ enum dpaa2_dev_type devtype;
+ uint32_t obj_id;
+} DEFAULT bypass_get_dev;
+
+METHOD int get_shared_dev {
+ device_t dev;
+ device_t *dpaa2_dev;
+ enum dpaa2_dev_type devtype;
+} DEFAULT bypass_get_shared_dev;
+
+METHOD int reserve_dev {
+ device_t dev;
+ device_t dpaa2_dev;
+ enum dpaa2_dev_type devtype;
+} DEFAULT bypass_reserve_dev;
+
+METHOD int release_dev {
+ device_t dev;
+ device_t dpaa2_dev;
+ enum dpaa2_dev_type devtype;
+} DEFAULT bypass_release_dev;
+
+METHOD int get_phy_dev {
+ device_t dev;
+ device_t *phy_dev;
+ uint32_t id;
+} DEFAULT bypass_get_phy_dev;
diff --git a/sys/dev/dpaa2/dpaa2_mcp.c b/sys/dev/dpaa2/dpaa2_mcp.c
new file mode 100644
index 000000000000..f41d9a7d21b0
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_mcp.c
@@ -0,0 +1,318 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * DPAA2 MC command portal and helper routines.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/condvar.h>
+#include <sys/lock.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include "pcib_if.h"
+#include "pci_if.h"
+
+#include "dpaa2_mcp.h"
+#include "dpaa2_mc.h"
+#include "dpaa2_cmd_if.h"
+
+MALLOC_DEFINE(M_DPAA2_MCP, "dpaa2_mcp", "DPAA2 Management Complex Portal");
+
+static struct resource_spec dpaa2_mcp_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED },
+ RESOURCE_SPEC_END
+};
+
+int
+dpaa2_mcp_init_portal(struct dpaa2_mcp **mcp, struct resource *res,
+ struct resource_map *map, uint16_t flags)
+{
+ const int mflags = flags & DPAA2_PORTAL_NOWAIT_ALLOC
+ ? (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO);
+ struct dpaa2_mcp *p;
+
+ if (!mcp || !res || !map)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ p = malloc(sizeof(struct dpaa2_mcp), M_DPAA2_MCP, mflags);
+ if (p == NULL)
+ return (DPAA2_CMD_STAT_NO_MEMORY);
+
+ mtx_init(&p->lock, "mcp_sleep_lock", NULL, MTX_DEF);
+
+ p->res = res;
+ p->map = map;
+ p->flags = flags;
+ p->rc_api_major = 0; /* DPRC API version to be cached later. */
+ p->rc_api_minor = 0;
+
+ *mcp = p;
+
+ return (0);
+}
+
+void
+dpaa2_mcp_free_portal(struct dpaa2_mcp *mcp)
+{
+ uint16_t flags;
+
+ KASSERT(mcp != NULL, ("%s: mcp is NULL", __func__));
+
+ DPAA2_MCP_LOCK(mcp, &flags);
+ mcp->flags |= DPAA2_PORTAL_DESTROYED;
+ DPAA2_MCP_UNLOCK(mcp);
+
+ /* Let threads stop using this portal. */
+ DELAY(DPAA2_PORTAL_TIMEOUT);
+
+ mtx_destroy(&mcp->lock);
+ free(mcp, M_DPAA2_MCP);
+}
+
+int
+dpaa2_mcp_init_command(struct dpaa2_cmd **cmd, uint16_t flags)
+{
+ const int mflags = flags & DPAA2_CMD_NOWAIT_ALLOC
+ ? (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO);
+ struct dpaa2_cmd *c;
+ struct dpaa2_cmd_header *hdr;
+
+ if (!cmd)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ c = malloc(sizeof(struct dpaa2_cmd), M_DPAA2_MCP, mflags);
+ if (!c)
+ return (DPAA2_CMD_STAT_NO_MEMORY);
+
+ hdr = (struct dpaa2_cmd_header *) &c->header;
+ hdr->srcid = 0;
+ hdr->status = DPAA2_CMD_STAT_OK;
+ hdr->token = 0;
+ hdr->cmdid = 0;
+ hdr->flags_hw = DPAA2_CMD_DEF;
+ hdr->flags_sw = DPAA2_CMD_DEF;
+ if (flags & DPAA2_CMD_HIGH_PRIO)
+ hdr->flags_hw |= DPAA2_HW_FLAG_HIGH_PRIO;
+ if (flags & DPAA2_CMD_INTR_DIS)
+ hdr->flags_sw |= DPAA2_SW_FLAG_INTR_DIS;
+ for (uint32_t i = 0; i < DPAA2_CMD_PARAMS_N; i++)
+ c->params[i] = 0;
+ *cmd = c;
+
+ return (0);
+}
+
+void
+dpaa2_mcp_free_command(struct dpaa2_cmd *cmd)
+{
+ if (cmd != NULL)
+ free(cmd, M_DPAA2_MCP);
+}
+
+struct dpaa2_cmd *
+dpaa2_mcp_tk(struct dpaa2_cmd *cmd, uint16_t token)
+{
+ struct dpaa2_cmd_header *hdr;
+ if (cmd != NULL) {
+ hdr = (struct dpaa2_cmd_header *) &cmd->header;
+ hdr->token = token;
+ }
+ return (cmd);
+}
+
+struct dpaa2_cmd *
+dpaa2_mcp_f(struct dpaa2_cmd *cmd, uint16_t flags)
+{
+ struct dpaa2_cmd_header *hdr;
+ if (cmd) {
+ hdr = (struct dpaa2_cmd_header *) &cmd->header;
+ hdr->flags_hw = DPAA2_CMD_DEF;
+ hdr->flags_sw = DPAA2_CMD_DEF;
+
+ if (flags & DPAA2_CMD_HIGH_PRIO)
+ hdr->flags_hw |= DPAA2_HW_FLAG_HIGH_PRIO;
+ if (flags & DPAA2_CMD_INTR_DIS)
+ hdr->flags_sw |= DPAA2_SW_FLAG_INTR_DIS;
+ }
+ return (cmd);
+}
+
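+/*
+ * The helpers above return the command itself, so they can be chained with
+ * a command execution, e.g. (a sketch; "cmd" and "rc_token" assumed):
+ *
+ *	error = DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(cmd, rc_token));
+ */
+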
+static int
+dpaa2_mcp_probe(device_t dev)
+{
+ /* DPMCP device will be added by the parent resource container. */
+ device_set_desc(dev, "DPAA2 MC portal");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+dpaa2_mcp_detach(device_t dev)
+{
+ return (0);
+}
+
+static int
+dpaa2_mcp_attach(device_t dev)
+{
+ device_t pdev = device_get_parent(dev);
+ device_t child = dev;
+ struct dpaa2_mcp_softc *sc = device_get_softc(dev);
+ struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
+ struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
+ struct dpaa2_cmd *cmd;
+ struct dpaa2_mcp *portal;
+ struct resource_map_request req;
+ uint16_t rc_token, mcp_token;
+ int error;
+
+ sc->dev = dev;
+
+ error = bus_alloc_resources(sc->dev, dpaa2_mcp_spec, sc->res);
+ if (error) {
+ device_printf(dev, "%s: failed to allocate resources\n",
+ __func__);
+ goto err_exit;
+ }
+
+ /* At least 64 bytes of the command portal should be available. */
+ if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) {
+ device_printf(dev, "%s: MC portal memory region too small: "
+ "%jd\n", __func__, rman_get_size(sc->res[0]));
+ goto err_exit;
+ }
+
+ /* Map MC portal memory resource. */
+ resource_init_map_request(&req);
+ req.memattr = VM_MEMATTR_DEVICE;
+ error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0], &req,
+ &sc->map[0]);
+ if (error) {
+ device_printf(dev, "%s: failed to map MC portal memory\n",
+ __func__);
+ goto err_exit;
+ }
+
+ /* Initialize portal to send commands to MC. */
+ error = dpaa2_mcp_init_portal(&portal, sc->res[0], &sc->map[0],
+ DPAA2_PORTAL_DEF);
+ if (error) {
+ device_printf(dev, "%s: failed to initialize dpaa2_mcp: "
+ "error=%d\n", __func__, error);
+ goto err_exit;
+ }
+
+ /* Allocate a command to send to MC hardware. */
+ error = dpaa2_mcp_init_command(&cmd, DPAA2_CMD_DEF);
+ if (error) {
+ device_printf(dev, "%s: failed to allocate dpaa2_cmd: "
+ "error=%d\n", __func__, error);
+ goto err_exit;
+ }
+
+ /* Open resource container and DPMCP object. */
+ error = DPAA2_CMD_RC_OPEN(dev, child, cmd, rcinfo->id, &rc_token);
+ if (error) {
+ device_printf(dev, "%s: failed to open DPRC: error=%d\n",
+ __func__, error);
+ goto err_free_cmd;
+ }
+ error = DPAA2_CMD_MCP_OPEN(dev, child, cmd, dinfo->id, &mcp_token);
+ if (error) {
+ device_printf(dev, "%s: failed to open DPMCP: id=%d, error=%d\n",
+ __func__, dinfo->id, error);
+ goto err_close_rc;
+ }
+
+ /* Prepare DPMCP object. */
+ error = DPAA2_CMD_MCP_RESET(dev, child, cmd);
+ if (error) {
+ device_printf(dev, "%s: failed to reset DPMCP: id=%d, "
+ "error=%d\n", __func__, dinfo->id, error);
+ goto err_close_mcp;
+ }
+
+ /* Close the DPMCP object and the resource container. */
+ error = DPAA2_CMD_MCP_CLOSE(dev, child, cmd);
+ if (error) {
+ device_printf(dev, "%s: failed to close DPMCP: id=%d, "
+ "error=%d\n", __func__, dinfo->id, error);
+ goto err_close_rc;
+ }
+ error = DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(cmd, rc_token));
+ if (error) {
+ device_printf(dev, "%s: failed to close DPRC: error=%d\n",
+ __func__, error);
+ goto err_free_cmd;
+ }
+
+ dpaa2_mcp_free_command(cmd);
+ dinfo->portal = portal;
+
+ return (0);
+
+err_close_mcp:
+ DPAA2_CMD_MCP_CLOSE(dev, child, dpaa2_mcp_tk(cmd, mcp_token));
+err_close_rc:
+ DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(cmd, rc_token));
+err_free_cmd:
+ dpaa2_mcp_free_command(cmd);
+err_exit:
+ dpaa2_mcp_detach(dev);
+ return (ENXIO);
+}
+
+static device_method_t dpaa2_mcp_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, dpaa2_mcp_probe),
+ DEVMETHOD(device_attach, dpaa2_mcp_attach),
+ DEVMETHOD(device_detach, dpaa2_mcp_detach),
+
+ DEVMETHOD_END
+};
+
+static driver_t dpaa2_mcp_driver = {
+ "dpaa2_mcp",
+ dpaa2_mcp_methods,
+ sizeof(struct dpaa2_mcp_softc),
+};
+
+DRIVER_MODULE(dpaa2_mcp, dpaa2_rc, dpaa2_mcp_driver, 0, 0);
diff --git a/sys/dev/dpaa2/dpaa2_mcp.h b/sys/dev/dpaa2/dpaa2_mcp.h
new file mode 100644
index 000000000000..55052ca7afb2
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_mcp.h
@@ -0,0 +1,449 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_MCP_H
+#define _DPAA2_MCP_H
+
+#include <sys/rman.h>
+#include <sys/condvar.h>
+#include <sys/mutex.h>
+
+#include "dpaa2_types.h"
+
+/*
+ * DPAA2 MC command interface helper routines.
+ */
+
+#define DPAA2_PORTAL_TIMEOUT 100000 /* us */
+#define DPAA2_MCP_MEM_WIDTH 0x40 /* Minimal size of the MC portal. */
+#define DPAA2_MCP_MAX_RESOURCES 1 /* resources per DPMCP: 1 SYS_MEM */
+
+/*
+ * Portal flags.
+ *
+ * TODO: Use the same flags for both MC and software portals.
+ */
+#define DPAA2_PORTAL_DEF 0x0u
+#define DPAA2_PORTAL_NOWAIT_ALLOC 0x2u /* Do not sleep during init */
+#define DPAA2_PORTAL_LOCKED 0x4000u /* Wait till portal's unlocked */
+#define DPAA2_PORTAL_DESTROYED 0x8000u /* Terminate any operations */
+
+/* Command flags. */
+#define DPAA2_CMD_DEF 0x0u
+#define DPAA2_CMD_HIGH_PRIO 0x80u /* High priority command */
+#define DPAA2_CMD_INTR_DIS 0x100u /* Disable cmd finished intr */
+#define DPAA2_CMD_NOWAIT_ALLOC 0x8000u /* Do not sleep during init */
+
+/* DPAA2 command return codes. */
+#define DPAA2_CMD_STAT_OK 0x0 /* Set by MC on success */
+#define DPAA2_CMD_STAT_READY 0x1 /* Ready to be processed */
+#define DPAA2_CMD_STAT_AUTH_ERR 0x3 /* Illegal object-portal-icid */
+#define DPAA2_CMD_STAT_NO_PRIVILEGE 0x4 /* No privilege */
+#define DPAA2_CMD_STAT_DMA_ERR 0x5 /* DMA or I/O error */
+#define DPAA2_CMD_STAT_CONFIG_ERR 0x6 /* Invalid/conflicting params */
+#define DPAA2_CMD_STAT_TIMEOUT 0x7 /* Command timed out */
+#define DPAA2_CMD_STAT_NO_RESOURCE 0x8 /* No DPAA2 resources */
+#define DPAA2_CMD_STAT_NO_MEMORY 0x9 /* No memory available */
+#define DPAA2_CMD_STAT_BUSY 0xA /* Device is busy */
+#define DPAA2_CMD_STAT_UNSUPPORTED_OP 0xB /* Unsupported operation */
+#define DPAA2_CMD_STAT_INVALID_STATE 0xC /* Invalid state */
+/* Driver-specific return codes. */
+#define DPAA2_CMD_STAT_UNKNOWN_OBJ 0xFD /* Unknown DPAA2 object. */
+#define DPAA2_CMD_STAT_EINVAL 0xFE /* Invalid argument */
+#define DPAA2_CMD_STAT_ERR 0xFF /* General error */
+
+/* Object's memory region flags. */
+#define DPAA2_RC_REG_CACHEABLE 0x1 /* Cacheable memory mapping */
+
+#define DPAA2_HW_FLAG_HIGH_PRIO 0x80u
+#define DPAA2_SW_FLAG_INTR_DIS 0x01u
+
+#define DPAA2_CMD_PARAMS_N 7u
+#define DPAA2_LABEL_SZ 16
+
+/* ------------------------- MNG command IDs -------------------------------- */
+#define CMD_MNG_BASE_VERSION 1
+#define CMD_MNG_ID_OFFSET 4
+
+#define CMD_MNG(id) (((id) << CMD_MNG_ID_OFFSET) | CMD_MNG_BASE_VERSION)
+
+#define CMDID_MNG_GET_VER CMD_MNG(0x831)
+#define CMDID_MNG_GET_SOC_VER CMD_MNG(0x832)
+#define CMDID_MNG_GET_CONT_ID CMD_MNG(0x830)
+
+/* ------------------------- DPRC command IDs ------------------------------- */
+#define CMD_RC_BASE_VERSION 1
+#define CMD_RC_2ND_VERSION 2
+#define CMD_RC_3RD_VERSION 3
+#define CMD_RC_ID_OFFSET 4
+
+#define CMD_RC(id) (((id) << CMD_RC_ID_OFFSET) | CMD_RC_BASE_VERSION)
+#define CMD_RC_V2(id) (((id) << CMD_RC_ID_OFFSET) | CMD_RC_2ND_VERSION)
+#define CMD_RC_V3(id) (((id) << CMD_RC_ID_OFFSET) | CMD_RC_3RD_VERSION)
+
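+/*
+ * For example, CMDID_RC_OPEN below expands to (0x805 << 4) | 1 = 0x8051:
+ * the low 4 bits carry the command (API) version and the remaining bits
+ * the command ID itself.
+ */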
+#define CMDID_RC_OPEN CMD_RC(0x805)
+#define CMDID_RC_CLOSE CMD_RC(0x800)
+#define CMDID_RC_GET_API_VERSION CMD_RC(0xA05)
+#define CMDID_RC_GET_ATTR CMD_RC(0x004)
+#define CMDID_RC_RESET_CONT CMD_RC(0x005)
+#define CMDID_RC_RESET_CONT_V2 CMD_RC_V2(0x005)
+#define CMDID_RC_SET_IRQ CMD_RC(0x010)
+#define CMDID_RC_SET_IRQ_ENABLE CMD_RC(0x012)
+#define CMDID_RC_SET_IRQ_MASK CMD_RC(0x014)
+#define CMDID_RC_GET_IRQ_STATUS CMD_RC(0x016)
+#define CMDID_RC_CLEAR_IRQ_STATUS CMD_RC(0x017)
+#define CMDID_RC_GET_CONT_ID CMD_RC(0x830)
+#define CMDID_RC_GET_OBJ_COUNT CMD_RC(0x159)
+#define CMDID_RC_GET_OBJ CMD_RC(0x15A)
+#define CMDID_RC_GET_OBJ_DESC CMD_RC(0x162)
+#define CMDID_RC_GET_OBJ_REG CMD_RC(0x15E)
+#define CMDID_RC_GET_OBJ_REG_V2 CMD_RC_V2(0x15E)
+#define CMDID_RC_GET_OBJ_REG_V3 CMD_RC_V3(0x15E)
+#define CMDID_RC_SET_OBJ_IRQ CMD_RC(0x15F)
+#define CMDID_RC_GET_CONN CMD_RC(0x16C)
+
+/* ------------------------- DPIO command IDs ------------------------------- */
+#define CMD_IO_BASE_VERSION 1
+#define CMD_IO_ID_OFFSET 4
+
+#define CMD_IO(id) (((id) << CMD_IO_ID_OFFSET) | CMD_IO_BASE_VERSION)
+
+#define CMDID_IO_OPEN CMD_IO(0x803)
+#define CMDID_IO_CLOSE CMD_IO(0x800)
+#define CMDID_IO_ENABLE CMD_IO(0x002)
+#define CMDID_IO_DISABLE CMD_IO(0x003)
+#define CMDID_IO_GET_ATTR CMD_IO(0x004)
+#define CMDID_IO_RESET CMD_IO(0x005)
+#define CMDID_IO_SET_IRQ_ENABLE CMD_IO(0x012)
+#define CMDID_IO_SET_IRQ_MASK CMD_IO(0x014)
+#define CMDID_IO_GET_IRQ_STATUS CMD_IO(0x016)
+#define CMDID_IO_ADD_STATIC_DQ_CHAN CMD_IO(0x122)
+
+/* ------------------------- DPNI command IDs ------------------------------- */
+#define CMD_NI_BASE_VERSION 1
+#define CMD_NI_2ND_VERSION 2
+#define CMD_NI_4TH_VERSION 4
+#define CMD_NI_ID_OFFSET 4
+
+#define CMD_NI(id) (((id) << CMD_NI_ID_OFFSET) | CMD_NI_BASE_VERSION)
+#define CMD_NI_V2(id) (((id) << CMD_NI_ID_OFFSET) | CMD_NI_2ND_VERSION)
+#define CMD_NI_V4(id) (((id) << CMD_NI_ID_OFFSET) | CMD_NI_4TH_VERSION)
+
+#define CMDID_NI_OPEN CMD_NI(0x801)
+#define CMDID_NI_CLOSE CMD_NI(0x800)
+#define CMDID_NI_ENABLE CMD_NI(0x002)
+#define CMDID_NI_DISABLE CMD_NI(0x003)
+#define CMDID_NI_GET_API_VER CMD_NI(0xA01)
+#define CMDID_NI_RESET CMD_NI(0x005)
+#define CMDID_NI_GET_ATTR CMD_NI(0x004)
+#define CMDID_NI_SET_BUF_LAYOUT CMD_NI(0x265)
+#define CMDID_NI_GET_TX_DATA_OFF CMD_NI(0x212)
+#define CMDID_NI_GET_PORT_MAC_ADDR CMD_NI(0x263)
+#define CMDID_NI_SET_PRIM_MAC_ADDR CMD_NI(0x224)
+#define CMDID_NI_GET_PRIM_MAC_ADDR CMD_NI(0x225)
+#define CMDID_NI_SET_LINK_CFG CMD_NI(0x21A)
+#define CMDID_NI_GET_LINK_CFG CMD_NI(0x278)
+#define CMDID_NI_GET_LINK_STATE CMD_NI(0x215)
+#define CMDID_NI_SET_QOS_TABLE CMD_NI(0x240)
+#define CMDID_NI_CLEAR_QOS_TABLE CMD_NI(0x243)
+#define CMDID_NI_SET_POOLS CMD_NI(0x200)
+#define CMDID_NI_SET_ERR_BEHAVIOR CMD_NI(0x20B)
+#define CMDID_NI_GET_QUEUE CMD_NI(0x25F)
+#define CMDID_NI_SET_QUEUE CMD_NI(0x260)
+#define CMDID_NI_GET_QDID CMD_NI(0x210)
+#define CMDID_NI_ADD_MAC_ADDR CMD_NI(0x226)
+#define CMDID_NI_REMOVE_MAC_ADDR CMD_NI(0x227)
+#define CMDID_NI_CLEAR_MAC_FILTERS CMD_NI(0x228)
+#define CMDID_NI_SET_MFL CMD_NI(0x216)
+#define CMDID_NI_SET_OFFLOAD CMD_NI(0x26C)
+#define CMDID_NI_SET_IRQ_MASK CMD_NI(0x014)
+#define CMDID_NI_SET_IRQ_ENABLE CMD_NI(0x012)
+#define CMDID_NI_GET_IRQ_STATUS CMD_NI(0x016)
+#define CMDID_NI_SET_UNI_PROMISC CMD_NI(0x222)
+#define CMDID_NI_SET_MULTI_PROMISC CMD_NI(0x220)
+#define CMDID_NI_GET_STATISTICS CMD_NI(0x25D)
+#define CMDID_NI_SET_RX_TC_DIST CMD_NI(0x235)
+
+/* ------------------------- DPBP command IDs ------------------------------- */
+#define CMD_BP_BASE_VERSION 1
+#define CMD_BP_ID_OFFSET 4
+
+#define CMD_BP(id) (((id) << CMD_BP_ID_OFFSET) | CMD_BP_BASE_VERSION)
+
+#define CMDID_BP_OPEN CMD_BP(0x804)
+#define CMDID_BP_CLOSE CMD_BP(0x800)
+#define CMDID_BP_ENABLE CMD_BP(0x002)
+#define CMDID_BP_DISABLE CMD_BP(0x003)
+#define CMDID_BP_GET_ATTR CMD_BP(0x004)
+#define CMDID_BP_RESET CMD_BP(0x005)
+
+/* ------------------------- DPMAC command IDs ------------------------------ */
+#define CMD_MAC_BASE_VERSION 1
+#define CMD_MAC_2ND_VERSION 2
+#define CMD_MAC_ID_OFFSET 4
+
+#define CMD_MAC(id) (((id) << CMD_MAC_ID_OFFSET) | CMD_MAC_BASE_VERSION)
+#define CMD_MAC_V2(id) (((id) << CMD_MAC_ID_OFFSET) | CMD_MAC_2ND_VERSION)
+
+#define CMDID_MAC_OPEN CMD_MAC(0x80C)
+#define CMDID_MAC_CLOSE CMD_MAC(0x800)
+#define CMDID_MAC_RESET CMD_MAC(0x005)
+#define CMDID_MAC_MDIO_READ CMD_MAC(0x0C0)
+#define CMDID_MAC_MDIO_WRITE CMD_MAC(0x0C1)
+#define CMDID_MAC_GET_ADDR CMD_MAC(0x0C5)
+#define CMDID_MAC_GET_ATTR CMD_MAC(0x004)
+#define CMDID_MAC_SET_LINK_STATE CMD_MAC_V2(0x0C3)
+#define CMDID_MAC_SET_IRQ_MASK CMD_MAC(0x014)
+#define CMDID_MAC_SET_IRQ_ENABLE CMD_MAC(0x012)
+#define CMDID_MAC_GET_IRQ_STATUS CMD_MAC(0x016)
+
+/* ------------------------- DPCON command IDs ------------------------------ */
+#define CMD_CON_BASE_VERSION 1
+#define CMD_CON_ID_OFFSET 4
+
+#define CMD_CON(id) (((id) << CMD_CON_ID_OFFSET) | CMD_CON_BASE_VERSION)
+
+#define CMDID_CON_OPEN CMD_CON(0x808)
+#define CMDID_CON_CLOSE CMD_CON(0x800)
+#define CMDID_CON_ENABLE CMD_CON(0x002)
+#define CMDID_CON_DISABLE CMD_CON(0x003)
+#define CMDID_CON_GET_ATTR CMD_CON(0x004)
+#define CMDID_CON_RESET CMD_CON(0x005)
+#define CMDID_CON_SET_NOTIF CMD_CON(0x100)
+
+/* ------------------------- DPMCP command IDs ------------------------------ */
+#define CMD_MCP_BASE_VERSION 1
+#define CMD_MCP_2ND_VERSION 2
+#define CMD_MCP_ID_OFFSET 4
+
+#define CMD_MCP(id) (((id) << CMD_MCP_ID_OFFSET) | CMD_MCP_BASE_VERSION)
+#define CMD_MCP_V2(id) (((id) << CMD_MCP_ID_OFFSET) | CMD_MCP_2ND_VERSION)
+
+#define CMDID_MCP_CREATE CMD_MCP_V2(0x90B)
+#define CMDID_MCP_DESTROY CMD_MCP(0x98B)
+#define CMDID_MCP_OPEN CMD_MCP(0x80B)
+#define CMDID_MCP_CLOSE CMD_MCP(0x800)
+#define CMDID_MCP_RESET CMD_MCP(0x005)
+
+#define DPAA2_MCP_LOCK(__mcp, __flags) do { \
+ mtx_assert(&(__mcp)->lock, MA_NOTOWNED); \
+ mtx_lock(&(__mcp)->lock); \
+ *(__flags) = (__mcp)->flags; \
+ (__mcp)->flags |= DPAA2_PORTAL_LOCKED; \
+} while (0)
+
+#define DPAA2_MCP_UNLOCK(__mcp) do { \
+ mtx_assert(&(__mcp)->lock, MA_OWNED); \
+ (__mcp)->flags &= ~DPAA2_PORTAL_LOCKED; \
+ mtx_unlock(&(__mcp)->lock); \
+} while (0)
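+
+/*
+ * A minimal usage sketch of the macros above (assuming the DPAA2_PORTAL_*
+ * state flags defined earlier in this header):
+ *
+ *	uint16_t flags;
+ *
+ *	DPAA2_MCP_LOCK(mcp, &flags);
+ *	if (flags & DPAA2_PORTAL_DESTROYED) {
+ *		DPAA2_MCP_UNLOCK(mcp);
+ *		return (ENOENT);
+ *	}
+ *	... write a command and poll for its completion ...
+ *	DPAA2_MCP_UNLOCK(mcp);
+ */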
+
+enum dpaa2_rc_region_type {
+ DPAA2_RC_REG_MC_PORTAL,
+ DPAA2_RC_REG_QBMAN_PORTAL
+};
+
+/**
+ * @brief Helper object to interact with the MC portal.
+ *
+ * res: Unmapped portal's I/O memory.
+ * map: Mapped portal's I/O memory.
+ * lock: Lock to send a command to the portal and wait for the
+ * result.
+ * flags: Current state of the object.
+ * rc_api_major: Major version of the DPRC API.
+ * rc_api_minor: Minor version of the DPRC API.
+ */
+struct dpaa2_mcp {
+ struct resource *res;
+ struct resource_map *map;
+ struct mtx lock;
+ uint16_t flags;
+ uint16_t rc_api_major;
+ uint16_t rc_api_minor;
+};
+
+/**
+ * @brief Command object holds data to be written to the MC portal.
+ *
+ * header:	8 least significant bytes of the command written to the
+ *		MC portal.
+ * params: Parameters to pass together with the command to MC. Might keep
+ * command execution results.
+ *
+ * NOTE: 64 bytes.
+ */
+struct dpaa2_cmd {
+ uint64_t header;
+ uint64_t params[DPAA2_CMD_PARAMS_N];
+};
+
+/**
+ * @brief Helper object to access fields of the MC command header.
+ *
+ * srcid: The SoC architected source ID of the submitter. This field is
+ * reserved and cannot be written by the driver.
+ * flags_hw: Bits from 8 to 15 of the command header. Most of them are
+ * reserved at the moment.
+ * status: Command ready/status. This field is used as the handshake field
+ * between MC and the driver. MC reports command completion with
+ * success/error codes in this field.
+ * flags_sw:	Bits from 24 to 31 of the command header. Most of them are
+ *		reserved at the moment.
+ * token:	Authentication token of an opened DPAA2 object the command
+ *		is addressed to, as returned by the object's OPEN command.
+ * cmdid:	ID of the command to execute, combined with the command
+ *		version (see the CMD_*() macros above).
+ *
+ * NOTE: 8 bytes.
+ */
+struct dpaa2_cmd_header {
+ uint8_t srcid;
+ uint8_t flags_hw;
+ uint8_t status;
+ uint8_t flags_sw;
+ uint16_t token;
+ uint16_t cmdid;
+} __packed;
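+
+/*
+ * A sketch of how a command is composed before being written to the MC
+ * portal (assuming "token" was returned by a previous OPEN command):
+ *
+ *	struct dpaa2_cmd_header *hdr;
+ *
+ *	hdr = (struct dpaa2_cmd_header *)&cmd->header;
+ *	hdr->cmdid = CMDID_NI_ENABLE;
+ *	hdr->token = token;
+ */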
+
+/**
+ * @brief Information about DPAA2 object.
+ *
+ * id: ID of a logical object resource.
+ * vendor: Object vendor identifier.
+ * irq_count: Number of interrupts supported by the object.
+ * reg_count: Number of mappable regions supported by the object.
+ * state: Object state (combination of states).
+ * ver_major: Major version of the object.
+ * ver_minor: Minor version of the object.
+ * flags: Object attributes flags.
+ * type:	Type of the DPAA2 object (DPNI, DPIO, DPBP, etc.).
+ * label:	Object label (a null-terminated string).
+ */
+struct dpaa2_obj {
+ uint32_t id;
+ uint16_t vendor;
+ uint8_t irq_count;
+ uint8_t reg_count;
+ uint32_t state;
+ uint16_t ver_major;
+ uint16_t ver_minor;
+ uint16_t flags;
+ uint8_t label[DPAA2_LABEL_SZ];
+ enum dpaa2_dev_type type;
+};
+
+/**
+ * @brief Attributes of the DPRC object.
+ *
+ * cont_id: Container ID.
+ * portal_id: Container's portal ID.
+ * options: Container's options as set at container's creation.
+ * icid: Container's isolation context ID.
+ */
+struct dpaa2_rc_attr {
+ uint32_t cont_id;
+ uint32_t portal_id;
+ uint32_t options;
+ uint32_t icid;
+};
+
+/**
+ * @brief Description of the object's memory region.
+ *
+ * base_paddr: Region base physical address.
+ * base_offset: Region base offset.
+ * size: Region size (in bytes).
+ * flags: Region flags (cacheable, etc.)
+ * type: Type of a software portal this region belongs to.
+ */
+struct dpaa2_rc_obj_region {
+ uint64_t base_paddr;
+ uint64_t base_offset;
+ uint32_t size;
+ uint32_t flags;
+ enum dpaa2_rc_region_type type;
+};
+
+/**
+ * @brief DPAA2 endpoint descriptor.
+ *
+ * obj_id: Endpoint object ID.
+ * if_id:	Interface ID for endpoints with multiple interfaces
+ *		(DPSW, DPDMUX); 0 otherwise.
+ * type:	Endpoint object type.
+ */
+struct dpaa2_ep_desc {
+ uint32_t obj_id;
+ uint32_t if_id;
+ enum dpaa2_dev_type type;
+};
+
+/**
+ * @brief Configuration of the channel data availability notification (CDAN).
+ *
+ * qman_ctx: Context value provided with each CDAN message.
+ * dpio_id: DPIO object ID configured with a notification channel.
+ * prior: Priority selection within the DPIO channel; valid values
+ * are 0-7, depending on the number of priorities in that channel.
+ */
+struct dpaa2_con_notif_cfg {
+ uint64_t qman_ctx;
+ uint32_t dpio_id;
+ uint8_t prior;
+};
+
+/**
+ * @brief Attributes of the DPMCP object.
+ *
+ * id: DPMCP object ID.
+ * options: Options of the MC portal (disabled high-prio commands, etc.).
+ */
+struct dpaa2_mcp_attr {
+ uint32_t id;
+ uint32_t options;
+};
+
+/**
+ * @brief Software context for the DPAA2 MC portal.
+ */
+struct dpaa2_mcp_softc {
+ device_t dev;
+ struct dpaa2_mcp_attr attr;
+
+ struct resource *res[DPAA2_MCP_MAX_RESOURCES];
+ struct resource_map map[DPAA2_MCP_MAX_RESOURCES];
+};
+
+int dpaa2_mcp_init_portal(struct dpaa2_mcp **mcp, struct resource *res,
+ struct resource_map *map, uint16_t flags);
+int dpaa2_mcp_init_command(struct dpaa2_cmd **cmd, uint16_t flags);
+void dpaa2_mcp_free_portal(struct dpaa2_mcp *mcp);
+void dpaa2_mcp_free_command(struct dpaa2_cmd *cmd);
+
+/* to quickly update command token */
+struct dpaa2_cmd *dpaa2_mcp_tk(struct dpaa2_cmd *cmd, uint16_t token);
+/* to quickly update command flags */
+struct dpaa2_cmd *dpaa2_mcp_f(struct dpaa2_cmd *cmd, uint16_t flags);
+
+#endif /* _DPAA2_MCP_H */
diff --git a/sys/dev/dpaa2/dpaa2_ni.c b/sys/dev/dpaa2/dpaa2_ni.c
new file mode 100644
index 000000000000..f62d6c1b6f29
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_ni.c
@@ -0,0 +1,3670 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ * Copyright © 2022 Mathew McBride
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * The DPAA2 Network Interface (DPNI) driver.
+ *
+ * The DPNI object is a network interface that is configurable to support a wide
+ * range of features from a very basic Ethernet interface up to a
+ * high-functioning network interface. The DPNI supports features that are
+ * expected by standard network stacks, from basic features to offloads.
+ *
+ * DPNIs work with Ethernet traffic, starting with the L2 header. Additional
+ * functions are provided for standard network protocols (L2, L3, L4, etc.).
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/mbuf.h>
+#include <sys/taskqueue.h>
+#include <sys/buf_ring.h>
+#include <sys/smp.h>
+#include <sys/proc.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/atomic.h>
+
+#include <net/ethernet.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+#include <net/if_var.h>
+
+#include <dev/pci/pcivar.h>
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+#include <dev/mdio/mdio.h>
+
+#include "opt_acpi.h"
+#include "opt_platform.h"
+
+#include "pcib_if.h"
+#include "pci_if.h"
+#include "miibus_if.h"
+#include "memac_mdio_if.h"
+
+#include "dpaa2_types.h"
+#include "dpaa2_mc.h"
+#include "dpaa2_mc_if.h"
+#include "dpaa2_mcp.h"
+#include "dpaa2_swp.h"
+#include "dpaa2_swp_if.h"
+#include "dpaa2_cmd_if.h"
+#include "dpaa2_ni.h"
+
+#define BIT(x) (1ul << (x))
+#define WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/* Frame Dequeue Response status bits. */
+#define IS_NULL_RESPONSE(stat) ((((stat) >> 4) & 1) == 0)
+
+#define ALIGN_UP(x, y) roundup2((x), (y))
+#define ALIGN_DOWN(x, y) rounddown2((x), (y))
+#define CACHE_LINE_ALIGN(x) ALIGN_UP((x), CACHE_LINE_SIZE)
+
+#define DPNI_LOCK(__sc) do { \
+ mtx_assert(&(__sc)->lock, MA_NOTOWNED); \
+ mtx_lock(&(__sc)->lock); \
+} while (0)
+#define DPNI_UNLOCK(__sc) do { \
+ mtx_assert(&(__sc)->lock, MA_OWNED); \
+ mtx_unlock(&(__sc)->lock); \
+} while (0)
+
+#define TX_LOCK(__tx) do { \
+ mtx_assert(&(__tx)->lock, MA_NOTOWNED); \
+ mtx_lock(&(__tx)->lock); \
+} while (0)
+#define TX_UNLOCK(__tx) do { \
+ mtx_assert(&(__tx)->lock, MA_OWNED); \
+ mtx_unlock(&(__tx)->lock); \
+} while (0)
+
+#define DPAA2_TX_RING(sc, chan, tc) \
+ (&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])
+
+#define DPNI_IRQ_INDEX 0 /* Index of the only DPNI IRQ. */
+#define DPNI_IRQ_LINK_CHANGED 1 /* Link state changed */
+#define DPNI_IRQ_EP_CHANGED 2 /* DPAA2 endpoint dis/connected */
+
+/* Default maximum frame length. */
+#define DPAA2_ETH_MFL (ETHER_MAX_LEN - ETHER_CRC_LEN)
+
+/* Minimally supported version of the DPNI API. */
+#define DPNI_VER_MAJOR 7
+#define DPNI_VER_MINOR 0
+
+/* Rx/Tx buffers configuration. */
+#define BUF_ALIGN_V1 256 /* WRIOP v1.0.0 limitation */
+#define BUF_ALIGN 64
+#define BUF_SWA_SIZE 64 /* SW annotation size */
+#define BUF_RX_HWA_SIZE 64 /* HW annotation size */
+#define BUF_TX_HWA_SIZE 128 /* HW annotation size */
+#define BUF_SIZE (MJUM9BYTES)
+#define BUF_MAXADDR_49BIT 0x1FFFFFFFFFFFFul
+#define BUF_MAXADDR (BUS_SPACE_MAXADDR)
+
+#define DPAA2_TX_BUFRING_SZ (4096u)
+#define DPAA2_TX_SEGLIMIT (16u) /* arbitrary number */
+#define DPAA2_TX_SEG_SZ (4096u)
+#define DPAA2_TX_SEGS_MAXSZ (DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
+#define DPAA2_TX_SGT_SZ (512u) /* bytes */
+
+/* Size of a buffer to keep a QoS table key configuration. */
+#define ETH_QOS_KCFG_BUF_SIZE 256
+
+/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
+#define DPAA2_CLASSIFIER_DMA_SIZE 256
+
+/* Channel storage buffer configuration. */
+#define ETH_STORE_FRAMES 16u
+#define ETH_STORE_SIZE ((ETH_STORE_FRAMES + 1) * sizeof(struct dpaa2_dq))
+#define ETH_STORE_ALIGN 64u
+
+/* Buffers layout options. */
+#define BUF_LOPT_TIMESTAMP 0x1
+#define BUF_LOPT_PARSER_RESULT 0x2
+#define BUF_LOPT_FRAME_STATUS 0x4
+#define BUF_LOPT_PRIV_DATA_SZ 0x8
+#define BUF_LOPT_DATA_ALIGN 0x10
+#define BUF_LOPT_DATA_HEAD_ROOM 0x20
+#define BUF_LOPT_DATA_TAIL_ROOM 0x40
+
+#define DPAA2_NI_BUF_ADDR_MASK (0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */
+#define DPAA2_NI_BUF_CHAN_MASK (0xFu)
+#define DPAA2_NI_BUF_CHAN_SHIFT (60)
+#define DPAA2_NI_BUF_IDX_MASK (0x7FFFu)
+#define DPAA2_NI_BUF_IDX_SHIFT (49)
+#define DPAA2_NI_TX_IDX_MASK (0x7u)
+#define DPAA2_NI_TX_IDX_SHIFT (57)
+#define DPAA2_NI_TXBUF_IDX_MASK (0xFFu)
+#define DPAA2_NI_TXBUF_IDX_SHIFT (49)
+
+#define DPAA2_NI_FD_FMT_MASK (0x3u)
+#define DPAA2_NI_FD_FMT_SHIFT (12)
+#define DPAA2_NI_FD_ERR_MASK (0xFFu)
+#define DPAA2_NI_FD_ERR_SHIFT (0)
+#define DPAA2_NI_FD_SL_MASK (0x1u)
+#define DPAA2_NI_FD_SL_SHIFT (14)
+#define DPAA2_NI_FD_LEN_MASK (0x3FFFFu)
+#define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu)
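+
+/*
+ * For illustration, the dpaa2_ni_fd_*() helpers below recover these fields
+ * by shift-and-mask, e.g. the QBMan channel index packed into the upper bits
+ * of the frame descriptor's 64-bit buffer address:
+ *
+ *	chan_idx = (fd_addr >> DPAA2_NI_BUF_CHAN_SHIFT) &
+ *	    DPAA2_NI_BUF_CHAN_MASK;
+ */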
+
+/* Enables TCAM for Flow Steering and QoS look-ups. */
+#define DPNI_OPT_HAS_KEY_MASKING 0x10
+
+/* Unique IDs for the supported Rx classification header fields. */
+#define DPAA2_ETH_DIST_ETHDST BIT(0)
+#define DPAA2_ETH_DIST_ETHSRC BIT(1)
+#define DPAA2_ETH_DIST_ETHTYPE BIT(2)
+#define DPAA2_ETH_DIST_VLAN BIT(3)
+#define DPAA2_ETH_DIST_IPSRC BIT(4)
+#define DPAA2_ETH_DIST_IPDST BIT(5)
+#define DPAA2_ETH_DIST_IPPROTO BIT(6)
+#define DPAA2_ETH_DIST_L4SRC BIT(7)
+#define DPAA2_ETH_DIST_L4DST BIT(8)
+#define DPAA2_ETH_DIST_ALL (~0ULL)
+
+/* L3-L4 network traffic flow hash options. */
+#define RXH_L2DA (1 << 1)
+#define RXH_VLAN (1 << 2)
+#define RXH_L3_PROTO (1 << 3)
+#define RXH_IP_SRC (1 << 4)
+#define RXH_IP_DST (1 << 5)
+#define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */
+#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */
+#define RXH_DISCARD (1 << 31)
+
+/* Default Rx hash options (4-tuple), set during attach. */
+#define DPAA2_RXH_DEFAULT (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
+MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");
+
+/* DPAA2 Network Interface resource specification. */
+struct resource_spec dpaa2_ni_spec[] = {
+ /*
+ * DPMCP resources.
+ *
+ * NOTE: MC command portals (MCPs) are used to send commands to, and
+ * receive responses from, the MC firmware. One portal per DPNI.
+ */
+#define MCP_RES_NUM (1u)
+#define MCP_RID_OFF (0u)
+#define MCP_RID(rid) ((rid) + MCP_RID_OFF)
+ /* --- */
+ { DPAA2_DEV_MCP, MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ /*
+ * DPIO resources (software portals).
+ *
+ * NOTE: One per running core. While DPIOs are the source of data
+ * availability interrupts, the DPCONs are used to identify the
+ * network interface that has produced ingress data to that core.
+ */
+#define IO_RES_NUM (16u)
+#define IO_RID_OFF (MCP_RID_OFF + MCP_RES_NUM)
+#define IO_RID(rid) ((rid) + IO_RID_OFF)
+ /* --- */
+ { DPAA2_DEV_IO, IO_RID(0), RF_ACTIVE | RF_SHAREABLE },
+ { DPAA2_DEV_IO, IO_RID(1), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, IO_RID(2), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, IO_RID(3), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, IO_RID(4), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, IO_RID(5), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, IO_RID(6), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, IO_RID(7), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, IO_RID(8), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, IO_RID(9), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, IO_RID(10), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, IO_RID(11), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, IO_RID(12), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, IO_RID(13), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, IO_RID(14), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ { DPAA2_DEV_IO, IO_RID(15), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
+ /*
+ * DPBP resources (buffer pools).
+ *
+ * NOTE: One per network interface.
+ */
+#define BP_RES_NUM (1u)
+#define BP_RID_OFF (IO_RID_OFF + IO_RES_NUM)
+#define BP_RID(rid) ((rid) + BP_RID_OFF)
+ /* --- */
+ { DPAA2_DEV_BP, BP_RID(0), RF_ACTIVE },
+ /*
+ * DPCON resources (channels).
+ *
+	 * NOTE: One DPCON per core to which Rx or Tx confirmation traffic
+	 *	 is to be distributed.
+ * NOTE: Since it is necessary to distinguish between traffic from
+ * different network interfaces arriving on the same core, the
+ * DPCONs must be private to the DPNIs.
+ */
+#define CON_RES_NUM (16u)
+#define CON_RID_OFF (BP_RID_OFF + BP_RES_NUM)
+#define CON_RID(rid) ((rid) + CON_RID_OFF)
+ /* --- */
+ { DPAA2_DEV_CON, CON_RID(0), RF_ACTIVE },
+ { DPAA2_DEV_CON, CON_RID(1), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, CON_RID(2), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, CON_RID(3), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, CON_RID(4), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, CON_RID(5), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, CON_RID(6), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, CON_RID(7), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, CON_RID(8), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, CON_RID(9), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, CON_RID(10), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, CON_RID(11), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, CON_RID(12), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, CON_RID(13), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, CON_RID(14), RF_ACTIVE | RF_OPTIONAL },
+ { DPAA2_DEV_CON, CON_RID(15), RF_ACTIVE | RF_OPTIONAL },
+ /* --- */
+ RESOURCE_SPEC_END
+};
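+
+/*
+ * With the offsets above, the resource IDs in dpaa2_ni_spec are laid out as
+ * follows: rid 0 is the DPMCP, rids 1-16 are the DPIOs, rid 17 is the DPBP
+ * and rids 18-33 are the DPCONs, e.g. IO_RID(3) == 4 and CON_RID(0) == 18.
+ */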
+
+/* Supported header fields for Rx hash distribution key */
+static const struct dpaa2_eth_dist_fields dist_fields[] = {
+ {
+ /* L2 header */
+ .rxnfc_field = RXH_L2DA,
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_DA,
+ .id = DPAA2_ETH_DIST_ETHDST,
+ .size = 6,
+ }, {
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_SA,
+ .id = DPAA2_ETH_DIST_ETHSRC,
+ .size = 6,
+ }, {
+ /* This is the last ethertype field parsed:
+ * depending on frame format, it can be the MAC ethertype
+ * or the VLAN etype.
+ */
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_TYPE,
+ .id = DPAA2_ETH_DIST_ETHTYPE,
+ .size = 2,
+ }, {
+ /* VLAN header */
+ .rxnfc_field = RXH_VLAN,
+ .cls_prot = NET_PROT_VLAN,
+ .cls_field = NH_FLD_VLAN_TCI,
+ .id = DPAA2_ETH_DIST_VLAN,
+ .size = 2,
+ }, {
+ /* IP header */
+ .rxnfc_field = RXH_IP_SRC,
+ .cls_prot = NET_PROT_IP,
+ .cls_field = NH_FLD_IP_SRC,
+ .id = DPAA2_ETH_DIST_IPSRC,
+ .size = 4,
+ }, {
+ .rxnfc_field = RXH_IP_DST,
+ .cls_prot = NET_PROT_IP,
+ .cls_field = NH_FLD_IP_DST,
+ .id = DPAA2_ETH_DIST_IPDST,
+ .size = 4,
+ }, {
+ .rxnfc_field = RXH_L3_PROTO,
+ .cls_prot = NET_PROT_IP,
+ .cls_field = NH_FLD_IP_PROTO,
+ .id = DPAA2_ETH_DIST_IPPROTO,
+ .size = 1,
+ }, {
+ /* Using UDP ports, this is functionally equivalent to raw
+ * byte pairs from L4 header.
+ */
+ .rxnfc_field = RXH_L4_B_0_1,
+ .cls_prot = NET_PROT_UDP,
+ .cls_field = NH_FLD_UDP_PORT_SRC,
+ .id = DPAA2_ETH_DIST_L4SRC,
+ .size = 2,
+ }, {
+ .rxnfc_field = RXH_L4_B_2_3,
+ .cls_prot = NET_PROT_UDP,
+ .cls_field = NH_FLD_UDP_PORT_DST,
+ .id = DPAA2_ETH_DIST_L4DST,
+ .size = 2,
+ },
+};
+
+static struct dpni_stat {
+ int page;
+ int cnt;
+ char *name;
+ char *desc;
+} dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
+ /* PAGE, COUNTER, NAME, DESCRIPTION */
+ { 0, 0, "in_all_frames", "All accepted ingress frames" },
+ { 0, 1, "in_all_bytes", "Bytes in all accepted ingress frames" },
+ { 0, 2, "in_multi_frames", "Multicast accepted ingress frames" },
+ { 1, 0, "eg_all_frames", "All egress frames transmitted" },
+ { 1, 1, "eg_all_bytes", "Bytes in all frames transmitted" },
+ { 1, 2, "eg_multi_frames", "Multicast egress frames transmitted" },
+ { 2, 0, "in_filtered_frames", "All ingress frames discarded due to "
+ "filtering" },
+ { 2, 1, "in_discarded_frames", "All frames discarded due to errors" },
+ { 2, 2, "in_nobuf_discards", "Discards on ingress side due to buffer "
+ "depletion in DPNI buffer pools" },
+};
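+
+/*
+ * For example, reading the "eg_all_bytes" statistic amounts to requesting
+ * statistics page 1 from the MC firmware and picking the 64-bit counter at
+ * index 1 from the response.
+ */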
+
+/* Device interface */
+static int dpaa2_ni_probe(device_t);
+static int dpaa2_ni_attach(device_t);
+static int dpaa2_ni_detach(device_t);
+
+/* DPAA2 network interface setup and configuration */
+static int dpaa2_ni_setup(device_t);
+static int dpaa2_ni_setup_channels(device_t);
+static int dpaa2_ni_setup_fq(device_t, struct dpaa2_ni_channel *,
+ enum dpaa2_ni_queue_type);
+static int dpaa2_ni_bind(device_t);
+static int dpaa2_ni_setup_rx_dist(device_t);
+static int dpaa2_ni_setup_irqs(device_t);
+static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
+static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
+static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
+static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
+static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);
+
+/* Tx/Rx flow configuration */
+static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_cmd *,
+ struct dpaa2_ni_fq *);
+static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_cmd *,
+ struct dpaa2_ni_fq *);
+static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_cmd *,
+ struct dpaa2_ni_fq *);
+
+/* Configuration subroutines */
+static int dpaa2_ni_set_buf_layout(device_t, struct dpaa2_cmd *);
+static int dpaa2_ni_set_pause_frame(device_t, struct dpaa2_cmd *);
+static int dpaa2_ni_set_qos_table(device_t, struct dpaa2_cmd *);
+static int dpaa2_ni_set_mac_addr(device_t, struct dpaa2_cmd *, uint16_t,
+ uint16_t);
+static int dpaa2_ni_set_hash(device_t, uint64_t);
+static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);
+
+/* Buffers and buffer pools */
+static int dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *, uint32_t);
+static int dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *, int);
+static int dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *, int);
+static int dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *,
+ struct dpaa2_ni_channel *);
+
+/* Frame descriptor routines */
+static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
+ struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
+static int dpaa2_ni_fd_err(struct dpaa2_fd *);
+static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
+static int dpaa2_ni_fd_chan_idx(struct dpaa2_fd *);
+static int dpaa2_ni_fd_buf_idx(struct dpaa2_fd *);
+static int dpaa2_ni_fd_tx_idx(struct dpaa2_fd *);
+static int dpaa2_ni_fd_txbuf_idx(struct dpaa2_fd *);
+static int dpaa2_ni_fd_format(struct dpaa2_fd *);
+static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
+static int dpaa2_ni_fd_offset(struct dpaa2_fd *);
+
+/* Various subroutines */
+static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
+static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);
+static int dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *,
+ struct dpaa2_dq **);
+
+/* Network interface routines */
+static void dpaa2_ni_init(void *);
+static int dpaa2_ni_transmit(struct ifnet *, struct mbuf *);
+static void dpaa2_ni_qflush(struct ifnet *);
+static int dpaa2_ni_ioctl(struct ifnet *, u_long, caddr_t);
+static int dpaa2_ni_update_mac_filters(struct ifnet *);
+static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);
+
+/* Interrupt handlers */
+static void dpaa2_ni_intr(void *);
+
+/* MII handlers */
+static void dpaa2_ni_miibus_statchg(device_t);
+static int dpaa2_ni_media_change(struct ifnet *);
+static void dpaa2_ni_media_status(struct ifnet *, struct ifmediareq *);
+static void dpaa2_ni_media_tick(void *);
+
+/* DMA mapping callback */
+static void dpaa2_ni_dmamap_cb(void *, bus_dma_segment_t *, int, int);
+
+/* Tx/Rx routines. */
+static void dpaa2_ni_poll(void *);
+static void dpaa2_ni_tx_locked(struct dpaa2_ni_softc *,
+ struct dpaa2_ni_tx_ring *, struct mbuf *);
+static void dpaa2_ni_bp_task(void *, int);
+
+/* Tx/Rx subroutines */
+static int dpaa2_ni_consume_frames(struct dpaa2_ni_channel *,
+ struct dpaa2_ni_fq **, uint32_t *);
+static int dpaa2_ni_rx(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
+ struct dpaa2_fd *);
+static int dpaa2_ni_rx_err(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
+ struct dpaa2_fd *);
+static int dpaa2_ni_tx_conf(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
+ struct dpaa2_fd *);
+
+/* sysctl(9) */
+static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
+static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
+static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);
+
+static int
+dpaa2_ni_probe(device_t dev)
+{
+ /* DPNI device will be added by a parent resource container itself. */
+ device_set_desc(dev, "DPAA2 Network Interface");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+dpaa2_ni_attach(device_t dev)
+{
+ device_t pdev = device_get_parent(dev);
+ device_t child = dev;
+ device_t mcp_dev;
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
+ struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
+ struct dpaa2_devinfo *mcp_dinfo;
+ struct ifnet *ifp;
+ char tq_name[32];
+ int error;
+
+ sc->dev = dev;
+ sc->ifp = NULL;
+ sc->miibus = NULL;
+ sc->mii = NULL;
+ sc->media_status = 0;
+ sc->if_flags = 0;
+ sc->link_state = LINK_STATE_UNKNOWN;
+ sc->buf_align = 0;
+
+ /* For debug purposes only! */
+ sc->rx_anomaly_frames = 0;
+ sc->rx_single_buf_frames = 0;
+ sc->rx_sg_buf_frames = 0;
+ sc->rx_enq_rej_frames = 0;
+ sc->rx_ieoi_err_frames = 0;
+ sc->tx_single_buf_frames = 0;
+ sc->tx_sg_frames = 0;
+
+ DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
+ DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);
+
+ sc->bp_dmat = NULL;
+ sc->st_dmat = NULL;
+ sc->rxd_dmat = NULL;
+ sc->qos_dmat = NULL;
+
+ sc->qos_kcfg.type = DPAA2_BUF_STORE;
+ sc->qos_kcfg.store.dmap = NULL;
+ sc->qos_kcfg.store.paddr = 0;
+ sc->qos_kcfg.store.vaddr = NULL;
+
+ sc->rxd_kcfg.type = DPAA2_BUF_STORE;
+ sc->rxd_kcfg.store.dmap = NULL;
+ sc->rxd_kcfg.store.paddr = 0;
+ sc->rxd_kcfg.store.vaddr = NULL;
+
+ sc->mac.dpmac_id = 0;
+ sc->mac.phy_dev = NULL;
+ memset(sc->mac.addr, 0, ETHER_ADDR_LEN);
+
+ error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
+ if (error) {
+ device_printf(dev, "%s: failed to allocate resources: "
+ "error=%d\n", __func__, error);
+ return (ENXIO);
+ }
+
+ /* Obtain MC portal. */
+ mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]);
+ mcp_dinfo = device_get_ivars(mcp_dev);
+ dinfo->portal = mcp_dinfo->portal;
+
+ mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);
+
+ /* Allocate network interface */
+ ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ device_printf(dev, "%s: failed to allocate network interface\n",
+ __func__);
+ return (ENXIO);
+ }
+ sc->ifp = ifp;
+ if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));
+
+ ifp->if_softc = sc;
+ ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
+ ifp->if_init = dpaa2_ni_init;
+ ifp->if_ioctl = dpaa2_ni_ioctl;
+ ifp->if_transmit = dpaa2_ni_transmit;
+ ifp->if_qflush = dpaa2_ni_qflush;
+
+ ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU;
+ ifp->if_capenable = ifp->if_capabilities;
+
+ /* Allocate a command to send to MC hardware. */
+ error = dpaa2_mcp_init_command(&sc->cmd, DPAA2_CMD_DEF);
+ if (error) {
+ device_printf(dev, "%s: failed to allocate dpaa2_cmd: "
+ "error=%d\n", __func__, error);
+ goto err_exit;
+ }
+
+ /* Open resource container and network interface object. */
+ error = DPAA2_CMD_RC_OPEN(dev, child, sc->cmd, rcinfo->id,
+ &sc->rc_token);
+ if (error) {
+ device_printf(dev, "%s: failed to open resource container: "
+ "id=%d, error=%d\n", __func__, rcinfo->id, error);
+ goto err_free_cmd;
+ }
+ error = DPAA2_CMD_NI_OPEN(dev, child, dpaa2_mcp_tk(sc->cmd,
+ sc->rc_token), dinfo->id, &sc->ni_token);
+ if (error) {
+ device_printf(dev, "%s: failed to open network interface: "
+ "id=%d, error=%d\n", __func__, dinfo->id, error);
+ goto err_close_rc;
+ }
+
+ /* Create a taskqueue thread to release new buffers to the pool. */
+ TASK_INIT(&sc->bp_task, 0, dpaa2_ni_bp_task, sc);
+ bzero(tq_name, sizeof (tq_name));
+ snprintf(tq_name, sizeof (tq_name), "%s_tqbp",
+ device_get_nameunit(dev));
+ sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
+ taskqueue_thread_enqueue, &sc->bp_taskq);
+ if (sc->bp_taskq == NULL) {
+ device_printf(dev, "%s: failed to allocate task queue: %s\n",
+ __func__, tq_name);
+ goto err_close_ni;
+ }
+ taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);
+
+ error = dpaa2_ni_setup(dev);
+ if (error) {
+ device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
+ __func__, error);
+ goto err_close_ni;
+ }
+ error = dpaa2_ni_setup_channels(dev);
+ if (error) {
+ device_printf(dev, "%s: failed to setup QBMan channels: "
+ "error=%d\n", __func__, error);
+ goto err_close_ni;
+ }
+
+ error = dpaa2_ni_bind(dev);
+ if (error) {
+ device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
+ __func__, error);
+ goto err_close_ni;
+ }
+ error = dpaa2_ni_setup_irqs(dev);
+ if (error) {
+ device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
+ __func__, error);
+ goto err_close_ni;
+ }
+ error = dpaa2_ni_setup_sysctls(sc);
+ if (error) {
+ device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
+ __func__, error);
+ goto err_close_ni;
+ }
+
+ ether_ifattach(sc->ifp, sc->mac.addr);
+ callout_init(&sc->mii_callout, 0);
+
+ return (0);
+
+err_close_ni:
+ DPAA2_CMD_NI_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->ni_token));
+err_close_rc:
+ DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token));
+err_free_cmd:
+ dpaa2_mcp_free_command(sc->cmd);
+err_exit:
+ return (ENXIO);
+}
+
+static void
+dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq* ifmr)
+{
+ struct dpaa2_ni_softc *sc = ifp->if_softc;
+
+ DPNI_LOCK(sc);
+ ifmr->ifm_count = 0;
+ ifmr->ifm_mask = 0;
+ ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
+ ifmr->ifm_current = ifmr->ifm_active =
+ sc->fixed_ifmedia.ifm_cur->ifm_media;
+
+	/*
+	 * In non-PHY use cases we need to signal link state up; otherwise
+	 * things that require a link event from devd (e.g. an async DHCP
+	 * client) do not happen.
+	 */
+ if (ifp->if_link_state == LINK_STATE_UNKNOWN) {
+ if_link_state_change(ifp, LINK_STATE_UP);
+ }
+
+	/*
+	 * TODO: Check the status of the link partner (DPMAC, DPNI or other)
+	 * and reset if down. This differs from the DPAA2_MAC_LINK_TYPE_PHY
+	 * case: here the MC firmware sets the status, instead of us telling
+	 * the MC what it is.
+	 */
+ DPNI_UNLOCK(sc);
+
+ return;
+}
+
+static void
+dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
+{
+ /*
+ * FIXME: When the DPNI is connected to a DPMAC, we can get the
+ * 'apparent' speed from it.
+ */
+ sc->fixed_link = true;
+
+ ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change,
+ dpaa2_ni_fixed_media_status);
+ ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
+ ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T);
+}
+
+static int
+dpaa2_ni_detach(device_t dev)
+{
+ device_t child = dev;
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+
+ DPAA2_CMD_NI_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->ni_token));
+ DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token));
+ dpaa2_mcp_free_command(sc->cmd);
+
+ sc->cmd = NULL;
+ sc->ni_token = 0;
+ sc->rc_token = 0;
+
+ return (0);
+}
+
+/**
+ * @brief Configure DPAA2 network interface object.
+ */
+static int
+dpaa2_ni_setup(device_t dev)
+{
+ device_t child = dev;
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
+ struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */
+ struct dpaa2_cmd *cmd = sc->cmd;
+ uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */
+ uint16_t rc_token = sc->rc_token;
+ uint16_t ni_token = sc->ni_token;
+ uint16_t mac_token;
+ struct dpaa2_mac_attr attr;
+ enum dpaa2_mac_link_type link_type;
+ uint32_t link;
+ int error;
+
+ /* Check if we can work with this DPNI object. */
+ error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, dpaa2_mcp_tk(cmd,
+ ni_token), &sc->api_major, &sc->api_minor);
+ if (error) {
+ device_printf(dev, "%s: failed to get DPNI API version\n",
+ __func__);
+ return (error);
+ }
+ if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
+ device_printf(dev, "%s: DPNI API version %u.%u not supported, "
+ "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor,
+ DPNI_VER_MAJOR, DPNI_VER_MINOR);
+ error = ENODEV;
+ return (error);
+ }
+
+ /* Reset the DPNI object. */
+ error = DPAA2_CMD_NI_RESET(dev, child, cmd);
+ if (error) {
+ device_printf(dev, "%s: failed to reset DPNI: id=%d\n",
+ __func__, dinfo->id);
+ return (error);
+ }
+
+ /* Obtain attributes of the DPNI object. */
+ error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, cmd, &sc->attr);
+ if (error) {
+ device_printf(dev, "%s: failed to obtain DPNI attributes: "
+ "id=%d\n", __func__, dinfo->id);
+ return (error);
+ }
+ if (bootverbose) {
+		device_printf(dev, "options=%#x queues=%d tx_channels=%d "
+ "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues,
+ sc->attr.num.channels, sc->attr.wriop_ver);
+ device_printf(dev, "\ttraffic classes: rx=%d tx=%d "
+ "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs,
+ sc->attr.num.cgs);
+ device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d "
+ "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan,
+ sc->attr.entries.qos, sc->attr.entries.fs);
+ device_printf(dev, "\tkey sizes: qos=%d fs=%d\n",
+ sc->attr.key_size.qos, sc->attr.key_size.fs);
+ }
+
+ /* Configure buffer layouts of the DPNI queues. */
+ error = dpaa2_ni_set_buf_layout(dev, cmd);
+ if (error) {
+ device_printf(dev, "%s: failed to configure buffer layout\n",
+ __func__);
+ return (error);
+ }
+
+ /* Configure DMA resources. */
+ error = dpaa2_ni_setup_dma(sc);
+ if (error) {
+ device_printf(dev, "%s: failed to setup DMA\n", __func__);
+ return (error);
+ }
+
+ /* Setup link between DPNI and an object it's connected to. */
+ ep1_desc.obj_id = dinfo->id;
+	ep1_desc.if_id = 0; /* the DPNI has a single endpoint */
+ ep1_desc.type = dinfo->dtype;
+
+ error = DPAA2_CMD_RC_GET_CONN(dev, child, dpaa2_mcp_tk(cmd, rc_token),
+ &ep1_desc, &ep2_desc, &link);
+ if (error)
+ device_printf(dev, "%s: failed to obtain an object DPNI is "
+ "connected to: error=%d\n", __func__, error);
+ else {
+ device_printf(dev, "connected to %s (id=%d)\n",
+ dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);
+
+ error = dpaa2_ni_set_mac_addr(dev, cmd, rc_token, ni_token);
+ if (error)
+ device_printf(dev, "%s: failed to set MAC "
+ "address: error=%d\n", __func__, error);
+
+ if (ep2_desc.type == DPAA2_DEV_MAC) {
+ /*
+ * This is the simplest case when DPNI is connected to
+ * DPMAC directly.
+ */
+ sc->mac.dpmac_id = ep2_desc.obj_id;
+
+ link_type = DPAA2_MAC_LINK_TYPE_NONE;
+
+ /*
+ * Need to determine if DPMAC type is PHY (attached to
+ * conventional MII PHY) or FIXED (usually SFP/SerDes,
+ * link state managed by MC firmware).
+ */
+ error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
+ dpaa2_mcp_tk(sc->cmd, sc->rc_token),
+ sc->mac.dpmac_id, &mac_token);
+ /*
+ * Under VFIO, the DPMAC might be sitting in another
+ * container (DPRC) we don't have access to.
+ * Assume DPAA2_MAC_LINK_TYPE_FIXED if this is
+ * the case.
+ */
+ if (error) {
+ device_printf(dev, "%s: failed to open "
+ "connected DPMAC: %d (assuming in other DPRC)\n", __func__,
+ sc->mac.dpmac_id);
+ link_type = DPAA2_MAC_LINK_TYPE_FIXED;
+ } else {
+ error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
+ sc->cmd, &attr);
+ if (error)
+ device_printf(dev, "%s: failed to get "
+ "DPMAC attributes: id=%d, "
+ "error=%d\n", __func__, dinfo->id,
+ error);
+ else
+ link_type = attr.link_type;
+ }
+ DPAA2_CMD_MAC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd,
+ mac_token));
+
+ if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
+ device_printf(dev, "connected DPMAC is in FIXED "
+ "mode\n");
+ dpaa2_ni_setup_fixed_link(sc);
+ } else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
+ device_printf(dev, "connected DPMAC is in PHY "
+ "mode\n");
+ error = DPAA2_MC_GET_PHY_DEV(dev,
+ &sc->mac.phy_dev, sc->mac.dpmac_id);
+ if (error == 0) {
+ error = MEMAC_MDIO_SET_NI_DEV(
+ sc->mac.phy_dev, dev);
+ if (error != 0)
+ device_printf(dev, "%s: failed "
+ "to set dpni dev on memac "
+ "mdio dev %s: error=%d\n",
+ __func__,
+ device_get_nameunit(
+ sc->mac.phy_dev), error);
+ }
+ if (error == 0) {
+ error = MEMAC_MDIO_GET_PHY_LOC(
+ sc->mac.phy_dev, &sc->mac.phy_loc);
+ if (error == ENODEV)
+ error = 0;
+ if (error != 0)
+ device_printf(dev, "%s: failed "
+ "to get phy location from "
+ "memac mdio dev %s: error=%d\n",
+ __func__, device_get_nameunit(
+ sc->mac.phy_dev), error);
+ }
+ if (error == 0) {
+ error = mii_attach(sc->mac.phy_dev,
+ &sc->miibus, sc->ifp,
+ dpaa2_ni_media_change,
+ dpaa2_ni_media_status,
+ BMSR_DEFCAPMASK, sc->mac.phy_loc,
+ MII_OFFSET_ANY, 0);
+ if (error != 0)
+ device_printf(dev, "%s: failed "
+ "to attach to miibus: "
+ "error=%d\n",
+ __func__, error);
+ }
+ if (error == 0)
+ sc->mii = device_get_softc(sc->miibus);
+ } else {
+ device_printf(dev, "%s: DPMAC link type is not "
+ "supported\n", __func__);
+ }
+ } else if (ep2_desc.type == DPAA2_DEV_NI ||
+ ep2_desc.type == DPAA2_DEV_MUX ||
+ ep2_desc.type == DPAA2_DEV_SW) {
+ dpaa2_ni_setup_fixed_link(sc);
+ }
+ }
+
+ /* Select mode to enqueue frames. */
+ /* ... TBD ... */
+
+ /*
+ * Update link configuration to enable Rx/Tx pause frames support.
+ *
+ * NOTE: MC may generate an interrupt to the DPMAC and request changes
+ * in link configuration. It might be necessary to attach miibus
+ * and PHY before this point.
+ */
+ error = dpaa2_ni_set_pause_frame(dev, dpaa2_mcp_tk(cmd, ni_token));
+ if (error) {
+ device_printf(dev, "%s: failed to configure Rx/Tx pause "
+ "frames\n", __func__);
+ return (error);
+ }
+
+ /* Configure ingress traffic classification. */
+ error = dpaa2_ni_set_qos_table(dev, dpaa2_mcp_tk(cmd, ni_token));
+ if (error)
+ device_printf(dev, "%s: failed to configure QoS table: "
+ "error=%d\n", __func__, error);
+
+ /* Add broadcast physical address to the MAC filtering table. */
+ memset(eth_bca, 0xff, ETHER_ADDR_LEN);
+ error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, cmd, eth_bca);
+ if (error) {
+ device_printf(dev, "%s: failed to add broadcast physical "
+ "address to the MAC filtering table\n", __func__);
+ return (error);
+ }
+
+ /* Set the maximum allowed length for received frames. */
+ error = DPAA2_CMD_NI_SET_MFL(dev, child, cmd, DPAA2_ETH_MFL);
+ if (error) {
+ device_printf(dev, "%s: failed to set maximum length for "
+ "received frames\n", __func__);
+ return (error);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Configure QBMan channels and register data availability notifications.
+ */
+static int
+dpaa2_ni_setup_channels(device_t dev)
+{
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct dpaa2_con_softc *consc;
+ struct dpaa2_devinfo *io_info, *con_info;
+ device_t io_dev, con_dev, child = dev;
+ struct dpaa2_ni_channel *channel;
+ struct dpaa2_io_notif_ctx *ctx;
+ struct dpaa2_con_notif_cfg notif_cfg;
+ struct dpaa2_buf *buf;
+ int error;
+ struct sysctl_ctx_list *sysctl_ctx;
+ struct sysctl_oid *node;
+ struct sysctl_oid_list *parent;
+ uint32_t i, num_chan;
+
+ /* Calculate number of the channels based on the allocated resources. */
+ for (i = 0; i < IO_RES_NUM; i++)
+ if (!sc->res[IO_RID(i)])
+ break;
+ num_chan = i;
+ for (i = 0; i < CON_RES_NUM; i++)
+ if (!sc->res[CON_RID(i)])
+ break;
+ num_chan = i < num_chan ? i : num_chan;
+
+ /* Limit maximum channels. */
+ sc->chan_n = num_chan > DPAA2_NI_MAX_CHANNELS
+ ? DPAA2_NI_MAX_CHANNELS : num_chan;
+
+ /* Limit channels by number of the queues. */
+ sc->chan_n = sc->chan_n > sc->attr.num.queues
+ ? sc->attr.num.queues : sc->chan_n;
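+	/*
+	 * E.g. with 8 DPIOs and 16 DPCONs allocated and 10 queues supported
+	 * by the DPNI, 8 channels will be set up.
+	 */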
+
+ device_printf(dev, "channels=%d\n", sc->chan_n);
+
+ sysctl_ctx = device_get_sysctl_ctx(sc->dev);
+ parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
+
+ node = SYSCTL_ADD_NODE(sysctl_ctx, parent, OID_AUTO, "channels",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
+ parent = SYSCTL_CHILDREN(node);
+
+ /* Setup channels for the portal. */
+ for (uint32_t i = 0; i < sc->chan_n; i++) {
+ /* Select software portal. */
+ io_dev = (device_t) rman_get_start(sc->res[IO_RID(i)]);
+ io_info = device_get_ivars(io_dev);
+
+ /* Select DPCON (channel). */
+ con_dev = (device_t) rman_get_start(sc->res[CON_RID(i)]);
+ consc = device_get_softc(con_dev);
+ con_info = device_get_ivars(con_dev);
+
+ /* Enable selected channel. */
+ error = DPAA2_CMD_CON_ENABLE(dev, child, dpaa2_mcp_tk(consc->cmd,
+ consc->con_token));
+ if (error) {
+ device_printf(dev, "%s: failed to enable channel: "
+ "dpcon_id=%d, chan_id=%d\n", __func__, con_info->id,
+ consc->attr.chan_id);
+ return (error);
+ }
+
+ channel = malloc(sizeof(struct dpaa2_ni_channel), M_DPAA2_NI,
+ M_WAITOK | M_ZERO);
+ if (!channel) {
+ device_printf(dev, "%s: failed to allocate a channel\n",
+ __func__);
+ return (ENOMEM);
+ }
+
+ sc->channels[i] = channel;
+
+ channel->id = consc->attr.chan_id;
+ channel->flowid = i;
+ channel->ni_dev = dev;
+ channel->io_dev = io_dev;
+ channel->con_dev = con_dev;
+ channel->recycled_n = 0;
+
+ buf = &channel->store;
+ buf->type = DPAA2_BUF_STORE;
+ buf->store.dmat = NULL;
+ buf->store.dmap = NULL;
+ buf->store.paddr = 0;
+ buf->store.vaddr = NULL;
+
+ /* For debug purposes only! */
+ channel->tx_frames = 0;
+ channel->tx_dropped = 0;
+
+ /* None of the frame queues for this channel configured yet. */
+ channel->rxq_n = 0;
+
+ /* Setup WQ channel notification context. */
+ ctx = &channel->ctx;
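+		/* QMan returns this context value with every CDAN. */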
+ ctx->qman_ctx = (uint64_t) ctx;
+ ctx->cdan_en = true;
+ ctx->fq_chan_id = channel->id;
+ ctx->io_dev = channel->io_dev;
+ ctx->channel = channel;
+ ctx->poll = dpaa2_ni_poll;
+
+ /* Register the new notification context. */
+ error = DPAA2_SWP_CONF_WQ_CHANNEL(channel->io_dev, ctx);
+ if (error) {
+ device_printf(dev, "%s: failed to register notification "
+ "context\n", __func__);
+ return (error);
+ }
+
+ /* Register DPCON notification with Management Complex. */
+ notif_cfg.dpio_id = io_info->id;
+ notif_cfg.prior = 0;
+ notif_cfg.qman_ctx = ctx->qman_ctx;
+ error = DPAA2_CMD_CON_SET_NOTIF(dev, child, dpaa2_mcp_tk(
+ consc->cmd, consc->con_token), &notif_cfg);
+ if (error) {
+ device_printf(dev, "%s: failed to set DPCON "
+ "notification: dpcon_id=%d, chan_id=%d\n", __func__,
+ con_info->id, consc->attr.chan_id);
+ return (error);
+ }
+
+ /* Allocate initial # of Rx buffers and a channel storage. */
+ error = dpaa2_ni_seed_buf_pool(sc, DPAA2_NI_BUFS_INIT);
+ if (error) {
+ device_printf(dev, "%s: failed to seed buffer pool\n",
+ __func__);
+ return (error);
+ }
+ error = dpaa2_ni_seed_chan_storage(sc, channel);
+ if (error) {
+ device_printf(dev, "%s: failed to seed channel "
+ "storage\n", __func__);
+ return (error);
+ }
+
+ /* Prepare queues for this channel. */
+ error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_TX_CONF);
+ if (error) {
+ device_printf(dev, "%s: failed to prepare TxConf "
+ "queue: error=%d\n", __func__, error);
+ return (error);
+ }
+ error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_RX);
+ if (error) {
+ device_printf(dev, "%s: failed to prepare Rx queue: "
+ "error=%d\n", __func__, error);
+ return (error);
+ }
+
+ if (bootverbose)
+ device_printf(dev, "channel: dpio_id=%d "
+ "dpcon_id=%d chan_id=%d, priorities=%d\n",
+ io_info->id, con_info->id, channel->id,
+ consc->attr.prior_num);
+ }
+
+ /* There is exactly one Rx error queue per DPNI. */
+ error = dpaa2_ni_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
+ if (error) {
+ device_printf(dev, "%s: failed to prepare RxError queue: "
+ "error=%d\n", __func__, error);
+ return (error);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Performs an initial configuration of the frame queues.
+ */
+static int
+dpaa2_ni_setup_fq(device_t dev, struct dpaa2_ni_channel *chan,
+ enum dpaa2_ni_queue_type queue_type)
+{
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct dpaa2_ni_fq *fq;
+
+ switch (queue_type) {
+ case DPAA2_NI_QUEUE_TX_CONF:
+ /* One queue per channel. */
+ fq = &chan->txc_queue;
+
+ fq->consume = dpaa2_ni_tx_conf;
+ fq->chan = chan;
+ fq->flowid = chan->flowid;
+ fq->tc = 0; /* ignored */
+ fq->type = queue_type;
+
+ break;
+ case DPAA2_NI_QUEUE_RX:
+ KASSERT(sc->attr.num.rx_tcs <= DPAA2_NI_MAX_TCS,
+ ("too many Rx traffic classes: rx_tcs=%d\n",
+ sc->attr.num.rx_tcs));
+
+ /* One queue per Rx traffic class within a channel. */
+ for (int i = 0; i < sc->attr.num.rx_tcs; i++) {
+ fq = &chan->rx_queues[i];
+
+ fq->consume = dpaa2_ni_rx;
+ fq->chan = chan;
+ fq->flowid = chan->flowid;
+ fq->tc = (uint8_t) i;
+ fq->type = queue_type;
+
+ chan->rxq_n++;
+ }
+ break;
+ case DPAA2_NI_QUEUE_RX_ERR:
+ /* One queue per network interface. */
+ fq = &sc->rxe_queue;
+
+ fq->consume = dpaa2_ni_rx_err;
+ fq->chan = chan;
+ fq->flowid = 0; /* ignored */
+ fq->tc = 0; /* ignored */
+ fq->type = queue_type;
+ break;
+ default:
+ device_printf(dev, "%s: unexpected frame queue type: %d\n",
+ __func__, queue_type);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
+ */
+static int
+dpaa2_ni_bind(device_t dev)
+{
+ device_t bp_dev, child = dev;
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct dpaa2_devinfo *bp_info;
+ struct dpaa2_cmd *cmd = sc->cmd;
+ struct dpaa2_ni_pools_cfg pools_cfg;
+ struct dpaa2_ni_err_cfg err_cfg;
+ struct dpaa2_ni_channel *chan;
+ uint16_t ni_token = sc->ni_token;
+ int error;
+
+ /* Select buffer pool (only one available at the moment). */
+ bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
+ bp_info = device_get_ivars(bp_dev);
+
+ /* Configure buffers pool. */
+ pools_cfg.pools_num = 1;
+ pools_cfg.pools[0].bp_obj_id = bp_info->id;
+ pools_cfg.pools[0].backup_flag = 0;
+ pools_cfg.pools[0].buf_sz = sc->buf_sz;
+ error = DPAA2_CMD_NI_SET_POOLS(dev, child, dpaa2_mcp_tk(cmd, ni_token),
+ &pools_cfg);
+ if (error) {
+ device_printf(dev, "%s: failed to set buffer pools\n", __func__);
+ return (error);
+ }
+
+ /* Setup ingress traffic distribution. */
+ error = dpaa2_ni_setup_rx_dist(dev);
+ if (error && error != EOPNOTSUPP) {
+ device_printf(dev, "%s: failed to setup ingress traffic "
+ "distribution\n", __func__);
+ return (error);
+ }
+ if (bootverbose && error == EOPNOTSUPP)
+ device_printf(dev, "Ingress traffic distribution not "
+ "supported\n");
+
+ /* Configure handling of error frames. */
+ err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
+ err_cfg.set_err_fas = false;
+ err_cfg.action = DPAA2_NI_ERR_DISCARD;
+ error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, cmd, &err_cfg);
+ if (error) {
+ device_printf(dev, "%s: failed to set errors behavior\n",
+ __func__);
+ return (error);
+ }
+
+ /* Configure channel queues to generate CDANs. */
+ for (uint32_t i = 0; i < sc->chan_n; i++) {
+ chan = sc->channels[i];
+
+ /* Setup Rx flows. */
+ for (uint32_t j = 0; j < chan->rxq_n; j++) {
+ error = dpaa2_ni_setup_rx_flow(dev, cmd,
+ &chan->rx_queues[j]);
+ if (error) {
+ device_printf(dev, "%s: failed to setup Rx "
+ "flow: error=%d\n", __func__, error);
+ return (error);
+ }
+ }
+
+ /* Setup Tx flow. */
+ error = dpaa2_ni_setup_tx_flow(dev, cmd, &chan->txc_queue);
+ if (error) {
+ device_printf(dev, "%s: failed to setup Tx "
+ "flow: error=%d\n", __func__, error);
+ return (error);
+ }
+ }
+
+ /* Configure RxError queue to generate CDAN. */
+ error = dpaa2_ni_setup_rx_err_flow(dev, cmd, &sc->rxe_queue);
+ if (error) {
+ device_printf(dev, "%s: failed to setup RxError flow: "
+ "error=%d\n", __func__, error);
+ return (error);
+ }
+
+ /*
+ * Get the Queuing Destination ID (QDID) that should be used for frame
+ * enqueue operations.
+ */
+ error = DPAA2_CMD_NI_GET_QDID(dev, child, cmd, DPAA2_NI_QUEUE_TX,
+ &sc->tx_qdid);
+ if (error) {
+ device_printf(dev, "%s: failed to get Tx queuing destination "
+ "ID\n", __func__);
+ return (error);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Setup ingress traffic distribution.
+ *
+ * NOTE: Ingress traffic distribution is valid only when the DPNI_OPT_NO_FS
+ * option hasn't been set for the DPNI and the number of DPNI queues is > 1.
+ */
+static int
+dpaa2_ni_setup_rx_dist(device_t dev)
+{
+ /*
+ * Have the interface implicitly distribute traffic based on the default
+ * hash key.
+ */
+ return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT));
+}
+
+static int
+dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_fq *fq)
+{
+ device_t child = dev;
+ struct dpaa2_devinfo *con_info;
+ struct dpaa2_ni_queue_cfg queue_cfg = {0};
+ int error;
+
+ /* Obtain DPCON associated with the FQ's channel. */
+ con_info = device_get_ivars(fq->chan->con_dev);
+
+ queue_cfg.type = DPAA2_NI_QUEUE_RX;
+ queue_cfg.tc = fq->tc;
+ queue_cfg.idx = fq->flowid;
+ error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg);
+ if (error) {
+ device_printf(dev, "%s: failed to obtain Rx queue "
+ "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
+ queue_cfg.idx);
+ return (error);
+ }
+
+ fq->fqid = queue_cfg.fqid;
+
+ queue_cfg.dest_id = con_info->id;
+ queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
+ queue_cfg.priority = 1;
+ queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
+ queue_cfg.options =
+ DPAA2_NI_QUEUE_OPT_USER_CTX |
+ DPAA2_NI_QUEUE_OPT_DEST;
+ error = DPAA2_CMD_NI_SET_QUEUE(dev, child, cmd, &queue_cfg);
+ if (error) {
+ device_printf(dev, "%s: failed to update Rx queue "
+ "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
+ queue_cfg.idx);
+ return (error);
+ }
+
+ if (bootverbose) {
+ device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
+ "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
+		    fq->fqid, (uintmax_t) fq);
+ }
+
+ return (0);
+}
+
+static int
+dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_fq *fq)
+{
+ device_t child = dev;
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct dpaa2_devinfo *con_info;
+ struct dpaa2_ni_queue_cfg queue_cfg = {0};
+ struct dpaa2_ni_tx_ring *tx;
+ struct dpaa2_buf *buf;
+ uint32_t tx_rings_n = 0;
+ int error;
+
+ /* Obtain DPCON associated with the FQ's channel. */
+ con_info = device_get_ivars(fq->chan->con_dev);
+
+ KASSERT(sc->attr.num.tx_tcs <= DPAA2_NI_MAX_TCS,
+ ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
+ sc->attr.num.tx_tcs));
+ KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
+ ("%s: too many Tx buffers (%d): max=%d\n", __func__,
+ DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));
+
+ /* Setup Tx rings. */
+ for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
+ queue_cfg.type = DPAA2_NI_QUEUE_TX;
+ queue_cfg.tc = i;
+ queue_cfg.idx = fq->flowid;
+ queue_cfg.chan_id = fq->chan->id;
+
+ error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg);
+ if (error) {
+ device_printf(dev, "%s: failed to obtain Tx queue "
+ "configuration: tc=%d, flowid=%d\n", __func__,
+ queue_cfg.tc, queue_cfg.idx);
+ return (error);
+ }
+
+ tx = &fq->tx_rings[i];
+ tx->fq = fq;
+ tx->fqid = queue_cfg.fqid;
+ tx->txid = tx_rings_n;
+
+ if (bootverbose) {
+ device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
+ "fqid=%d\n", fq->flowid, i, fq->chan->id,
+ queue_cfg.fqid);
+ }
+
+ mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);
+
+ /* Allocate Tx ring buffer. */
+ tx->idx_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF,
+ M_NOWAIT, &tx->lock);
+ if (tx->idx_br == NULL) {
+			device_printf(dev, "%s: failed to allocate Tx ring "
+			    "buffer: fqid=%d\n", __func__, tx->fqid);
+ return (ENOMEM);
+ }
+
+ /* Configure Tx buffers. */
+ for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
+ buf = &tx->buf[j];
+ buf->type = DPAA2_BUF_TX;
+ buf->tx.dmat = buf->tx.sgt_dmat = NULL;
+ buf->tx.dmap = buf->tx.sgt_dmap = NULL;
+ buf->tx.paddr = buf->tx.sgt_paddr = 0;
+ buf->tx.vaddr = buf->tx.sgt_vaddr = NULL;
+ buf->tx.m = NULL;
+ buf->tx.idx = 0;
+
+			error = dpaa2_ni_seed_txbuf(sc, buf, j);
+			if (error != 0) {
+				device_printf(dev, "%s: failed to seed Tx "
+				    "buffer: error=%d\n", __func__, error);
+				return (error);
+			}
+
+ /* Add index of the Tx buffer to the ring. */
+ buf_ring_enqueue(tx->idx_br, (void *) j);
+ }
+
+ tx_rings_n++;
+ }
+
+ /* All Tx queues which belong to the same flowid have the same qdbin. */
+ fq->tx_qdbin = queue_cfg.qdbin;
+
+ queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
+ queue_cfg.tc = 0; /* ignored for TxConf queue */
+ queue_cfg.idx = fq->flowid;
+ error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg);
+ if (error) {
+ device_printf(dev, "%s: failed to obtain TxConf queue "
+ "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
+ queue_cfg.idx);
+ return (error);
+ }
+
+ fq->fqid = queue_cfg.fqid;
+
+ queue_cfg.dest_id = con_info->id;
+ queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
+ queue_cfg.priority = 0;
+ queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
+ queue_cfg.options =
+ DPAA2_NI_QUEUE_OPT_USER_CTX |
+ DPAA2_NI_QUEUE_OPT_DEST;
+ error = DPAA2_CMD_NI_SET_QUEUE(dev, child, cmd, &queue_cfg);
+ if (error) {
+ device_printf(dev, "%s: failed to update TxConf queue "
+ "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
+ queue_cfg.idx);
+ return (error);
+ }
+
+ return (0);
+}
+
+static int
+dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_fq *fq)
+{
+ device_t child = dev;
+ struct dpaa2_devinfo *con_info;
+ struct dpaa2_ni_queue_cfg queue_cfg = {0};
+ int error;
+
+ /* Obtain DPCON associated with the FQ's channel. */
+ con_info = device_get_ivars(fq->chan->con_dev);
+
+ queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR;
+ queue_cfg.tc = fq->tc; /* ignored */
+ queue_cfg.idx = fq->flowid; /* ignored */
+ error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg);
+ if (error) {
+ device_printf(dev, "%s: failed to obtain RxErr queue "
+ "configuration\n", __func__);
+ return (error);
+ }
+
+ fq->fqid = queue_cfg.fqid;
+
+ queue_cfg.dest_id = con_info->id;
+ queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
+ queue_cfg.priority = 1;
+ queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
+ queue_cfg.options =
+ DPAA2_NI_QUEUE_OPT_USER_CTX |
+ DPAA2_NI_QUEUE_OPT_DEST;
+ error = DPAA2_CMD_NI_SET_QUEUE(dev, child, cmd, &queue_cfg);
+ if (error) {
+ device_printf(dev, "%s: failed to update RxErr queue "
+ "configuration\n", __func__);
+ return (error);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Configure DPNI object to generate interrupts.
+ */
+static int
+dpaa2_ni_setup_irqs(device_t dev)
+{
+ device_t child = dev;
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct dpaa2_cmd *cmd = sc->cmd;
+ uint16_t ni_token = sc->ni_token;
+ int error;
+
+ /* Configure IRQs. */
+ error = dpaa2_ni_setup_msi(sc);
+ if (error) {
+ device_printf(dev, "%s: failed to allocate MSI\n", __func__);
+ return (error);
+ }
+ if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+ &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
+ device_printf(dev, "%s: failed to allocate IRQ resource\n",
+ __func__);
+ return (ENXIO);
+ }
+ if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
+ NULL, dpaa2_ni_intr, sc, &sc->intr)) {
+ device_printf(dev, "%s: failed to setup IRQ resource\n",
+ __func__);
+ return (ENXIO);
+ }
+
+ /* Configure DPNI to generate interrupts. */
+ error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, dpaa2_mcp_tk(cmd,
+ ni_token), DPNI_IRQ_INDEX,
+ DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
+ if (error) {
+ device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
+ __func__);
+ return (error);
+ }
+
+ /* Enable IRQ. */
+ error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, cmd, DPNI_IRQ_INDEX,
+ true);
+ if (error) {
+ device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
+ return (error);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Allocate MSI interrupts for DPNI.
+ */
+static int
+dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
+{
+ int val;
+
+ val = pci_msi_count(sc->dev);
+ if (val < DPAA2_NI_MSI_COUNT)
+ device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
+		    DPAA2_NI_MSI_COUNT);
+ val = MIN(val, DPAA2_NI_MSI_COUNT);
+
+ if (pci_alloc_msi(sc->dev, &val) != 0)
+ return (EINVAL);
+
+ for (int i = 0; i < val; i++)
+ sc->irq_rid[i] = i + 1;
+
+ return (0);
+}
+
+/**
+ * @brief Update DPNI according to the updated interface capabilities.
+ */
+static int
+dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc)
+{
+ const bool en_rxcsum = sc->ifp->if_capenable & IFCAP_RXCSUM;
+ const bool en_txcsum = sc->ifp->if_capenable & IFCAP_TXCSUM;
+ device_t dev = sc->dev;
+ device_t child = dev;
+ int error;
+
+ /* Setup checksums validation. */
+ error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, dpaa2_mcp_tk(sc->cmd,
+ sc->ni_token), DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum);
+ if (error) {
+ device_printf(dev, "%s: failed to %s L3 checksum validation\n",
+ __func__, en_rxcsum ? "enable" : "disable");
+ return (error);
+ }
+ error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, sc->cmd,
+ DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum);
+ if (error) {
+ device_printf(dev, "%s: failed to %s L4 checksum validation\n",
+ __func__, en_rxcsum ? "enable" : "disable");
+ return (error);
+ }
+
+ /* Setup checksums generation. */
+ error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, sc->cmd,
+ DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum);
+ if (error) {
+ device_printf(dev, "%s: failed to %s L3 checksum generation\n",
+ __func__, en_txcsum ? "enable" : "disable");
+ return (error);
+ }
+ error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, sc->cmd,
+ DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum);
+ if (error) {
+ device_printf(dev, "%s: failed to %s L4 checksum generation\n",
+ __func__, en_txcsum ? "enable" : "disable");
+ return (error);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Update DPNI according to the updated interface flags.
+ */
+static int
+dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc)
+{
+ const bool en_promisc = sc->ifp->if_flags & IFF_PROMISC;
+ const bool en_allmulti = sc->ifp->if_flags & IFF_ALLMULTI;
+ device_t dev = sc->dev;
+ device_t child = dev;
+ int error;
+
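+ /*
+ * Multicast promiscuous mode is implied by unicast promiscuous
+ * mode, i.e. it is enabled when either IFF_PROMISC or IFF_ALLMULTI
+ * is set.
+ */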
+ error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, dpaa2_mcp_tk(sc->cmd,
+ sc->ni_token), en_promisc ? true : en_allmulti);
+ if (error) {
+ device_printf(dev, "%s: failed to %s multicast promiscuous "
+ "mode\n", __func__, en_allmulti ? "enable" : "disable");
+ return (error);
+ }
+
+ error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, sc->cmd, en_promisc);
+ if (error) {
+ device_printf(dev, "%s: failed to %s unicast promiscuous mode\n",
+ __func__, en_promisc ? "enable" : "disable");
+ return (error);
+ }
+
+ return (0);
+}
+
+static int
+dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc)
+{
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *node, *node2;
+ struct sysctl_oid_list *parent, *parent2;
+ char cbuf[128];
+ int i;
+
+ ctx = device_get_sysctl_ctx(sc->dev);
+ parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
+
+ /* Add DPNI statistics. */
+ node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics");
+ parent = SYSCTL_CHILDREN(node);
+ for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) {
+ SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
+ CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats,
+ "IU", dpni_stat_sysctls[i].desc);
+ }
+ SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames",
+ CTLFLAG_RD, &sc->rx_anomaly_frames,
+ "Rx frames in the buffers outside of the buffer pools");
+ SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames",
+ CTLFLAG_RD, &sc->rx_single_buf_frames,
+ "Rx frames in single buffers");
+ SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames",
+ CTLFLAG_RD, &sc->rx_sg_buf_frames,
+ "Rx frames in scatter/gather list");
+ SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames",
+ CTLFLAG_RD, &sc->rx_enq_rej_frames,
+ "Enqueue rejected by QMan");
+ SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames",
+ CTLFLAG_RD, &sc->rx_ieoi_err_frames,
+ "QMan IEOI error");
+ SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames",
+ CTLFLAG_RD, &sc->tx_single_buf_frames,
+ "Tx single buffer frames");
+ SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames",
+ CTLFLAG_RD, &sc->tx_sg_frames,
+ "Tx S/G frames");
+
+ SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num",
+ CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num,
+ "IU", "number of Rx buffers in the buffer pool");
+ SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free",
+ CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free,
+ "IU", "number of free Rx buffers in the buffer pool");
+
+ parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
+
+ /* Add channels statistics. */
+ node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
+ parent = SYSCTL_CHILDREN(node);
+ for (int i = 0; i < sc->chan_n; i++) {
+ snprintf(cbuf, sizeof(cbuf), "%d", i);
+
+ node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf,
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel");
+ parent2 = SYSCTL_CHILDREN(node2);
+
+ SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames",
+ CTLFLAG_RD, &sc->channels[i]->tx_frames,
+ "Tx frames counter");
+ SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped",
+ CTLFLAG_RD, &sc->channels[i]->tx_dropped,
+ "Tx dropped counter");
+ }
+
+ return (0);
+}
+
+static int
+dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
+{
+ device_t dev = sc->dev;
+ int error;
+
+ KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
+ ("unexpected buffer alignment: %d\n", sc->buf_align));
+
+ /*
+ * DMA tag to allocate buffers for buffer pool.
+ *
+ * NOTE: QBMan supports DMA addresses up to 49-bits maximum.
+ * Bits 63-49 are not used by QBMan.
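+ *
+ * The driver keeps a buffer index in those unused bits (see
+ * dpaa2_ni_seed_rxbuf() and dpaa2_ni_build_fd()), hence the 49-bit
+ * exclusion window configured below.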
+ */
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(dev),
+ sc->buf_align, 0, /* alignment, boundary */
+ BUF_MAXADDR_49BIT, /* low restricted addr */
+ BUF_MAXADDR, /* high restricted addr */
+ NULL, NULL, /* filter, filterarg */
+ BUF_SIZE, 1, /* maxsize, nsegments */
+ BUF_SIZE, 0, /* maxsegsize, flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->bp_dmat);
+ if (error) {
+ device_printf(dev, "%s: failed to create DMA tag for buffer "
+ "pool\n", __func__);
+ return (error);
+ }
+
+ /* DMA tag to map Tx mbufs. */
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(dev),
+ sc->buf_align, 0, /* alignment, boundary */
+ BUF_MAXADDR_49BIT, /* low restricted addr */
+ BUF_MAXADDR, /* high restricted addr */
+ NULL, NULL, /* filter, filterarg */
+ DPAA2_TX_SEGS_MAXSZ, /* maxsize */
+ DPAA2_TX_SEGLIMIT, /* nsegments */
+ DPAA2_TX_SEG_SZ, 0, /* maxsegsize, flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->tx_dmat);
+ if (error) {
+ device_printf(dev, "%s: failed to create DMA tag for Tx "
+ "buffers\n", __func__);
+ return (error);
+ }
+
+ /* DMA tag to allocate channel storage. */
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(dev),
+ ETH_STORE_ALIGN, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* low restricted addr */
+ BUS_SPACE_MAXADDR, /* high restricted addr */
+ NULL, NULL, /* filter, filterarg */
+ ETH_STORE_SIZE, 1, /* maxsize, nsegments */
+ ETH_STORE_SIZE, 0, /* maxsegsize, flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->st_dmat);
+ if (error) {
+ device_printf(dev, "%s: failed to create DMA tag for channel "
+ "storage\n", __func__);
+ return (error);
+ }
+
+ /* DMA tag for Rx distribution key. */
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(dev),
+ PAGE_SIZE, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* low restricted addr */
+ BUS_SPACE_MAXADDR, /* high restricted addr */
+ NULL, NULL, /* filter, filterarg */
+ DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */
+ DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->rxd_dmat);
+ if (error) {
+ device_printf(dev, "%s: failed to create DMA tag for Rx "
+ "distribution key\n", __func__);
+ return (error);
+ }
+
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(dev),
+ PAGE_SIZE, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* low restricted addr */
+ BUS_SPACE_MAXADDR, /* high restricted addr */
+ NULL, NULL, /* filter, filterarg */
+ ETH_QOS_KCFG_BUF_SIZE, 1, /* maxsize, nsegments */
+ ETH_QOS_KCFG_BUF_SIZE, 0, /* maxsegsize, flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->qos_dmat);
+ if (error) {
+ device_printf(dev, "%s: failed to create DMA tag for QoS key\n",
+ __func__);
+ return (error);
+ }
+
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(dev),
+ PAGE_SIZE, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* low restricted addr */
+ BUS_SPACE_MAXADDR, /* high restricted addr */
+ NULL, NULL, /* filter, filterarg */
+ DPAA2_TX_SGT_SZ, 1, /* maxsize, nsegments */
+ DPAA2_TX_SGT_SZ, 0, /* maxsegsize, flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->sgt_dmat);
+ if (error) {
+ device_printf(dev, "%s: failed to create DMA tag for S/G "
+ "tables\n", __func__);
+ return (error);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Configure buffer layouts of the different DPNI queues.
+ */
+static int
+dpaa2_ni_set_buf_layout(device_t dev, struct dpaa2_cmd *cmd)
+{
+ device_t child = dev;
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct dpaa2_ni_buf_layout buf_layout = {0};
+ int error;
+
+ /*
+ * Select Rx/Tx buffer alignment. It's necessary to ensure that the
+ * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending
+ * on the WRIOP version.
+ */
+ sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) ||
+ sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0))
+ ? BUF_ALIGN_V1 : BUF_ALIGN;
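+ /*
+ * NOTE: Assumption based on the Linux dpaa2-eth driver: BUF_ALIGN_V1
+ * is 256 bytes (WRIOP versions up to 1.0.0) and BUF_ALIGN is 64 bytes.
+ */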
+
+ /*
+ * Round the buffer size down to the alignment to keep the size seen
+ * by WRIOP a multiple of it.
+ */
+ sc->buf_sz = ALIGN_DOWN(BUF_SIZE, sc->buf_align);
+
+ if (bootverbose)
+ device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
+ sc->buf_sz, sc->buf_align);
+
+ /*
+ * Frame Descriptor Tx buffer layout
+ *
+ * ADDR -> |---------------------|
+ * | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
+ * |---------------------|
+ * | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes
+ * |---------------------|
+ * | DATA HEADROOM |
+ * ADDR + OFFSET -> |---------------------|
+ * | |
+ * | |
+ * | FRAME DATA |
+ * | |
+ * | |
+ * |---------------------|
+ * | DATA TAILROOM |
+ * |---------------------|
+ *
+ * NOTE: It's for a single buffer frame only.
+ */
+ buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
+ buf_layout.pd_size = BUF_SWA_SIZE;
+ buf_layout.pass_timestamp = true;
+ buf_layout.pass_frame_status = true;
+ buf_layout.options =
+ BUF_LOPT_PRIV_DATA_SZ |
+ BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */
+ BUF_LOPT_FRAME_STATUS;
+ error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, cmd, &buf_layout);
+ if (error) {
+ device_printf(dev, "%s: failed to set Tx buffer layout\n",
+ __func__);
+ return (error);
+ }
+
+ /* Tx-confirmation buffer layout */
+ buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
+ buf_layout.options =
+ BUF_LOPT_TIMESTAMP |
+ BUF_LOPT_FRAME_STATUS;
+ error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, cmd, &buf_layout);
+ if (error) {
+ device_printf(dev, "%s: failed to set TxConf buffer layout\n",
+ __func__);
+ return (error);
+ }
+
+ /*
+ * Driver should reserve the amount of space indicated by this command
+ * as headroom in all Tx frames.
+ */
+ error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, cmd, &sc->tx_data_off);
+ if (error) {
+ device_printf(dev, "%s: failed to obtain Tx data offset\n",
+ __func__);
+ return (error);
+ }
+
+ if (bootverbose)
+ device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
+ if ((sc->tx_data_off % 64) != 0)
+ device_printf(dev, "Tx data offset (%d) is not a multiplication "
+ "of 64 bytes\n", sc->tx_data_off);
+
+ /*
+ * Frame Descriptor Rx buffer layout
+ *
+ * ADDR -> |---------------------|
+ * | SW FRAME ANNOTATION | 0 bytes
+ * |---------------------|
+ * | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes
+ * |---------------------|
+ * | DATA HEADROOM | OFFSET-BUF_RX_HWA_SIZE
+ * ADDR + OFFSET -> |---------------------|
+ * | |
+ * | |
+ * | FRAME DATA |
+ * | |
+ * | |
+ * |---------------------|
+ * | DATA TAILROOM | 0 bytes
+ * |---------------------|
+ *
+ * NOTE: It's for a single buffer frame only.
+ */
+ buf_layout.queue_type = DPAA2_NI_QUEUE_RX;
+ buf_layout.pd_size = 0;
+ buf_layout.fd_align = sc->buf_align;
+ buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE;
+ buf_layout.tail_size = 0;
+ buf_layout.pass_frame_status = true;
+ buf_layout.pass_parser_result = true;
+ buf_layout.pass_timestamp = true;
+ buf_layout.options =
+ BUF_LOPT_PRIV_DATA_SZ |
+ BUF_LOPT_DATA_ALIGN |
+ BUF_LOPT_DATA_HEAD_ROOM |
+ BUF_LOPT_DATA_TAIL_ROOM |
+ BUF_LOPT_FRAME_STATUS |
+ BUF_LOPT_PARSER_RESULT |
+ BUF_LOPT_TIMESTAMP;
+ error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, cmd, &buf_layout);
+ if (error) {
+ device_printf(dev, "%s: failed to set Rx buffer layout\n",
+ __func__);
+ return (error);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Enable Rx/Tx pause frames.
+ *
+ * NOTE: The DPNI stops transmitting when it receives a pause frame (Rx
+ * pause) and generates pause frames itself when necessary (Tx pause).
+ */
+static int
+dpaa2_ni_set_pause_frame(device_t dev, struct dpaa2_cmd *cmd)
+{
+ device_t child = dev;
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct dpaa2_ni_link_cfg link_cfg = {0};
+ int error;
+
+ error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, cmd, &link_cfg);
+ if (error) {
+ device_printf(dev, "%s: failed to obtain link configuration: "
+ "error=%d\n", __func__, error);
+ return (error);
+ }
+
+ /* Enable both Rx and Tx pause frames by default. */
+ link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE;
+ link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE;
+
+ error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, cmd, &link_cfg);
+ if (error) {
+ device_printf(dev, "%s: failed to set link configuration: "
+ "error=%d\n", __func__, error);
+ return (error);
+ }
+
+ sc->link_options = link_cfg.options;
+
+ return (0);
+}
+
+/**
+ * @brief Configure QoS table to determine the traffic class for the received
+ * frame.
+ */
+static int
+dpaa2_ni_set_qos_table(device_t dev, struct dpaa2_cmd *cmd)
+{
+ device_t child = dev;
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct dpaa2_ni_qos_table tbl;
+ struct dpaa2_buf *buf = &sc->qos_kcfg;
+ int error;
+
+ if (sc->attr.num.rx_tcs == 1 ||
+ !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) {
+ if (bootverbose)
+ device_printf(dev, "Ingress traffic classification is "
+ "not supported\n");
+ return (0);
+ }
+
+ /*
+ * Allocate a buffer visible to the device to hold the QoS table key
+ * configuration.
+ */
+ KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer",
+ __func__));
+ if (__predict_true(buf->store.dmat == NULL))
+ buf->store.dmat = sc->qos_dmat;
+
+ error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
+ BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
+ if (error) {
+ device_printf(dev, "%s: failed to allocate a buffer for QoS key "
+ "configuration\n", __func__);
+ return (error);
+ }
+
+ error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
+ buf->store.vaddr, ETH_QOS_KCFG_BUF_SIZE, dpaa2_ni_dmamap_cb,
+ &buf->store.paddr, BUS_DMA_NOWAIT);
+ if (error) {
+ device_printf(dev, "%s: failed to map QoS key configuration "
+ "buffer into bus space\n", __func__);
+ return (error);
+ }
+
+ tbl.default_tc = 0;
+ tbl.discard_on_miss = false;
+ tbl.keep_entries = false;
+ tbl.kcfg_busaddr = buf->store.paddr;
+ error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, cmd, &tbl);
+ if (error) {
+ device_printf(dev, "%s: failed to set QoS table\n", __func__);
+ return (error);
+ }
+
+ error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, cmd);
+ if (error) {
+ device_printf(dev, "%s: failed to clear QoS table\n", __func__);
+ return (error);
+ }
+
+ return (0);
+}
+
+static int
+dpaa2_ni_set_mac_addr(device_t dev, struct dpaa2_cmd *cmd, uint16_t rc_token,
+ uint16_t ni_token)
+{
+ device_t child = dev;
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct ifnet *ifp = sc->ifp;
+ struct ether_addr rnd_mac_addr;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ uint8_t dpni_mac_addr[ETHER_ADDR_LEN];
+ int error;
+
+ /*
+ * Get the MAC address associated with the physical port, if the DPNI is
+ * connected to a DPMAC directly associated with one of the physical
+ * ports.
+ */
+ error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, dpaa2_mcp_tk(cmd,
+ ni_token), mac_addr);
+ if (error) {
+ device_printf(dev, "%s: failed to obtain the MAC address "
+ "associated with the physical port\n", __func__);
+ return (error);
+ }
+
+ /* Get primary MAC address from the DPNI attributes. */
+ error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, cmd, dpni_mac_addr);
+ if (error) {
+ device_printf(dev, "%s: failed to obtain primary MAC address\n",
+ __func__);
+ return (error);
+ }
+
+ if (!ETHER_IS_ZERO(mac_addr)) {
+ /* Set MAC address of the physical port as DPNI's primary one. */
+ error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, cmd,
+ mac_addr);
+ if (error) {
+ device_printf(dev, "%s: failed to set primary MAC "
+ "address\n", __func__);
+ return (error);
+ }
+ for (int i = 0; i < ETHER_ADDR_LEN; i++)
+ sc->mac.addr[i] = mac_addr[i];
+ } else if (ETHER_IS_ZERO(dpni_mac_addr)) {
+ /* Generate random MAC address as DPNI's primary one. */
+ ether_gen_addr(ifp, &rnd_mac_addr);
+ for (int i = 0; i < ETHER_ADDR_LEN; i++)
+ mac_addr[i] = rnd_mac_addr.octet[i];
+
+ error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, cmd,
+ mac_addr);
+ if (error) {
+ device_printf(dev, "%s: failed to set random primary "
+ "MAC address\n", __func__);
+ return (error);
+ }
+ for (int i = 0; i < ETHER_ADDR_LEN; i++)
+ sc->mac.addr[i] = mac_addr[i];
+ } else {
+ for (int i = 0; i < ETHER_ADDR_LEN; i++)
+ sc->mac.addr[i] = dpni_mac_addr[i];
+ }
+
+ return (0);
+}
+
+static void
+dpaa2_ni_miibus_statchg(device_t dev)
+{
+ struct dpaa2_ni_softc *sc;
+ device_t child;
+ struct dpaa2_mac_link_state mac_link = { 0 };
+ uint16_t mac_token;
+ int error, link_state;
+
+ sc = device_get_softc(dev);
+ if (sc->fixed_link || sc->mii == NULL)
+ return;
+
+ /*
+ * Note: ifp link state will only be changed AFTER we are called so we
+ * cannot rely on ifp->if_linkstate here.
+ */
+ if (sc->mii->mii_media_status & IFM_AVALID) {
+ if (sc->mii->mii_media_status & IFM_ACTIVE)
+ link_state = LINK_STATE_UP;
+ else
+ link_state = LINK_STATE_DOWN;
+ } else
+ link_state = LINK_STATE_UNKNOWN;
+
+ if (link_state != sc->link_state) {
+ sc->link_state = link_state;
+
+ child = sc->dev;
+ error = DPAA2_CMD_MAC_OPEN(sc->dev, child, dpaa2_mcp_tk(sc->cmd,
+ sc->rc_token), sc->mac.dpmac_id, &mac_token);
+ if (error) {
+ device_printf(sc->dev, "%s: failed to open DPMAC: "
+ "id=%d, error=%d\n", __func__, sc->mac.dpmac_id,
+ error);
+ return;
+ }
+
+ if (link_state == LINK_STATE_UP ||
+ link_state == LINK_STATE_DOWN) {
+ /* Update DPMAC link state. */
+ mac_link.supported = sc->mii->mii_media.ifm_media;
+ mac_link.advert = sc->mii->mii_media.ifm_media;
+ mac_link.rate = 1000; /* TODO: Where to get from? ifmedia_baudrate()? */
+ mac_link.options =
+ DPAA2_MAC_LINK_OPT_AUTONEG |
+ DPAA2_MAC_LINK_OPT_PAUSE;
+ mac_link.up = (link_state == LINK_STATE_UP);
+ mac_link.state_valid = true;
+
+ /* Inform DPMAC about link state. */
+ error = DPAA2_CMD_MAC_SET_LINK_STATE(sc->dev, child,
+ sc->cmd, &mac_link);
+ if (error)
+ device_printf(sc->dev, "%s: failed to set DPMAC "
+ "link state: id=%d, error=%d\n", __func__,
+ sc->mac.dpmac_id, error);
+ }
+ DPAA2_CMD_MAC_CLOSE(sc->dev, child, dpaa2_mcp_tk(sc->cmd,
+ mac_token));
+ }
+}
+
+/**
+ * @brief Callback function to process media change request.
+ */
+static int
+dpaa2_ni_media_change(struct ifnet *ifp)
+{
+ struct dpaa2_ni_softc *sc = ifp->if_softc;
+
+ DPNI_LOCK(sc);
+ if (sc->mii) {
+ mii_mediachg(sc->mii);
+ sc->media_status = sc->mii->mii_media.ifm_media;
+ } else if (sc->fixed_link) {
+ if_printf(ifp, "%s: can't change media in fixed mode\n",
+ __func__);
+ }
+ DPNI_UNLOCK(sc);
+
+ return (0);
+}
+
+/**
+ * @brief Callback function to process media status request.
+ */
+static void
+dpaa2_ni_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ struct dpaa2_ni_softc *sc = ifp->if_softc;
+
+ DPNI_LOCK(sc);
+ if (sc->mii) {
+ mii_pollstat(sc->mii);
+ ifmr->ifm_active = sc->mii->mii_media_active;
+ ifmr->ifm_status = sc->mii->mii_media_status;
+ }
+ DPNI_UNLOCK(sc);
+}
+
+/**
+ * @brief Callout function to check and update media status.
+ */
+static void
+dpaa2_ni_media_tick(void *arg)
+{
+ struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
+
+ /* Check for media type change */
+ if (sc->mii) {
+ mii_tick(sc->mii);
+ if (sc->media_status != sc->mii->mii_media.ifm_media) {
+ printf("%s: media type changed (ifm_media=%x)\n",
+ __func__, sc->mii->mii_media.ifm_media);
+ dpaa2_ni_media_change(sc->ifp);
+ }
+ }
+
+ /* Schedule another timeout one second from now */
+ callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
+}
+
+static void
+dpaa2_ni_init(void *arg)
+{
+ struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
+ struct ifnet *ifp = sc->ifp;
+ device_t dev = sc->dev;
+ device_t child = dev;
+ int error;
+
+ DPNI_LOCK(sc);
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
+ DPNI_UNLOCK(sc);
+ return;
+ }
+ DPNI_UNLOCK(sc);
+
+ error = DPAA2_CMD_NI_ENABLE(dev, child, dpaa2_mcp_tk(sc->cmd,
+ sc->ni_token));
+ if (error)
+ device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
+ __func__, error);
+
+ DPNI_LOCK(sc);
+ if (sc->mii)
+ mii_mediachg(sc->mii);
+ callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
+
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ DPNI_UNLOCK(sc);
+
+ /* Force a link-state update to initialize things. */
+ dpaa2_ni_miibus_statchg(dev);
+}
+
+static int
+dpaa2_ni_transmit(struct ifnet *ifp, struct mbuf *m)
+{
+ struct dpaa2_ni_softc *sc = ifp->if_softc;
+ struct dpaa2_ni_channel *chan;
+ struct dpaa2_ni_tx_ring *tx;
+ uint32_t fqid;
+ bool found = false;
+ int chan_n = 0;
+
+ if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
+ /* Drop frames transmitted while the interface is not running. */
+ m_freem(m);
+ return (0);
+ }
+
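+ /*
+ * The flow ID of an mbuf received on this interface is set to the
+ * FQID of its Rx queue (see dpaa2_ni_rx()), so it can be mapped back
+ * to the channel the flow arrived on.
+ */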
+ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
+ fqid = m->m_pkthdr.flowid;
+ for (int i = 0; i < sc->chan_n; i++) {
+ chan = sc->channels[i];
+ for (int j = 0; j < chan->rxq_n; j++) {
+ if (fqid == chan->rx_queues[j].fqid) {
+ chan_n = chan->flowid;
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ break;
+ }
+ }
+ }
+ tx = DPAA2_TX_RING(sc, chan_n, 0);
+
+ TX_LOCK(tx);
+ dpaa2_ni_tx_locked(sc, tx, m);
+ TX_UNLOCK(tx);
+
+ return (0);
+}
+
+static void
+dpaa2_ni_qflush(struct ifnet *ifp)
+{
+ /* TODO: Find a way to drain Tx queues in QBMan. */
+ if_qflush(ifp);
+}
+
+static int
+dpaa2_ni_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct dpaa2_ni_softc *sc = ifp->if_softc;
+ struct ifreq *ifr = (struct ifreq *) data;
+ device_t dev, child;
+ uint32_t changed = 0;
+ int mtu, error, rc = 0;
+
+ dev = child = sc->dev;
+
+ switch (cmd) {
+ case SIOCSIFMTU:
+ DPNI_LOCK(sc);
+ mtu = ifr->ifr_mtu;
+ if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) {
+ DPNI_UNLOCK(sc);
+ return (EINVAL);
+ }
+ ifp->if_mtu = mtu;
+ DPNI_UNLOCK(sc);
+
+ /* Update maximum frame length. */
+ error = DPAA2_CMD_NI_SET_MFL(dev, child, dpaa2_mcp_tk(sc->cmd,
+ sc->ni_token), mtu + ETHER_HDR_LEN);
+ if (error) {
+ device_printf(dev, "%s: failed to update maximum frame "
+ "length: error=%d\n", __func__, error);
+ return (error);
+ }
+ break;
+ case SIOCSIFCAP:
+ changed = ifp->if_capenable ^ ifr->ifr_reqcap;
+ if (changed & IFCAP_HWCSUM) {
+ if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM)
+ ifp->if_capenable |= IFCAP_HWCSUM;
+ else
+ ifp->if_capenable &= ~IFCAP_HWCSUM;
+ }
+ rc = dpaa2_ni_setup_if_caps(sc);
+ if (rc) {
+ printf("%s: failed to update iface capabilities: "
+ "error=%d\n", __func__, rc);
+ rc = ENXIO;
+ }
+ break;
+ case SIOCSIFFLAGS:
+ DPNI_LOCK(sc);
+ if (ifp->if_flags & IFF_UP) {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ changed = ifp->if_flags ^ sc->if_flags;
+ if (changed & IFF_PROMISC ||
+ changed & IFF_ALLMULTI) {
+ rc = dpaa2_ni_setup_if_flags(sc);
+ }
+ } else {
+ DPNI_UNLOCK(sc);
+ dpaa2_ni_init(sc);
+ DPNI_LOCK(sc);
+ }
+ } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ /* dpni_if_stop(sc); */
+ }
+
+ sc->if_flags = ifp->if_flags;
+ DPNI_UNLOCK(sc);
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ DPNI_LOCK(sc);
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ DPNI_UNLOCK(sc);
+ rc = dpaa2_ni_update_mac_filters(ifp);
+ if (rc)
+ device_printf(dev, "%s: failed to update MAC "
+ "filters: error=%d\n", __func__, rc);
+ DPNI_LOCK(sc);
+ }
+ DPNI_UNLOCK(sc);
+ break;
+ case SIOCGIFMEDIA:
+ case SIOCSIFMEDIA:
+ if (sc->mii)
+ rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, cmd);
+ else if (sc->fixed_link) {
+ rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, cmd);
+ }
+ break;
+ default:
+ rc = ether_ioctl(ifp, cmd, data);
+ }
+
+ return (rc);
+}
+
+static int
+dpaa2_ni_update_mac_filters(struct ifnet *ifp)
+{
+ struct dpaa2_ni_softc *sc = ifp->if_softc;
+ struct dpaa2_ni_mcaddr_ctx ctx;
+ device_t dev, child;
+ int error;
+
+ dev = child = sc->dev;
+
+ /* Remove all multicast MAC filters. */
+ error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, dpaa2_mcp_tk(sc->cmd,
+ sc->ni_token), false, true);
+ if (error) {
+ device_printf(dev, "%s: failed to clear multicast MAC filters: "
+ "error=%d\n", __func__, error);
+ return (error);
+ }
+
+ ctx.ifp = ifp;
+ ctx.error = 0;
+ ctx.nent = 0;
+
+ if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);
+
+ return (ctx.error);
+}
+
+static u_int
+dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
+{
+ struct dpaa2_ni_mcaddr_ctx *ctx = arg;
+ struct dpaa2_ni_softc *sc = ctx->ifp->if_softc;
+ device_t dev, child;
+
+ dev = child = sc->dev;
+
+ if (ctx->error != 0)
+ return (0);
+
+ if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
+ ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, dpaa2_mcp_tk(
+ sc->cmd, sc->ni_token), LLADDR(sdl));
+ if (ctx->error != 0) {
+ device_printf(dev, "%s: can't add more then %d MAC "
+ "addresses, switching to the multicast promiscuous "
+ "mode\n", __func__, ctx->nent);
+
+ /* Enable multicast promiscuous mode. */
+ DPNI_LOCK(sc);
+ ctx->ifp->if_flags |= IFF_ALLMULTI;
+ sc->if_flags |= IFF_ALLMULTI;
+ ctx->error = dpaa2_ni_setup_if_flags(sc);
+ DPNI_UNLOCK(sc);
+
+ return (0);
+ }
+ ctx->nent++;
+ }
+
+ return (1);
+}
+
+static void
+dpaa2_ni_intr(void *arg)
+{
+ struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
+ device_t child = sc->dev;
+ uint32_t status = ~0u; /* clear all IRQ status bits */
+ int error;
+
+ error = DPAA2_CMD_NI_GET_IRQ_STATUS(sc->dev, child, dpaa2_mcp_tk(sc->cmd,
+ sc->ni_token), DPNI_IRQ_INDEX, &status);
+ if (error)
+ device_printf(sc->dev, "%s: failed to obtain IRQ status: "
+ "error=%d\n", __func__, error);
+}
+
+/**
+ * @brief Callback to obtain a physical address of the only DMA segment mapped.
+ */
+static void
+dpaa2_ni_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ if (error == 0) {
+ KASSERT(nseg == 1, ("too many segments: nseg=%d\n", nseg));
+ *(bus_addr_t *) arg = segs[0].ds_addr;
+ }
+}
+
+/**
+ * @brief Release new buffers to the buffer pool if necessary.
+ */
+static void
+dpaa2_ni_bp_task(void *arg, int count)
+{
+ device_t bp_dev;
+ struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
+ struct dpaa2_bp_softc *bpsc;
+ struct dpaa2_bp_conf bp_conf;
+ const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
+ int error;
+
+ /* There's only one buffer pool for now. */
+ bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
+ bpsc = device_get_softc(bp_dev);
+
+ /* Get state of the buffer pool. */
+ error = DPAA2_SWP_QUERY_BP(sc->channels[0]->io_dev, bpsc->attr.bpid,
+ &bp_conf);
+ if (error) {
+ device_printf(sc->dev, "%s: failed to query buffer pool "
+ "configuration: error=%d\n", __func__, error);
+ return;
+ }
+
+ /* Double allocated buffers number if free buffers < 25%. */
+ if (bp_conf.free_bufn < (buf_num >> 2)) {
+ (void)dpaa2_ni_seed_buf_pool(sc, buf_num);
+ DPAA2_ATOMIC_XCHG(&sc->buf_free, bp_conf.free_bufn);
+ }
+}
+
+/**
+ * @brief Poll frames from a specific channel when CDAN is received.
+ *
+ * NOTE: To be called from the DPIO interrupt handler.
+ */
+static void
+dpaa2_ni_poll(void *arg)
+{
+ struct dpaa2_ni_channel *chan = (struct dpaa2_ni_channel *) arg;
+ struct dpaa2_io_softc *iosc;
+ struct dpaa2_swp *swp;
+ struct dpaa2_ni_fq *fq;
+ int error, consumed = 0;
+
+ KASSERT(chan != NULL, ("%s: channel is NULL", __func__));
+
+ iosc = device_get_softc(chan->io_dev);
+ swp = iosc->swp;
+
+ do {
+ error = dpaa2_swp_pull(swp, chan->id, &chan->store,
+ ETH_STORE_FRAMES);
+ if (error) {
+ device_printf(chan->ni_dev, "%s: failed to pull frames: "
+ "chan_id=%d, error=%d\n", __func__, chan->id, error);
+ break;
+ }
+
+ /*
+ * TODO: Combine frames from the same Rx queue returned in response
+ * to the current VDQ command into a chain (linked with m_nextpkt)
+ * to amortize the FQ lock.
+ */
+ error = dpaa2_ni_consume_frames(chan, &fq, &consumed);
+ if (error == ENOENT) {
+ break;
+ }
+ if (error == ETIMEDOUT) {
+ device_printf(chan->ni_dev, "%s: timeout to consume "
+ "frames: chan_id=%d\n", __func__, chan->id);
+ }
+ } while (true);
+
+ /* Re-arm channel to generate CDAN. */
+ error = DPAA2_SWP_CONF_WQ_CHANNEL(chan->io_dev, &chan->ctx);
+ if (error) {
+ device_printf(chan->ni_dev, "%s: failed to rearm: chan_id=%d, "
+ "error=%d\n", __func__, chan->id, error);
+ }
+}
+
+/**
+ * @brief Transmit mbufs.
+ */
+static void
+dpaa2_ni_tx_locked(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
+ struct mbuf *m)
+{
+ struct dpaa2_ni_fq *fq = tx->fq;
+ struct dpaa2_buf *buf;
+ struct dpaa2_fd fd;
+ struct mbuf *m_d;
+ bus_dma_segment_t txsegs[DPAA2_TX_SEGLIMIT];
+ uint64_t idx;
+ void *pidx;
+ int error, rc, txnsegs;
+
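+ /*
+ * Free Tx buffer indexes circulate through idx_br as opaque pointer
+ * values; dpaa2_ni_tx_conf() returns an index to the ring once the
+ * frame has been confirmed by hardware. An empty ring means that all
+ * Tx buffers are in flight.
+ */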
+ /* Obtain an index of a Tx buffer. */
+ pidx = buf_ring_dequeue_sc(tx->idx_br);
+ if (__predict_false(pidx == NULL)) {
+ /* TODO: Do not give up easily. */
+ m_freem(m);
+ return;
+ } else {
+ idx = (uint64_t) pidx;
+ buf = &tx->buf[idx];
+ buf->tx.m = m;
+ buf->tx.idx = idx;
+ buf->tx.sgt_paddr = 0;
+ }
+
+ /* Load mbuf to transmit. */
+ error = bus_dmamap_load_mbuf_sg(buf->tx.dmat, buf->tx.dmap, m,
+ txsegs, &txnsegs, BUS_DMA_NOWAIT);
+ if (__predict_false(error != 0)) {
+ /* Too many fragments, trying to defragment... */
+ m_d = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
+ if (m_d == NULL) {
+ device_printf(sc->dev, "%s: mbuf "
+ "defragmentation failed\n", __func__);
+ fq->chan->tx_dropped++;
+ goto err;
+ }
+
+ buf->tx.m = m = m_d;
+ error = bus_dmamap_load_mbuf_sg(buf->tx.dmat,
+ buf->tx.dmap, m, txsegs, &txnsegs, BUS_DMA_NOWAIT);
+ if (__predict_false(error != 0)) {
+ device_printf(sc->dev, "%s: failed to load "
+ "mbuf: error=%d\n", __func__, error);
+ fq->chan->tx_dropped++;
+ goto err;
+ }
+ }
+
+ /* Build frame descriptor. */
+ error = dpaa2_ni_build_fd(sc, tx, buf, txsegs, txnsegs, &fd);
+ if (__predict_false(error != 0)) {
+ device_printf(sc->dev, "%s: failed to build frame "
+ "descriptor: error=%d\n", __func__, error);
+ fq->chan->tx_dropped++;
+ goto err_unload;
+ }
+
+ /* Make the frame data and the S/G table visible to the device. */
+ bus_dmamap_sync(buf->tx.dmat, buf->tx.dmap,
+ BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(buf->tx.sgt_dmat, buf->tx.sgt_dmap,
+ BUS_DMASYNC_PREWRITE);
+
+ /* TODO: Enqueue several frames in a single command. */
+ for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
+ /* TODO: Return error codes instead of # of frames. */
+ rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid,
+ &fd, 1);
+ if (rc == 1) {
+ break;
+ }
+ }
+
+ if (rc != 1) {
+ fq->chan->tx_dropped++;
+ goto err_unload;
+ } else {
+ fq->chan->tx_frames++;
+ }
+ return;
+
+err_unload:
+ bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap);
+ if (buf->tx.sgt_paddr != 0) {
+ bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
+ }
+err:
+ m_freem(buf->tx.m);
+ buf_ring_enqueue(tx->idx_br, pidx);
+}
+
+static int
+dpaa2_ni_consume_frames(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq **src,
+ uint32_t *consumed)
+{
+ struct dpaa2_ni_fq *fq = NULL;
+ struct dpaa2_dq *dq;
+ struct dpaa2_fd *fd;
+ int rc, frames = 0;
+
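+ /*
+ * dpaa2_ni_chan_storage_next() returns EINPROGRESS while further
+ * dequeue responses are expected, EALREADY once the VDQ command has
+ * expired and ENOENT when the frame queue is empty; in the last two
+ * cases the final response may still carry a valid frame.
+ */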
+ do {
+ rc = dpaa2_ni_chan_storage_next(chan, &dq);
+ if (rc == EINPROGRESS) {
+ if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
+ fd = &dq->fdr.fd;
+ fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
+ fq->consume(chan, fq, fd);
+ frames++;
+ }
+ } else if (rc == EALREADY || rc == ENOENT) {
+ if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
+ fd = &dq->fdr.fd;
+ fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
+ fq->consume(chan, fq, fd);
+ frames++;
+ }
+ break;
+ } else {
+ KASSERT(1 == 0, ("%s: should not reach here", __func__));
+ }
+ } while (true);
+
+ KASSERT(chan->store_idx < chan->store_sz,
+ ("channel store idx >= size: store_idx=%d, store_sz=%d",
+ chan->store_idx, chan->store_sz));
+
+ /*
+ * A dequeue operation pulls frames from a single queue into the store.
+ * Return the frame queue and a number of consumed frames as an output.
+ */
+ if (src != NULL)
+ *src = fq;
+ if (consumed != NULL)
+ *consumed = frames;
+
+ return (rc);
+}
+
+/**
+ * @brief Receive frames.
+ */
+static int
+dpaa2_ni_rx(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
+ struct dpaa2_fd *fd)
+{
+ struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
+ struct dpaa2_bp_softc *bpsc;
+ struct dpaa2_buf *buf;
+ struct ifnet *ifp = sc->ifp;
+ struct mbuf *m;
+ device_t bp_dev;
+ bus_addr_t paddr = (bus_addr_t) fd->addr;
+ bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
+ void *buf_data;
+ int buf_idx, buf_len;
+ int error, released_n = 0;
+
+ /*
+ * Get buffer index from the ADDR_TOK (not used by QBMan) bits of the
+ * physical address.
+ */
+ buf_idx = dpaa2_ni_fd_buf_idx(fd);
+ buf = &sc->buf[buf_idx];
+
+ KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
+ if (paddr != buf->rx.paddr) {
+ panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
+ __func__, paddr, buf->rx.paddr);
+ }
+
+ /* Update statistics. */
+ switch (dpaa2_ni_fd_err(fd)) {
+ case 1: /* Enqueue rejected by QMan */
+ sc->rx_enq_rej_frames++;
+ break;
+ case 2: /* QMan IEOI error */
+ sc->rx_ieoi_err_frames++;
+ break;
+ default:
+ break;
+ }
+ switch (dpaa2_ni_fd_format(fd)) {
+ case DPAA2_FD_SINGLE:
+ sc->rx_single_buf_frames++;
+ break;
+ case DPAA2_FD_SG:
+ sc->rx_sg_buf_frames++;
+ break;
+ default:
+ break;
+ }
+
+ m = buf->rx.m;
+ buf->rx.m = NULL;
+ bus_dmamap_sync(buf->rx.dmat, buf->rx.dmap,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap);
+
+ buf_len = dpaa2_ni_fd_data_len(fd);
+ buf_data = (uint8_t *)buf->rx.vaddr + dpaa2_ni_fd_offset(fd);
+
+ /* Prefetch mbuf data. */
+ __builtin_prefetch(buf_data);
+
+ /* Write value to mbuf (avoid reading). */
+ m->m_flags |= M_PKTHDR;
+ m->m_data = buf_data;
+ m->m_len = buf_len;
+ m->m_pkthdr.len = buf_len;
+ m->m_pkthdr.rcvif = ifp;
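+ /*
+ * Use the Rx FQID as the flow ID: dpaa2_ni_transmit() maps it back
+ * to the channel the frame arrived on.
+ */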
+ m->m_pkthdr.flowid = fq->fqid;
+ M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
+
+ (*ifp->if_input)(ifp, m);
+
+ /* Keep the buffer to be recycled. */
+ chan->recycled[chan->recycled_n++] = paddr;
+ KASSERT(chan->recycled_n <= DPAA2_SWP_BUFS_PER_CMD,
+ ("%s: too many buffers to recycle", __func__));
+
+ /* Re-seed and release recycled buffers back to the pool. */
+ if (chan->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
+ /* Release new buffers to the pool if needed. */
+ taskqueue_enqueue(sc->bp_taskq, &sc->bp_task);
+
+ for (int i = 0; i < chan->recycled_n; i++) {
+ paddr = chan->recycled[i];
+
+ /* Parse ADDR_TOK of the recycled buffer. */
+ buf_idx = (paddr >> DPAA2_NI_BUF_IDX_SHIFT)
+ & DPAA2_NI_BUF_IDX_MASK;
+ buf = &sc->buf[buf_idx];
+
+ /* Seed recycled buffer. */
+ error = dpaa2_ni_seed_rxbuf(sc, buf, buf_idx);
+ KASSERT(error == 0, ("%s: failed to seed recycled "
+ "buffer: error=%d", __func__, error));
+ if (__predict_false(error != 0)) {
+ device_printf(sc->dev, "%s: failed to seed "
+ "recycled buffer: error=%d\n", __func__,
+ error);
+ continue;
+ }
+
+ /* Prepare buffer to be released in a single command. */
+ released[released_n++] = buf->rx.paddr;
+ }
+
+ /* There's only one buffer pool for now. */
+ bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
+ bpsc = device_get_softc(bp_dev);
+
+ error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid,
+ released, released_n);
+ if (__predict_false(error != 0)) {
+ device_printf(sc->dev, "%s: failed to release buffers "
+ "to the pool: error=%d\n", __func__, error);
+ return (error);
+ }
+
+ /* Be ready to recycle the next portion of the buffers. */
+ chan->recycled_n = 0;
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Receive Rx error frames.
+ */
+static int
+dpaa2_ni_rx_err(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
+ struct dpaa2_fd *fd)
+{
+ device_t bp_dev;
+ struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
+ struct dpaa2_bp_softc *bpsc;
+ struct dpaa2_buf *buf;
+ bus_addr_t paddr = (bus_addr_t) fd->addr;
+ int buf_idx, error;
+
+ /*
+ * Get buffer index from the ADDR_TOK (not used by QBMan) bits of the
+ * physical address.
+ */
+ buf_idx = dpaa2_ni_fd_buf_idx(fd);
+ buf = &sc->buf[buf_idx];
+
+ KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
+ if (paddr != buf->rx.paddr) {
+ panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
+ __func__, paddr, buf->rx.paddr);
+ }
+
+ /* There's only one buffer pool for now. */
+ bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
+ bpsc = device_get_softc(bp_dev);
+
+ /* Release buffer to QBMan buffer pool. */
+ error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid, &paddr, 1);
+ if (error != 0) {
+ device_printf(sc->dev, "%s: failed to release frame buffer to "
+ "the pool: error=%d\n", __func__, error);
+ return (error);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Receive Tx confirmation frames.
+ */
+static int
+dpaa2_ni_tx_conf(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
+ struct dpaa2_fd *fd)
+{
+ struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
+ struct dpaa2_ni_channel *buf_chan;
+ struct dpaa2_ni_tx_ring *tx;
+ struct dpaa2_buf *buf;
+ bus_addr_t paddr = (bus_addr_t) (fd->addr & BUF_MAXADDR_49BIT);
+ uint64_t buf_idx;
+ int chan_idx, tx_idx;
+
+ /*
+ * Get channel, Tx ring and buffer indexes from the ADDR_TOK bits
+ * (not used by QBMan) of the physical address.
+ */
+ chan_idx = dpaa2_ni_fd_chan_idx(fd);
+ tx_idx = dpaa2_ni_fd_tx_idx(fd);
+ buf_idx = (uint64_t) dpaa2_ni_fd_txbuf_idx(fd);
+
+ KASSERT(tx_idx < DPAA2_NI_MAX_TCS, ("%s: invalid Tx ring index",
+ __func__));
+ KASSERT(buf_idx < DPAA2_NI_BUFS_PER_TX, ("%s: invalid Tx buffer index",
+ __func__));
+
+ buf_chan = sc->channels[chan_idx];
+ tx = &buf_chan->txc_queue.tx_rings[tx_idx];
+ buf = &tx->buf[buf_idx];
+
+ KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
+ if (paddr != buf->tx.paddr) {
+ panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
+ __func__, paddr, buf->tx.paddr);
+ }
+
+ bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap);
+ if (buf->tx.sgt_paddr != 0)
+ bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
+ m_freem(buf->tx.m);
+
+ /* Return Tx buffer index back to the ring. */
+ buf_ring_enqueue(tx->idx_br, (void *) buf_idx);
+
+ return (0);
+}
+
+/**
+ * @brief Compare versions of the DPAA2 network interface API.
+ */
+static int
+dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
+ uint16_t minor)
+{
+ if (sc->api_major == major)
+ return (sc->api_minor - minor);
+ return (sc->api_major - major);
+}
+
+/**
+ * @brief Allocate Rx buffers visible to QBMan and release them to the pool.
+ */
+static int
+dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *sc, uint32_t seedn)
+{
+ device_t bp_dev;
+ struct dpaa2_bp_softc *bpsc;
+ struct dpaa2_buf *buf;
+ bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD];
+ const int allocated = DPAA2_ATOMIC_READ(&sc->buf_num);
+ int i, error, bufn = 0;
+
+ KASSERT(sc->bp_dmat != NULL, ("%s: DMA tag for buffer pool not "
+ "created?", __func__));
+
+ /* There's only one buffer pool for now. */
+ bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
+ bpsc = device_get_softc(bp_dev);
+
+ /* Limit # of buffers released to the pool. */
+ if (allocated + seedn > DPAA2_NI_BUFS_MAX)
+ seedn = DPAA2_NI_BUFS_MAX - allocated;
+
+ /* Release "seedn" buffers to the pool. */
+ for (i = allocated; i < (allocated + seedn); i++) {
+ /* Enough buffers were allocated for a single command. */
+ if (bufn == DPAA2_SWP_BUFS_PER_CMD) {
+ error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev,
+ bpsc->attr.bpid, paddr, bufn);
+ if (error) {
+ device_printf(sc->dev, "%s: failed to release "
+ "buffers to the pool (1)\n", __func__);
+ return (error);
+ }
+ DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
+ bufn = 0;
+ }
+
+ buf = &sc->buf[i];
+ buf->type = DPAA2_BUF_RX;
+ buf->rx.m = NULL;
+ buf->rx.dmap = NULL;
+ buf->rx.paddr = 0;
+ buf->rx.vaddr = NULL;
+ error = dpaa2_ni_seed_rxbuf(sc, buf, i);
+ if (error)
+ break;
+ paddr[bufn] = buf->rx.paddr;
+ bufn++;
+ }
+
+ /* Release if there are buffers left. */
+ if (bufn > 0) {
+ error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev,
+ bpsc->attr.bpid, paddr, bufn);
+ if (error) {
+ device_printf(sc->dev, "%s: failed to release "
+ "buffers to the pool (2)\n", __func__);
+ return (error);
+ }
+ DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Prepare Rx buffer to be released to the buffer pool.
+ */
+static int
+dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf, int idx)
+{
+ struct mbuf *m;
+ bus_dmamap_t dmap;
+ bus_dma_segment_t segs;
+ int error, nsegs;
+
+ KASSERT(sc->bp_dmat != NULL, ("%s: Buffer pool DMA tag is not "
+ "allocated?", __func__));
+ KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
+
+ /* Keep DMA tag for this buffer. */
+ if (__predict_false(buf->rx.dmat == NULL))
+ buf->rx.dmat = sc->bp_dmat;
+
+ /* Create a DMA map for the given buffer if it doesn't exist yet. */
+ if (__predict_false(buf->rx.dmap == NULL)) {
+ error = bus_dmamap_create(buf->rx.dmat, 0, &dmap);
+ if (error) {
+ device_printf(sc->dev, "%s: failed to create DMA map "
+ "for buffer: buf_idx=%d, error=%d\n", __func__,
+ idx, error);
+ return (error);
+ }
+ buf->rx.dmap = dmap;
+ }
+
+ /* Allocate mbuf if needed. */
+ if (__predict_false(buf->rx.m == NULL)) {
+ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, BUF_SIZE);
+ if (__predict_false(m == NULL)) {
+ device_printf(sc->dev, "%s: failed to allocate mbuf for "
+ "buffer\n", __func__);
+ return (ENOMEM);
+ }
+ m->m_len = m->m_ext.ext_size;
+ m->m_pkthdr.len = m->m_ext.ext_size;
+ buf->rx.m = m;
+ } else
+ m = buf->rx.m;
+
+ error = bus_dmamap_load_mbuf_sg(buf->rx.dmat, buf->rx.dmap,
+ m, &segs, &nsegs, BUS_DMA_NOWAIT);
+ KASSERT(nsegs == 1, ("one segment expected: nsegs=%d", nsegs));
+ KASSERT(error == 0, ("failed to map mbuf: error=%d", error));
+ if (__predict_false(error != 0 || nsegs != 1)) {
+ device_printf(sc->dev, "%s: failed to map mbuf: error=%d, "
+ "nsegs=%d\n", __func__, error, nsegs);
+ bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap);
+ m_freem(m);
+ return (error);
+ }
+ buf->rx.paddr = segs.ds_addr;
+ buf->rx.vaddr = m->m_data;
+
+ /*
+ * Write buffer index to the ADDR_TOK (bits 63-49) which is not used by
+ * QBMan and is supposed to assist in physical to virtual address
+ * translation.
+ *
+ * NOTE: "lowaddr" and "highaddr" of the window which cannot be accessed
+ * by QBMan must be configured in the DMA tag accordingly.
+ */
+ buf->rx.paddr =
+ ((uint64_t)(idx & DPAA2_NI_BUF_IDX_MASK) <<
+ DPAA2_NI_BUF_IDX_SHIFT) |
+ (buf->rx.paddr & DPAA2_NI_BUF_ADDR_MASK);
+
+ return (0);
+}
+
+/**
+ * @brief Prepare Tx buffer to be added to the Tx ring.
+ */
+static int
+dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf, int idx)
+{
+ bus_dmamap_t dmap;
+ int error;
+
+ KASSERT(sc->tx_dmat != NULL, ("%s: Tx DMA tag is not allocated?",
+ __func__));
+ KASSERT(sc->sgt_dmat != NULL, ("%s: S/G DMA tag not allocated?",
+ __func__));
+ KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
+
+ /* Keep DMA tags for this buffer. */
+ if (__predict_true(buf->tx.dmat == NULL))
+ buf->tx.dmat = sc->tx_dmat;
+ if (__predict_true(buf->tx.sgt_dmat == NULL))
+ buf->tx.sgt_dmat = sc->sgt_dmat;
+
+ /* Create a DMA map for the given buffer if it doesn't exist yet. */
+ if (__predict_true(buf->tx.dmap == NULL)) {
+ error = bus_dmamap_create(buf->tx.dmat, 0, &dmap);
+ if (error != 0) {
+ device_printf(sc->dev, "%s: failed to create "
+ "Tx DMA map: error=%d\n", __func__, error);
+ return (error);
+ }
+ buf->tx.dmap = dmap;
+ }
+
+ /* Allocate a buffer to store scatter/gather table. */
+ if (__predict_true(buf->tx.sgt_vaddr == NULL)) {
+ error = bus_dmamem_alloc(buf->tx.sgt_dmat,
+ &buf->tx.sgt_vaddr, BUS_DMA_ZERO | BUS_DMA_COHERENT,
+ &buf->tx.sgt_dmap);
+ if (error != 0) {
+ device_printf(sc->dev, "%s: failed to allocate "
+ "S/G table: error=%d\n", __func__, error);
+ return (error);
+ }
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Allocate channel storage visible to QBMan.
+ */
+static int
+dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *sc,
+ struct dpaa2_ni_channel *chan)
+{
+ struct dpaa2_buf *buf = &chan->store;
+ int error;
+
+ KASSERT(sc->st_dmat != NULL, ("%s: channel storage DMA tag is not "
+ "allocated?", __func__));
+ KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not channel storage buffer",
+ __func__));
+
+ /* Keep DMA tag for this buffer. */
+ if (__predict_false(buf->store.dmat == NULL)) {
+ buf->store.dmat = sc->st_dmat;
+ }
+
+ if (__predict_false(buf->store.vaddr == NULL)) {
+ error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
+ BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
+ if (error) {
+ device_printf(sc->dev, "%s: failed to allocate channel "
+ "storage\n", __func__);
+ return (error);
+ }
+ }
+
+ if (__predict_false(buf->store.paddr == 0)) {
+ error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
+ buf->store.vaddr, ETH_STORE_SIZE, dpaa2_ni_dmamap_cb,
+ &buf->store.paddr, BUS_DMA_NOWAIT);
+ if (error) {
+ device_printf(sc->dev, "%s: failed to map channel "
+ "storage\n", __func__);
+ return (error);
+ }
+ }
+
+ chan->store_sz = ETH_STORE_FRAMES;
+ chan->store_idx = 0;
+
+ return (0);
+}
+
+/**
+ * @brief Build a DPAA2 frame descriptor.
+ */
+static int
+dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
+ struct dpaa2_buf *buf, bus_dma_segment_t *txsegs, int txnsegs,
+ struct dpaa2_fd *fd)
+{
+ struct dpaa2_ni_channel *chan = tx->fq->chan;
+ struct dpaa2_sg_entry *sgt;
+ int i, error;
+
+ KASSERT(txnsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments, "
+ "txnsegs (%d) > %d", __func__, txnsegs, DPAA2_TX_SEGLIMIT));
+ KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
+ KASSERT(buf->tx.sgt_vaddr != NULL, ("%s: S/G table not allocated?",
+ __func__));
+
+ /* Reset frame descriptor fields. */
+ memset(fd, 0, sizeof(*fd));
+
+ if (__predict_true(txnsegs <= DPAA2_TX_SEGLIMIT)) {
+ /* Populate S/G table. */
+ sgt = (struct dpaa2_sg_entry *) buf->tx.sgt_vaddr +
+ sc->tx_data_off;
+ for (i = 0; i < txnsegs; i++) {
+ sgt[i].addr = (uint64_t) txsegs[i].ds_addr;
+ sgt[i].len = (uint32_t) txsegs[i].ds_len;
+ sgt[i].offset_fmt = 0u;
+ }
+ sgt[i-1].offset_fmt |= 0x8000u; /* set final entry flag */
+
+ KASSERT(buf->tx.sgt_paddr == 0, ("%s: sgt_paddr(%#jx) != 0",
+ __func__, buf->tx.sgt_paddr));
+
+ /* Load S/G table. */
+ error = bus_dmamap_load(buf->tx.sgt_dmat, buf->tx.sgt_dmap,
+ buf->tx.sgt_vaddr, DPAA2_TX_SGT_SZ, dpaa2_ni_dmamap_cb,
+ &buf->tx.sgt_paddr, BUS_DMA_NOWAIT);
+ if (__predict_false(error != 0)) {
+ device_printf(sc->dev, "%s: failed to map S/G table: "
+ "error=%d\n", __func__, error);
+ return (error);
+ }
+ buf->tx.paddr = buf->tx.sgt_paddr;
+ buf->tx.vaddr = buf->tx.sgt_vaddr;
+ sc->tx_sg_frames++; /* for sysctl(9) */
+ } else {
+ return (EINVAL);
+ }
+
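+ /*
+ * Encode the channel, Tx ring and Tx buffer indexes into the unused
+ * upper (ADDR_TOK) bits of the frame descriptor address; they are
+ * recovered in dpaa2_ni_tx_conf() to find the buffer to release.
+ */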
+ fd->addr =
+ ((uint64_t)(chan->flowid & DPAA2_NI_BUF_CHAN_MASK) <<
+ DPAA2_NI_BUF_CHAN_SHIFT) |
+ ((uint64_t)(tx->txid & DPAA2_NI_TX_IDX_MASK) <<
+ DPAA2_NI_TX_IDX_SHIFT) |
+ ((uint64_t)(buf->tx.idx & DPAA2_NI_TXBUF_IDX_MASK) <<
+ DPAA2_NI_TXBUF_IDX_SHIFT) |
+ (buf->tx.paddr & DPAA2_NI_BUF_ADDR_MASK);
+
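+ /*
+ * NOTE: Assumption based on the DPAA2 FD layout used by the Linux
+ * dpaa2-eth driver: 0x2000u selects the scatter/gather frame format
+ * in offset_fmt_sl and 0x00800000u (PTA) requests pass-through
+ * annotation in the control word.
+ */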
+ fd->data_length = (uint32_t) buf->tx.m->m_pkthdr.len;
+ fd->bpid_ivp_bmt = 0;
+ fd->offset_fmt_sl = 0x2000u | sc->tx_data_off;
+ fd->ctrl = 0x00800000u;
+
+ return (0);
+}
+
+static int
+dpaa2_ni_fd_err(struct dpaa2_fd *fd)
+{
+ return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
+}
+
+static uint32_t
+dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
+{
+ if (dpaa2_ni_fd_short_len(fd))
+ return (fd->data_length & DPAA2_NI_FD_LEN_MASK);
+
+ return (fd->data_length);
+}
+
+static int
+dpaa2_ni_fd_chan_idx(struct dpaa2_fd *fd)
+{
+ return ((((bus_addr_t) fd->addr) >> DPAA2_NI_BUF_CHAN_SHIFT) &
+ DPAA2_NI_BUF_CHAN_MASK);
+}
+
+static int
+dpaa2_ni_fd_buf_idx(struct dpaa2_fd *fd)
+{
+ return ((((bus_addr_t) fd->addr) >> DPAA2_NI_BUF_IDX_SHIFT) &
+ DPAA2_NI_BUF_IDX_MASK);
+}
+
+static int
+dpaa2_ni_fd_tx_idx(struct dpaa2_fd *fd)
+{
+ return ((((bus_addr_t) fd->addr) >> DPAA2_NI_TX_IDX_SHIFT) &
+ DPAA2_NI_TX_IDX_MASK);
+}
+
+static int
+dpaa2_ni_fd_txbuf_idx(struct dpaa2_fd *fd)
+{
+ return ((((bus_addr_t) fd->addr) >> DPAA2_NI_TXBUF_IDX_SHIFT) &
+ DPAA2_NI_TXBUF_IDX_MASK);
+}
+
+static int
+dpaa2_ni_fd_format(struct dpaa2_fd *fd)
+{
+ return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
+ DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
+}
+
+static bool
+dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
+{
+ return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT)
+ & DPAA2_NI_FD_SL_MASK) == 1);
+}
+
+static int
+dpaa2_ni_fd_offset(struct dpaa2_fd *fd)
+{
+ return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK);
+}
+
+/**
+ * @brief Collect statistics of the network interface.
+ */
+static int
+dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
+{
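+ /*
+ * The statistics sysctls are registered with their table index as the
+ * OID number (see dpaa2_ni_setup_sysctls()), so oidp->oid_number can
+ * be used to look up the counter directly.
+ */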
+ struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
+ struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
+ device_t child = sc->dev;
+ uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
+ uint64_t result = 0;
+ int error;
+
+ error = DPAA2_CMD_NI_GET_STATISTICS(sc->dev, child,
+ dpaa2_mcp_tk(sc->cmd, sc->ni_token), stat->page, 0, cnt);
+ if (!error)
+ result = cnt[stat->cnt];
+
+ return (sysctl_handle_64(oidp, &result, 0, req));
+}
+
+static int
+dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
+{
+ struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
+ uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
+
+ return (sysctl_handle_32(oidp, &buf_num, 0, req));
+}
+
+static int
+dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
+{
+ struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
+ uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free);
+
+ return (sysctl_handle_32(oidp, &buf_free, 0, req));
+}
+
+static int
+dpaa2_ni_set_hash(device_t dev, uint64_t flags)
+{
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ uint64_t key = 0;
+ int i;
+
+ if (sc->attr.num.queues <= 1) {
+ return (EOPNOTSUPP);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ if (dist_fields[i].rxnfc_field & flags) {
+ key |= dist_fields[i].id;
+ }
+ }
+
+ return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
+}
+
+/**
+ * @brief Set the Rx distribution (hash or flow classification) key.
+ *
+ * "flags" is a combination of RXH_ bits.
+ */
+static int
+dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
+{
+ device_t child = dev;
+ struct dpaa2_ni_softc *sc = device_get_softc(dev);
+ struct dpkg_profile_cfg cls_cfg;
+ struct dpkg_extract *key;
+ struct dpaa2_buf *buf = &sc->rxd_kcfg;
+ int i, error = 0;
+
+ KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer",
+ __func__));
+ if (__predict_true(buf->store.dmat == NULL))
+ buf->store.dmat = sc->rxd_dmat;
+
+ memset(&cls_cfg, 0, sizeof(cls_cfg));
+
+ /* Configure extracts according to the given flags. */
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ key = &cls_cfg.extracts[cls_cfg.num_extracts];
+
+ if (!(flags & dist_fields[i].id))
+ continue;
+
+ if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ device_printf(dev, "%s: failed to add key extraction "
+ "rule\n", __func__);
+ return (E2BIG);
+ }
+
+ key->type = DPKG_EXTRACT_FROM_HDR;
+ key->extract.from_hdr.prot = dist_fields[i].cls_prot;
+ key->extract.from_hdr.type = DPKG_FULL_FIELD;
+ key->extract.from_hdr.field = dist_fields[i].cls_field;
+ cls_cfg.num_extracts++;
+ }
+
+ error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
+ BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
+ if (error != 0) {
+ device_printf(dev, "%s: failed to allocate a buffer for Rx "
+ "traffic distribution key configuration\n", __func__);
+ return (error);
+ }
+
+ error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *) buf->store.vaddr);
+ if (error != 0) {
+ device_printf(dev, "%s: failed to prepare key configuration: "
+ "error=%d\n", __func__, error);
+ return (error);
+ }
+
+ /* Prepare for setting the Rx dist. */
+ error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
+ buf->store.vaddr, DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_ni_dmamap_cb,
+ &buf->store.paddr, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ device_printf(sc->dev, "%s: failed to map a buffer for Rx "
+ "traffic distribution key configuration\n", __func__);
+ return (error);
+ }
+
+ if (type == DPAA2_NI_DIST_MODE_HASH) {
+ error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, dpaa2_mcp_tk(
+ sc->cmd, sc->ni_token), sc->attr.num.queues, 0,
+ DPAA2_NI_DIST_MODE_HASH, buf->store.paddr);
+ if (error != 0)
+ device_printf(dev, "%s: failed to set distribution mode "
+ "and size for the traffic class\n", __func__);
+ }
+
+ return (error);
+}
+
+/**
+ * @brief Prepare extract parameters.
+ *
+ * cfg: defines a full Key Generation profile.
+ * key_cfg_buf: zeroed 256 bytes of memory to be mapped for DMA.
+ */
+static int
+dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
+{
+ struct dpni_ext_set_rx_tc_dist *dpni_ext;
+ struct dpni_dist_extract *extr;
+ int i, j;
+
+ if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
+ return (EINVAL);
+
+ dpni_ext = (struct dpni_ext_set_rx_tc_dist *) key_cfg_buf;
+ dpni_ext->num_extracts = cfg->num_extracts;
+
+ for (i = 0; i < cfg->num_extracts; i++) {
+ extr = &dpni_ext->extracts[i];
+
+ switch (cfg->extracts[i].type) {
+ case DPKG_EXTRACT_FROM_HDR:
+ extr->prot = cfg->extracts[i].extract.from_hdr.prot;
+ extr->efh_type =
+ cfg->extracts[i].extract.from_hdr.type & 0x0Fu;
+ extr->size = cfg->extracts[i].extract.from_hdr.size;
+ extr->offset = cfg->extracts[i].extract.from_hdr.offset;
+ extr->field = cfg->extracts[i].extract.from_hdr.field;
+ extr->hdr_index =
+ cfg->extracts[i].extract.from_hdr.hdr_index;
+ break;
+ case DPKG_EXTRACT_FROM_DATA:
+ extr->size = cfg->extracts[i].extract.from_data.size;
+ extr->offset =
+ cfg->extracts[i].extract.from_data.offset;
+ break;
+ case DPKG_EXTRACT_FROM_PARSE:
+ extr->size = cfg->extracts[i].extract.from_parse.size;
+ extr->offset =
+ cfg->extracts[i].extract.from_parse.offset;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
+ extr->extract_type = cfg->extracts[i].type & 0x0Fu;
+
+ for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
+ extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
+ extr->masks[j].offset =
+ cfg->extracts[i].masks[j].offset;
+ }
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Obtain the next dequeue response from the channel storage.
+ */
+static int
+dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *chan, struct dpaa2_dq **dq)
+{
+ struct dpaa2_buf *buf = &chan->store;
+ struct dpaa2_dq *msgs = buf->store.vaddr;
+ struct dpaa2_dq *msg = &msgs[chan->store_idx];
+ int rc = EINPROGRESS;
+
+ chan->store_idx++;
+
+ if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) {
+ rc = EALREADY; /* VDQ command is expired */
+ chan->store_idx = 0;
+ if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME))
+ msg = NULL; /* Null response, FD is invalid */
+ }
+ if (msg != NULL && (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY)) {
+ rc = ENOENT; /* FQ is empty */
+ chan->store_idx = 0;
+ }
+
+ if (dq != NULL)
+ *dq = msg;
+
+ return (rc);
+}
+
+static device_method_t dpaa2_ni_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, dpaa2_ni_probe),
+ DEVMETHOD(device_attach, dpaa2_ni_attach),
+ DEVMETHOD(device_detach, dpaa2_ni_detach),
+
+ /* mii via memac_mdio */
+ DEVMETHOD(miibus_statchg, dpaa2_ni_miibus_statchg),
+
+ DEVMETHOD_END
+};
+
+static driver_t dpaa2_ni_driver = {
+ "dpaa2_ni",
+ dpaa2_ni_methods,
+ sizeof(struct dpaa2_ni_softc),
+};
+
+DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0);
+DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0);
+
+MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1);
+#ifdef DEV_ACPI
+MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1);
+#endif
+#ifdef FDT
+MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1);
+#endif
diff --git a/sys/dev/dpaa2/dpaa2_ni.h b/sys/dev/dpaa2/dpaa2_ni.h
new file mode 100644
index 000000000000..929a4d0d4966
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_ni.h
@@ -0,0 +1,607 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ * Copyright © 2022 Mathew McBride
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_NI_H
+#define _DPAA2_NI_H
+
+#include <sys/rman.h>
+#include <sys/bus.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+#include <sys/mbuf.h>
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/buf_ring.h>
+#include <sys/proc.h>
+#include <sys/mutex.h>
+
+#include <net/if.h>
+#include <net/ethernet.h>
+#include <net/if_media.h>
+
+#include "dpaa2_types.h"
+#include "dpaa2_mcp.h"
+#include "dpaa2_swp.h"
+#include "dpaa2_io.h"
+#include "dpaa2_mac.h"
+#include "dpaa2_ni_dpkg.h"
+
+/* Name of the DPAA2 network interface. */
+#define DPAA2_NI_IFNAME "dpni"
+
+/* Maximum resources per DPNI: 16 DPIOs + 16 DPCONs + 1 DPBP + 1 DPMCP. */
+#define DPAA2_NI_MAX_RESOURCES 34
+
+#define DPAA2_NI_MSI_COUNT 1 /* MSIs per DPNI */
+#define DPAA2_NI_MAX_CHANNELS 16 /* to distribute ingress traffic to cores */
+#define DPAA2_NI_MAX_TCS 8 /* traffic classes per DPNI */
+#define DPAA2_NI_MAX_POOLS 8 /* buffer pools per DPNI */
+
+/* Maximum number of Rx buffers. */
+#define DPAA2_NI_BUFS_INIT (50u * DPAA2_SWP_BUFS_PER_CMD)
+#define DPAA2_NI_BUFS_MAX (1 << 15) /* 15 bits for buffer index max. */
+
+/* Maximum number of buffers allocated per Tx ring. */
+#define DPAA2_NI_BUFS_PER_TX (1 << 7)
+#define DPAA2_NI_MAX_BPTX (1 << 8) /* 8 bits for buffer index max. */
+
+/* Number of the DPNI statistics counters. */
+#define DPAA2_NI_STAT_COUNTERS 7u
+#define DPAA2_NI_STAT_SYSCTLS 9u
+
+/* Error and status bits in the frame annotation status word. */
+#define DPAA2_NI_FAS_DISC 0x80000000 /* debug frame */
+#define DPAA2_NI_FAS_MS 0x40000000 /* MACSEC frame */
+#define DPAA2_NI_FAS_PTP 0x08000000
+#define DPAA2_NI_FAS_MC 0x04000000 /* Ethernet multicast frame */
+#define DPAA2_NI_FAS_BC 0x02000000 /* Ethernet broadcast frame */
+#define DPAA2_NI_FAS_KSE 0x00040000
+#define DPAA2_NI_FAS_EOFHE 0x00020000
+#define DPAA2_NI_FAS_MNLE 0x00010000
+#define DPAA2_NI_FAS_TIDE 0x00008000
+#define DPAA2_NI_FAS_PIEE 0x00004000
+#define DPAA2_NI_FAS_FLE 0x00002000 /* Frame length error */
+#define DPAA2_NI_FAS_FPE 0x00001000 /* Frame physical error */
+#define DPAA2_NI_FAS_PTE 0x00000080
+#define DPAA2_NI_FAS_ISP 0x00000040
+#define DPAA2_NI_FAS_PHE 0x00000020
+#define DPAA2_NI_FAS_BLE 0x00000010
+#define DPAA2_NI_FAS_L3CV 0x00000008 /* L3 csum validation performed */
+#define DPAA2_NI_FAS_L3CE 0x00000004 /* L3 csum error */
+#define DPAA2_NI_FAS_L4CV 0x00000002 /* L4 csum validation performed */
+#define DPAA2_NI_FAS_L4CE 0x00000001 /* L4 csum error */
+
+/* Mask for errors on the ingress path. */
+#define DPAA2_NI_FAS_RX_ERR_MASK (DPAA2_NI_FAS_KSE | \
+ DPAA2_NI_FAS_EOFHE | \
+ DPAA2_NI_FAS_MNLE | \
+ DPAA2_NI_FAS_TIDE | \
+ DPAA2_NI_FAS_PIEE | \
+ DPAA2_NI_FAS_FLE | \
+ DPAA2_NI_FAS_FPE | \
+ DPAA2_NI_FAS_PTE | \
+ DPAA2_NI_FAS_ISP | \
+ DPAA2_NI_FAS_PHE | \
+ DPAA2_NI_FAS_BLE | \
+ DPAA2_NI_FAS_L3CE | \
+ DPAA2_NI_FAS_L4CE \
+)
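+
+/*
+ * A short sketch (illustrative only): on the ingress path the frame
+ * annotation status word can be checked against the mask above to decide
+ * whether a received frame has to be dropped; "fas" stands for a
+ * hypothetical local holding the status word read from the frame
+ * annotation area.
+ *
+ *	if (fas & DPAA2_NI_FAS_RX_ERR_MASK) {
+ *		drop the frame: it carries at least one ingress error
+ *	}
+ */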
+
+/* Option bits to select specific queue configuration options to apply. */
+#define DPAA2_NI_QUEUE_OPT_USER_CTX 0x00000001
+#define DPAA2_NI_QUEUE_OPT_DEST 0x00000002
+#define DPAA2_NI_QUEUE_OPT_FLC 0x00000004
+#define DPAA2_NI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
+#define DPAA2_NI_QUEUE_OPT_SET_CGID 0x00000040
+#define DPAA2_NI_QUEUE_OPT_CLEAR_CGID 0x00000080
+
+/* DPNI link configuration options. */
+#define DPAA2_NI_LINK_OPT_AUTONEG ((uint64_t) 0x01u)
+#define DPAA2_NI_LINK_OPT_HALF_DUPLEX ((uint64_t) 0x02u)
+#define DPAA2_NI_LINK_OPT_PAUSE ((uint64_t) 0x04u)
+#define DPAA2_NI_LINK_OPT_ASYM_PAUSE ((uint64_t) 0x08u)
+#define DPAA2_NI_LINK_OPT_PFC_PAUSE ((uint64_t) 0x10u)
+
+/*
+ * Number of times to retry a frame enqueue before giving up. Value determined
+ * empirically, in order to minimize the number of frames dropped on Tx.
+ */
+#define DPAA2_NI_ENQUEUE_RETRIES 10
+
+enum dpaa2_ni_queue_type {
+ DPAA2_NI_QUEUE_RX = 0,
+ DPAA2_NI_QUEUE_TX,
+ DPAA2_NI_QUEUE_TX_CONF,
+ DPAA2_NI_QUEUE_RX_ERR
+};
+
+enum dpaa2_ni_dest_type {
+ DPAA2_NI_DEST_NONE = 0,
+ DPAA2_NI_DEST_DPIO,
+ DPAA2_NI_DEST_DPCON
+};
+
+enum dpaa2_ni_ofl_type {
+ DPAA2_NI_OFL_RX_L3_CSUM = 0,
+ DPAA2_NI_OFL_RX_L4_CSUM,
+ DPAA2_NI_OFL_TX_L3_CSUM,
+ DPAA2_NI_OFL_TX_L4_CSUM,
+ DPAA2_NI_OFL_FLCTYPE_HASH /* FD flow context for AIOP/CTLU */
+};
+
+/**
+ * @brief DPNI ingress traffic distribution mode.
+ */
+enum dpaa2_ni_dist_mode {
+ DPAA2_NI_DIST_MODE_NONE = 0,
+ DPAA2_NI_DIST_MODE_HASH,
+ DPAA2_NI_DIST_MODE_FS
+};
+
+/**
+ * @brief DPNI behavior in case of errors.
+ */
+enum dpaa2_ni_err_action {
+ DPAA2_NI_ERR_DISCARD = 0,
+ DPAA2_NI_ERR_CONTINUE,
+ DPAA2_NI_ERR_SEND_TO_ERROR_QUEUE
+};
+
+struct dpaa2_ni_channel;
+struct dpaa2_ni_fq;
+
+/**
+ * @brief Attributes of the DPNI object.
+ *
+ * options: ...
+ * wriop_ver: Revision of the underlying WRIOP hardware block.
+ */
+struct dpaa2_ni_attr {
+ uint32_t options;
+ uint16_t wriop_ver;
+ struct {
+ uint16_t fs;
+ uint8_t mac;
+ uint8_t vlan;
+ uint8_t qos;
+ } entries;
+ struct {
+ uint8_t queues;
+ uint8_t rx_tcs;
+ uint8_t tx_tcs;
+ uint8_t channels;
+ uint8_t cgs;
+ } num;
+ struct {
+ uint8_t fs;
+ uint8_t qos;
+ } key_size;
+};
+
+/**
+ * @brief Tx ring.
+ *
+ * fq: Parent (TxConf) frame queue.
+ * fqid: ID of the logical Tx queue.
+ * mbuf_br: Ring buffer for mbufs to transmit.
+ * mbuf_lock: Lock for the ring buffer.
+ */
+struct dpaa2_ni_tx_ring {
+ struct dpaa2_ni_fq *fq;
+ uint32_t fqid;
+ uint32_t txid; /* Tx ring index */
+
+ /* Ring buffer for indexes in "buf" array. */
+ struct buf_ring *idx_br;
+ struct mtx lock;
+
+ /* Buffers to DMA load/unload Tx mbufs. */
+ struct dpaa2_buf buf[DPAA2_NI_BUFS_PER_TX];
+};
+
+/**
+ * @brief A Frame Queue is the basic queuing structure used by the QMan.
+ *
+ * It comprises a list of frame descriptors (FDs), so it can be thought of
+ * as a queue of frames.
+ *
+ * NOTE: When frames on a FQ are ready to be processed, the FQ is enqueued
+ * onto a work queue (WQ).
+ *
+ * fqid: Frame queue ID, can be used to enqueue/dequeue or execute other
+ * commands on the queue through DPIO.
+ * txq_n: Number of configured Tx queues.
+ * tx_fqid: Frame queue IDs of the Tx queues which belong to the same flowid.
+ * Note that Tx queues are logical queues and not all management
+ * commands are available on these queue types.
+ * qdbin: Queue destination bin. Can be used with the DPIO enqueue
+ * operation based on QDID, QDBIN and QPRI. Note that all Tx queues
+ * with the same flowid have the same destination bin.
+ */
+struct dpaa2_ni_fq {
+ int (*consume)(struct dpaa2_ni_channel *,
+ struct dpaa2_ni_fq *, struct dpaa2_fd *);
+
+ struct dpaa2_ni_channel *chan;
+ uint32_t fqid;
+ uint16_t flowid;
+ uint8_t tc;
+ enum dpaa2_ni_queue_type type;
+
+ /* Optional fields (for TxConf queue). */
+ struct dpaa2_ni_tx_ring tx_rings[DPAA2_NI_MAX_TCS];
+ uint32_t tx_qdbin;
+} __aligned(CACHE_LINE_SIZE);
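+
+/*
+ * A minimal sketch (illustrative only) of how the "consume" hook is meant to
+ * be driven: the dequeue path recovers the FQ pointer stashed in the FQD
+ * context of a dequeue response and hands the frame descriptor over to it.
+ *
+ *	struct dpaa2_ni_fq *fq =
+ *	    (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
+ *
+ *	error = fq->consume(chan, fq, &dq->fdr.fd);
+ */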
+
+/**
+ * @brief QBMan channel to process ingress traffic (Rx, Tx conf).
+ *
+ * NOTE: Several WQs are organized into a single WQ Channel.
+ */
+struct dpaa2_ni_channel {
+ device_t ni_dev;
+ device_t io_dev;
+ device_t con_dev;
+ uint16_t id;
+ uint16_t flowid;
+
+ /* For debug purposes only! */
+ uint64_t tx_frames;
+ uint64_t tx_dropped;
+
+ /* Context to configure CDAN. */
+ struct dpaa2_io_notif_ctx ctx;
+
+ /* Channel storage (to keep responses from VDQ command). */
+ struct dpaa2_buf store;
+ uint32_t store_sz; /* in frames */
+ uint32_t store_idx; /* frame index */
+
+ /* Recycled buffers to release back to the pool. */
+ uint32_t recycled_n;
+ bus_addr_t recycled[DPAA2_SWP_BUFS_PER_CMD];
+
+ /* Frame queues */
+ uint32_t rxq_n;
+ struct dpaa2_ni_fq rx_queues[DPAA2_NI_MAX_TCS];
+ struct dpaa2_ni_fq txc_queue;
+};
+
+/**
+ * @brief Configuration of the network interface queue.
+ *
+ * NOTE: This configuration is used to obtain information about a queue with
+ *	 the DPNI_GET_QUEUE command and to update it with DPNI_SET_QUEUE.
+ *
+ * It includes binding of the queue to a DPIO or DPCON object to receive
+ * notifications and traffic on the CPU.
+ *
+ * user_ctx: (r/w) User defined data, presented along with the frames
+ * being dequeued from this queue.
+ * flow_ctx: (r/w) Set default FLC value for traffic dequeued from this queue.
+ * Please check description of FD structure for more information.
+ * Note that FLC values set using DPNI_ADD_FS_ENTRY, if any, take
+ * precedence over values per queue.
+ * dest_id: (r/w) The ID of a DPIO or DPCON object, depending on
+ *		the DEST_TYPE value (in flags). This field is ignored for DEST_TYPE
+ * set to 0 (DPNI_DEST_NONE).
+ * fqid: (r) Frame queue ID, can be used to enqueue/dequeue or execute
+ * other commands on the queue through DPIO. Note that Tx queues
+ * are logical queues and not all management commands are available
+ * on these queue types.
+ * qdbin: (r) Queue destination bin. Can be used with the DPIO enqueue
+ * operation based on QDID, QDBIN and QPRI.
+ * type: Type of the queue to set configuration to.
+ * tc: Traffic class. Ignored for QUEUE_TYPE 2 and 3 (Tx confirmation
+ * and Rx error queues).
+ * idx: Selects a specific queue out of the set of queues in a TC.
+ *		Accepted values are in the range 0 to NUM_QUEUES - 1. This field is
+ * ignored for QUEUE_TYPE 3 (Rx error queue). For access to the
+ * shared Tx confirmation queue (for Tx confirmation mode 1), this
+ * field must be set to 0xff.
+ * cgid: (r/w) Congestion group ID.
+ * chan_id: (w) Channel index to be configured. Used only when QUEUE_TYPE is
+ * set to DPNI_QUEUE_TX.
+ * priority: (r/w) Sets the priority in the destination DPCON or DPIO for
+ * dequeued traffic. Supported values are 0 to # of priorities in
+ * destination DPCON or DPIO - 1. This field is ignored for
+ * DEST_TYPE set to 0 (DPNI_DEST_NONE), except if this DPNI is in
+ * AIOP context. In that case the DPNI_SET_QUEUE can be used to
+ * override the default assigned priority of the FQ from the TC.
+ * options: Option bits selecting specific configuration options to apply.
+ * See DPAA2_NI_QUEUE_OPT_* for details.
+ * dest_type: Type of destination for dequeued traffic.
+ * cgid_valid: (r) Congestion group ID is valid.
+ * stash_control: (r/w) If true, lowest 6 bits of FLC are used for stash control.
+ * Please check description of FD structure for more information.
+ * hold_active: (r/w) If true, this flag prevents the queue from being
+ * rescheduled between DPIOs while it carries traffic and is active
+ *		on one DPIO. Can help reduce reordering when one queue is serviced
+ * on multiple CPUs, but the queue is also more likely to be trapped
+ * in one DPIO, especially when congested.
+ */
+struct dpaa2_ni_queue_cfg {
+ uint64_t user_ctx;
+ uint64_t flow_ctx;
+ uint32_t dest_id;
+ uint32_t fqid;
+ uint16_t qdbin;
+ enum dpaa2_ni_queue_type type;
+ uint8_t tc;
+ uint8_t idx;
+ uint8_t cgid;
+ uint8_t chan_id;
+ uint8_t priority;
+ uint8_t options;
+
+ enum dpaa2_ni_dest_type dest_type;
+ bool cgid_valid;
+ bool stash_control;
+ bool hold_active;
+};
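+
+/*
+ * A minimal sketch (illustrative only): bind Rx queue 0 of traffic class 0
+ * to a DPCON before issuing DPNI_SET_QUEUE; "dpcon_id" is a hypothetical
+ * DPCON object ID and "fq" a hypothetical frame queue pointer.
+ *
+ *	struct dpaa2_ni_queue_cfg cfg = {0};
+ *
+ *	cfg.type = DPAA2_NI_QUEUE_RX;
+ *	cfg.tc = 0;
+ *	cfg.idx = 0;
+ *	cfg.dest_type = DPAA2_NI_DEST_DPCON;
+ *	cfg.dest_id = dpcon_id;
+ *	cfg.priority = 1;
+ *	cfg.user_ctx = (uint64_t)(uintptr_t) fq;
+ *	cfg.options = DPAA2_NI_QUEUE_OPT_DEST | DPAA2_NI_QUEUE_OPT_USER_CTX;
+ */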
+
+/**
+ * @brief Buffer layout attributes.
+ *
+ * pd_size: Size kept for private data (in bytes).
+ * fd_align: Frame data alignment.
+ * head_size: Data head room.
+ * tail_size: Data tail room.
+ * options: ...
+ * pass_timestamp: Timestamp is included in the buffer layout.
+ * pass_parser_result: Parsing results are included in the buffer layout.
+ * pass_frame_status: Frame status is included in the buffer layout.
+ * pass_sw_opaque: SW annotation is activated.
+ * queue_type: Type of a queue this configuration applies to.
+ */
+struct dpaa2_ni_buf_layout {
+ uint16_t pd_size;
+ uint16_t fd_align;
+ uint16_t head_size;
+ uint16_t tail_size;
+ uint16_t options;
+ bool pass_timestamp;
+ bool pass_parser_result;
+ bool pass_frame_status;
+ bool pass_sw_opaque;
+ enum dpaa2_ni_queue_type queue_type;
+};
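+
+/*
+ * A minimal sketch (illustrative only): describe an Rx buffer layout which
+ * carries the frame status and the parsing results. The matching bits have
+ * to be set in "options" as well; those bit definitions live outside of
+ * this header, and the sizes below are example values only.
+ *
+ *	struct dpaa2_ni_buf_layout layout = {0};
+ *
+ *	layout.queue_type = DPAA2_NI_QUEUE_RX;
+ *	layout.fd_align = 64;
+ *	layout.head_size = 128;
+ *	layout.pass_frame_status = true;
+ *	layout.pass_parser_result = true;
+ */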
+
+/**
+ * @brief Buffer pools configuration for a network interface.
+ */
+struct dpaa2_ni_pools_cfg {
+ uint8_t pools_num;
+ struct {
+ uint32_t bp_obj_id;
+ uint16_t buf_sz;
+ int backup_flag; /* 0 - regular pool, 1 - backup pool */
+ } pools[DPAA2_NI_MAX_POOLS];
+};
+
+/**
+ * @brief Errors behavior configuration for a network interface.
+ *
+ * err_mask: The errors mask to configure.
+ * action: Desired action for the errors selected in the mask.
+ * set_err_fas: Set to true to mark the errors in frame annotation
+ * status (FAS); relevant for non-discard actions only.
+ */
+struct dpaa2_ni_err_cfg {
+ uint32_t err_mask;
+ enum dpaa2_ni_err_action action;
+ bool set_err_fas;
+};
+
+/**
+ * @brief Link configuration.
+ *
+ * options: Mask of available options.
+ * adv_speeds: Speeds that are advertised for autoneg.
+ * rate: Rate in Mbps.
+ */
+struct dpaa2_ni_link_cfg {
+ uint64_t options;
+ uint64_t adv_speeds;
+ uint32_t rate;
+};
+
+/**
+ * @brief Link state.
+ *
+ * options: Mask of available options.
+ * adv_speeds: Speeds that are advertised for autoneg.
+ * sup_speeds: Speeds capability of the PHY.
+ * rate: Rate in Mbps.
+ * link_up: Link state (true if link is up, false otherwise).
+ * state_valid: Ignore/Update the state of the link.
+ */
+struct dpaa2_ni_link_state {
+ uint64_t options;
+ uint64_t adv_speeds;
+ uint64_t sup_speeds;
+ uint32_t rate;
+ bool link_up;
+ bool state_valid;
+};
+
+/**
+ * @brief QoS table configuration.
+ *
+ * kcfg_busaddr: Address of the buffer in I/O virtual address space which
+ * holds the QoS table key configuration.
+ * default_tc: Default traffic class to use in case of a lookup miss in
+ * the QoS table.
+ * discard_on_miss: Set to true to discard frames in case of no match.
+ * Default traffic class will be used otherwise.
+ * keep_entries: Set to true to keep existing QoS table entries. This
+ * option will work properly only for DPNI objects created
+ * with DPNI_OPT_HAS_KEY_MASKING option.
+ */
+struct dpaa2_ni_qos_table {
+ uint64_t kcfg_busaddr;
+ uint8_t default_tc;
+ bool discard_on_miss;
+ bool keep_entries;
+};
+
+/**
+ * @brief Context to add multicast physical addresses to the filter table.
+ *
+ * ifp: Network interface associated with the context.
+ * error: Result of the last MC command.
+ * nent: Number of entries added.
+ */
+struct dpaa2_ni_mcaddr_ctx {
+ struct ifnet *ifp;
+ int error;
+ int nent;
+};
+
+struct dpaa2_eth_dist_fields {
+ uint64_t rxnfc_field;
+ enum net_prot cls_prot;
+ int cls_field;
+ int size;
+ uint64_t id;
+};
+
+struct dpni_mask_cfg {
+ uint8_t mask;
+ uint8_t offset;
+} __packed;
+
+struct dpni_dist_extract {
+ uint8_t prot;
+ uint8_t efh_type; /* EFH type is in the 4 LSBs. */
+ uint8_t size;
+ uint8_t offset;
+ uint32_t field;
+ uint8_t hdr_index;
+ uint8_t constant;
+ uint8_t num_of_repeats;
+ uint8_t num_of_byte_masks;
+	uint8_t extract_type; /* Extraction type is in the 4 LSBs. */
+ uint8_t _reserved[3];
+ struct dpni_mask_cfg masks[4];
+} __packed;
+
+struct dpni_ext_set_rx_tc_dist {
+ uint8_t num_extracts;
+ uint8_t _reserved[7];
+ struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
+} __packed;
+
+/**
+ * @brief Software context for the DPAA2 Network Interface driver.
+ */
+struct dpaa2_ni_softc {
+ device_t dev;
+ struct resource *res[DPAA2_NI_MAX_RESOURCES];
+ uint16_t api_major;
+ uint16_t api_minor;
+ uint64_t rx_hash_fields;
+ uint16_t tx_data_off;
+ uint16_t tx_qdid;
+ uint32_t link_options;
+ int link_state;
+
+ uint16_t buf_align;
+ uint16_t buf_sz;
+
+ /* For debug purposes only! */
+ uint64_t rx_anomaly_frames;
+ uint64_t rx_single_buf_frames;
+ uint64_t rx_sg_buf_frames;
+ uint64_t rx_enq_rej_frames;
+ uint64_t rx_ieoi_err_frames;
+ uint64_t tx_single_buf_frames;
+ uint64_t tx_sg_frames;
+
+ /* Attributes of the DPAA2 network interface. */
+ struct dpaa2_ni_attr attr;
+
+ /* Helps to send commands to MC. */
+ struct dpaa2_cmd *cmd;
+ uint16_t rc_token;
+ uint16_t ni_token;
+
+ /* For network interface and miibus. */
+ struct ifnet *ifp;
+ uint32_t if_flags;
+ struct mtx lock;
+ device_t miibus;
+ struct mii_data *mii;
+ boolean_t fixed_link;
+ struct ifmedia fixed_ifmedia;
+ int media_status;
+
+ /* DMA resources */
+ bus_dma_tag_t bp_dmat; /* for buffer pool */
+ bus_dma_tag_t tx_dmat; /* for Tx buffers */
+ bus_dma_tag_t st_dmat; /* for channel storage */
+ bus_dma_tag_t rxd_dmat; /* for Rx distribution key */
+ bus_dma_tag_t qos_dmat; /* for QoS table key */
+ bus_dma_tag_t sgt_dmat; /* for scatter/gather tables */
+
+ struct dpaa2_buf qos_kcfg; /* QoS table key config. */
+ struct dpaa2_buf rxd_kcfg; /* Rx distribution key config. */
+
+ /* Channels and RxError frame queue */
+ uint32_t chan_n;
+ struct dpaa2_ni_channel *channels[DPAA2_NI_MAX_CHANNELS];
+ struct dpaa2_ni_fq rxe_queue; /* one per network interface */
+
+ /* Rx buffers for buffer pool. */
+ struct dpaa2_atomic buf_num;
+ struct dpaa2_atomic buf_free; /* for sysctl(9) only */
+ struct dpaa2_buf buf[DPAA2_NI_BUFS_MAX];
+
+ /* Interrupts */
+ int irq_rid[DPAA2_NI_MSI_COUNT];
+ struct resource *irq_res;
+ void *intr; /* interrupt handle */
+
+ /* Tasks */
+ struct taskqueue *bp_taskq;
+ struct task bp_task;
+
+ /* Callouts */
+ struct callout mii_callout;
+
+ struct {
+ uint32_t dpmac_id;
+ uint8_t addr[ETHER_ADDR_LEN];
+ device_t phy_dev;
+ int phy_loc;
+ } mac; /* Info about connected DPMAC (if exists). */
+};
+
+extern struct resource_spec dpaa2_ni_spec[];
+
+#endif /* _DPAA2_NI_H */
diff --git a/sys/dev/dpaa2/dpaa2_ni_dpkg.h b/sys/dev/dpaa2/dpaa2_ni_dpkg.h
new file mode 100644
index 000000000000..209486a1ce98
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_ni_dpkg.h
@@ -0,0 +1,536 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause AND BSD-2-Clause
+ *
+ * Copyright © 2013-2015 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Original source file obtained from:
+ * drivers/net/ethernet/freescale/dpaa2/dpkg.h
+ *
+ * Commit: 4c86114194e644b6da9107d75910635c9e87179e
+ * Repository: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
+ */
+
+/*
+ * Copyright © 2021-2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_NI_DPKG_H
+#define _DPAA2_NI_DPKG_H
+
+#define BIT(x) (1ul << (x))
+
+/**
+ * DPKG_NUM_OF_MASKS - Number of masks per key extraction
+ */
+#define DPKG_NUM_OF_MASKS 4
+
+/**
+ * DPKG_MAX_NUM_OF_EXTRACTS - Number of extractions per key profile
+ */
+#define DPKG_MAX_NUM_OF_EXTRACTS 10
+
+/**
+ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
+ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
+ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
+ * @DPKG_FULL_FIELD: Extract a full field
+ */
+enum dpkg_extract_from_hdr_type {
+ DPKG_FROM_HDR = 0,
+ DPKG_FROM_FIELD = 1,
+ DPKG_FULL_FIELD = 2
+};
+
+/**
+ * enum dpkg_extract_type - Enumeration for selecting extraction type
+ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
+ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
+ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
+ * e.g. can be used to extract header existence;
+ * please refer to 'Parse Result definition' section in the parser BG
+ */
+enum dpkg_extract_type {
+ DPKG_EXTRACT_FROM_HDR = 0,
+ DPKG_EXTRACT_FROM_DATA = 1,
+ DPKG_EXTRACT_FROM_PARSE = 3
+};
+
+/**
+ * struct dpkg_mask - A structure for defining a single extraction mask
+ * @mask: Byte mask for the extracted content
+ * @offset: Offset within the extracted content
+ */
+struct dpkg_mask {
+ uint8_t mask;
+ uint8_t offset;
+};
+
+/* Protocol fields */
+
+/* Ethernet fields */
+#define NH_FLD_ETH_DA BIT(0)
+#define NH_FLD_ETH_SA BIT(1)
+#define NH_FLD_ETH_LENGTH BIT(2)
+#define NH_FLD_ETH_TYPE BIT(3)
+#define NH_FLD_ETH_FINAL_CKSUM BIT(4)
+#define NH_FLD_ETH_PADDING BIT(5)
+#define NH_FLD_ETH_ALL_FIELDS (BIT(6) - 1)
+
+/* VLAN fields */
+#define NH_FLD_VLAN_VPRI BIT(0)
+#define NH_FLD_VLAN_CFI BIT(1)
+#define NH_FLD_VLAN_VID BIT(2)
+#define NH_FLD_VLAN_LENGTH BIT(3)
+#define NH_FLD_VLAN_TYPE BIT(4)
+#define NH_FLD_VLAN_ALL_FIELDS (BIT(5) - 1)
+
+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
+ NH_FLD_VLAN_CFI | \
+ NH_FLD_VLAN_VID)
+
+/* IP (generic) fields */
+#define NH_FLD_IP_VER BIT(0)
+#define NH_FLD_IP_DSCP BIT(2)
+#define NH_FLD_IP_ECN BIT(3)
+#define NH_FLD_IP_PROTO BIT(4)
+#define NH_FLD_IP_SRC BIT(5)
+#define NH_FLD_IP_DST BIT(6)
+#define NH_FLD_IP_TOS_TC BIT(7)
+#define NH_FLD_IP_ID BIT(8)
+#define NH_FLD_IP_ALL_FIELDS (BIT(9) - 1)
+
+/* IPV4 fields */
+#define NH_FLD_IPV4_VER BIT(0)
+#define NH_FLD_IPV4_HDR_LEN BIT(1)
+#define NH_FLD_IPV4_TOS BIT(2)
+#define NH_FLD_IPV4_TOTAL_LEN BIT(3)
+#define NH_FLD_IPV4_ID BIT(4)
+#define NH_FLD_IPV4_FLAG_D BIT(5)
+#define NH_FLD_IPV4_FLAG_M BIT(6)
+#define NH_FLD_IPV4_OFFSET BIT(7)
+#define NH_FLD_IPV4_TTL BIT(8)
+#define NH_FLD_IPV4_PROTO BIT(9)
+#define NH_FLD_IPV4_CKSUM BIT(10)
+#define NH_FLD_IPV4_SRC_IP BIT(11)
+#define NH_FLD_IPV4_DST_IP BIT(12)
+#define NH_FLD_IPV4_OPTS BIT(13)
+#define NH_FLD_IPV4_OPTS_COUNT BIT(14)
+#define NH_FLD_IPV4_ALL_FIELDS (BIT(15) - 1)
+
+/* IPV6 fields */
+#define NH_FLD_IPV6_VER BIT(0)
+#define NH_FLD_IPV6_TC BIT(1)
+#define NH_FLD_IPV6_SRC_IP BIT(2)
+#define NH_FLD_IPV6_DST_IP BIT(3)
+#define NH_FLD_IPV6_NEXT_HDR BIT(4)
+#define NH_FLD_IPV6_FL BIT(5)
+#define NH_FLD_IPV6_HOP_LIMIT BIT(6)
+#define NH_FLD_IPV6_ID BIT(7)
+#define NH_FLD_IPV6_ALL_FIELDS (BIT(8) - 1)
+
+/* ICMP fields */
+#define NH_FLD_ICMP_TYPE BIT(0)
+#define NH_FLD_ICMP_CODE BIT(1)
+#define NH_FLD_ICMP_CKSUM BIT(2)
+#define NH_FLD_ICMP_ID BIT(3)
+#define NH_FLD_ICMP_SQ_NUM BIT(4)
+#define NH_FLD_ICMP_ALL_FIELDS (BIT(5) - 1)
+
+/* IGMP fields */
+#define NH_FLD_IGMP_VERSION BIT(0)
+#define NH_FLD_IGMP_TYPE BIT(1)
+#define NH_FLD_IGMP_CKSUM BIT(2)
+#define NH_FLD_IGMP_DATA BIT(3)
+#define NH_FLD_IGMP_ALL_FIELDS (BIT(4) - 1)
+
+/* TCP fields */
+#define NH_FLD_TCP_PORT_SRC BIT(0)
+#define NH_FLD_TCP_PORT_DST BIT(1)
+#define NH_FLD_TCP_SEQ BIT(2)
+#define NH_FLD_TCP_ACK BIT(3)
+#define NH_FLD_TCP_OFFSET BIT(4)
+#define NH_FLD_TCP_FLAGS BIT(5)
+#define NH_FLD_TCP_WINDOW BIT(6)
+#define NH_FLD_TCP_CKSUM BIT(7)
+#define NH_FLD_TCP_URGPTR BIT(8)
+#define NH_FLD_TCP_OPTS BIT(9)
+#define NH_FLD_TCP_OPTS_COUNT BIT(10)
+#define NH_FLD_TCP_ALL_FIELDS (BIT(11) - 1)
+
+/* UDP fields */
+#define NH_FLD_UDP_PORT_SRC BIT(0)
+#define NH_FLD_UDP_PORT_DST BIT(1)
+#define NH_FLD_UDP_LEN BIT(2)
+#define NH_FLD_UDP_CKSUM BIT(3)
+#define NH_FLD_UDP_ALL_FIELDS (BIT(4) - 1)
+
+/* UDP-lite fields */
+#define NH_FLD_UDP_LITE_PORT_SRC BIT(0)
+#define NH_FLD_UDP_LITE_PORT_DST BIT(1)
+#define NH_FLD_UDP_LITE_ALL_FIELDS (BIT(2) - 1)
+
+/* UDP-encap-ESP fields */
+#define NH_FLD_UDP_ENC_ESP_PORT_SRC BIT(0)
+#define NH_FLD_UDP_ENC_ESP_PORT_DST BIT(1)
+#define NH_FLD_UDP_ENC_ESP_LEN BIT(2)
+#define NH_FLD_UDP_ENC_ESP_CKSUM BIT(3)
+#define NH_FLD_UDP_ENC_ESP_SPI BIT(4)
+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM BIT(5)
+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS (BIT(6) - 1)
+
+/* SCTP fields */
+#define NH_FLD_SCTP_PORT_SRC BIT(0)
+#define NH_FLD_SCTP_PORT_DST BIT(1)
+#define NH_FLD_SCTP_VER_TAG BIT(2)
+#define NH_FLD_SCTP_CKSUM BIT(3)
+#define NH_FLD_SCTP_ALL_FIELDS (BIT(4) - 1)
+
+/* DCCP fields */
+#define NH_FLD_DCCP_PORT_SRC BIT(0)
+#define NH_FLD_DCCP_PORT_DST BIT(1)
+#define NH_FLD_DCCP_ALL_FIELDS (BIT(2) - 1)
+
+/* IPHC fields */
+#define NH_FLD_IPHC_CID BIT(0)
+#define NH_FLD_IPHC_CID_TYPE BIT(1)
+#define NH_FLD_IPHC_HCINDEX BIT(2)
+#define NH_FLD_IPHC_GEN BIT(3)
+#define NH_FLD_IPHC_D_BIT BIT(4)
+#define NH_FLD_IPHC_ALL_FIELDS (BIT(5) - 1)
+
+/* SCTP chunk data fields */
+#define NH_FLD_SCTP_CHUNK_DATA_TYPE BIT(0)
+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS BIT(1)
+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH BIT(2)
+#define NH_FLD_SCTP_CHUNK_DATA_TSN BIT(3)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID BIT(4)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN BIT(5)
+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID BIT(6)
+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED BIT(7)
+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING BIT(8)
+#define NH_FLD_SCTP_CHUNK_DATA_END BIT(9)
+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS (BIT(10) - 1)
+
+/* L2TPV2 fields */
+#define NH_FLD_L2TPV2_TYPE_BIT BIT(0)
+#define NH_FLD_L2TPV2_LENGTH_BIT BIT(1)
+#define NH_FLD_L2TPV2_SEQUENCE_BIT BIT(2)
+#define NH_FLD_L2TPV2_OFFSET_BIT BIT(3)
+#define NH_FLD_L2TPV2_PRIORITY_BIT BIT(4)
+#define NH_FLD_L2TPV2_VERSION BIT(5)
+#define NH_FLD_L2TPV2_LEN BIT(6)
+#define NH_FLD_L2TPV2_TUNNEL_ID BIT(7)
+#define NH_FLD_L2TPV2_SESSION_ID BIT(8)
+#define NH_FLD_L2TPV2_NS BIT(9)
+#define NH_FLD_L2TPV2_NR BIT(10)
+#define NH_FLD_L2TPV2_OFFSET_SIZE BIT(11)
+#define NH_FLD_L2TPV2_FIRST_BYTE BIT(12)
+#define NH_FLD_L2TPV2_ALL_FIELDS (BIT(13) - 1)
+
+/* L2TPV3 fields */
+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT BIT(0)
+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT BIT(1)
+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT BIT(2)
+#define NH_FLD_L2TPV3_CTRL_VERSION BIT(3)
+#define NH_FLD_L2TPV3_CTRL_LENGTH BIT(4)
+#define NH_FLD_L2TPV3_CTRL_CONTROL BIT(5)
+#define NH_FLD_L2TPV3_CTRL_SENT BIT(6)
+#define NH_FLD_L2TPV3_CTRL_RECV BIT(7)
+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE BIT(8)
+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS (BIT(9) - 1)
+
+#define NH_FLD_L2TPV3_SESS_TYPE_BIT BIT(0)
+#define NH_FLD_L2TPV3_SESS_VERSION BIT(1)
+#define NH_FLD_L2TPV3_SESS_ID BIT(2)
+#define NH_FLD_L2TPV3_SESS_COOKIE BIT(3)
+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS (BIT(4) - 1)
+
+/* PPP fields */
+#define NH_FLD_PPP_PID BIT(0)
+#define NH_FLD_PPP_COMPRESSED BIT(1)
+#define NH_FLD_PPP_ALL_FIELDS (BIT(2) - 1)
+
+/* PPPoE fields */
+#define NH_FLD_PPPOE_VER BIT(0)
+#define NH_FLD_PPPOE_TYPE BIT(1)
+#define NH_FLD_PPPOE_CODE BIT(2)
+#define NH_FLD_PPPOE_SID BIT(3)
+#define NH_FLD_PPPOE_LEN BIT(4)
+#define NH_FLD_PPPOE_SESSION BIT(5)
+#define NH_FLD_PPPOE_PID BIT(6)
+#define NH_FLD_PPPOE_ALL_FIELDS (BIT(7) - 1)
+
+/* PPP-Mux fields */
+#define NH_FLD_PPPMUX_PID BIT(0)
+#define NH_FLD_PPPMUX_CKSUM BIT(1)
+#define NH_FLD_PPPMUX_COMPRESSED BIT(2)
+#define NH_FLD_PPPMUX_ALL_FIELDS (BIT(3) - 1)
+
+/* PPP-Mux sub-frame fields */
+#define NH_FLD_PPPMUX_SUBFRM_PFF BIT(0)
+#define NH_FLD_PPPMUX_SUBFRM_LXT BIT(1)
+#define NH_FLD_PPPMUX_SUBFRM_LEN BIT(2)
+#define NH_FLD_PPPMUX_SUBFRM_PID BIT(3)
+#define NH_FLD_PPPMUX_SUBFRM_USE_PID BIT(4)
+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS (BIT(5) - 1)
+
+/* LLC fields */
+#define NH_FLD_LLC_DSAP BIT(0)
+#define NH_FLD_LLC_SSAP BIT(1)
+#define NH_FLD_LLC_CTRL BIT(2)
+#define NH_FLD_LLC_ALL_FIELDS (BIT(3) - 1)
+
+/* NLPID fields */
+#define NH_FLD_NLPID_NLPID BIT(0)
+#define NH_FLD_NLPID_ALL_FIELDS (BIT(1) - 1)
+
+/* SNAP fields */
+#define NH_FLD_SNAP_OUI BIT(0)
+#define NH_FLD_SNAP_PID BIT(1)
+#define NH_FLD_SNAP_ALL_FIELDS (BIT(2) - 1)
+
+/* LLC SNAP fields */
+#define NH_FLD_LLC_SNAP_TYPE BIT(0)
+#define NH_FLD_LLC_SNAP_ALL_FIELDS (BIT(1) - 1)
+
+/* ARP fields */
+#define NH_FLD_ARP_HTYPE BIT(0)
+#define NH_FLD_ARP_PTYPE BIT(1)
+#define NH_FLD_ARP_HLEN BIT(2)
+#define NH_FLD_ARP_PLEN BIT(3)
+#define NH_FLD_ARP_OPER BIT(4)
+#define NH_FLD_ARP_SHA BIT(5)
+#define NH_FLD_ARP_SPA BIT(6)
+#define NH_FLD_ARP_THA BIT(7)
+#define NH_FLD_ARP_TPA BIT(8)
+#define NH_FLD_ARP_ALL_FIELDS (BIT(9) - 1)
+
+/* RFC2684 fields */
+#define NH_FLD_RFC2684_LLC BIT(0)
+#define NH_FLD_RFC2684_NLPID BIT(1)
+#define NH_FLD_RFC2684_OUI BIT(2)
+#define NH_FLD_RFC2684_PID BIT(3)
+#define NH_FLD_RFC2684_VPN_OUI BIT(4)
+#define NH_FLD_RFC2684_VPN_IDX BIT(5)
+#define NH_FLD_RFC2684_ALL_FIELDS (BIT(6) - 1)
+
+/* User defined fields */
+#define NH_FLD_USER_DEFINED_SRCPORT BIT(0)
+#define NH_FLD_USER_DEFINED_PCDID BIT(1)
+#define NH_FLD_USER_DEFINED_ALL_FIELDS (BIT(2) - 1)
+
+/* Payload fields */
+#define NH_FLD_PAYLOAD_BUFFER BIT(0)
+#define NH_FLD_PAYLOAD_SIZE BIT(1)
+#define NH_FLD_MAX_FRM_SIZE BIT(2)
+#define NH_FLD_MIN_FRM_SIZE BIT(3)
+#define NH_FLD_PAYLOAD_TYPE BIT(4)
+#define NH_FLD_FRAME_SIZE BIT(5)
+#define NH_FLD_PAYLOAD_ALL_FIELDS (BIT(6) - 1)
+
+/* GRE fields */
+#define NH_FLD_GRE_TYPE BIT(0)
+#define NH_FLD_GRE_ALL_FIELDS (BIT(1) - 1)
+
+/* MINENCAP fields */
+#define NH_FLD_MINENCAP_SRC_IP BIT(0)
+#define NH_FLD_MINENCAP_DST_IP BIT(1)
+#define NH_FLD_MINENCAP_TYPE BIT(2)
+#define NH_FLD_MINENCAP_ALL_FIELDS (BIT(3) - 1)
+
+/* IPSEC AH fields */
+#define NH_FLD_IPSEC_AH_SPI BIT(0)
+#define NH_FLD_IPSEC_AH_NH BIT(1)
+#define NH_FLD_IPSEC_AH_ALL_FIELDS (BIT(2) - 1)
+
+/* IPSEC ESP fields */
+#define NH_FLD_IPSEC_ESP_SPI BIT(0)
+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM BIT(1)
+#define NH_FLD_IPSEC_ESP_ALL_FIELDS (BIT(2) - 1)
+
+/* MPLS fields */
+#define NH_FLD_MPLS_LABEL_STACK BIT(0)
+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS (BIT(1) - 1)
+
+/* MACSEC fields */
+#define NH_FLD_MACSEC_SECTAG BIT(0)
+#define NH_FLD_MACSEC_ALL_FIELDS (BIT(1) - 1)
+
+/* GTP fields */
+#define NH_FLD_GTP_TEID BIT(0)
+
+/* Supported protocols */
+enum net_prot {
+ NET_PROT_NONE = 0,
+ NET_PROT_PAYLOAD,
+ NET_PROT_ETH,
+ NET_PROT_VLAN,
+ NET_PROT_IPV4,
+ NET_PROT_IPV6,
+ NET_PROT_IP,
+ NET_PROT_TCP,
+ NET_PROT_UDP,
+ NET_PROT_UDP_LITE,
+ NET_PROT_IPHC,
+ NET_PROT_SCTP,
+ NET_PROT_SCTP_CHUNK_DATA,
+ NET_PROT_PPPOE,
+ NET_PROT_PPP,
+ NET_PROT_PPPMUX,
+ NET_PROT_PPPMUX_SUBFRM,
+ NET_PROT_L2TPV2,
+ NET_PROT_L2TPV3_CTRL,
+ NET_PROT_L2TPV3_SESS,
+ NET_PROT_LLC,
+ NET_PROT_LLC_SNAP,
+ NET_PROT_NLPID,
+ NET_PROT_SNAP,
+ NET_PROT_MPLS,
+ NET_PROT_IPSEC_AH,
+ NET_PROT_IPSEC_ESP,
+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
+ NET_PROT_MACSEC,
+ NET_PROT_GRE,
+ NET_PROT_MINENCAP,
+ NET_PROT_DCCP,
+ NET_PROT_ICMP,
+ NET_PROT_IGMP,
+ NET_PROT_ARP,
+ NET_PROT_CAPWAP_DATA,
+ NET_PROT_CAPWAP_CTRL,
+ NET_PROT_RFC2684,
+ NET_PROT_ICMPV6,
+ NET_PROT_FCOE,
+ NET_PROT_FIP,
+ NET_PROT_ISCSI,
+ NET_PROT_GTP,
+ NET_PROT_USER_DEFINED_L2,
+ NET_PROT_USER_DEFINED_L3,
+ NET_PROT_USER_DEFINED_L4,
+ NET_PROT_USER_DEFINED_L5,
+ NET_PROT_USER_DEFINED_SHIM1,
+ NET_PROT_USER_DEFINED_SHIM2,
+
+ NET_PROT_DUMMY_LAST
+};
+
+/**
+ * struct dpkg_extract - A structure for defining a single extraction
+ * @type: Determines how the union below is interpreted:
+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
+ * @extract: Selects extraction method
+ * @extract.from_hdr: Used when 'type = DPKG_EXTRACT_FROM_HDR'
+ * @extract.from_data: Used when 'type = DPKG_EXTRACT_FROM_DATA'
+ * @extract.from_parse: Used when 'type = DPKG_EXTRACT_FROM_PARSE'
+ * @extract.from_hdr.prot: Any of the supported headers
+ * @extract.from_hdr.type: Defines the type of header extraction:
+ * DPKG_FROM_HDR: use size & offset below;
+ * DPKG_FROM_FIELD: use field, size and offset below;
+ * DPKG_FULL_FIELD: use field below
+ * @extract.from_hdr.field: One of the supported fields (NH_FLD_)
+ * @extract.from_hdr.size: Size in bytes
+ * @extract.from_hdr.offset: Byte offset
+ * @extract.from_hdr.hdr_index: Clear for cases not listed below;
+ * Used for protocols that may have more than a single
+ * header, 0 indicates an outer header;
+ * Supported protocols (possible values):
+ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
+ * NET_PROT_IP(0, HDR_INDEX_LAST);
+ * NET_PROT_IPv4(0, HDR_INDEX_LAST);
+ * NET_PROT_IPv6(0, HDR_INDEX_LAST);
+ * @extract.from_data.size: Size in bytes
+ * @extract.from_data.offset: Byte offset
+ * @extract.from_parse.size: Size in bytes
+ * @extract.from_parse.offset: Byte offset
+ * @num_of_byte_masks: Defines the number of valid entries in the array below;
+ * This is also the number of bytes to be used as masks
+ * @masks: Masks parameters
+ */
+struct dpkg_extract {
+ enum dpkg_extract_type type;
+ union {
+ struct {
+ enum net_prot prot;
+ enum dpkg_extract_from_hdr_type type;
+ uint32_t field;
+ uint8_t size;
+ uint8_t offset;
+ uint8_t hdr_index;
+ } from_hdr;
+ struct {
+ uint8_t size;
+ uint8_t offset;
+ } from_data;
+ struct {
+ uint8_t size;
+ uint8_t offset;
+ } from_parse;
+ } extract;
+
+ uint8_t num_of_byte_masks;
+ struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
+};
+
+/**
+ * struct dpkg_profile_cfg - A structure for defining a full Key Generation
+ * profile (rule)
+ * @num_extracts: Defines the number of valid entries in the array below
+ * @extracts: Array of required extractions
+ */
+struct dpkg_profile_cfg {
+ uint8_t num_extracts;
+ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
+};
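+
+/*
+ * A minimal sketch (illustrative only): a key generation profile which
+ * extracts the full IP source and destination addresses, e.g. to distribute
+ * ingress traffic by a hash of the IP pair.
+ *
+ *	struct dpkg_profile_cfg cfg = {0};
+ *
+ *	cfg.num_extracts = 2;
+ *	cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
+ *	cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
+ *	cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
+ *	cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;
+ *	cfg.extracts[1].type = DPKG_EXTRACT_FROM_HDR;
+ *	cfg.extracts[1].extract.from_hdr.prot = NET_PROT_IP;
+ *	cfg.extracts[1].extract.from_hdr.type = DPKG_FULL_FIELD;
+ *	cfg.extracts[1].extract.from_hdr.field = NH_FLD_IP_DST;
+ */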
+
+#endif /* _DPAA2_NI_DPKG_H */
diff --git a/sys/dev/dpaa2/dpaa2_rc.c b/sys/dev/dpaa2/dpaa2_rc.c
new file mode 100644
index 000000000000..f5d7bae92e04
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_rc.c
@@ -0,0 +1,3585 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * The DPAA2 Resource Container (DPRC) bus driver.
+ *
+ * DPRC holds all the resources and object information that a software context
+ * (kernel, virtual machine, etc.) can access or use.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/lock.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/smp.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include "pcib_if.h"
+#include "pci_if.h"
+
+#include "dpaa2_mcp.h"
+#include "dpaa2_mc.h"
+#include "dpaa2_ni.h"
+#include "dpaa2_mc_if.h"
+#include "dpaa2_cmd_if.h"
+
+/* Timeouts to wait for a command response from MC. */
+#define CMD_SPIN_TIMEOUT 100u /* us */
+#define CMD_SPIN_ATTEMPTS 2000u /* max. 200 ms */
+
+#define TYPE_LEN_MAX 16u
+#define LABEL_LEN_MAX 16u
+
+MALLOC_DEFINE(M_DPAA2_RC, "dpaa2_rc", "DPAA2 Resource Container");
+
+/* Discover and add devices to the resource container. */
+static int dpaa2_rc_discover(struct dpaa2_rc_softc *);
+static int dpaa2_rc_add_child(struct dpaa2_rc_softc *, struct dpaa2_cmd *,
+ struct dpaa2_obj *);
+static int dpaa2_rc_add_managed_child(struct dpaa2_rc_softc *,
+ struct dpaa2_cmd *, struct dpaa2_obj *);
+
+/* Helper routines. */
+static int dpaa2_rc_enable_irq(struct dpaa2_mcp *, struct dpaa2_cmd *, uint8_t,
+ bool, uint16_t);
+static int dpaa2_rc_configure_irq(device_t, device_t, int, uint64_t, uint32_t);
+static int dpaa2_rc_add_res(device_t, device_t, enum dpaa2_dev_type, int *, int);
+static int dpaa2_rc_print_type(struct resource_list *, enum dpaa2_dev_type);
+static struct dpaa2_mcp *dpaa2_rc_select_portal(device_t, device_t);
+
+/* Routines to send commands to MC. */
+static int dpaa2_rc_exec_cmd(struct dpaa2_mcp *, struct dpaa2_cmd *, uint16_t);
+static int dpaa2_rc_send_cmd(struct dpaa2_mcp *, struct dpaa2_cmd *);
+static int dpaa2_rc_wait_for_cmd(struct dpaa2_mcp *, struct dpaa2_cmd *);
+static int dpaa2_rc_reset_cmd_params(struct dpaa2_cmd *);
+
+static int
+dpaa2_rc_probe(device_t dev)
+{
+ /* DPRC device will be added by the parent DPRC or MC bus itself. */
+ device_set_desc(dev, "DPAA2 Resource Container");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+dpaa2_rc_detach(device_t dev)
+{
+ struct dpaa2_devinfo *dinfo;
+ int error;
+
+ error = bus_generic_detach(dev);
+ if (error)
+ return (error);
+
+ dinfo = device_get_ivars(dev);
+
+	if (dinfo != NULL) {
+		if (dinfo->portal != NULL)
+			dpaa2_mcp_free_portal(dinfo->portal);
+		free(dinfo, M_DPAA2_RC);
+	}
+
+ return (device_delete_children(dev));
+}
+
+static int
+dpaa2_rc_attach(device_t dev)
+{
+ device_t pdev;
+ struct dpaa2_mc_softc *mcsc;
+ struct dpaa2_rc_softc *sc;
+ struct dpaa2_devinfo *dinfo = NULL;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ sc->unit = device_get_unit(dev);
+
+ if (sc->unit == 0) {
+ /* Root DPRC should be attached directly to the MC bus. */
+ pdev = device_get_parent(dev);
+ mcsc = device_get_softc(pdev);
+
+ KASSERT(strcmp(device_get_name(pdev), "dpaa2_mc") == 0,
+ ("root DPRC should be attached to the MC bus"));
+
+ /*
+ * Allocate devinfo to let the parent MC bus access ICID of the
+ * DPRC object.
+ */
+ dinfo = malloc(sizeof(struct dpaa2_devinfo), M_DPAA2_RC,
+ M_WAITOK | M_ZERO);
+ if (!dinfo) {
+ device_printf(dev, "%s: failed to allocate "
+ "dpaa2_devinfo\n", __func__);
+ dpaa2_rc_detach(dev);
+ return (ENXIO);
+ }
+ device_set_ivars(dev, dinfo);
+
+ dinfo->pdev = pdev;
+ dinfo->dev = dev;
+ dinfo->dtype = DPAA2_DEV_RC;
+ dinfo->portal = NULL;
+
+ /* Prepare helper portal object to send commands to MC. */
+ error = dpaa2_mcp_init_portal(&dinfo->portal, mcsc->res[0],
+ &mcsc->map[0], DPAA2_PORTAL_DEF);
+ if (error) {
+ device_printf(dev, "%s: failed to initialize dpaa2_mcp: "
+ "error=%d\n", __func__, error);
+ dpaa2_rc_detach(dev);
+ return (ENXIO);
+ }
+ } else {
+ /* TODO: Child DPRCs aren't supported yet. */
+ return (ENXIO);
+ }
+
+ /* Create DPAA2 devices for objects in this container. */
+ error = dpaa2_rc_discover(sc);
+ if (error) {
+ device_printf(dev, "%s: failed to discover objects in "
+ "container: error=%d\n", __func__, error);
+ dpaa2_rc_detach(dev);
+ return (error);
+ }
+
+ return (0);
+}
+
+/*
+ * Bus interface.
+ */
+
+static struct resource_list *
+dpaa2_rc_get_resource_list(device_t rcdev, device_t child)
+{
+ struct dpaa2_devinfo *dinfo = device_get_ivars(child);
+
+ return (&dinfo->resources);
+}
+
+static void
+dpaa2_rc_delete_resource(device_t rcdev, device_t child, int type, int rid)
+{
+ struct resource_list *rl;
+ struct resource_list_entry *rle;
+ struct dpaa2_devinfo *dinfo;
+
+ if (device_get_parent(child) != rcdev)
+ return;
+
+ dinfo = device_get_ivars(child);
+ rl = &dinfo->resources;
+ rle = resource_list_find(rl, type, rid);
+ if (rle == NULL)
+ return;
+
+ if (rle->res) {
+ if (rman_get_flags(rle->res) & RF_ACTIVE ||
+ resource_list_busy(rl, type, rid)) {
+ device_printf(rcdev, "%s: resource still owned by "
+ "child: type=%d, rid=%d, start=%jx\n", __func__,
+ type, rid, rman_get_start(rle->res));
+ return;
+ }
+ resource_list_unreserve(rl, rcdev, child, type, rid);
+ }
+ resource_list_delete(rl, type, rid);
+}
+
+static struct resource *
+dpaa2_rc_alloc_multi_resource(device_t rcdev, device_t child, int type, int *rid,
+ rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
+{
+ struct resource_list *rl;
+ struct dpaa2_devinfo *dinfo;
+
+ dinfo = device_get_ivars(child);
+ rl = &dinfo->resources;
+
+ /*
+ * By default, software portal interrupts are message-based, that is,
+ * they are issued from QMan using a 4 byte write.
+ *
+ * TODO: However this default behavior can be changed by programming one
+ * or more software portals to issue their interrupts via a
+ * dedicated software portal interrupt wire.
+ * See registers SWP_INTW0_CFG to SWP_INTW3_CFG for details.
+ */
+ if (type == SYS_RES_IRQ && *rid == 0)
+ return (NULL);
+
+ return (resource_list_alloc(rl, rcdev, child, type, rid,
+ start, end, count, flags));
+}
+
+static struct resource *
+dpaa2_rc_alloc_resource(device_t rcdev, device_t child, int type, int *rid,
+ rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
+{
+ if (device_get_parent(child) != rcdev)
+ return (BUS_ALLOC_RESOURCE(device_get_parent(rcdev), child,
+ type, rid, start, end, count, flags));
+
+ return (dpaa2_rc_alloc_multi_resource(rcdev, child, type, rid, start,
+ end, count, flags));
+}
+
+static int
+dpaa2_rc_release_resource(device_t rcdev, device_t child, int type, int rid,
+ struct resource *r)
+{
+ struct resource_list *rl;
+ struct dpaa2_devinfo *dinfo;
+
+ if (device_get_parent(child) != rcdev)
+ return (BUS_RELEASE_RESOURCE(device_get_parent(rcdev), child,
+ type, rid, r));
+
+ dinfo = device_get_ivars(child);
+ rl = &dinfo->resources;
+ return (resource_list_release(rl, rcdev, child, type, rid, r));
+}
+
+static void
+dpaa2_rc_child_deleted(device_t rcdev, device_t child)
+{
+ struct dpaa2_devinfo *dinfo;
+ struct resource_list *rl;
+ struct resource_list_entry *rle;
+
+ dinfo = device_get_ivars(child);
+ rl = &dinfo->resources;
+
+ /* Free all allocated resources */
+ STAILQ_FOREACH(rle, rl, link) {
+ if (rle->res) {
+ if (rman_get_flags(rle->res) & RF_ACTIVE ||
+ resource_list_busy(rl, rle->type, rle->rid)) {
+ device_printf(child, "%s: resource still owned: "
+ "type=%d, rid=%d, addr=%lx\n", __func__,
+ rle->type, rle->rid,
+ rman_get_start(rle->res));
+ bus_release_resource(child, rle->type, rle->rid,
+ rle->res);
+ }
+ resource_list_unreserve(rl, rcdev, child, rle->type,
+ rle->rid);
+ }
+ }
+ resource_list_free(rl);
+
+ if (dinfo)
+ free(dinfo, M_DPAA2_RC);
+}
+
+static void
+dpaa2_rc_child_detached(device_t rcdev, device_t child)
+{
+ struct dpaa2_devinfo *dinfo;
+ struct resource_list *rl;
+
+ dinfo = device_get_ivars(child);
+ rl = &dinfo->resources;
+
+ if (resource_list_release_active(rl, rcdev, child, SYS_RES_IRQ) != 0)
+ device_printf(child, "%s: leaked IRQ resources!\n", __func__);
+ if (dinfo->msi.msi_alloc != 0) {
+ device_printf(child, "%s: leaked %d MSI vectors!\n", __func__,
+ dinfo->msi.msi_alloc);
+ PCI_RELEASE_MSI(rcdev, child);
+ }
+ if (resource_list_release_active(rl, rcdev, child, SYS_RES_MEMORY) != 0)
+ device_printf(child, "%s: leaked memory resources!\n", __func__);
+}
+
+static int
+dpaa2_rc_setup_intr(device_t rcdev, device_t child, struct resource *irq,
+ int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg,
+ void **cookiep)
+{
+ struct dpaa2_devinfo *dinfo;
+ uint64_t addr;
+ uint32_t data;
+ void *cookie;
+ int error, rid;
+
+ error = bus_generic_setup_intr(rcdev, child, irq, flags, filter, intr,
+ arg, &cookie);
+ if (error) {
+ device_printf(rcdev, "%s: bus_generic_setup_intr() failed: "
+ "error=%d\n", __func__, error);
+ return (error);
+ }
+
+ /* If this is not a direct child, just bail out. */
+ if (device_get_parent(child) != rcdev) {
+ *cookiep = cookie;
+ return (0);
+ }
+
+ rid = rman_get_rid(irq);
+ if (rid == 0) {
+ if (bootverbose)
+ device_printf(rcdev, "%s: cannot setup interrupt with "
+ "rid=0: INTx are not supported by DPAA2 objects "
+ "yet\n", __func__);
+ return (EINVAL);
+ } else {
+ dinfo = device_get_ivars(child);
+ KASSERT(dinfo->msi.msi_alloc > 0,
+ ("No MSI interrupts allocated"));
+
+ /*
+ * Ask our parent to map the MSI and give us the address and
+ * data register values. If we fail for some reason, teardown
+ * the interrupt handler.
+ */
+ error = PCIB_MAP_MSI(device_get_parent(rcdev), child,
+ rman_get_start(irq), &addr, &data);
+ if (error) {
+ device_printf(rcdev, "%s: PCIB_MAP_MSI failed: "
+ "error=%d\n", __func__, error);
+ (void)bus_generic_teardown_intr(rcdev, child, irq,
+ cookie);
+ return (error);
+ }
+
+ /* Configure MSI for this DPAA2 object. */
+ error = dpaa2_rc_configure_irq(rcdev, child, rid, addr, data);
+ if (error) {
+ device_printf(rcdev, "%s: failed to configure IRQ for "
+ "DPAA2 object: rid=%d, type=%s, unit=%d\n", __func__,
+ rid, dpaa2_ttos(dinfo->dtype),
+ device_get_unit(child));
+ return (error);
+ }
+ dinfo->msi.msi_handlers++;
+ }
+ *cookiep = cookie;
+ return (0);
+}
+
+static int
+dpaa2_rc_teardown_intr(device_t rcdev, device_t child, struct resource *irq,
+ void *cookie)
+{
+ struct resource_list_entry *rle;
+ struct dpaa2_devinfo *dinfo;
+ int error, rid;
+
+ if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
+ return (EINVAL);
+
+ /* If this isn't a direct child, just bail out */
+ if (device_get_parent(child) != rcdev)
+		return (bus_generic_teardown_intr(rcdev, child, irq, cookie));
+
+ rid = rman_get_rid(irq);
+ if (rid == 0) {
+ if (bootverbose)
+ device_printf(rcdev, "%s: cannot teardown interrupt "
+ "with rid=0: INTx are not supported by DPAA2 "
+ "objects yet\n", __func__);
+ return (EINVAL);
+ } else {
+ dinfo = device_get_ivars(child);
+ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
+ if (rle->res != irq)
+ return (EINVAL);
+ dinfo->msi.msi_handlers--;
+ }
+
+ error = bus_generic_teardown_intr(rcdev, child, irq, cookie);
+ if (rid > 0)
+ KASSERT(error == 0,
+ ("%s: generic teardown failed for MSI", __func__));
+ return (error);
+}
+
+static int
+dpaa2_rc_print_child(device_t rcdev, device_t child)
+{
+ struct dpaa2_devinfo *dinfo = device_get_ivars(child);
+ struct resource_list *rl = &dinfo->resources;
+ int retval = 0;
+
+ retval += bus_print_child_header(rcdev, child);
+
+ retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx");
+ retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx");
+ retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
+
+ /* Print DPAA2-specific resources. */
+ retval += dpaa2_rc_print_type(rl, DPAA2_DEV_IO);
+ retval += dpaa2_rc_print_type(rl, DPAA2_DEV_BP);
+ retval += dpaa2_rc_print_type(rl, DPAA2_DEV_CON);
+ retval += dpaa2_rc_print_type(rl, DPAA2_DEV_MCP);
+
+ retval += printf(" at %s (id=%u)", dpaa2_ttos(dinfo->dtype), dinfo->id);
+
+ retval += bus_print_child_domain(rcdev, child);
+ retval += bus_print_child_footer(rcdev, child);
+
+ return (retval);
+}
+
+/*
+ * Pseudo-PCI interface.
+ */
+
+/*
+ * Attempt to allocate *count MSI messages. The actual number allocated is
+ * returned in *count. After this function returns, each message will be
+ * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
+ *
+ * NOTE: Implementation is similar to sys/dev/pci/pci.c.
+ */
+static int
+dpaa2_rc_alloc_msi(device_t rcdev, device_t child, int *count)
+{
+ struct dpaa2_devinfo *rcinfo = device_get_ivars(rcdev);
+ struct dpaa2_devinfo *dinfo = device_get_ivars(child);
+ int error, actual, i, run, irqs[32];
+
+ /* Don't let count == 0 get us into trouble. */
+ if (*count == 0)
+ return (EINVAL);
+
+ /* MSI should be allocated by the resource container. */
+ if (rcinfo->dtype != DPAA2_DEV_RC)
+ return (ENODEV);
+
+ /* Already have allocated messages? */
+ if (dinfo->msi.msi_alloc != 0)
+ return (ENXIO);
+
+ /* Don't ask for more than the device supports. */
+ actual = min(*count, dinfo->msi.msi_msgnum);
+
+ /* Don't ask for more than 32 messages. */
+ actual = min(actual, 32);
+
+ /* MSI requires power of 2 number of messages. */
+ if (!powerof2(actual))
+ return (EINVAL);
+
+ for (;;) {
+ /* Try to allocate N messages. */
+ error = PCIB_ALLOC_MSI(device_get_parent(rcdev), child, actual,
+ actual, irqs);
+ if (error == 0)
+ break;
+ if (actual == 1)
+ return (error);
+
+ /* Try N / 2. */
+ actual >>= 1;
+ }
+
+ /*
+ * We now have N actual messages mapped onto SYS_RES_IRQ resources in
+ * the irqs[] array, so add new resources starting at rid 1.
+ */
+ for (i = 0; i < actual; i++)
+ resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
+ irqs[i], irqs[i], 1);
+
+ if (bootverbose) {
+ if (actual == 1) {
+ device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
+ } else {
+ /*
+ * Be fancy and try to print contiguous runs
+ * of IRQ values as ranges. 'run' is true if
+ * we are in a range.
+ */
+ device_printf(child, "using IRQs %d", irqs[0]);
+ run = 0;
+ for (i = 1; i < actual; i++) {
+ /* Still in a run? */
+ if (irqs[i] == irqs[i - 1] + 1) {
+ run = 1;
+ continue;
+ }
+
+ /* Finish previous range. */
+ if (run) {
+ printf("-%d", irqs[i - 1]);
+ run = 0;
+ }
+
+ /* Start new range. */
+ printf(",%d", irqs[i]);
+ }
+
+ /* Unfinished range? */
+ if (run)
+ printf("-%d", irqs[actual - 1]);
+ printf(" for MSI\n");
+ }
+ }
+
+ /* Update counts of alloc'd messages. */
+ dinfo->msi.msi_alloc = actual;
+ dinfo->msi.msi_handlers = 0;
+ *count = actual;
+ return (0);
+}
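+
+/*
+ * A minimal usage sketch (illustrative only): a direct child of the resource
+ * container asks for its MSIs through the pseudo-PCI interface; on success
+ * the message shows up as a SYS_RES_IRQ resource starting at rid 1.
+ *
+ *	int count = 1;
+ *
+ *	error = PCI_ALLOC_MSI(device_get_parent(dev), dev, &count);
+ */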
+
+/*
+ * Release the MSI messages associated with this DPAA2 device.
+ *
+ * NOTE: Implementation is similar to sys/dev/pci/pci.c.
+ */
+static int
+dpaa2_rc_release_msi(device_t rcdev, device_t child)
+{
+ struct dpaa2_devinfo *rcinfo = device_get_ivars(rcdev);
+ struct dpaa2_devinfo *dinfo = device_get_ivars(child);
+ struct resource_list_entry *rle;
+ int i, irqs[32];
+
+ /* MSI should be released by the resource container. */
+ if (rcinfo->dtype != DPAA2_DEV_RC)
+ return (ENODEV);
+
+ /* Do we have any messages to release? */
+ if (dinfo->msi.msi_alloc == 0)
+ return (ENODEV);
+ KASSERT(dinfo->msi.msi_alloc <= 32,
+ ("more than 32 alloc'd MSI messages"));
+
+ /* Make sure none of the resources are allocated. */
+ if (dinfo->msi.msi_handlers > 0)
+ return (EBUSY);
+ for (i = 0; i < dinfo->msi.msi_alloc; i++) {
+ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
+ KASSERT(rle != NULL, ("missing MSI resource"));
+ if (rle->res != NULL)
+ return (EBUSY);
+ irqs[i] = rle->start;
+ }
+
+ /* Release the messages. */
+ PCIB_RELEASE_MSI(device_get_parent(rcdev), child, dinfo->msi.msi_alloc,
+ irqs);
+ for (i = 0; i < dinfo->msi.msi_alloc; i++)
+ resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
+
+ /* Update alloc count. */
+ dinfo->msi.msi_alloc = 0;
+ return (0);
+}
+
+/**
+ * @brief Return the maximum number of the MSI supported by this DPAA2 device.
+ */
+static int
+dpaa2_rc_msi_count(device_t rcdev, device_t child)
+{
+ struct dpaa2_devinfo *dinfo = device_get_ivars(child);
+
+ return (dinfo->msi.msi_msgnum);
+}
+
+static int
+dpaa2_rc_get_id(device_t rcdev, device_t child, enum pci_id_type type,
+ uintptr_t *id)
+{
+ struct dpaa2_devinfo *rcinfo = device_get_ivars(rcdev);
+
+ if (rcinfo->dtype != DPAA2_DEV_RC)
+ return (ENODEV);
+
+ return (PCIB_GET_ID(device_get_parent(rcdev), child, type, id));
+}
+
+/*
+ * DPAA2 MC command interface.
+ */
+
+static int
+dpaa2_rc_mng_get_version(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t *major, uint32_t *minor, uint32_t *rev)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || major == NULL || minor == NULL ||
+ rev == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MNG_GET_VER);
+ if (!error) {
+ *major = cmd->params[0] >> 32;
+ *minor = cmd->params[1] & 0xFFFFFFFF;
+ *rev = cmd->params[0] & 0xFFFFFFFF;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_mng_get_soc_version(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t *pvr, uint32_t *svr)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || pvr == NULL || svr == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MNG_GET_SOC_VER);
+ if (!error) {
+ *pvr = cmd->params[0] >> 32;
+ *svr = cmd->params[0] & 0xFFFFFFFF;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_mng_get_container_id(device_t dev, device_t child,
+ struct dpaa2_cmd *cmd, uint32_t *cont_id)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || cont_id == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MNG_GET_CONT_ID);
+ if (!error)
+ *cont_id = cmd->params[0] & 0xFFFFFFFF;
+
+ return (error);
+}
+
+static int
+dpaa2_rc_open(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t cont_id, uint16_t *token)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ struct dpaa2_cmd_header *hdr;
+ int error;
+
+ if (portal == NULL || cmd == NULL || token == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ cmd->params[0] = cont_id;
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_OPEN);
+ if (!error) {
+ hdr = (struct dpaa2_cmd_header *) &cmd->header;
+ *token = hdr->token;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_CLOSE));
+}
+
+static int
+dpaa2_rc_get_obj_count(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t *obj_count)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || obj_count == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_OBJ_COUNT);
+ if (!error)
+ *obj_count = (uint32_t)(cmd->params[0] >> 32);
+
+ return (error);
+}
+
+static int
+dpaa2_rc_get_obj(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t obj_idx, struct dpaa2_obj *obj)
+{
+ struct __packed dpaa2_obj_resp {
+ uint32_t _reserved1;
+ uint32_t id;
+ uint16_t vendor;
+ uint8_t irq_count;
+ uint8_t reg_count;
+ uint32_t state;
+ uint16_t ver_major;
+ uint16_t ver_minor;
+ uint16_t flags;
+ uint16_t _reserved2;
+ uint8_t type[16];
+ uint8_t label[16];
+ } *pobj;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || obj == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ cmd->params[0] = obj_idx;
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_OBJ);
+ if (!error) {
+ pobj = (struct dpaa2_obj_resp *) &cmd->params[0];
+ obj->id = pobj->id;
+ obj->vendor = pobj->vendor;
+ obj->irq_count = pobj->irq_count;
+ obj->reg_count = pobj->reg_count;
+ obj->state = pobj->state;
+ obj->ver_major = pobj->ver_major;
+ obj->ver_minor = pobj->ver_minor;
+ obj->flags = pobj->flags;
+ obj->type = dpaa2_stot((const char *) pobj->type);
+ memcpy(obj->label, pobj->label, sizeof(pobj->label));
+ }
+
+ /* Some DPAA2 objects might not be supported by the driver yet. */
+ if (!error && obj->type == DPAA2_DEV_NOTYPE)
+ error = DPAA2_CMD_STAT_UNKNOWN_OBJ;
+
+ return (error);
+}
+
+static int
+dpaa2_rc_get_obj_descriptor(device_t dev, device_t child,
+ struct dpaa2_cmd *cmd, uint32_t obj_id, enum dpaa2_dev_type dtype,
+ struct dpaa2_obj *obj)
+{
+ struct __packed get_obj_desc_args {
+ uint32_t obj_id;
+ uint32_t _reserved1;
+ uint8_t type[16];
+ } *args;
+ struct __packed dpaa2_obj_resp {
+ uint32_t _reserved1;
+ uint32_t id;
+ uint16_t vendor;
+ uint8_t irq_count;
+ uint8_t reg_count;
+ uint32_t state;
+ uint16_t ver_major;
+ uint16_t ver_minor;
+ uint16_t flags;
+ uint16_t _reserved2;
+ uint8_t type[16];
+ uint8_t label[16];
+ } *pobj;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ const char *type = dpaa2_ttos(dtype);
+ int error;
+
+ if (portal == NULL || cmd == NULL || obj == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ args = (struct get_obj_desc_args *) &cmd->params[0];
+ args->obj_id = obj_id;
+ memcpy(args->type, type, min(strlen(type) + 1, TYPE_LEN_MAX));
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_OBJ_DESC);
+ if (!error) {
+ pobj = (struct dpaa2_obj_resp *) &cmd->params[0];
+ obj->id = pobj->id;
+ obj->vendor = pobj->vendor;
+ obj->irq_count = pobj->irq_count;
+ obj->reg_count = pobj->reg_count;
+ obj->state = pobj->state;
+ obj->ver_major = pobj->ver_major;
+ obj->ver_minor = pobj->ver_minor;
+ obj->flags = pobj->flags;
+ obj->type = dpaa2_stot((const char *) pobj->type);
+ memcpy(obj->label, pobj->label, sizeof(pobj->label));
+ }
+
+ /* Some DPAA2 objects might not be supported by the driver yet. */
+ if (!error && obj->type == DPAA2_DEV_NOTYPE)
+ error = DPAA2_CMD_STAT_UNKNOWN_OBJ;
+
+ return (error);
+}
+
+static int
+dpaa2_rc_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_rc_attr *attr)
+{
+ struct __packed dpaa2_rc_attr {
+ uint32_t cont_id;
+ uint32_t icid;
+ uint32_t options;
+ uint32_t portal_id;
+ } *pattr;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || attr == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_ATTR);
+ if (!error) {
+ pattr = (struct dpaa2_rc_attr *) &cmd->params[0];
+ attr->cont_id = pattr->cont_id;
+ attr->portal_id = pattr->portal_id;
+ attr->options = pattr->options;
+ attr->icid = pattr->icid;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_get_obj_region(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t obj_id, uint8_t reg_idx, enum dpaa2_dev_type dtype,
+ struct dpaa2_rc_obj_region *reg)
+{
+ struct __packed obj_region_args {
+ uint32_t obj_id;
+ uint16_t _reserved1;
+ uint8_t reg_idx;
+ uint8_t _reserved2;
+ uint64_t _reserved3;
+ uint64_t _reserved4;
+ uint8_t type[16];
+ } *args;
+ struct __packed obj_region {
+ uint64_t _reserved1;
+ uint64_t base_offset;
+ uint32_t size;
+ uint32_t type;
+ uint32_t flags;
+ uint32_t _reserved2;
+ uint64_t base_paddr;
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ uint16_t cmdid, api_major, api_minor;
+ const char *type = dpaa2_ttos(dtype);
+ int error;
+
+ if (portal == NULL || cmd == NULL || reg == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ /*
+ * If the DPRC object version was not yet cached, cache it now.
+ * Otherwise use the already cached value.
+ */
+ if (!portal->rc_api_major && !portal->rc_api_minor) {
+ error = DPAA2_CMD_RC_GET_API_VERSION(dev, child, cmd,
+ &api_major, &api_minor);
+ if (error)
+ return (error);
+ portal->rc_api_major = api_major;
+ portal->rc_api_minor = api_minor;
+ } else {
+ api_major = portal->rc_api_major;
+ api_minor = portal->rc_api_minor;
+ }
+
+ /* TODO: Remove magic numbers. */
+ if (api_major > 6u || (api_major == 6u && api_minor >= 6u))
+ /*
+ * MC API version 6.6 changed the size of the MC portals and
+ * software portals to 64K (as implemented by hardware).
+ */
+ cmdid = CMDID_RC_GET_OBJ_REG_V3;
+ else if (api_major == 6u && api_minor >= 3u)
+ /*
+ * MC API version 6.3 introduced a new field to the region
+ * descriptor: base_address.
+ */
+ cmdid = CMDID_RC_GET_OBJ_REG_V2;
+ else
+ cmdid = CMDID_RC_GET_OBJ_REG;
+
+ args = (struct obj_region_args *) &cmd->params[0];
+ args->obj_id = obj_id;
+ args->reg_idx = reg_idx;
+ memcpy(args->type, type, min(strlen(type) + 1, TYPE_LEN_MAX));
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, cmdid);
+ if (!error) {
+ resp = (struct obj_region *) &cmd->params[0];
+ reg->base_paddr = resp->base_paddr;
+ reg->base_offset = resp->base_offset;
+ reg->size = resp->size;
+ reg->flags = resp->flags;
+ reg->type = resp->type & 0xFu;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_get_api_version(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint16_t *major, uint16_t *minor)
+{
+ struct __packed rc_api_version {
+ uint16_t major;
+ uint16_t minor;
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || major == NULL || minor == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_API_VERSION);
+ if (!error) {
+ resp = (struct rc_api_version *) &cmd->params[0];
+ *major = resp->major;
+ *minor = resp->minor;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, uint8_t enable)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_enable_irq(portal, cmd, irq_idx, enable,
+ CMDID_RC_SET_IRQ_ENABLE));
+}
+
+static int
+dpaa2_rc_set_obj_irq(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, uint64_t addr, uint32_t data, uint32_t irq_usr,
+ uint32_t obj_id, enum dpaa2_dev_type dtype)
+{
+ struct __packed set_obj_irq_args {
+ uint32_t data;
+ uint8_t irq_idx;
+ uint8_t _reserved1[3];
+ uint64_t addr;
+ uint32_t irq_usr;
+ uint32_t obj_id;
+ uint8_t type[16];
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ const char *type = dpaa2_ttos(dtype);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ args = (struct set_obj_irq_args *) &cmd->params[0];
+ args->irq_idx = irq_idx;
+ args->addr = addr;
+ args->data = data;
+ args->irq_usr = irq_usr;
+ args->obj_id = obj_id;
+ memcpy(args->type, type, min(strlen(type) + 1, TYPE_LEN_MAX));
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_SET_OBJ_IRQ));
+}
+
+static int
+dpaa2_rc_get_conn(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ep_desc *ep1_desc, struct dpaa2_ep_desc *ep2_desc,
+ uint32_t *link_stat)
+{
+ struct __packed get_conn_args {
+ uint32_t ep1_id;
+ uint32_t ep1_ifid;
+ uint8_t ep1_type[16];
+ uint64_t _reserved[4];
+ } *args;
+ struct __packed get_conn_resp {
+ uint64_t _reserved1[3];
+ uint32_t ep2_id;
+ uint32_t ep2_ifid;
+ uint8_t ep2_type[16];
+ uint32_t link_stat;
+ uint32_t _reserved2;
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || ep1_desc == NULL ||
+ ep2_desc == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ args = (struct get_conn_args *) &cmd->params[0];
+ args->ep1_id = ep1_desc->obj_id;
+ args->ep1_ifid = ep1_desc->if_id;
+ /* TODO: Remove magic number. */
+ strncpy(args->ep1_type, dpaa2_ttos(ep1_desc->type), 16);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_CONN);
+ if (!error) {
+ resp = (struct get_conn_resp *) &cmd->params[0];
+ ep2_desc->obj_id = resp->ep2_id;
+ ep2_desc->if_id = resp->ep2_ifid;
+ ep2_desc->type = dpaa2_stot((const char *) resp->ep2_type);
+ if (link_stat != NULL)
+ *link_stat = resp->link_stat;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_ni_open(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t dpni_id, uint16_t *token)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ struct dpaa2_cmd_header *hdr;
+ int error;
+
+ if (portal == NULL || cmd == NULL || token == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ cmd->params[0] = dpni_id;
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_OPEN);
+ if (!error) {
+ hdr = (struct dpaa2_cmd_header *) &cmd->header;
+ *token = hdr->token;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_ni_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_CLOSE));
+}
+
+static int
+dpaa2_rc_ni_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_ENABLE));
+}
+
+static int
+dpaa2_rc_ni_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_DISABLE));
+}
+
+static int
+dpaa2_rc_ni_get_api_version(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint16_t *major, uint16_t *minor)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || major == NULL || minor == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_API_VER);
+ if (!error) {
+ *major = cmd->params[0] & 0xFFFFU;
+ *minor = (cmd->params[0] >> 16) & 0xFFFFU;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_ni_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_RESET));
+}
+
+static int
+dpaa2_rc_ni_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_attr *attr)
+{
+ struct __packed ni_attr {
+ uint32_t options;
+ uint8_t num_queues;
+ uint8_t num_rx_tcs;
+ uint8_t mac_entries;
+ uint8_t num_tx_tcs;
+ uint8_t vlan_entries;
+ uint8_t num_channels;
+ uint8_t qos_entries;
+ uint8_t _reserved1;
+ uint16_t fs_entries;
+ uint16_t _reserved2;
+ uint8_t qos_key_size;
+ uint8_t fs_key_size;
+ uint16_t wriop_ver;
+ uint8_t num_cgs;
+ uint8_t _reserved3;
+ uint16_t _reserved4;
+ uint64_t _reserved5[4];
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || attr == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_ATTR);
+ if (!error) {
+ resp = (struct ni_attr *) &cmd->params[0];
+
+ attr->options = resp->options;
+ attr->wriop_ver = resp->wriop_ver;
+
+ attr->entries.fs = resp->fs_entries;
+ attr->entries.mac = resp->mac_entries;
+ attr->entries.vlan = resp->vlan_entries;
+ attr->entries.qos = resp->qos_entries;
+
+ attr->num.queues = resp->num_queues;
+ attr->num.rx_tcs = resp->num_rx_tcs;
+ attr->num.tx_tcs = resp->num_tx_tcs;
+ attr->num.channels = resp->num_channels;
+ attr->num.cgs = resp->num_cgs;
+
+ attr->key_size.fs = resp->fs_key_size;
+ attr->key_size.qos = resp->qos_key_size;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_ni_set_buf_layout(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_buf_layout *bl)
+{
+ struct __packed set_buf_layout_args {
+ uint8_t queue_type;
+ uint8_t _reserved1;
+ uint16_t _reserved2;
+ uint16_t options;
+ uint8_t params;
+ uint8_t _reserved3;
+ uint16_t priv_data_size;
+ uint16_t data_align;
+ uint16_t head_room;
+ uint16_t tail_room;
+ uint64_t _reserved4[5];
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL || bl == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ args = (struct set_buf_layout_args *) &cmd->params[0];
+ args->queue_type = (uint8_t) bl->queue_type;
+ args->options = bl->options;
+ args->params = 0;
+ args->priv_data_size = bl->pd_size;
+ args->data_align = bl->fd_align;
+ args->head_room = bl->head_size;
+ args->tail_room = bl->tail_size;
+
+ args->params |= bl->pass_timestamp ? 1U : 0U;
+ args->params |= bl->pass_parser_result ? 2U : 0U;
+ args->params |= bl->pass_frame_status ? 4U : 0U;
+ args->params |= bl->pass_sw_opaque ? 8U : 0U;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_BUF_LAYOUT));
+}
+
+static int
+dpaa2_rc_ni_get_tx_data_offset(device_t dev, device_t child,
+ struct dpaa2_cmd *cmd, uint16_t *offset)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || offset == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_TX_DATA_OFF);
+ if (!error)
+ *offset = cmd->params[0] & 0xFFFFU;
+
+ return (error);
+}
+
+static int
+dpaa2_rc_ni_get_port_mac_addr(device_t dev, device_t child,
+ struct dpaa2_cmd *cmd, uint8_t *mac)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || mac == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_PORT_MAC_ADDR);
+ if (!error) {
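+ /* The MAC is in the upper six bytes of word 0, MSB first. */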
+ mac[0] = (cmd->params[0] >> 56) & 0xFFU;
+ mac[1] = (cmd->params[0] >> 48) & 0xFFU;
+ mac[2] = (cmd->params[0] >> 40) & 0xFFU;
+ mac[3] = (cmd->params[0] >> 32) & 0xFFU;
+ mac[4] = (cmd->params[0] >> 24) & 0xFFU;
+ mac[5] = (cmd->params[0] >> 16) & 0xFFU;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_ni_set_prim_mac_addr(device_t dev, device_t child,
+ struct dpaa2_cmd *cmd, uint8_t *mac)
+{
+ struct __packed set_prim_mac_args {
+ uint8_t _reserved[2];
+ uint8_t mac[ETHER_ADDR_LEN];
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL || mac == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ args = (struct set_prim_mac_args *) &cmd->params[0];
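+ /* The MAC address is passed to the MC in reversed byte order. */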
+ for (int i = 1; i <= ETHER_ADDR_LEN; i++)
+ args->mac[i - 1] = mac[ETHER_ADDR_LEN - i];
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_PRIM_MAC_ADDR));
+}
+
+static int
+dpaa2_rc_ni_get_prim_mac_addr(device_t dev, device_t child,
+ struct dpaa2_cmd *cmd, uint8_t *mac)
+{
+ struct __packed get_prim_mac_resp {
+ uint8_t _reserved[2];
+ uint8_t mac[ETHER_ADDR_LEN];
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || mac == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_PRIM_MAC_ADDR);
+ if (!error) {
+ resp = (struct get_prim_mac_resp *) &cmd->params[0];
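+ /* The MC returns the MAC address in reversed byte order. */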
+ for (int i = 1; i <= ETHER_ADDR_LEN; i++)
+ mac[ETHER_ADDR_LEN - i] = resp->mac[i - 1];
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_ni_set_link_cfg(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_link_cfg *cfg)
+{
+ struct __packed link_cfg_args {
+ uint64_t _reserved1;
+ uint32_t rate;
+ uint32_t _reserved2;
+ uint64_t options;
+ uint64_t adv_speeds;
+ uint64_t _reserved3[3];
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL || cfg == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ args = (struct link_cfg_args *) &cmd->params[0];
+ args->rate = cfg->rate;
+ args->options = cfg->options;
+ args->adv_speeds = cfg->adv_speeds;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_LINK_CFG));
+}
+
+static int
+dpaa2_rc_ni_get_link_cfg(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_link_cfg *cfg)
+{
+ struct __packed link_cfg_resp {
+ uint64_t _reserved1;
+ uint32_t rate;
+ uint32_t _reserved2;
+ uint64_t options;
+ uint64_t adv_speeds;
+ uint64_t _reserved3[3];
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || cfg == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_LINK_CFG);
+ if (!error) {
+ resp = (struct link_cfg_resp *) &cmd->params[0];
+ cfg->rate = resp->rate;
+ cfg->options = resp->options;
+ cfg->adv_speeds = resp->adv_speeds;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_ni_get_link_state(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_link_state *state)
+{
+ struct __packed link_state_resp {
+ uint32_t _reserved1;
+ uint32_t flags;
+ uint32_t rate;
+ uint32_t _reserved2;
+ uint64_t options;
+ uint64_t supported;
+ uint64_t advert;
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || state == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
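+ /* Clear stale command parameters before reuse. */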
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_LINK_STATE);
+ if (!error) {
+ resp = (struct link_state_resp *) &cmd->params[0];
+ state->options = resp->options;
+ state->adv_speeds = resp->advert;
+ state->sup_speeds = resp->supported;
+ state->rate = resp->rate;
+
+ state->link_up = resp->flags & 0x1u ? true : false;
+ state->state_valid = resp->flags & 0x2u ? true : false;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_ni_set_qos_table(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_qos_table *tbl)
+{
+ struct __packed qos_table_args {
+ uint32_t _reserved1;
+ uint8_t default_tc;
+ uint8_t options;
+ uint16_t _reserved2;
+ uint64_t _reserved[5];
+ uint64_t kcfg_busaddr;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL || tbl == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct qos_table_args *) &cmd->params[0];
+ args->default_tc = tbl->default_tc;
+ args->kcfg_busaddr = tbl->kcfg_busaddr;
+
+ args->options |= tbl->discard_on_miss ? 1U : 0U;
+ args->options |= tbl->keep_entries ? 2U : 0U;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_QOS_TABLE));
+}
+
+static int
+dpaa2_rc_ni_clear_qos_table(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_CLEAR_QOS_TABLE));
+}
+
+static int
+dpaa2_rc_ni_set_pools(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_pools_cfg *cfg)
+{
+ struct __packed set_pools_args {
+ uint8_t pools_num;
+ uint8_t backup_pool_mask;
+ uint8_t _reserved1;
+ uint8_t pool_as; /* assigning: 0 - QPRI, 1 - QDBIN */
+ uint32_t bp_obj_id[DPAA2_NI_MAX_POOLS];
+ uint16_t buf_sz[DPAA2_NI_MAX_POOLS];
+ uint32_t _reserved2;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL || cfg == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct set_pools_args *) &cmd->params[0];
+ args->pools_num = cfg->pools_num < DPAA2_NI_MAX_POOLS
+ ? cfg->pools_num : DPAA2_NI_MAX_POOLS;
+ for (uint32_t i = 0; i < args->pools_num; i++) {
+ args->bp_obj_id[i] = cfg->pools[i].bp_obj_id;
+ args->buf_sz[i] = cfg->pools[i].buf_sz;
+ args->backup_pool_mask |= (cfg->pools[i].backup_flag & 1) << i;
+ }
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_POOLS));
+}
+
+static int
+dpaa2_rc_ni_set_err_behavior(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_err_cfg *cfg)
+{
+ struct __packed err_behavior_args {
+ uint32_t err_mask;
+ uint8_t flags;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL || cfg == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct err_behavior_args *) &cmd->params[0];
+ args->err_mask = cfg->err_mask;
+
+ args->flags |= cfg->set_err_fas ? 0x10u : 0u;
+ args->flags |= ((uint8_t) cfg->action) & 0x0Fu;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_ERR_BEHAVIOR));
+}
+
+static int
+dpaa2_rc_ni_get_queue(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_queue_cfg *cfg)
+{
+ struct __packed get_queue_args {
+ uint8_t queue_type;
+ uint8_t tc;
+ uint8_t idx;
+ uint8_t chan_id;
+ } *args;
+ struct __packed get_queue_resp {
+ uint64_t _reserved1;
+ uint32_t dest_id;
+ uint16_t _reserved2;
+ uint8_t priority;
+ uint8_t flags;
+ uint64_t flc;
+ uint64_t user_ctx;
+ uint32_t fqid;
+ uint16_t qdbin;
+ uint16_t _reserved3;
+ uint8_t cgid;
+ uint8_t _reserved[15];
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || cfg == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct get_queue_args *) &cmd->params[0];
+ args->queue_type = (uint8_t) cfg->type;
+ args->tc = cfg->tc;
+ args->idx = cfg->idx;
+ args->chan_id = cfg->chan_id;
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_QUEUE);
+ if (!error) {
+ resp = (struct get_queue_resp *) &cmd->params[0];
+
+ cfg->dest_id = resp->dest_id;
+ cfg->priority = resp->priority;
+ cfg->flow_ctx = resp->flc;
+ cfg->user_ctx = resp->user_ctx;
+ cfg->fqid = resp->fqid;
+ cfg->qdbin = resp->qdbin;
+ cfg->cgid = resp->cgid;
+
+ cfg->dest_type = (enum dpaa2_ni_dest_type)(resp->flags & 0x0Fu);
+ cfg->cgid_valid = (resp->flags & 0x20u) > 0u ? true : false;
+ cfg->stash_control = (resp->flags & 0x40u) > 0u ? true : false;
+ cfg->hold_active = (resp->flags & 0x80u) > 0u ? true : false;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_ni_set_queue(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_ni_queue_cfg *cfg)
+{
+ struct __packed set_queue_args {
+ uint8_t queue_type;
+ uint8_t tc;
+ uint8_t idx;
+ uint8_t options;
+ uint32_t _reserved1;
+ uint32_t dest_id;
+ uint16_t _reserved2;
+ uint8_t priority;
+ uint8_t flags;
+ uint64_t flc;
+ uint64_t user_ctx;
+ uint8_t cgid;
+ uint8_t chan_id;
+ uint8_t _reserved[23];
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL || cfg == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct set_queue_args *) &cmd->params[0];
+ args->queue_type = (uint8_t) cfg->type;
+ args->tc = cfg->tc;
+ args->idx = cfg->idx;
+ args->options = cfg->options;
+ args->dest_id = cfg->dest_id;
+ args->priority = cfg->priority;
+ args->flc = cfg->flow_ctx;
+ args->user_ctx = cfg->user_ctx;
+ args->cgid = cfg->cgid;
+ args->chan_id = cfg->chan_id;
+
+ args->flags |= (uint8_t)(cfg->dest_type & 0x0Fu);
+ args->flags |= cfg->stash_control ? 0x40u : 0u;
+ args->flags |= cfg->hold_active ? 0x80u : 0u;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_QUEUE));
+}
+
+static int
+dpaa2_rc_ni_get_qdid(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ enum dpaa2_ni_queue_type type, uint16_t *qdid)
+{
+ struct __packed get_qdid_args {
+ uint8_t queue_type;
+ } *args;
+ struct __packed get_qdid_resp {
+ uint16_t qdid;
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || qdid == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct get_qdid_args *) &cmd->params[0];
+ args->queue_type = (uint8_t) type;
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_QDID);
+ if (!error) {
+ resp = (struct get_qdid_resp *) &cmd->params[0];
+ *qdid = resp->qdid;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_ni_add_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t *mac)
+{
+ struct __packed add_mac_args {
+ uint8_t flags;
+ uint8_t _reserved;
+ uint8_t mac[ETHER_ADDR_LEN];
+ uint8_t tc_id;
+ uint8_t fq_id;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL || mac == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct add_mac_args *) &cmd->params[0];
+ for (int i = 1; i <= ETHER_ADDR_LEN; i++)
+ args->mac[i - 1] = mac[ETHER_ADDR_LEN - i];
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_ADD_MAC_ADDR));
+}
+
+static int
+dpaa2_rc_ni_remove_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t *mac)
+{
+ struct __packed rem_mac_args {
+ uint16_t _reserved;
+ uint8_t mac[ETHER_ADDR_LEN];
+ uint64_t _reserved1[6];
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL || mac == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct rem_mac_args *) &cmd->params[0];
+ for (int i = 1; i <= ETHER_ADDR_LEN; i++)
+ args->mac[i - 1] = mac[ETHER_ADDR_LEN - i];
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_REMOVE_MAC_ADDR));
+}
+
+static int
+dpaa2_rc_ni_clear_mac_filters(device_t dev, device_t child,
+ struct dpaa2_cmd *cmd, bool rm_uni, bool rm_multi)
+{
+ struct __packed clear_mac_filters_args {
+ uint8_t flags;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct clear_mac_filters_args *) &cmd->params[0];
+ args->flags |= rm_uni ? 0x1 : 0x0;
+ args->flags |= rm_multi ? 0x2 : 0x0;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_CLEAR_MAC_FILTERS));
+}
+
+static int
+dpaa2_rc_ni_set_mfl(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint16_t length)
+{
+ struct __packed set_mfl_args {
+ uint16_t length;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct set_mfl_args *) &cmd->params[0];
+ args->length = length;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_MFL));
+}
+
+static int
+dpaa2_rc_ni_set_offload(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ enum dpaa2_ni_ofl_type ofl_type, bool en)
+{
+ struct __packed set_ofl_args {
+ uint8_t _reserved[3];
+ uint8_t ofl_type;
+ uint32_t config;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct set_ofl_args *) &cmd->params[0];
+ args->ofl_type = (uint8_t) ofl_type;
+ args->config = en ? 1u : 0u;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_OFFLOAD));
+}
+
+static int
+dpaa2_rc_ni_set_irq_mask(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, uint32_t mask)
+{
+ struct __packed set_irq_mask_args {
+ uint32_t mask;
+ uint8_t irq_idx;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct set_irq_mask_args *) &cmd->params[0];
+ args->mask = mask;
+ args->irq_idx = irq_idx;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_IRQ_MASK));
+}
+
+static int
+dpaa2_rc_ni_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, bool en)
+{
+ struct __packed set_irq_enable_args {
+ uint32_t en;
+ uint8_t irq_idx;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct set_irq_enable_args *) &cmd->params[0];
+ args->en = en ? 1u : 0u;
+ args->irq_idx = irq_idx;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_IRQ_ENABLE));
+}
+
+static int
+dpaa2_rc_ni_get_irq_status(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, uint32_t *status)
+{
+ struct __packed get_irq_stat_args {
+ uint32_t status;
+ uint8_t irq_idx;
+ } *args;
+ struct __packed get_irq_stat_resp {
+ uint32_t status;
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || status == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct get_irq_stat_args *) &cmd->params[0];
+ args->status = *status;
+ args->irq_idx = irq_idx;
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_IRQ_STATUS);
+ if (!error) {
+ resp = (struct get_irq_stat_resp *) &cmd->params[0];
+ *status = resp->status;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_ni_set_uni_promisc(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ bool en)
+{
+ struct __packed set_uni_promisc_args {
+ uint8_t en;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct set_uni_promisc_args *) &cmd->params[0];
+ args->en = en ? 1u : 0u;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_UNI_PROMISC));
+}
+
+static int
+dpaa2_rc_ni_set_multi_promisc(device_t dev, device_t child,
+ struct dpaa2_cmd *cmd, bool en)
+{
+ /* TODO: Implementation is the same as for ni_set_uni_promisc(). */
+ struct __packed set_multi_promisc_args {
+ uint8_t en;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct set_multi_promisc_args *) &cmd->params[0];
+ args->en = en ? 1u : 0u;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_MULTI_PROMISC));
+}
+
+static int
+dpaa2_rc_ni_get_statistics(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t page, uint16_t param, uint64_t *cnt)
+{
+ struct __packed get_statistics_args {
+ uint8_t page;
+ uint16_t param;
+ } *args;
+ struct __packed get_statistics_resp {
+ uint64_t cnt[7];
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || cnt == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct get_statistics_args *) &cmd->params[0];
+ args->page = page;
+ args->param = param;
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_STATISTICS);
+ if (!error) {
+ resp = (struct get_statistics_resp *) &cmd->params[0];
+ for (int i = 0; i < DPAA2_NI_STAT_COUNTERS; i++)
+ cnt[i] = resp->cnt[i];
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_ni_set_rx_tc_dist(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint16_t dist_size, uint8_t tc, enum dpaa2_ni_dist_mode dist_mode,
+ bus_addr_t key_cfg_buf)
+{
+ struct __packed set_rx_tc_dist_args {
+ uint16_t dist_size;
+ uint8_t tc;
+ uint8_t ma_dm; /* miss action + dist. mode */
+ uint32_t _reserved1;
+ uint64_t _reserved2[5];
+ uint64_t key_cfg_iova;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct set_rx_tc_dist_args *) &cmd->params[0];
+ args->dist_size = dist_size;
+ args->tc = tc;
+ args->ma_dm = ((uint8_t) dist_mode) & 0x0Fu;
+ args->key_cfg_iova = key_cfg_buf;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_RX_TC_DIST));
+}
+
+static int
+dpaa2_rc_io_open(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t dpio_id, uint16_t *token)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ struct dpaa2_cmd_header *hdr;
+ int error;
+
+ if (portal == NULL || cmd == NULL || token == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ cmd->params[0] = dpio_id;
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_OPEN);
+ if (!error) {
+ hdr = (struct dpaa2_cmd_header *) &cmd->header;
+ *token = hdr->token;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_io_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_CLOSE));
+}
+
+static int
+dpaa2_rc_io_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_ENABLE));
+}
+
+static int
+dpaa2_rc_io_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_DISABLE));
+}
+
+static int
+dpaa2_rc_io_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_RESET));
+}
+
+static int
+dpaa2_rc_io_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_io_attr *attr)
+{
+ struct __packed dpaa2_io_attr {
+ uint32_t id;
+ uint16_t swp_id;
+ uint8_t priors_num;
+ uint8_t chan_mode;
+ uint64_t swp_ce_paddr;
+ uint64_t swp_ci_paddr;
+ uint32_t swp_version;
+ uint32_t _reserved1;
+ uint32_t swp_clk;
+ uint32_t _reserved2[5];
+ } *pattr;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || attr == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_GET_ATTR);
+ if (!error) {
+ pattr = (struct dpaa2_io_attr *) &cmd->params[0];
+
+ attr->swp_ce_paddr = pattr->swp_ce_paddr;
+ attr->swp_ci_paddr = pattr->swp_ci_paddr;
+ attr->swp_version = pattr->swp_version;
+ attr->swp_clk = pattr->swp_clk;
+ attr->id = pattr->id;
+ attr->swp_id = pattr->swp_id;
+ attr->priors_num = pattr->priors_num;
+ attr->chan_mode = (enum dpaa2_io_chan_mode)
+ pattr->chan_mode;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_io_set_irq_mask(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, uint32_t mask)
+{
+ /* TODO: Extract similar *_set_irq_mask() into one function. */
+ struct __packed set_irq_mask_args {
+ uint32_t mask;
+ uint8_t irq_idx;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct set_irq_mask_args *) &cmd->params[0];
+ args->mask = mask;
+ args->irq_idx = irq_idx;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_SET_IRQ_MASK));
+}
+
+static int
+dpaa2_rc_io_get_irq_status(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, uint32_t *status)
+{
+ /* TODO: Extract similar *_get_irq_status() into one function. */
+ struct __packed get_irq_stat_args {
+ uint32_t status;
+ uint8_t irq_idx;
+ } *args;
+ struct __packed get_irq_stat_resp {
+ uint32_t status;
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || status == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct get_irq_stat_args *) &cmd->params[0];
+ args->status = *status;
+ args->irq_idx = irq_idx;
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_GET_IRQ_STATUS);
+ if (!error) {
+ resp = (struct get_irq_stat_resp *) &cmd->params[0];
+ *status = resp->status;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_io_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, bool en)
+{
+ /* TODO: Extract similar *_set_irq_enable() into one function. */
+ struct __packed set_irq_enable_args {
+ uint32_t en;
+ uint8_t irq_idx;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct set_irq_enable_args *) &cmd->params[0];
+ args->en = en ? 1u : 0u;
+ args->irq_idx = irq_idx;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_SET_IRQ_ENABLE));
+}
+
+static int
+dpaa2_rc_io_add_static_dq_chan(device_t dev, device_t child,
+ struct dpaa2_cmd *cmd, uint32_t dpcon_id, uint8_t *chan_idx)
+{
+ struct __packed add_static_dq_chan_args {
+ uint32_t dpcon_id;
+ } *args;
+ struct __packed add_static_dq_chan_resp {
+ uint8_t chan_idx;
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || chan_idx == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct add_static_dq_chan_args *) &cmd->params[0];
+ args->dpcon_id = dpcon_id;
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_ADD_STATIC_DQ_CHAN);
+ if (!error) {
+ resp = (struct add_static_dq_chan_resp *) &cmd->params[0];
+ *chan_idx = resp->chan_idx;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_bp_open(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t dpbp_id, uint16_t *token)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ struct dpaa2_cmd_header *hdr;
+ int error;
+
+ if (portal == NULL || cmd == NULL || token == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ cmd->params[0] = dpbp_id;
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_OPEN);
+ if (!error) {
+ hdr = (struct dpaa2_cmd_header *) &cmd->header;
+ *token = hdr->token;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_bp_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_CLOSE));
+}
+
+static int
+dpaa2_rc_bp_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_ENABLE));
+}
+
+static int
+dpaa2_rc_bp_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_DISABLE));
+}
+
+static int
+dpaa2_rc_bp_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_RESET));
+}
+
+static int
+dpaa2_rc_bp_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_bp_attr *attr)
+{
+ struct __packed dpaa2_bp_attr {
+ uint16_t _reserved1;
+ uint16_t bpid;
+ uint32_t id;
+ } *pattr;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || attr == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_GET_ATTR);
+ if (!error) {
+ pattr = (struct dpaa2_bp_attr *) &cmd->params[0];
+ attr->id = pattr->id;
+ attr->bpid = pattr->bpid;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_mac_open(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t dpmac_id, uint16_t *token)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ struct dpaa2_cmd_header *hdr;
+ int error;
+
+ if (portal == NULL || cmd == NULL || token == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ cmd->params[0] = dpmac_id;
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_OPEN);
+ if (!error) {
+ hdr = (struct dpaa2_cmd_header *) &cmd->header;
+ *token = hdr->token;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_mac_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_CLOSE));
+}
+
+static int
+dpaa2_rc_mac_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_RESET));
+}
+
+static int
+dpaa2_rc_mac_mdio_read(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t phy, uint16_t reg, uint16_t *val)
+{
+ struct __packed mdio_read_args {
+ uint8_t clause; /* set to 0 by default */
+ uint8_t phy;
+ uint16_t reg;
+ uint32_t _reserved1;
+ uint64_t _reserved2[6];
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || val == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ args = (struct mdio_read_args *) &cmd->params[0];
+ args->phy = phy;
+ args->reg = reg;
+ args->clause = 0;
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_MDIO_READ);
+ if (!error)
+ *val = cmd->params[0] & 0xFFFF;
+
+ return (error);
+}
+
+static int
+dpaa2_rc_mac_mdio_write(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t phy, uint16_t reg, uint16_t val)
+{
+ struct __packed mdio_write_args {
+ uint8_t clause; /* set to 0 by default */
+ uint8_t phy;
+ uint16_t reg;
+ uint16_t val;
+ uint16_t _reserved1;
+ uint64_t _reserved2[6];
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ args = (struct mdio_write_args *) &cmd->params[0];
+ args->phy = phy;
+ args->reg = reg;
+ args->val = val;
+ args->clause = 0;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_MDIO_WRITE));
+}
+
+static int
+dpaa2_rc_mac_get_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t *mac)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || mac == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_GET_ADDR);
+ if (!error) {
+ mac[0] = (cmd->params[0] >> 56) & 0xFFU;
+ mac[1] = (cmd->params[0] >> 48) & 0xFFU;
+ mac[2] = (cmd->params[0] >> 40) & 0xFFU;
+ mac[3] = (cmd->params[0] >> 32) & 0xFFU;
+ mac[4] = (cmd->params[0] >> 24) & 0xFFU;
+ mac[5] = (cmd->params[0] >> 16) & 0xFFU;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_mac_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_mac_attr *attr)
+{
+ struct __packed mac_attr_resp {
+ uint8_t eth_if;
+ uint8_t link_type;
+ uint16_t id;
+ uint32_t max_rate;
+
+ uint8_t fec_mode;
+ uint8_t ifg_mode;
+ uint8_t ifg_len;
+ uint8_t _reserved1;
+ uint32_t _reserved2;
+
+ uint8_t sgn_post_pre;
+ uint8_t serdes_cfg_mode;
+ uint8_t eq_amp_red;
+ uint8_t eq_post1q;
+ uint8_t eq_preq;
+ uint8_t eq_type;
+ uint16_t _reserved3;
+
+ uint64_t _reserved[4];
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || attr == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_GET_ATTR);
+ if (!error) {
+ resp = (struct mac_attr_resp *) &cmd->params[0];
+ attr->id = resp->id;
+ attr->max_rate = resp->max_rate;
+ attr->eth_if = resp->eth_if;
+ attr->link_type = resp->link_type;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_mac_set_link_state(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_mac_link_state *state)
+{
+ struct __packed mac_set_link_args {
+ uint64_t options;
+ uint32_t rate;
+ uint32_t _reserved1;
+ uint32_t flags;
+ uint32_t _reserved2;
+ uint64_t supported;
+ uint64_t advert;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL || state == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct mac_set_link_args *) &cmd->params[0];
+ args->options = state->options;
+ args->rate = state->rate;
+ args->supported = state->supported;
+ args->advert = state->advert;
+
+ args->flags |= state->up ? 0x1u : 0u;
+ args->flags |= state->state_valid ? 0x2u : 0u;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_SET_LINK_STATE));
+}
+
+static int
+dpaa2_rc_mac_set_irq_mask(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, uint32_t mask)
+{
+ /* TODO: Implementation is the same as for ni_set_irq_mask(). */
+ struct __packed set_irq_mask_args {
+ uint32_t mask;
+ uint8_t irq_idx;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct set_irq_mask_args *) &cmd->params[0];
+ args->mask = mask;
+ args->irq_idx = irq_idx;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_SET_IRQ_MASK));
+}
+
+static int
+dpaa2_rc_mac_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, bool en)
+{
+ /* TODO: Implementation is the same as for ni_set_irq_enable(). */
+ struct __packed set_irq_enable_args {
+ uint32_t en;
+ uint8_t irq_idx;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct set_irq_enable_args *) &cmd->params[0];
+ args->en = en ? 1u : 0u;
+ args->irq_idx = irq_idx;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_SET_IRQ_ENABLE));
+}
+
+static int
+dpaa2_rc_mac_get_irq_status(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, uint32_t *status)
+{
+ /* TODO: Implementation is the same as ni_get_irq_status(). */
+ struct __packed get_irq_stat_args {
+ uint32_t status;
+ uint8_t irq_idx;
+ } *args;
+ struct __packed get_irq_stat_resp {
+ uint32_t status;
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || status == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ dpaa2_rc_reset_cmd_params(cmd);
+
+ args = (struct get_irq_stat_args *) &cmd->params[0];
+ args->status = *status;
+ args->irq_idx = irq_idx;
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_GET_IRQ_STATUS);
+ if (!error) {
+ resp = (struct get_irq_stat_resp *) &cmd->params[0];
+ *status = resp->status;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_con_open(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t dpcon_id, uint16_t *token)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ struct dpaa2_cmd_header *hdr;
+ int error;
+
+ if (portal == NULL || cmd == NULL || token == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ cmd->params[0] = dpcon_id;
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_OPEN);
+ if (!error) {
+ hdr = (struct dpaa2_cmd_header *) &cmd->header;
+ *token = hdr->token;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_con_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_CLOSE));
+}
+
+static int
+dpaa2_rc_con_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_RESET));
+}
+
+static int
+dpaa2_rc_con_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_ENABLE));
+}
+
+static int
+dpaa2_rc_con_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_DISABLE));
+}
+
+static int
+dpaa2_rc_con_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_con_attr *attr)
+{
+ struct __packed con_attr_resp {
+ uint32_t id;
+ uint16_t chan_id;
+ uint8_t prior_num;
+ uint8_t _reserved1;
+ uint64_t _reserved2[6];
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || attr == NULL)
+ return (DPAA2_CMD_STAT_EINVAL);
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_GET_ATTR);
+ if (!error) {
+ resp = (struct con_attr_resp *) &cmd->params[0];
+ attr->id = resp->id;
+ attr->chan_id = resp->chan_id;
+ attr->prior_num = resp->prior_num;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_con_set_notif(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ struct dpaa2_con_notif_cfg *cfg)
+{
+ struct __packed set_notif_args {
+ uint32_t dpio_id;
+ uint8_t prior;
+ uint8_t _reserved1;
+ uint16_t _reserved2;
+ uint64_t ctx;
+ uint64_t _reserved3[5];
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL || cfg == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ args = (struct set_notif_args *) &cmd->params[0];
+ args->dpio_id = cfg->dpio_id;
+ args->prior = cfg->prior;
+ args->ctx = cfg->qman_ctx;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_SET_NOTIF));
+}
+
+static int
+dpaa2_rc_mcp_create(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t portal_id, uint32_t options, uint32_t *dpmcp_id)
+{
+ struct __packed mcp_create_args {
+ uint32_t portal_id;
+ uint32_t options;
+ uint64_t _reserved[6];
+ } *args;
+ struct __packed mcp_create_resp {
+ uint32_t dpmcp_id;
+ } *resp;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ int error;
+
+ if (portal == NULL || cmd == NULL || dpmcp_id == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ args = (struct mcp_create_args *) &cmd->params[0];
+ args->portal_id = portal_id;
+ args->options = options;
+
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_CREATE);
+ if (!error) {
+ resp = (struct mcp_create_resp *) &cmd->params[0];
+ *dpmcp_id = resp->dpmcp_id;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_mcp_destroy(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t dpmcp_id)
+{
+ struct __packed mcp_destroy_args {
+ uint32_t dpmcp_id;
+ } *args;
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ args = (struct mcp_destroy_args *) &cmd->params[0];
+ args->dpmcp_id = dpmcp_id;
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_DESTROY));
+}
+
+static int
+dpaa2_rc_mcp_open(device_t dev, device_t child, struct dpaa2_cmd *cmd,
+ uint32_t dpmcp_id, uint16_t *token)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ struct dpaa2_cmd_header *hdr;
+ int error;
+
+ if (portal == NULL || cmd == NULL || token == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ cmd->params[0] = dpmcp_id;
+ error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_OPEN);
+ if (!error) {
+ hdr = (struct dpaa2_cmd_header *) &cmd->header;
+ *token = hdr->token;
+ }
+
+ return (error);
+}
+
+static int
+dpaa2_rc_mcp_close(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_CLOSE));
+}
+
+static int
+dpaa2_rc_mcp_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+
+ if (portal == NULL || cmd == NULL)
+ return (DPAA2_CMD_STAT_ERR);
+
+ return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_RESET));
+}
+
+/**
+ * @brief Create and add devices for DPAA2 objects in this resource container.
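+ *
+ * Discovery runs in three passes: MC portals (DPMCP) are added and attached
+ * first, the other managed objects second, and the remaining objects last,
+ * so that later devices can consume the earlier ones as resources.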
+ */
+static int
+dpaa2_rc_discover(struct dpaa2_rc_softc *sc)
+{
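+ /* Commands are issued on behalf of the RC itself, so child == rcdev. */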
+ device_t rcdev = sc->dev;
+ device_t child = sc->dev;
+ struct dpaa2_devinfo *rcinfo = device_get_ivars(rcdev);
+ struct dpaa2_cmd *cmd = NULL;
+ struct dpaa2_rc_attr dprc_attr;
+ struct dpaa2_obj obj;
+ uint32_t major, minor, rev, obj_count;
+ uint16_t rc_token;
+ int rc;
+
+ /* Allocate a command to send to MC hardware. */
+ rc = dpaa2_mcp_init_command(&cmd, DPAA2_CMD_DEF);
+ if (rc) {
+ device_printf(rcdev, "%s: failed to allocate dpaa2_cmd: "
+ "error=%d\n", __func__, rc);
+ return (ENXIO);
+ }
+
+ /* Print MC firmware version. */
+ rc = DPAA2_CMD_MNG_GET_VERSION(rcdev, child, cmd, &major, &minor, &rev);
+ if (rc) {
+ device_printf(rcdev, "%s: failed to get MC firmware version: "
+ "error=%d\n", __func__, rc);
+ dpaa2_mcp_free_command(cmd);
+ return (ENXIO);
+ }
+ device_printf(rcdev, "MC firmware version: %u.%u.%u\n", major, minor,
+ rev);
+
+ /* Obtain the container ID associated with this MC portal. */
+ rc = DPAA2_CMD_MNG_GET_CONTAINER_ID(rcdev, child, cmd, &sc->cont_id);
+ if (rc) {
+ device_printf(rcdev, "%s: failed to get container id: "
+ "error=%d\n", __func__, rc);
+ dpaa2_mcp_free_command(cmd);
+ return (ENXIO);
+ }
+ if (bootverbose)
+ device_printf(rcdev, "Resource container ID: %u\n", sc->cont_id);
+
+ /* Open the resource container. */
+ rc = DPAA2_CMD_RC_OPEN(rcdev, child, cmd, sc->cont_id, &rc_token);
+ if (rc) {
+ device_printf(rcdev, "%s: failed to open container: cont_id=%u, "
+ "error=%d\n", __func__, sc->cont_id, rc);
+ dpaa2_mcp_free_command(cmd);
+ return (ENXIO);
+ }
+
+ /* Obtain the number of objects in this container. */
+ rc = DPAA2_CMD_RC_GET_OBJ_COUNT(rcdev, child, cmd, &obj_count);
+ if (rc) {
+ device_printf(rcdev, "%s: failed to count objects in container: "
+ "cont_id=%u, error=%d\n", __func__, sc->cont_id, rc);
+ DPAA2_CMD_RC_CLOSE(rcdev, child, cmd);
+ dpaa2_mcp_free_command(cmd);
+ return (ENXIO);
+ }
+ if (bootverbose)
+ device_printf(rcdev, "Objects in container: %u\n", obj_count);
+
+ /* Obtain container attributes (including ICID). */
+ rc = DPAA2_CMD_RC_GET_ATTRIBUTES(rcdev, child, cmd, &dprc_attr);
+ if (rc) {
+ device_printf(rcdev, "%s: failed to get attributes of the "
+ "container: cont_id=%u, error=%d\n", __func__, sc->cont_id,
+ rc);
+ DPAA2_CMD_RC_CLOSE(rcdev, child, cmd);
+ dpaa2_mcp_free_command(cmd);
+ return (ENXIO);
+ }
+ if (bootverbose)
+ device_printf(rcdev, "Isolation context ID: %u\n",
+ dprc_attr.icid);
+ if (rcinfo) {
+ rcinfo->id = dprc_attr.cont_id;
+ rcinfo->portal_id = dprc_attr.portal_id;
+ rcinfo->icid = dprc_attr.icid;
+ }
+
+ /*
+ * Add MC portals before everything else.
+ * TODO: Discover DPAA2 objects on-demand.
+ */
+ for (uint32_t i = 0; i < obj_count; i++) {
+ rc = DPAA2_CMD_RC_GET_OBJ(rcdev, child, cmd, i, &obj);
+ if (rc)
+ continue; /* Skip silently for now. */
+ if (obj.type != DPAA2_DEV_MCP)
+ continue;
+
+ dpaa2_rc_add_managed_child(sc, cmd, &obj);
+ }
+ /* Probe and attach MC portals. */
+ bus_generic_probe(rcdev);
+ rc = bus_generic_attach(rcdev);
+ if (rc) {
+ DPAA2_CMD_RC_CLOSE(rcdev, child, cmd);
+ dpaa2_mcp_free_command(cmd);
+ return (rc);
+ }
+
+ /* Add managed devices (except DPMCPs) to the resource container. */
+ for (uint32_t i = 0; i < obj_count; i++) {
+ rc = DPAA2_CMD_RC_GET_OBJ(rcdev, child, cmd, i, &obj);
+ if (rc) {
+ if (bootverbose) {
+ if (rc == DPAA2_CMD_STAT_UNKNOWN_OBJ)
+ device_printf(rcdev, "%s: skip "
+ "unsupported DPAA2 object: idx=%u\n",
+ __func__, i);
+ else
+ device_printf(rcdev, "%s: failed to get "
+ "information about DPAA2 object: "
+ "idx=%u, error=%d\n", __func__, i, rc);
+ }
+ continue;
+ }
+ if (obj.type == DPAA2_DEV_MCP)
+ continue; /* Already added. */
+
+ dpaa2_rc_add_managed_child(sc, cmd, &obj);
+ }
+ /* Probe and attach the managed devices. */
+ bus_generic_probe(rcdev);
+ rc = bus_generic_attach(rcdev);
+ if (rc) {
+ DPAA2_CMD_RC_CLOSE(rcdev, child, cmd);
+ dpaa2_mcp_free_command(cmd);
+ return (rc);
+ }
+
+ /* Add other devices to the resource container. */
+ for (uint32_t i = 0; i < obj_count; i++) {
+ rc = DPAA2_CMD_RC_GET_OBJ(rcdev, child, cmd, i, &obj);
+ if (rc == DPAA2_CMD_STAT_UNKNOWN_OBJ) {
+ if (bootverbose)
+ device_printf(rcdev, "%s: skip unsupported DPAA2 "
+ "object: idx=%u\n", __func__, i);
+ continue;
+ } else if (rc) {
+ device_printf(rcdev, "%s: failed to get object: "
+ "idx=%u, error=%d\n", __func__, i, rc);
+ continue;
+ }
+ dpaa2_rc_add_child(sc, cmd, &obj);
+ }
+
+ DPAA2_CMD_RC_CLOSE(rcdev, child, cmd);
+ dpaa2_mcp_free_command(cmd);
+
+ /* Probe and attach the rest of devices. */
+ bus_generic_probe(rcdev);
+ return (bus_generic_attach(rcdev));
+}
+
+/**
+ * @brief Add a new DPAA2 device to the resource container bus.
+ */
+static int
+dpaa2_rc_add_child(struct dpaa2_rc_softc *sc, struct dpaa2_cmd *cmd,
+ struct dpaa2_obj *obj)
+{
+ device_t rcdev, dev;
+ struct dpaa2_devinfo *rcinfo;
+ struct dpaa2_devinfo *dinfo;
+ struct resource_spec *res_spec;
+ const char *devclass;
+ int dpio_n = 0; /* to limit DPIOs by # of CPUs */
+ int dpcon_n = 0; /* to limit DPCONs by # of CPUs */
+ int rid, error;
+
+ rcdev = sc->dev;
+ rcinfo = device_get_ivars(rcdev);
+
+ switch (obj->type) {
+ case DPAA2_DEV_NI:
+ devclass = "dpaa2_ni";
+ res_spec = dpaa2_ni_spec;
+ break;
+ default:
+ return (ENXIO);
+ }
+
+ /* Add a device for the DPAA2 object. */
+ dev = device_add_child(rcdev, devclass, -1);
+ if (dev == NULL) {
+ device_printf(rcdev, "%s: failed to add a device for DPAA2 "
+ "object: type=%s, id=%u\n", __func__, dpaa2_ttos(obj->type),
+ obj->id);
+ return (ENXIO);
+ }
+
+	/* Allocate devinfo for a child (M_WAITOK cannot return NULL). */
+	dinfo = malloc(sizeof(struct dpaa2_devinfo), M_DPAA2_RC,
+	    M_WAITOK | M_ZERO);
+ device_set_ivars(dev, dinfo);
+
+ dinfo->pdev = rcdev;
+ dinfo->dev = dev;
+ dinfo->id = obj->id;
+ dinfo->dtype = obj->type;
+ dinfo->portal = NULL;
+ /* Children share their parent container's ICID and portal ID. */
+ dinfo->icid = rcinfo->icid;
+ dinfo->portal_id = rcinfo->portal_id;
+ /* MSI configuration */
+ dinfo->msi.msi_msgnum = obj->irq_count;
+ dinfo->msi.msi_alloc = 0;
+ dinfo->msi.msi_handlers = 0;
+
+ /* Initialize a resource list for the child. */
+ resource_list_init(&dinfo->resources);
+
+ /* Add DPAA2-specific resources to the resource list. */
+ for (; res_spec && res_spec->type != -1; res_spec++) {
+ if (res_spec->type < DPAA2_DEV_MC)
+ continue; /* Skip non-DPAA2 resource. */
+ rid = res_spec->rid;
+
+ /* Limit DPIOs and DPCONs by number of CPUs. */
+ if (res_spec->type == DPAA2_DEV_IO && dpio_n >= mp_ncpus) {
+ dpio_n++;
+ continue;
+ }
+ if (res_spec->type == DPAA2_DEV_CON && dpcon_n >= mp_ncpus) {
+ dpcon_n++;
+ continue;
+ }
+
+ error = dpaa2_rc_add_res(rcdev, dev, res_spec->type, &rid,
+ res_spec->flags);
+ if (error)
+ device_printf(rcdev, "%s: dpaa2_rc_add_res() failed: "
+ "error=%d\n", __func__, error);
+
+ if (res_spec->type == DPAA2_DEV_IO)
+ dpio_n++;
+ if (res_spec->type == DPAA2_DEV_CON)
+ dpcon_n++;
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Add a new managed DPAA2 device to the resource container bus.
+ *
+ * There are DPAA2 objects (DPIO, DPBP) which have their own drivers and can be
+ * allocated as resources or associated with other DPAA2 objects. This
+ * function is supposed to discover such managed objects in the resource
+ * container and add them as children to perform a proper initialization.
+ *
+ * NOTE: It must be called together with bus_generic_probe() and
+ * bus_generic_attach() before dpaa2_rc_add_child().
+ */
+static int
+dpaa2_rc_add_managed_child(struct dpaa2_rc_softc *sc, struct dpaa2_cmd *cmd,
+ struct dpaa2_obj *obj)
+{
+ device_t rcdev, dev, child;
+ struct dpaa2_devinfo *rcinfo, *dinfo;
+ struct dpaa2_rc_obj_region reg;
+ struct resource_spec *res_spec;
+ const char *devclass;
+ uint64_t start, end, count;
+ uint32_t flags = 0;
+ int rid, error;
+
+ rcdev = sc->dev;
+ child = sc->dev;
+ rcinfo = device_get_ivars(rcdev);
+
+ switch (obj->type) {
+ case DPAA2_DEV_IO:
+ devclass = "dpaa2_io";
+ res_spec = dpaa2_io_spec;
+ flags = DPAA2_MC_DEV_ALLOCATABLE | DPAA2_MC_DEV_SHAREABLE;
+ break;
+ case DPAA2_DEV_BP:
+ devclass = "dpaa2_bp";
+ res_spec = dpaa2_bp_spec;
+ flags = DPAA2_MC_DEV_ALLOCATABLE;
+ break;
+ case DPAA2_DEV_CON:
+ devclass = "dpaa2_con";
+ res_spec = dpaa2_con_spec;
+ flags = DPAA2_MC_DEV_ALLOCATABLE;
+ break;
+ case DPAA2_DEV_MAC:
+ devclass = "dpaa2_mac";
+ res_spec = dpaa2_mac_spec;
+ flags = DPAA2_MC_DEV_ASSOCIATED;
+ break;
+ case DPAA2_DEV_MCP:
+ devclass = "dpaa2_mcp";
+ res_spec = NULL;
+ flags = DPAA2_MC_DEV_ALLOCATABLE | DPAA2_MC_DEV_SHAREABLE;
+ break;
+ default:
+ /* Only managed devices above are supported. */
+ return (EINVAL);
+ }
+
+ /* Add a device for the DPAA2 object. */
+ dev = device_add_child(rcdev, devclass, -1);
+ if (dev == NULL) {
+ device_printf(rcdev, "%s: failed to add a device for DPAA2 "
+ "object: type=%s, id=%u\n", __func__, dpaa2_ttos(obj->type),
+ obj->id);
+ return (ENXIO);
+ }
+
+	/* Allocate devinfo for the child (M_WAITOK cannot return NULL). */
+	dinfo = malloc(sizeof(struct dpaa2_devinfo), M_DPAA2_RC,
+	    M_WAITOK | M_ZERO);
+ device_set_ivars(dev, dinfo);
+
+ dinfo->pdev = rcdev;
+ dinfo->dev = dev;
+ dinfo->id = obj->id;
+ dinfo->dtype = obj->type;
+ dinfo->portal = NULL;
+ /* Children share their parent container's ICID and portal ID. */
+ dinfo->icid = rcinfo->icid;
+ dinfo->portal_id = rcinfo->portal_id;
+ /* MSI configuration */
+ dinfo->msi.msi_msgnum = obj->irq_count;
+ dinfo->msi.msi_alloc = 0;
+ dinfo->msi.msi_handlers = 0;
+
+ /* Initialize a resource list for the child. */
+ resource_list_init(&dinfo->resources);
+
+ /* Add memory regions to the resource list. */
+ for (uint8_t i = 0; i < obj->reg_count; i++) {
+ error = DPAA2_CMD_RC_GET_OBJ_REGION(rcdev, child, cmd, obj->id,
+ i, obj->type, &reg);
+ if (error) {
+ device_printf(rcdev, "%s: failed to obtain memory "
+ "region for type=%s, id=%u, reg_idx=%u: error=%d\n",
+ __func__, dpaa2_ttos(obj->type), obj->id, i, error);
+ continue;
+ }
+ count = reg.size;
+ start = reg.base_paddr + reg.base_offset;
+ end = reg.base_paddr + reg.base_offset + reg.size - 1;
+
+ resource_list_add(&dinfo->resources, SYS_RES_MEMORY, i, start,
+ end, count);
+ }
+
+ /* Add DPAA2-specific resources to the resource list. */
+ for (; res_spec && res_spec->type != -1; res_spec++) {
+ if (res_spec->type < DPAA2_DEV_MC)
+ continue; /* Skip non-DPAA2 resource. */
+ rid = res_spec->rid;
+
+ error = dpaa2_rc_add_res(rcdev, dev, res_spec->type, &rid,
+ res_spec->flags);
+ if (error)
+ device_printf(rcdev, "%s: dpaa2_rc_add_res() failed: "
+ "error=%d\n", __func__, error);
+ }
+
+ /* Inform MC about a new managed device. */
+ error = DPAA2_MC_MANAGE_DEV(rcdev, dev, flags);
+ if (error) {
+ device_printf(rcdev, "%s: failed to add a managed DPAA2 device: "
+ "type=%s, id=%u, error=%d\n", __func__,
+ dpaa2_ttos(obj->type), obj->id, error);
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Configure given IRQ using MC command interface.
+ */
+static int
+dpaa2_rc_configure_irq(device_t rcdev, device_t child, int rid, uint64_t addr,
+ uint32_t data)
+{
+ struct dpaa2_devinfo *rcinfo;
+ struct dpaa2_devinfo *dinfo;
+ struct dpaa2_cmd *cmd;
+ uint16_t rc_token;
+ int rc = EINVAL;
+
+ if (device_get_parent(child) == rcdev && rid >= 1) {
+ rcinfo = device_get_ivars(rcdev);
+ dinfo = device_get_ivars(child);
+
+ /* Allocate a command to send to MC hardware. */
+ rc = dpaa2_mcp_init_command(&cmd, DPAA2_CMD_DEF);
+ if (rc) {
+ device_printf(rcdev, "%s: failed to allocate dpaa2_cmd: "
+ "error=%d\n", __func__, rc);
+ return (ENODEV);
+ }
+
+ /* Open resource container. */
+ rc = DPAA2_CMD_RC_OPEN(rcdev, child, cmd, rcinfo->id, &rc_token);
+ if (rc) {
+ dpaa2_mcp_free_command(cmd);
+ device_printf(rcdev, "%s: failed to open DPRC: "
+ "error=%d\n", __func__, rc);
+ return (ENODEV);
+ }
+ /* Set MSI address and value. */
+ rc = DPAA2_CMD_RC_SET_OBJ_IRQ(rcdev, child, cmd, rid - 1, addr,
+ data, rid, dinfo->id, dinfo->dtype);
+ if (rc) {
+ dpaa2_mcp_free_command(cmd);
+ device_printf(rcdev, "%s: failed to setup IRQ: "
+ "rid=%d, addr=%jx, data=%x, error=%d\n", __func__,
+ rid, addr, data, rc);
+ return (ENODEV);
+ }
+ /* Close resource container. */
+ rc = DPAA2_CMD_RC_CLOSE(rcdev, child, cmd);
+ if (rc) {
+ dpaa2_mcp_free_command(cmd);
+ device_printf(rcdev, "%s: failed to close DPRC: "
+ "error=%d\n", __func__, rc);
+ return (ENODEV);
+ }
+
+ dpaa2_mcp_free_command(cmd);
+ rc = 0;
+ }
+
+ return (rc);
+}
+
+/**
+ * @brief General implementation of the MC command to enable IRQ.
+ */
+static int
+dpaa2_rc_enable_irq(struct dpaa2_mcp *mcp, struct dpaa2_cmd *cmd,
+ uint8_t irq_idx, bool enable, uint16_t cmdid)
+{
+ struct __packed enable_irq_args {
+ uint8_t enable;
+ uint8_t _reserved1;
+ uint16_t _reserved2;
+ uint8_t irq_idx;
+ uint8_t _reserved3;
+ uint16_t _reserved4;
+ uint64_t _reserved5[6];
+ } *args;
+
+ if (!mcp || !cmd)
+ return (DPAA2_CMD_STAT_ERR);
+
+ args = (struct enable_irq_args *) &cmd->params[0];
+ args->irq_idx = irq_idx;
+	args->enable = enable ? 1u : 0u;
+
+ return (dpaa2_rc_exec_cmd(mcp, cmd, cmdid));
+}
+
+/**
+ * @brief Sends a command to MC and waits for response.
+ */
+static int
+dpaa2_rc_exec_cmd(struct dpaa2_mcp *mcp, struct dpaa2_cmd *cmd, uint16_t cmdid)
+{
+ struct dpaa2_cmd_header *hdr;
+ uint16_t flags;
+ int error;
+
+ if (!mcp || !cmd)
+ return (DPAA2_CMD_STAT_ERR);
+
+ /* Prepare a command for the MC hardware. */
+ hdr = (struct dpaa2_cmd_header *) &cmd->header;
+ hdr->cmdid = cmdid;
+ hdr->status = DPAA2_CMD_STAT_READY;
+
+ DPAA2_MCP_LOCK(mcp, &flags);
+ if (flags & DPAA2_PORTAL_DESTROYED) {
+ /* Terminate operation if portal is destroyed. */
+ DPAA2_MCP_UNLOCK(mcp);
+ return (DPAA2_CMD_STAT_INVALID_STATE);
+ }
+
+ /* Send a command to MC and wait for the result. */
+ dpaa2_rc_send_cmd(mcp, cmd);
+ error = dpaa2_rc_wait_for_cmd(mcp, cmd);
+ if (error) {
+ DPAA2_MCP_UNLOCK(mcp);
+ return (DPAA2_CMD_STAT_ERR);
+ }
+ if (hdr->status != DPAA2_CMD_STAT_OK) {
+ DPAA2_MCP_UNLOCK(mcp);
+ return (int)(hdr->status);
+ }
+
+ DPAA2_MCP_UNLOCK(mcp);
+
+ return (DPAA2_CMD_STAT_OK);
+}
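+
+/*
+ * Typical usage from the DPAA2_CMD_* implementations below (a sketch only;
+ * "CMDID_FOO" stands for one of the real command IDs defined elsewhere in
+ * this file):
+ *
+ *	struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child);
+ *
+ *	if (portal == NULL)
+ *		return (DPAA2_CMD_STAT_ERR);
+ *	dpaa2_rc_reset_cmd_params(cmd);
+ *	return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_FOO));
+ */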
+
+/**
+ * @brief Writes a command to the MC command portal.
+ */
+static int
+dpaa2_rc_send_cmd(struct dpaa2_mcp *mcp, struct dpaa2_cmd *cmd)
+{
+ /* Write command parameters. */
+ for (uint32_t i = 1; i <= DPAA2_CMD_PARAMS_N; i++)
+ bus_write_8(mcp->map, sizeof(uint64_t) * i, cmd->params[i-1]);
+
+ bus_barrier(mcp->map, 0, sizeof(struct dpaa2_cmd),
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
+
+ /* Write command header to trigger execution. */
+ bus_write_8(mcp->map, 0, cmd->header);
+
+ return (0);
+}
+
+/**
+ * @brief Polls the MC command portal in order to receive a result of the
+ * command execution.
+ */
+static int
+dpaa2_rc_wait_for_cmd(struct dpaa2_mcp *mcp, struct dpaa2_cmd *cmd)
+{
+ struct dpaa2_cmd_header *hdr;
+ uint64_t val;
+ uint32_t i;
+
+ /* Wait for a command execution result from the MC hardware. */
+ for (i = 1; i <= CMD_SPIN_ATTEMPTS; i++) {
+ val = bus_read_8(mcp->map, 0);
+ hdr = (struct dpaa2_cmd_header *) &val;
+ if (hdr->status != DPAA2_CMD_STAT_READY) {
+ break;
+ }
+ DELAY(CMD_SPIN_TIMEOUT);
+ }
+
+ if (i > CMD_SPIN_ATTEMPTS) {
+ /* Return an error on expired timeout. */
+ return (DPAA2_CMD_STAT_TIMEOUT);
+ } else {
+ /* Read command response. */
+ cmd->header = val;
+ for (i = 1; i <= DPAA2_CMD_PARAMS_N; i++) {
+ cmd->params[i-1] =
+ bus_read_8(mcp->map, i * sizeof(uint64_t));
+ }
+ }
+
+ return (DPAA2_CMD_STAT_OK);
+}
+
+/**
+ * @brief Reserve a DPAA2-specific device of the given devtype for the child.
+ */
+static int
+dpaa2_rc_add_res(device_t rcdev, device_t child, enum dpaa2_dev_type devtype,
+ int *rid, int flags)
+{
+ device_t dpaa2_dev;
+ struct dpaa2_devinfo *dinfo = device_get_ivars(child);
+ struct resource *res;
+ bool shared = false;
+ int error;
+
+ /* Request a free DPAA2 device of the given type from MC. */
+ error = DPAA2_MC_GET_FREE_DEV(rcdev, &dpaa2_dev, devtype);
+ if (error && !(flags & RF_SHAREABLE)) {
+ device_printf(rcdev, "%s: failed to obtain a free %s (rid=%d) "
+ "for: %s (id=%u)\n", __func__, dpaa2_ttos(devtype), *rid,
+ dpaa2_ttos(dinfo->dtype), dinfo->id);
+ return (error);
+ }
+
+ /* Request a shared DPAA2 device of the given type from MC. */
+ if (error) {
+ error = DPAA2_MC_GET_SHARED_DEV(rcdev, &dpaa2_dev, devtype);
+ if (error) {
+ device_printf(rcdev, "%s: failed to obtain a shared "
+ "%s (rid=%d) for: %s (id=%u)\n", __func__,
+ dpaa2_ttos(devtype), *rid, dpaa2_ttos(dinfo->dtype),
+ dinfo->id);
+ return (error);
+ }
+ shared = true;
+ }
+
+ /* Add DPAA2 device to the resource list of the child device. */
+ resource_list_add(&dinfo->resources, devtype, *rid,
+ (rman_res_t) dpaa2_dev, (rman_res_t) dpaa2_dev, 1);
+
+ /* Reserve a newly added DPAA2 resource. */
+ res = resource_list_reserve(&dinfo->resources, rcdev, child, devtype,
+ rid, (rman_res_t) dpaa2_dev, (rman_res_t) dpaa2_dev, 1,
+ flags & ~RF_ACTIVE);
+ if (!res) {
+ device_printf(rcdev, "%s: failed to reserve %s (rid=%d) for: %s "
+ "(id=%u)\n", __func__, dpaa2_ttos(devtype), *rid,
+ dpaa2_ttos(dinfo->dtype), dinfo->id);
+ return (EBUSY);
+ }
+
+ /* Reserve a shared DPAA2 device of the given type. */
+ if (shared) {
+ error = DPAA2_MC_RESERVE_DEV(rcdev, dpaa2_dev, devtype);
+ if (error) {
+ device_printf(rcdev, "%s: failed to reserve a shared "
+ "%s (rid=%d) for: %s (id=%u)\n", __func__,
+ dpaa2_ttos(devtype), *rid, dpaa2_ttos(dinfo->dtype),
+ dinfo->id);
+ return (error);
+ }
+ }
+
+ return (0);
+}
+
+static int
+dpaa2_rc_print_type(struct resource_list *rl, enum dpaa2_dev_type type)
+{
+ struct dpaa2_devinfo *dinfo;
+ struct resource_list_entry *rle;
+ uint32_t prev_id;
+ int printed = 0, series = 0;
+ int retval = 0;
+
+ STAILQ_FOREACH(rle, rl, link) {
+ if (rle->type == type) {
+ dinfo = device_get_ivars((device_t) rle->start);
+
+ if (printed == 0) {
+ retval += printf(" %s (id=",
+ dpaa2_ttos(dinfo->dtype));
+ } else {
+ if (dinfo->id == prev_id + 1) {
+ if (series == 0) {
+ series = 1;
+ retval += printf("-");
+ }
+ } else {
+ if (series == 1) {
+ retval += printf("%u", prev_id);
+ series = 0;
+ }
+ retval += printf(",");
+ }
+ }
+ printed++;
+
+ if (series == 0)
+ retval += printf("%u", dinfo->id);
+ prev_id = dinfo->id;
+ }
+ }
+ if (printed) {
+ if (series == 1)
+ retval += printf("%u", prev_id);
+ retval += printf(")");
+ }
+
+ return (retval);
+}
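+
+/*
+ * For example, four objects of the same type with IDs 0, 1, 2 and 5 are
+ * printed by the routine above as " <type> (id=0-2,5)".
+ */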
+
+static int
+dpaa2_rc_reset_cmd_params(struct dpaa2_cmd *cmd)
+{
+ if (cmd != NULL) {
+ memset(cmd->params, 0, sizeof(cmd->params[0]) *
+ DPAA2_CMD_PARAMS_N);
+ }
+ return (0);
+}
+
+static struct dpaa2_mcp *
+dpaa2_rc_select_portal(device_t dev, device_t child)
+{
+ struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
+ struct dpaa2_devinfo *cinfo = device_get_ivars(child);
+
+ if (cinfo == NULL || dinfo == NULL || dinfo->dtype != DPAA2_DEV_RC)
+ return (NULL);
+ return (cinfo->portal != NULL ? cinfo->portal : dinfo->portal);
+}
+
+static device_method_t dpaa2_rc_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, dpaa2_rc_probe),
+ DEVMETHOD(device_attach, dpaa2_rc_attach),
+ DEVMETHOD(device_detach, dpaa2_rc_detach),
+
+ /* Bus interface */
+ DEVMETHOD(bus_get_resource_list, dpaa2_rc_get_resource_list),
+ DEVMETHOD(bus_delete_resource, dpaa2_rc_delete_resource),
+ DEVMETHOD(bus_alloc_resource, dpaa2_rc_alloc_resource),
+ DEVMETHOD(bus_release_resource, dpaa2_rc_release_resource),
+ DEVMETHOD(bus_child_deleted, dpaa2_rc_child_deleted),
+ DEVMETHOD(bus_child_detached, dpaa2_rc_child_detached),
+ DEVMETHOD(bus_setup_intr, dpaa2_rc_setup_intr),
+ DEVMETHOD(bus_teardown_intr, dpaa2_rc_teardown_intr),
+ DEVMETHOD(bus_print_child, dpaa2_rc_print_child),
+ DEVMETHOD(bus_add_child, device_add_child_ordered),
+ DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
+ DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
+ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
+ DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
+ DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
+
+ /* Pseudo-PCI interface */
+ DEVMETHOD(pci_alloc_msi, dpaa2_rc_alloc_msi),
+ DEVMETHOD(pci_release_msi, dpaa2_rc_release_msi),
+ DEVMETHOD(pci_msi_count, dpaa2_rc_msi_count),
+ DEVMETHOD(pci_get_id, dpaa2_rc_get_id),
+
+ /* DPAA2 MC command interface */
+ DEVMETHOD(dpaa2_cmd_mng_get_version, dpaa2_rc_mng_get_version),
+ DEVMETHOD(dpaa2_cmd_mng_get_soc_version, dpaa2_rc_mng_get_soc_version),
+ DEVMETHOD(dpaa2_cmd_mng_get_container_id, dpaa2_rc_mng_get_container_id),
+ /* DPRC commands */
+ DEVMETHOD(dpaa2_cmd_rc_open, dpaa2_rc_open),
+ DEVMETHOD(dpaa2_cmd_rc_close, dpaa2_rc_close),
+ DEVMETHOD(dpaa2_cmd_rc_get_obj_count, dpaa2_rc_get_obj_count),
+ DEVMETHOD(dpaa2_cmd_rc_get_obj, dpaa2_rc_get_obj),
+ DEVMETHOD(dpaa2_cmd_rc_get_obj_descriptor, dpaa2_rc_get_obj_descriptor),
+ DEVMETHOD(dpaa2_cmd_rc_get_attributes, dpaa2_rc_get_attributes),
+ DEVMETHOD(dpaa2_cmd_rc_get_obj_region, dpaa2_rc_get_obj_region),
+ DEVMETHOD(dpaa2_cmd_rc_get_api_version, dpaa2_rc_get_api_version),
+ DEVMETHOD(dpaa2_cmd_rc_set_irq_enable, dpaa2_rc_set_irq_enable),
+ DEVMETHOD(dpaa2_cmd_rc_set_obj_irq, dpaa2_rc_set_obj_irq),
+ DEVMETHOD(dpaa2_cmd_rc_get_conn, dpaa2_rc_get_conn),
+ /* DPNI commands */
+ DEVMETHOD(dpaa2_cmd_ni_open, dpaa2_rc_ni_open),
+ DEVMETHOD(dpaa2_cmd_ni_close, dpaa2_rc_ni_close),
+ DEVMETHOD(dpaa2_cmd_ni_enable, dpaa2_rc_ni_enable),
+ DEVMETHOD(dpaa2_cmd_ni_disable, dpaa2_rc_ni_disable),
+ DEVMETHOD(dpaa2_cmd_ni_get_api_version, dpaa2_rc_ni_get_api_version),
+ DEVMETHOD(dpaa2_cmd_ni_reset, dpaa2_rc_ni_reset),
+ DEVMETHOD(dpaa2_cmd_ni_get_attributes, dpaa2_rc_ni_get_attributes),
+ DEVMETHOD(dpaa2_cmd_ni_set_buf_layout, dpaa2_rc_ni_set_buf_layout),
+ DEVMETHOD(dpaa2_cmd_ni_get_tx_data_off, dpaa2_rc_ni_get_tx_data_offset),
+ DEVMETHOD(dpaa2_cmd_ni_get_port_mac_addr, dpaa2_rc_ni_get_port_mac_addr),
+ DEVMETHOD(dpaa2_cmd_ni_set_prim_mac_addr, dpaa2_rc_ni_set_prim_mac_addr),
+ DEVMETHOD(dpaa2_cmd_ni_get_prim_mac_addr, dpaa2_rc_ni_get_prim_mac_addr),
+ DEVMETHOD(dpaa2_cmd_ni_set_link_cfg, dpaa2_rc_ni_set_link_cfg),
+ DEVMETHOD(dpaa2_cmd_ni_get_link_cfg, dpaa2_rc_ni_get_link_cfg),
+ DEVMETHOD(dpaa2_cmd_ni_get_link_state, dpaa2_rc_ni_get_link_state),
+ DEVMETHOD(dpaa2_cmd_ni_set_qos_table, dpaa2_rc_ni_set_qos_table),
+ DEVMETHOD(dpaa2_cmd_ni_clear_qos_table, dpaa2_rc_ni_clear_qos_table),
+ DEVMETHOD(dpaa2_cmd_ni_set_pools, dpaa2_rc_ni_set_pools),
+ DEVMETHOD(dpaa2_cmd_ni_set_err_behavior,dpaa2_rc_ni_set_err_behavior),
+ DEVMETHOD(dpaa2_cmd_ni_get_queue, dpaa2_rc_ni_get_queue),
+ DEVMETHOD(dpaa2_cmd_ni_set_queue, dpaa2_rc_ni_set_queue),
+ DEVMETHOD(dpaa2_cmd_ni_get_qdid, dpaa2_rc_ni_get_qdid),
+ DEVMETHOD(dpaa2_cmd_ni_add_mac_addr, dpaa2_rc_ni_add_mac_addr),
+ DEVMETHOD(dpaa2_cmd_ni_remove_mac_addr, dpaa2_rc_ni_remove_mac_addr),
+ DEVMETHOD(dpaa2_cmd_ni_clear_mac_filters, dpaa2_rc_ni_clear_mac_filters),
+ DEVMETHOD(dpaa2_cmd_ni_set_mfl, dpaa2_rc_ni_set_mfl),
+ DEVMETHOD(dpaa2_cmd_ni_set_offload, dpaa2_rc_ni_set_offload),
+ DEVMETHOD(dpaa2_cmd_ni_set_irq_mask, dpaa2_rc_ni_set_irq_mask),
+ DEVMETHOD(dpaa2_cmd_ni_set_irq_enable, dpaa2_rc_ni_set_irq_enable),
+ DEVMETHOD(dpaa2_cmd_ni_get_irq_status, dpaa2_rc_ni_get_irq_status),
+ DEVMETHOD(dpaa2_cmd_ni_set_uni_promisc, dpaa2_rc_ni_set_uni_promisc),
+ DEVMETHOD(dpaa2_cmd_ni_set_multi_promisc, dpaa2_rc_ni_set_multi_promisc),
+ DEVMETHOD(dpaa2_cmd_ni_get_statistics, dpaa2_rc_ni_get_statistics),
+ DEVMETHOD(dpaa2_cmd_ni_set_rx_tc_dist, dpaa2_rc_ni_set_rx_tc_dist),
+ /* DPIO commands */
+ DEVMETHOD(dpaa2_cmd_io_open, dpaa2_rc_io_open),
+ DEVMETHOD(dpaa2_cmd_io_close, dpaa2_rc_io_close),
+ DEVMETHOD(dpaa2_cmd_io_enable, dpaa2_rc_io_enable),
+ DEVMETHOD(dpaa2_cmd_io_disable, dpaa2_rc_io_disable),
+ DEVMETHOD(dpaa2_cmd_io_reset, dpaa2_rc_io_reset),
+ DEVMETHOD(dpaa2_cmd_io_get_attributes, dpaa2_rc_io_get_attributes),
+ DEVMETHOD(dpaa2_cmd_io_set_irq_mask, dpaa2_rc_io_set_irq_mask),
+ DEVMETHOD(dpaa2_cmd_io_get_irq_status, dpaa2_rc_io_get_irq_status),
+ DEVMETHOD(dpaa2_cmd_io_set_irq_enable, dpaa2_rc_io_set_irq_enable),
+ DEVMETHOD(dpaa2_cmd_io_add_static_dq_chan, dpaa2_rc_io_add_static_dq_chan),
+ /* DPBP commands */
+ DEVMETHOD(dpaa2_cmd_bp_open, dpaa2_rc_bp_open),
+ DEVMETHOD(dpaa2_cmd_bp_close, dpaa2_rc_bp_close),
+ DEVMETHOD(dpaa2_cmd_bp_enable, dpaa2_rc_bp_enable),
+ DEVMETHOD(dpaa2_cmd_bp_disable, dpaa2_rc_bp_disable),
+ DEVMETHOD(dpaa2_cmd_bp_reset, dpaa2_rc_bp_reset),
+ DEVMETHOD(dpaa2_cmd_bp_get_attributes, dpaa2_rc_bp_get_attributes),
+ /* DPMAC commands */
+ DEVMETHOD(dpaa2_cmd_mac_open, dpaa2_rc_mac_open),
+ DEVMETHOD(dpaa2_cmd_mac_close, dpaa2_rc_mac_close),
+ DEVMETHOD(dpaa2_cmd_mac_reset, dpaa2_rc_mac_reset),
+ DEVMETHOD(dpaa2_cmd_mac_mdio_read, dpaa2_rc_mac_mdio_read),
+ DEVMETHOD(dpaa2_cmd_mac_mdio_write, dpaa2_rc_mac_mdio_write),
+ DEVMETHOD(dpaa2_cmd_mac_get_addr, dpaa2_rc_mac_get_addr),
+ DEVMETHOD(dpaa2_cmd_mac_get_attributes, dpaa2_rc_mac_get_attributes),
+ DEVMETHOD(dpaa2_cmd_mac_set_link_state, dpaa2_rc_mac_set_link_state),
+ DEVMETHOD(dpaa2_cmd_mac_set_irq_mask, dpaa2_rc_mac_set_irq_mask),
+ DEVMETHOD(dpaa2_cmd_mac_set_irq_enable, dpaa2_rc_mac_set_irq_enable),
+ DEVMETHOD(dpaa2_cmd_mac_get_irq_status, dpaa2_rc_mac_get_irq_status),
+ /* DPCON commands */
+ DEVMETHOD(dpaa2_cmd_con_open, dpaa2_rc_con_open),
+ DEVMETHOD(dpaa2_cmd_con_close, dpaa2_rc_con_close),
+ DEVMETHOD(dpaa2_cmd_con_reset, dpaa2_rc_con_reset),
+ DEVMETHOD(dpaa2_cmd_con_enable, dpaa2_rc_con_enable),
+ DEVMETHOD(dpaa2_cmd_con_disable, dpaa2_rc_con_disable),
+ DEVMETHOD(dpaa2_cmd_con_get_attributes, dpaa2_rc_con_get_attributes),
+ DEVMETHOD(dpaa2_cmd_con_set_notif, dpaa2_rc_con_set_notif),
+ /* DPMCP commands */
+ DEVMETHOD(dpaa2_cmd_mcp_create, dpaa2_rc_mcp_create),
+ DEVMETHOD(dpaa2_cmd_mcp_destroy, dpaa2_rc_mcp_destroy),
+ DEVMETHOD(dpaa2_cmd_mcp_open, dpaa2_rc_mcp_open),
+ DEVMETHOD(dpaa2_cmd_mcp_close, dpaa2_rc_mcp_close),
+ DEVMETHOD(dpaa2_cmd_mcp_reset, dpaa2_rc_mcp_reset),
+
+ DEVMETHOD_END
+};
+
+static driver_t dpaa2_rc_driver = {
+ "dpaa2_rc",
+ dpaa2_rc_methods,
+ sizeof(struct dpaa2_rc_softc),
+};
+
+/* For root container */
+DRIVER_MODULE(dpaa2_rc, dpaa2_mc, dpaa2_rc_driver, 0, 0);
+/* For child containers */
+DRIVER_MODULE(dpaa2_rc, dpaa2_rc, dpaa2_rc_driver, 0, 0);
diff --git a/sys/dev/dpaa2/dpaa2_swp.c b/sys/dev/dpaa2/dpaa2_swp.c
new file mode 100644
index 000000000000..2ceed08159ef
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_swp.c
@@ -0,0 +1,1169 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause AND BSD-2-Clause
+ *
+ * Copyright © 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright © 2016-2019 NXP
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Original source file obtained from:
+ * drivers/soc/fsl/dpio/qbman-portal.c
+ *
+ * Commit: 4c86114194e644b6da9107d75910635c9e87179e
+ * Repository: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
+ */
+
+/*
+ * Copyright © 2021-2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * DPAA2 QBMan software portal.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/condvar.h>
+#include <sys/lock.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/atomic.h>
+
+#include "pcib_if.h"
+#include "pci_if.h"
+
+#include "dpaa2_swp.h"
+#include "dpaa2_mc.h"
+#include "dpaa2_bp.h"
+
+#define CMD_SPIN_TIMEOUT 100u /* us */
+#define CMD_SPIN_ATTEMPTS 2000u /* 200 ms max. */
+
+#define CMD_VERB_MASK 0x7Fu
+
+/* Shifts in the VERB byte of the enqueue command descriptor. */
+#define ENQ_CMD_ORP_ENABLE_SHIFT 2
+#define ENQ_CMD_IRQ_ON_DISPATCH_SHIFT 3
+#define ENQ_CMD_TARGET_TYPE_SHIFT 4
+#define ENQ_CMD_DCA_EN_SHIFT 7
+/* VERB byte options of the enqueue command descriptor. */
+#define ENQ_CMD_EMPTY 0u
+#define ENQ_CMD_RESPONSE_ALWAYS 1u
+#define ENQ_CMD_REJECTS_TO_FQ 2u
+
+#define ENQ_DESC_FD_OFFSET 32u
+
+#define ENQ_DCA_IDXMASK 0x0Fu
+#define ENQ_FLAG_DCA (1ull << 31)
+
+/* QBMan portal command codes. */
+#define CMDID_SWP_MC_ACQUIRE 0x30
+#define CMDID_SWP_BP_QUERY 0x32
+#define CMDID_SWP_WQCHAN_CONFIGURE 0x46
+
+/* QBMan portal command result codes. */
+#define QBMAN_CMD_RC_OK 0xF0
+
+/* SDQCR attribute codes */
+#define QB_SDQCR_FC_SHIFT 29u
+#define QB_SDQCR_FC_MASK 0x1u
+#define QB_SDQCR_DCT_SHIFT 24u
+#define QB_SDQCR_DCT_MASK 0x3u
+#define QB_SDQCR_TOK_SHIFT 16u
+#define QB_SDQCR_TOK_MASK 0xFFu
+#define QB_SDQCR_SRC_SHIFT 0u
+#define QB_SDQCR_SRC_MASK 0xFFFFu
+
+/* Shifts in the VERB byte of the volatile dequeue command. */
+#define QB_VDQCR_VERB_DCT0_SHIFT 0
+#define QB_VDQCR_VERB_DCT1_SHIFT 1
+#define QB_VDQCR_VERB_DT0_SHIFT 2
+#define QB_VDQCR_VERB_DT1_SHIFT 3
+#define QB_VDQCR_VERB_RLS_SHIFT 4
+#define QB_VDQCR_VERB_WAE_SHIFT 5
+#define QB_VDQCR_VERB_RAD_SHIFT 6
+
+/* Maximum timeout period for the DQRR interrupt. */
+#define DQRR_MAX_ITP 4096u
+#define DQRR_PI_MASK 0x0Fu
+
+/* Release Array Allocation register helpers. */
+#define RAR_IDX(rar) ((rar) & 0x7u)
+#define RAR_VB(rar) ((rar) & 0x80u)
+#define RAR_SUCCESS(rar) ((rar) & 0x100u)
+
+MALLOC_DEFINE(M_DPAA2_SWP, "dpaa2_swp", "DPAA2 QBMan Software Portal");
+
+enum qbman_sdqcr_dct {
+ qbman_sdqcr_dct_null = 0,
+ qbman_sdqcr_dct_prio_ics,
+ qbman_sdqcr_dct_active_ics,
+ qbman_sdqcr_dct_active
+};
+
+enum qbman_sdqcr_fc {
+ qbman_sdqcr_fc_one = 0,
+ qbman_sdqcr_fc_up_to_3 = 1
+};
+
+/* Routines to execute software portal commands. */
+static int dpaa2_swp_exec_mgmt_command(struct dpaa2_swp *,
+ struct dpaa2_swp_cmd *, struct dpaa2_swp_rsp *, uint8_t);
+static int dpaa2_swp_exec_br_command(struct dpaa2_swp *, struct dpaa2_swp_cmd *,
+ uint32_t);
+static int dpaa2_swp_exec_vdc_command_locked(struct dpaa2_swp *,
+ struct dpaa2_swp_cmd *);
+
+/* Management Commands helpers. */
+static int dpaa2_swp_send_mgmt_command(struct dpaa2_swp *,
+ struct dpaa2_swp_cmd *, uint8_t);
+static int dpaa2_swp_wait_for_mgmt_response(struct dpaa2_swp *,
+ struct dpaa2_swp_rsp *);
+
+/* Helper subroutines. */
+static int dpaa2_swp_cyc_diff(uint8_t, uint8_t, uint8_t);
+
+int
+dpaa2_swp_init_portal(struct dpaa2_swp **swp, struct dpaa2_swp_desc *desc,
+ uint16_t flags)
+{
+ struct dpaa2_swp *p;
+ uint32_t reg, mask_size, eqcr_pi; /* EQCR producer index */
+
+ if (!swp || !desc)
+ return (DPAA2_SWP_STAT_EINVAL);
+
+ p = malloc(sizeof(struct dpaa2_swp), M_DPAA2_SWP,
+ flags & DPAA2_SWP_NOWAIT_ALLOC
+ ? (M_NOWAIT | M_ZERO)
+ : (M_WAITOK | M_ZERO));
+ if (!p)
+ return (DPAA2_SWP_STAT_NO_MEMORY);
+
+ mtx_init(&p->lock, "swp_sleep_lock", NULL, MTX_DEF);
+
+ p->cfg.mem_backed = false;
+ p->cfg.writes_cinh = true;
+
+ p->desc = desc;
+ p->flags = flags;
+ p->mc.valid_bit = DPAA2_SWP_VALID_BIT;
+ p->mr.valid_bit = DPAA2_SWP_VALID_BIT;
+
+ /* FIXME: Memory-backed mode doesn't work now. Why? */
+ p->cena_res = desc->cena_res;
+ p->cena_map = desc->cena_map;
+ p->cinh_res = desc->cinh_res;
+ p->cinh_map = desc->cinh_map;
+
+ /* Static Dequeue Command Register configuration. */
+ p->sdq = 0;
+ p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
+ p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
+ p->sdq |= DPAA2_SWP_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
+
+ /* Volatile Dequeue Command configuration. */
+ p->vdq.valid_bit = DPAA2_SWP_VALID_BIT;
+
+ /* Dequeue Response Ring configuration */
+ p->dqrr.next_idx = 0;
+ p->dqrr.valid_bit = DPAA2_SWP_VALID_BIT;
+ if ((desc->swp_version & DPAA2_SWP_REV_MASK) < DPAA2_SWP_REV_4100) {
+ p->dqrr.ring_size = 4;
+ p->dqrr.reset_bug = 1;
+ } else {
+ p->dqrr.ring_size = 8;
+ p->dqrr.reset_bug = 0;
+ }
+
+ if ((desc->swp_version & DPAA2_SWP_REV_MASK) < DPAA2_SWP_REV_5000) {
+ reg = dpaa2_swp_set_cfg(
+ p->dqrr.ring_size, /* max. entries QMan writes to DQRR */
+ 1, /* writes enabled in the CINH memory only */
+ 0, /* EQCR_CI stashing threshold */
+ 3, /* RPM: RCR in array mode */
+ 2, /* DCM: Discrete consumption ack */
+ 2, /* EPM: EQCR in ring mode (FIFO) */
+		    1, /* mem stashing drop enable */
+ 1, /* mem stashing priority enable */
+ 1, /* mem stashing enable */
+ 1, /* dequeue stashing priority enable */
+		    0, /* dequeue stashing enable */
+ 0 /* EQCR_CI stashing priority enable */
+ );
+ reg &= ~(1 << DPAA2_SWP_CFG_CPBS_SHIFT); /* QMan-backed mode */
+ } else {
+ bus_set_region_4(p->cena_map, 0, 0,
+ rman_get_size(p->cena_res) / 4);
+
+		reg = dpaa2_swp_set_cfg(
+		    p->dqrr.ring_size, /* DQRR_MF: max. entries QMan writes to DQRR */
+		    1, /* WN: writes enabled in the CINH memory only */
+		    0, /* EST: EQCR_CI stashing is disabled */
+		    3, /* RPM: RCR in array mode */
+		    2, /* DCM: discrete consumption ack */
+		    2, /* EPM: EQCR in ring mode (FIFO) */
+		    1, /* SD: frame data, annotation and FQ context stashing drop enable */
+		    1, /* SP: frame data, annotation and FQ context stashing priority */
+		    1, /* SE: frame data, annotation and FQ context stashing enable */
+		    1, /* DP: DQRR entry stashing priority */
+		    0, /* DE: DQRR entry (or cacheable portal area) stashing enable */
+		    0  /* EP: EQCR_CI stashing priority */
+		);
+ /* TODO: Switch to memory-backed mode. */
+ reg &= ~(1 << DPAA2_SWP_CFG_CPBS_SHIFT); /* QMan-backed mode */
+ }
+ dpaa2_swp_write_reg(p, DPAA2_SWP_CINH_CFG, reg);
+ reg = dpaa2_swp_read_reg(p, DPAA2_SWP_CINH_CFG);
+ if (!reg) {
+ free(p, M_DPAA2_SWP);
+ return (DPAA2_SWP_STAT_PORTAL_DISABLED);
+ }
+
+ /*
+ * Static Dequeue Command Register needs to be initialized to 0 when no
+ * channels are being dequeued from or else the QMan HW will indicate an
+ * error. The values that were calculated above will be applied when
+ * dequeues from a specific channel are enabled.
+ */
+ dpaa2_swp_write_reg(p, DPAA2_SWP_CINH_SDQCR, 0);
+
+ p->eqcr.pi_ring_size = 8;
+ /* if ((desc->swp_version & DPAA2_SWP_REV_MASK) >= DPAA2_SWP_REV_5000) */
+ /* p->eqcr.pi_ring_size = 32; */
+
+ for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
+ p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
+
+ eqcr_pi = dpaa2_swp_read_reg(p, DPAA2_SWP_CINH_EQCR_PI);
+ p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
+ p->eqcr.pi_vb = eqcr_pi & DPAA2_SWP_VALID_BIT;
+ p->eqcr.ci = dpaa2_swp_read_reg(p, DPAA2_SWP_CINH_EQCR_CI)
+ & p->eqcr.pi_ci_mask;
+ p->eqcr.available = p->eqcr.pi_ring_size;
+
+ /* Initialize the portal with an IRQ threshold and timeout of 0us. */
+ dpaa2_swp_set_irq_coalescing(p, p->dqrr.ring_size - 1, 0);
+
+ *swp = p;
+
+ return (0);
+}
+
+void
+dpaa2_swp_free_portal(struct dpaa2_swp *swp)
+{
+ uint16_t flags;
+
+ KASSERT(swp != NULL, ("%s: swp is NULL", __func__));
+
+ DPAA2_SWP_LOCK(swp, &flags);
+ swp->flags |= DPAA2_SWP_DESTROYED;
+ DPAA2_SWP_UNLOCK(swp);
+
+ /* Let threads stop using this portal. */
+ DELAY(DPAA2_SWP_TIMEOUT);
+
+ mtx_destroy(&swp->lock);
+ free(swp, M_DPAA2_SWP);
+}
+
+uint32_t
+dpaa2_swp_set_cfg(uint8_t max_fill, uint8_t wn, uint8_t est, uint8_t rpm,
+ uint8_t dcm, uint8_t epm, int sd, int sp, int se, int dp, int de, int ep)
+{
+ return (
+ max_fill << DPAA2_SWP_CFG_DQRR_MF_SHIFT |
+ est << DPAA2_SWP_CFG_EST_SHIFT |
+ wn << DPAA2_SWP_CFG_WN_SHIFT |
+ rpm << DPAA2_SWP_CFG_RPM_SHIFT |
+ dcm << DPAA2_SWP_CFG_DCM_SHIFT |
+ epm << DPAA2_SWP_CFG_EPM_SHIFT |
+ sd << DPAA2_SWP_CFG_SD_SHIFT |
+ sp << DPAA2_SWP_CFG_SP_SHIFT |
+ se << DPAA2_SWP_CFG_SE_SHIFT |
+ dp << DPAA2_SWP_CFG_DP_SHIFT |
+ de << DPAA2_SWP_CFG_DE_SHIFT |
+ ep << DPAA2_SWP_CFG_EP_SHIFT
+ );
+}
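+
+/*
+ * A minimal sketch of how the attach path composes a portal configuration
+ * word (mirrors dpaa2_swp_init_portal() for an 8-entry DQRR; CPBS is
+ * cleared afterwards to stay in QMan-backed mode):
+ *
+ *	uint32_t cfg;
+ *
+ *	cfg = dpaa2_swp_set_cfg(8, 1, 0, 3, 2, 2, 1, 1, 1, 1, 0, 0);
+ *	cfg &= ~(1 << DPAA2_SWP_CFG_CPBS_SHIFT);
+ *	dpaa2_swp_write_reg(p, DPAA2_SWP_CINH_CFG, cfg);
+ */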
+
+/* Read/write registers of a software portal. */
+
+void
+dpaa2_swp_write_reg(struct dpaa2_swp *swp, uint32_t o, uint32_t v)
+{
+ bus_write_4(swp->cinh_map, o, v);
+}
+
+uint32_t
+dpaa2_swp_read_reg(struct dpaa2_swp *swp, uint32_t o)
+{
+ return (bus_read_4(swp->cinh_map, o));
+}
+
+/* Helper routines. */
+
+/**
+ * @brief Set enqueue descriptor without Order Point Record ID.
+ *
+ * ed: Enqueue descriptor.
+ * resp_always: Enqueue with response always (1); FD from a rejected enqueue
+ * will be returned on a FQ (0).
+ */
+void
+dpaa2_swp_set_ed_norp(struct dpaa2_eq_desc *ed, bool resp_always)
+{
+ ed->verb &= ~(1 << ENQ_CMD_ORP_ENABLE_SHIFT);
+ if (resp_always)
+ ed->verb |= ENQ_CMD_RESPONSE_ALWAYS;
+ else
+ ed->verb |= ENQ_CMD_REJECTS_TO_FQ;
+}
+
+/**
+ * @brief Set FQ of the enqueue descriptor.
+ */
+void
+dpaa2_swp_set_ed_fq(struct dpaa2_eq_desc *ed, uint32_t fqid)
+{
+ ed->verb &= ~(1 << ENQ_CMD_TARGET_TYPE_SHIFT);
+ ed->tgtid = fqid;
+}
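+
+/*
+ * A minimal transmit sketch ("txq_fqid" and "fd" are placeholders, not
+ * driver state): target a frame queue and enqueue a single frame with
+ * rejected frames returned to the FQ.
+ *
+ *	struct dpaa2_eq_desc ed = {0};
+ *	int error;
+ *
+ *	dpaa2_swp_set_ed_norp(&ed, false);
+ *	dpaa2_swp_set_ed_fq(&ed, txq_fqid);
+ *	error = dpaa2_swp_enq(swp, &ed, &fd);
+ */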
+
+/**
+ * @brief Enable interrupts for a software portal.
+ */
+void
+dpaa2_swp_set_intr_trigger(struct dpaa2_swp *swp, uint32_t mask)
+{
+ if (swp != NULL)
+ dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_IER, mask);
+}
+
+/**
+ * @brief Return the value in the SWP_IER register.
+ */
+uint32_t
+dpaa2_swp_get_intr_trigger(struct dpaa2_swp *swp)
+{
+ if (swp != NULL)
+		return (dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_IER));
+ return (0);
+}
+
+/**
+ * @brief Return the value in the SWP_ISR register.
+ */
+uint32_t
+dpaa2_swp_read_intr_status(struct dpaa2_swp *swp)
+{
+ if (swp != NULL)
+		return (dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_ISR));
+ return (0);
+}
+
+/**
+ * @brief Clear SWP_ISR register according to the given mask.
+ */
+void
+dpaa2_swp_clear_intr_status(struct dpaa2_swp *swp, uint32_t mask)
+{
+ if (swp != NULL)
+ dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_ISR, mask);
+}
+
+/**
+ * @brief Enable or disable push dequeue.
+ *
+ * swp: the software portal object
+ * chan_idx: the channel index (0 to 15)
+ * en: enable or disable push dequeue
+ */
+void
+dpaa2_swp_set_push_dequeue(struct dpaa2_swp *swp, uint8_t chan_idx, bool en)
+{
+ uint16_t dqsrc;
+
+ if (swp != NULL) {
+ if (chan_idx > 15u) {
+ device_printf(swp->desc->dpio_dev, "channel index "
+ "should be <= 15: chan_idx=%d\n", chan_idx);
+ return;
+ }
+
+ if (en)
+ swp->sdq |= 1 << chan_idx;
+ else
+ swp->sdq &= ~(1 << chan_idx);
+		/*
+		 * Read back the complete source map. If no channels are
+		 * enabled, the SDQCR must be 0 or else QMan will assert
+		 * errors.
+		 */
+ dqsrc = (swp->sdq >> DPAA2_SDQCR_SRC_SHIFT) &
+ DPAA2_SDQCR_SRC_MASK;
+ dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_SDQCR, dqsrc != 0
+ ? swp->sdq : 0);
+ }
+}
+
+/**
+ * @brief Set new IRQ coalescing values.
+ *
+ * swp: The software portal object.
+ * threshold: Threshold for DQRR interrupt generation. The DQRR interrupt
+ * asserts when the ring contains greater than "threshold" entries.
+ * holdoff: DQRR interrupt holdoff (timeout) period in us.
+ */
+int
+dpaa2_swp_set_irq_coalescing(struct dpaa2_swp *swp, uint32_t threshold,
+    uint32_t holdoff)
+{
+ uint32_t itp; /* Interrupt Timeout Period */
+
+ if (swp == NULL)
+ return (EINVAL);
+
+ /*
+ * Convert "holdoff" value from us to 256 QBMAN clock cycles
+ * increments. This depends on the QBMAN internal frequency.
+ */
+ itp = (holdoff * 1000u) / swp->desc->swp_cycles_ratio;
+ if (itp > DQRR_MAX_ITP)
+ itp = DQRR_MAX_ITP;
+ if (threshold >= swp->dqrr.ring_size)
+ threshold = swp->dqrr.ring_size - 1;
+
+ swp->dqrr.irq_threshold = threshold;
+ swp->dqrr.irq_itp = itp;
+
+ dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_DQRR_ITR, threshold);
+ dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_ITPR, itp);
+
+ return (0);
+}
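+
+/*
+ * Worked example (assuming a swp_cycles_ratio of 1000, i.e. one 256-cycle
+ * tick per microsecond; the real ratio comes from the DPIO descriptor):
+ *
+ *	dpaa2_swp_set_irq_coalescing(swp, swp->dqrr.ring_size - 1, 64);
+ *
+ * converts the 64 us holdoff to 64000 / 1000 = 64 timeout-period ticks and
+ * asserts the DQRR interrupt only when the ring is nearly full.
+ */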
+
+/*
+ * Software portal commands.
+ */
+
+/**
+ * @brief Configure the channel data availability notification (CDAN)
+ * in a particular WQ channel.
+ */
+int
+dpaa2_swp_conf_wq_channel(struct dpaa2_swp *swp, uint16_t chan_id,
+ uint8_t we_mask, bool cdan_en, uint64_t ctx)
+{
+ /* NOTE: 64 bytes command. */
+ struct __packed {
+ uint8_t verb;
+ uint8_t result; /* in response only! */
+ uint16_t chan_id;
+ uint8_t we;
+ uint8_t ctrl;
+ uint16_t _reserved2;
+ uint64_t ctx;
+ uint8_t _reserved3[48];
+ } cmd = {0};
+ struct __packed {
+ uint8_t verb;
+ uint8_t result;
+ uint16_t chan_id;
+ uint8_t _reserved[60];
+ } rsp;
+ int error;
+
+ if (swp == NULL)
+ return (EINVAL);
+
+ cmd.chan_id = chan_id;
+ cmd.we = we_mask;
+ cmd.ctrl = cdan_en ? 1u : 0u;
+ cmd.ctx = ctx;
+
+ error = dpaa2_swp_exec_mgmt_command(swp, (struct dpaa2_swp_cmd *) &cmd,
+ (struct dpaa2_swp_rsp *) &rsp, CMDID_SWP_WQCHAN_CONFIGURE);
+ if (error)
+ return (error);
+
+ if (rsp.result != QBMAN_CMD_RC_OK) {
+ device_printf(swp->desc->dpio_dev, "WQ channel configuration "
+ "error: channel_id=%d, result=0x%02x\n", chan_id,
+ rsp.result);
+ return (EIO);
+ }
+
+ return (0);
+}
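+
+/*
+ * Sketch of a CDAN setup (the write-enable mask bits are defined in
+ * dpaa2_swp.h; "we_mask" and "chan_softc" are placeholders): ask QBMan to
+ * raise a notification that echoes a driver cookie back via "ctx".
+ *
+ *	error = dpaa2_swp_conf_wq_channel(swp, chan_id, we_mask, true,
+ *	    (uint64_t)(uintptr_t) chan_softc);
+ */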
+
+/**
+ * @brief Query current configuration/state of the buffer pool.
+ */
+int
+dpaa2_swp_query_bp(struct dpaa2_swp *swp, uint16_t bpid,
+ struct dpaa2_bp_conf *conf)
+{
+ /* NOTE: 64 bytes command. */
+ struct __packed {
+ uint8_t verb;
+ uint8_t _reserved1;
+ uint16_t bpid;
+ uint8_t _reserved2[60];
+ } cmd = {0};
+ struct __packed {
+ uint8_t verb;
+ uint8_t result;
+ uint32_t _reserved1;
+ uint8_t bdi;
+ uint8_t state;
+ uint32_t fill;
+ /* TODO: Support the other fields as well. */
+ uint8_t _reserved2[52];
+ } rsp;
+ int error;
+
+ if (swp == NULL || conf == NULL)
+ return (EINVAL);
+
+ cmd.bpid = bpid;
+
+ error = dpaa2_swp_exec_mgmt_command(swp, (struct dpaa2_swp_cmd *) &cmd,
+ (struct dpaa2_swp_rsp *) &rsp, CMDID_SWP_BP_QUERY);
+ if (error)
+ return (error);
+
+ if (rsp.result != QBMAN_CMD_RC_OK) {
+ device_printf(swp->desc->dpio_dev, "BP query error: bpid=%d, "
+ "result=0x%02x\n", bpid, rsp.result);
+ return (EIO);
+ }
+
+ conf->bdi = rsp.bdi;
+ conf->state = rsp.state;
+ conf->free_bufn = rsp.fill;
+
+ return (0);
+}
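+
+/*
+ * Illustrative use ("bpid" and "paddrs" are the caller's): poll the pool
+ * fill level and seed more buffers when it runs low.
+ *
+ *	struct dpaa2_bp_conf conf;
+ *
+ *	if (dpaa2_swp_query_bp(swp, bpid, &conf) == 0 &&
+ *	    conf.free_bufn < DPAA2_SWP_BUFS_PER_CMD)
+ *		(void) dpaa2_swp_release_bufs(swp, bpid, paddrs,
+ *		    DPAA2_SWP_BUFS_PER_CMD);
+ */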
+
+int
+dpaa2_swp_release_bufs(struct dpaa2_swp *swp, uint16_t bpid, bus_addr_t *buf,
+ uint32_t buf_num)
+{
+ /* NOTE: 64 bytes command. */
+ struct __packed {
+ uint8_t verb;
+ uint8_t _reserved1;
+ uint16_t bpid;
+ uint32_t _reserved2;
+ uint64_t buf[DPAA2_SWP_BUFS_PER_CMD];
+ } cmd = {0};
+ int error;
+
+ if (swp == NULL || buf == NULL || buf_num == 0u ||
+ buf_num > DPAA2_SWP_BUFS_PER_CMD)
+ return (EINVAL);
+
+ for (uint32_t i = 0; i < buf_num; i++)
+ cmd.buf[i] = buf[i];
+ cmd.bpid = bpid;
+	cmd.verb |= 1 << 5; /* Mark the buffer release command as valid. */
+
+ error = dpaa2_swp_exec_br_command(swp, (struct dpaa2_swp_cmd *) &cmd,
+ buf_num);
+ if (error) {
+ device_printf(swp->desc->dpio_dev, "buffers release command "
+ "failed\n");
+ return (error);
+ }
+
+ return (0);
+}
+
+int
+dpaa2_swp_dqrr_next_locked(struct dpaa2_swp *swp, struct dpaa2_dq *dq,
+ uint32_t *idx)
+{
+	struct resource_map *map;
+	struct dpaa2_swp_rsp *rsp;
+	uint32_t verb, pi; /* producer index */
+	uint32_t offset;
+
+	/* Validate arguments before "swp" is dereferenced. */
+	if (swp == NULL || dq == NULL)
+		return (EINVAL);
+
+	map = swp->cinh_map;
+	rsp = (struct dpaa2_swp_rsp *) dq;
+	offset = swp->cfg.mem_backed
+	    ? DPAA2_SWP_CENA_DQRR_MEM(swp->dqrr.next_idx)
+	    : DPAA2_SWP_CENA_DQRR(swp->dqrr.next_idx);
+
+ /*
+ * Before using valid-bit to detect if something is there, we have to
+ * handle the case of the DQRR reset bug...
+ */
+ if (swp->dqrr.reset_bug) {
+ /*
+ * We pick up new entries by cache-inhibited producer index,
+ * which means that a non-coherent mapping would require us to
+ * invalidate and read *only* once that PI has indicated that
+ * there's an entry here. The first trip around the DQRR ring
+ * will be much less efficient than all subsequent trips around
+ * it...
+ */
+ pi = dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_DQPI) & DQRR_PI_MASK;
+
+ /* There are new entries if pi != next_idx */
+ if (pi == swp->dqrr.next_idx)
+ return (ENOENT);
+
+ /*
+ * If next_idx is/was the last ring index, and 'pi' is
+ * different, we can disable the workaround as all the ring
+ * entries have now been DMA'd to so valid-bit checking is
+ * repaired.
+ *
+ * NOTE: This logic needs to be based on next_idx (which
+ * increments one at a time), rather than on pi (which
+ * can burst and wrap-around between our snapshots of it).
+ */
+ if (swp->dqrr.next_idx == (swp->dqrr.ring_size - 1))
+ swp->dqrr.reset_bug = 0;
+ }
+
+ verb = bus_read_4(map, offset);
+ if ((verb & DPAA2_SWP_VALID_BIT) != swp->dqrr.valid_bit)
+ return (ENOENT);
+
+ /* Read dequeue response message. */
+ for (int i = 0; i < DPAA2_SWP_RSP_PARAMS_N; i++)
+ rsp->params[i] = bus_read_8(map, offset + i * sizeof(uint64_t));
+
+ /* Return index of the current entry (if requested). */
+ if (idx != NULL)
+ *idx = swp->dqrr.next_idx;
+
+ /*
+ * There's something there. Move "next_idx" attention to the next ring
+ * entry before returning what we found.
+ */
+ swp->dqrr.next_idx++;
+ swp->dqrr.next_idx &= swp->dqrr.ring_size - 1; /* wrap around */
+ if (swp->dqrr.next_idx == 0u)
+ swp->dqrr.valid_bit ^= DPAA2_SWP_VALID_BIT;
+
+ return (0);
+}
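+
+/*
+ * Illustrative consumer loop (locking elided; the DCAP write acknowledges
+ * consumption of the returned ring entry, which is how the DPIO interrupt
+ * handler is expected to drain the DQRR):
+ *
+ *	struct dpaa2_dq dq;
+ *	uint32_t idx;
+ *
+ *	while (dpaa2_swp_dqrr_next_locked(swp, &dq, &idx) == 0) {
+ *		... process "dq" ...
+ *		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_DCAP, idx);
+ *	}
+ */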
+
+int
+dpaa2_swp_pull(struct dpaa2_swp *swp, uint16_t chan_id, struct dpaa2_buf *buf,
+ uint32_t frames_n)
+{
+ /* NOTE: 64 bytes command. */
+ struct __packed {
+ uint8_t verb;
+ uint8_t numf;
+ uint8_t tok;
+ uint8_t _reserved;
+ uint32_t dq_src;
+ uint64_t rsp_addr;
+ uint64_t _reserved1[6];
+ } cmd = {0};
+ struct dpaa2_dq *msg;
+ uint16_t flags;
+ int i, error;
+
+ KASSERT(swp != NULL, ("%s: swp is NULL", __func__));
+ KASSERT(frames_n != 0u, ("%s: cannot pull zero frames", __func__));
+	KASSERT(frames_n <= 16u, ("%s: too many frames to pull", __func__));
+	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not a channel storage "
+	    "buffer", __func__));
+
+ cmd.numf = frames_n - 1;
+ cmd.tok = DPAA2_SWP_VDQCR_TOKEN;
+ cmd.dq_src = chan_id;
+ cmd.rsp_addr = (uint64_t) buf->store.paddr;
+
+ /* Dequeue command type */
+ cmd.verb &= ~(1 << QB_VDQCR_VERB_DCT0_SHIFT);
+ cmd.verb |= (1 << QB_VDQCR_VERB_DCT1_SHIFT);
+ /* Dequeue from a specific software portal channel (ID's in DQ_SRC). */
+ cmd.verb &= ~(1 << QB_VDQCR_VERB_DT0_SHIFT);
+ cmd.verb &= ~(1 << QB_VDQCR_VERB_DT1_SHIFT);
+ /* Write the response to this command into memory (at the RSP_ADDR). */
+ cmd.verb |= (1 << QB_VDQCR_VERB_RLS_SHIFT);
+ /* Response writes won't attempt to allocate into a cache. */
+ cmd.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
+ /* Allow the FQ to remain active in the portal after dequeue. */
+ cmd.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
+
+ DPAA2_SWP_LOCK(swp, &flags);
+ if (flags & DPAA2_SWP_DESTROYED) {
+ /* Terminate operation if portal is destroyed. */
+ DPAA2_SWP_UNLOCK(swp);
+ return (ENOENT);
+ }
+
+ error = dpaa2_swp_exec_vdc_command_locked(swp,
+ (struct dpaa2_swp_cmd *) &cmd);
+ if (error != 0) {
+ DPAA2_SWP_UNLOCK(swp);
+ return (error);
+ }
+
+	/* Sync the storage buffer before reading the VDQ response from QBMan. */
+ bus_dmamap_sync(buf->store.dmat, buf->store.dmap, BUS_DMASYNC_POSTREAD);
+
+ /* Read VDQ response from QBMan. */
+ msg = (struct dpaa2_dq *) buf->store.vaddr;
+ for (i = 1; i <= CMD_SPIN_ATTEMPTS; i++) {
+ if ((msg->fdr.desc.stat & DPAA2_DQ_STAT_VOLATILE) &&
+ (msg->fdr.desc.tok == DPAA2_SWP_VDQCR_TOKEN)) {
+ /* Reset token. */
+ msg->fdr.desc.tok = 0;
+ break;
+ }
+ DELAY(CMD_SPIN_TIMEOUT);
+ }
+ DPAA2_SWP_UNLOCK(swp);
+
+ /* Return an error on expired timeout. */
+ return (i > CMD_SPIN_ATTEMPTS ? ETIMEDOUT : 0);
+}
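+
+/*
+ * Sketch of a volatile dequeue ("chan" is a placeholder; its "store" must
+ * be a DMA-mapped DPAA2_BUF_STORE buffer large enough for the requested
+ * frames):
+ *
+ *	error = dpaa2_swp_pull(swp, chan_id, &chan->store, 16);
+ *	if (error == 0)
+ *		... walk the struct dpaa2_dq entries at chan->store.vaddr ...
+ */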
+
+/**
+ * @brief Issue a command to enqueue a frame using one enqueue descriptor.
+ *
+ * swp: Software portal used to send this command to.
+ * ed: Enqueue command descriptor.
+ * fd: Frame descriptor to enqueue.
+ */
+int
+dpaa2_swp_enq(struct dpaa2_swp *swp, struct dpaa2_eq_desc *ed,
+ struct dpaa2_fd *fd)
+{
+ uint32_t flags = 0;
+ int rc = dpaa2_swp_enq_mult(swp, ed, fd, &flags, 1);
+
+	/*
+	 * dpaa2_swp_enq_mult() returns the number of frames enqueued (0 when
+	 * the EQCR is full) or a positive errno value, so "rc >= 0" would
+	 * also match failures; only a completed single enqueue is a success.
+	 */
+	return (rc == 1 ? 0 : EBUSY);
+}
+
+/**
+ * @brief Issue a command to enqueue frames using one enqueue descriptor.
+ *
+ * swp: Software portal used to send this command to.
+ * ed: Enqueue command descriptor.
+ * fd: Frame descriptor to enqueue.
+ * flags: Table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL.
+ * frames_n: Number of FDs to enqueue.
+ *
+ * NOTE: Enqueue command (64 bytes): 32 (eq. descriptor) + 32 (frame descriptor).
+ */
+int
+dpaa2_swp_enq_mult(struct dpaa2_swp *swp, struct dpaa2_eq_desc *ed,
+ struct dpaa2_fd *fd, uint32_t *flags, int frames_n)
+{
+ const uint8_t *ed_pdat8 = (const uint8_t *) ed;
+ const uint32_t *ed_pdat32 = (const uint32_t *) ed;
+ const uint64_t *ed_pdat64 = (const uint64_t *) ed;
+ const uint64_t *fd_pdat64 = (const uint64_t *) fd;
+ struct resource_map *map;
+ uint32_t eqcr_ci, eqcr_pi; /* EQCR consumer/producer index */
+ uint32_t half_mask, full_mask, val, ci_offset;
+ uint16_t swp_flags;
+ int num_enq = 0;
+
+	/* NOTE: "flags" may be NULL; see the DCA handling below. */
+	if (swp == NULL || ed == NULL || fd == NULL || frames_n == 0)
+		return (EINVAL);
+
+ DPAA2_SWP_LOCK(swp, &swp_flags);
+ if (swp_flags & DPAA2_SWP_DESTROYED) {
+ /* Terminate operation if portal is destroyed. */
+ DPAA2_SWP_UNLOCK(swp);
+ return (ENOENT);
+ }
+
+ map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map;
+ ci_offset = swp->cfg.mem_backed
+ ? DPAA2_SWP_CENA_EQCR_CI_MEMBACK
+ : DPAA2_SWP_CENA_EQCR_CI;
+
+ half_mask = swp->eqcr.pi_ci_mask >> 1;
+ full_mask = swp->eqcr.pi_ci_mask;
+
+ if (swp->eqcr.available == 0) {
+ val = dpaa2_swp_read_reg(swp, ci_offset);
+ eqcr_ci = swp->eqcr.ci;
+ swp->eqcr.ci = val & full_mask;
+
+ swp->eqcr.available = dpaa2_swp_cyc_diff(swp->eqcr.pi_ring_size,
+ eqcr_ci, swp->eqcr.ci);
+
+ if (swp->eqcr.available == 0) {
+ DPAA2_SWP_UNLOCK(swp);
+ return (0);
+ }
+ }
+
+ eqcr_pi = swp->eqcr.pi;
+ num_enq = swp->eqcr.available < frames_n
+ ? swp->eqcr.available : frames_n;
+ swp->eqcr.available -= num_enq;
+
+ KASSERT(num_enq >= 0 && num_enq <= swp->eqcr.pi_ring_size,
+ ("%s: unexpected num_enq=%d", __func__, num_enq));
+ KASSERT(swp->eqcr.available >= 0 &&
+ swp->eqcr.available <= swp->eqcr.pi_ring_size,
+ ("%s: unexpected eqcr.available=%d", __func__, swp->eqcr.available));
+
+ /* Fill in the EQCR ring. */
+ for (int i = 0; i < num_enq; i++) {
+ /* Write enq. desc. without the VERB, DCA, SEQNUM and OPRID. */
+ for (int j = 1; j <= 3; j++)
+ bus_write_8(map,
+ DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) +
+ sizeof(uint64_t) * j, ed_pdat64[j]);
+ /* Write OPRID. */
+ bus_write_4(map,
+ DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) + sizeof(uint32_t),
+ ed_pdat32[1]);
+ /* Write DCA and SEQNUM without VERB byte. */
+ for (int j = 1; j <= 3; j++)
+ bus_write_1(map,
+ DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) +
+ sizeof(uint8_t) * j, ed_pdat8[j]);
+
+ /* Write frame descriptor. */
+ for (int j = 0; j <= 3; j++)
+ bus_write_8(map,
+ DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) +
+ ENQ_DESC_FD_OFFSET +
+ sizeof(uint64_t) * j, fd_pdat64[j]);
+ eqcr_pi++;
+ }
+
+ wmb();
+
+ /* Write the VERB byte of enqueue descriptor. */
+ eqcr_pi = swp->eqcr.pi;
+ for (int i = 0; i < num_enq; i++) {
+ bus_write_1(map,
+ DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask),
+ ed_pdat8[0] | swp->eqcr.pi_vb);
+
+ if (flags && (flags[i] & ENQ_FLAG_DCA)) {
+ /* Update DCA byte. */
+ bus_write_1(map,
+ DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) + 1,
+ (1 << ENQ_CMD_DCA_EN_SHIFT) |
+ (flags[i] & ENQ_DCA_IDXMASK));
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ swp->eqcr.pi_vb ^= DPAA2_SWP_VALID_BIT;
+ }
+ swp->eqcr.pi = eqcr_pi & full_mask;
+
+ DPAA2_SWP_UNLOCK(swp);
+
+ return (num_enq);
+}
+
+static int
+dpaa2_swp_cyc_diff(uint8_t ringsize, uint8_t first, uint8_t last)
+{
+ /* 'first' is included, 'last' is excluded */
+ return ((first <= last)
+ ? (last - first) : ((2 * ringsize) - (first - last)));
+}
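+
+/*
+ * Example: with an 8-entry ring the indices run over 0..15 (the extra bit
+ * is the wrap "lap"), so dpaa2_swp_cyc_diff(8, 14, 2) yields
+ * (2 * 8) - (14 - 2) = 4 entries between the two snapshots.
+ */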
+
+/**
+ * @brief Execute Buffer Release Command (BRC).
+ */
+static int
+dpaa2_swp_exec_br_command(struct dpaa2_swp *swp, struct dpaa2_swp_cmd *cmd,
+ uint32_t buf_num)
+{
+ struct __packed with_verb {
+ uint8_t verb;
+ uint8_t _reserved[63];
+ } *c;
+ const uint8_t *cmd_pdat8 = (const uint8_t *) cmd->params;
+ const uint32_t *cmd_pdat32 = (const uint32_t *) cmd->params;
+ struct resource_map *map;
+ uint32_t offset, rar; /* Release Array Allocation register */
+ uint16_t flags;
+
+ if (!swp || !cmd)
+ return (EINVAL);
+
+ DPAA2_SWP_LOCK(swp, &flags);
+ if (flags & DPAA2_SWP_DESTROYED) {
+ /* Terminate operation if portal is destroyed. */
+ DPAA2_SWP_UNLOCK(swp);
+ return (ENOENT);
+ }
+
+ rar = dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_RAR);
+ if (!RAR_SUCCESS(rar)) {
+ DPAA2_SWP_UNLOCK(swp);
+ return (EBUSY);
+ }
+
+ map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map;
+ offset = swp->cfg.mem_backed
+ ? DPAA2_SWP_CENA_RCR_MEM(RAR_IDX(rar))
+ : DPAA2_SWP_CENA_RCR(RAR_IDX(rar));
+ c = (struct with_verb *) cmd;
+
+ /* Write command bytes (without VERB byte). */
+ for (uint32_t i = 1; i < DPAA2_SWP_CMD_PARAMS_N; i++)
+ bus_write_8(map, offset + sizeof(uint64_t) * i, cmd->params[i]);
+ bus_write_4(map, offset + 4, cmd_pdat32[1]);
+ for (uint32_t i = 1; i <= 3; i++)
+ bus_write_1(map, offset + i, cmd_pdat8[i]);
+
+ /* Write VERB byte and trigger command execution. */
+ if (swp->cfg.mem_backed) {
+ bus_write_1(map, offset, c->verb | RAR_VB(rar) | buf_num);
+ wmb();
+ dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_RCR_AM_RT +
+ RAR_IDX(rar) * 4, DPAA2_SWP_RT_MODE);
+ } else {
+ wmb();
+ bus_write_1(map, offset, c->verb | RAR_VB(rar) | buf_num);
+ }
+
+ DPAA2_SWP_UNLOCK(swp);
+
+ return (0);
+}
+
+/**
+ * @brief Execute Volatile Dequeue Command (VDC).
+ *
+ * This command will be executed by QBMan only once in order to deliver the
+ * requested number of frames (1-16 or 1-32 depending on the QBMan version)
+ * to the driver via DQRR or arbitrary DMA-mapped memory.
+ *
+ * NOTE: There is a counterpart to the volatile dequeue command called static
+ * dequeue command (SDQC) which is executed periodically all the time the
+ * command is present in the SDQCR register.
+ */
+static int
+dpaa2_swp_exec_vdc_command_locked(struct dpaa2_swp *swp,
+ struct dpaa2_swp_cmd *cmd)
+{
+ struct __packed with_verb {
+ uint8_t verb;
+ uint8_t _reserved[63];
+ } *c;
+ const uint8_t *p8 = (const uint8_t *) cmd->params;
+ const uint32_t *p32 = (const uint32_t *) cmd->params;
+ struct resource_map *map;
+ uint32_t offset;
+
+ map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map;
+ offset = swp->cfg.mem_backed
+ ? DPAA2_SWP_CENA_VDQCR_MEM : DPAA2_SWP_CENA_VDQCR;
+ c = (struct with_verb *) cmd;
+
+ /* Write command bytes (without VERB byte). */
+ for (uint32_t i = 1; i < DPAA2_SWP_CMD_PARAMS_N; i++)
+ bus_write_8(map, offset + sizeof(uint64_t) * i, cmd->params[i]);
+ bus_write_4(map, offset + 4, p32[1]);
+ for (uint32_t i = 1; i <= 3; i++)
+ bus_write_1(map, offset + i, p8[i]);
+
+ /* Write VERB byte and trigger command execution. */
+ if (swp->cfg.mem_backed) {
+ bus_write_1(map, offset, c->verb | swp->vdq.valid_bit);
+ swp->vdq.valid_bit ^= DPAA2_SWP_VALID_BIT;
+ wmb();
+ dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_VDQCR_RT,
+ DPAA2_SWP_RT_MODE);
+ } else {
+ wmb();
+ bus_write_1(map, offset, c->verb | swp->vdq.valid_bit);
+ swp->vdq.valid_bit ^= DPAA2_SWP_VALID_BIT;
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Execute a QBMan management command.
+ */
+static int
+dpaa2_swp_exec_mgmt_command(struct dpaa2_swp *swp, struct dpaa2_swp_cmd *cmd,
+ struct dpaa2_swp_rsp *rsp, uint8_t cmdid)
+{
+#if (defined(_KERNEL) && defined(INVARIANTS))
+ struct __packed with_verb {
+ uint8_t verb;
+ uint8_t _reserved[63];
+ } *r;
+#endif
+ uint16_t flags;
+ int error;
+
+ if (swp == NULL || cmd == NULL || rsp == NULL)
+ return (EINVAL);
+
+ DPAA2_SWP_LOCK(swp, &flags);
+ if (flags & DPAA2_SWP_DESTROYED) {
+ /* Terminate operation if portal is destroyed. */
+ DPAA2_SWP_UNLOCK(swp);
+ return (ENOENT);
+ }
+
+ /*
+ * Send a command to QBMan using Management Command register and wait
+ * for response from the Management Response registers.
+ */
+ dpaa2_swp_send_mgmt_command(swp, cmd, cmdid);
+ error = dpaa2_swp_wait_for_mgmt_response(swp, rsp);
+ if (error) {
+ DPAA2_SWP_UNLOCK(swp);
+ return (error);
+ }
+ DPAA2_SWP_UNLOCK(swp);
+
+#if (defined(_KERNEL) && defined(INVARIANTS))
+ r = (struct with_verb *) rsp;
+ KASSERT((r->verb & CMD_VERB_MASK) == cmdid,
+ ("wrong VERB byte in response: resp=0x%02x, expected=0x%02x",
+ r->verb, cmdid));
+#endif
+
+ return (0);
+}
+
+static int
+dpaa2_swp_send_mgmt_command(struct dpaa2_swp *swp, struct dpaa2_swp_cmd *cmd,
+ uint8_t cmdid)
+{
+ const uint8_t *cmd_pdat8 = (const uint8_t *) cmd->params;
+ const uint32_t *cmd_pdat32 = (const uint32_t *) cmd->params;
+ struct resource_map *map;
+ uint32_t offset;
+
+ map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map;
+ offset = swp->cfg.mem_backed ? DPAA2_SWP_CENA_CR_MEM : DPAA2_SWP_CENA_CR;
+
+ /* Write command bytes (without VERB byte). */
+ for (uint32_t i = 1; i < DPAA2_SWP_CMD_PARAMS_N; i++)
+ bus_write_8(map, offset + sizeof(uint64_t) * i, cmd->params[i]);
+ bus_write_4(map, offset + 4, cmd_pdat32[1]);
+ for (uint32_t i = 1; i <= 3; i++)
+ bus_write_1(map, offset + i, cmd_pdat8[i]);
+
+ /* Write VERB byte and trigger command execution. */
+ if (swp->cfg.mem_backed) {
+ bus_write_1(map, offset, cmdid | swp->mr.valid_bit);
+ wmb();
+ dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_CR_RT,
+ DPAA2_SWP_RT_MODE);
+ } else {
+ wmb();
+ bus_write_1(map, offset, cmdid | swp->mc.valid_bit);
+ }
+
+ return (0);
+}
+
+static int
+dpaa2_swp_wait_for_mgmt_response(struct dpaa2_swp *swp, struct dpaa2_swp_rsp *rsp)
+{
+ struct resource_map *map = swp->cfg.mem_backed
+ ? swp->cena_map : swp->cinh_map;
+	/* Response is read from the single RR (memory-backed) or from RR0/RR1. */
+ const uint32_t offset = swp->cfg.mem_backed
+ ? DPAA2_SWP_CENA_RR_MEM
+ : DPAA2_SWP_CENA_RR(swp->mc.valid_bit);
+ uint32_t i, verb, ret;
+ int rc;
+
+ /* Wait for a command response from QBMan. */
+ for (i = 1; i <= CMD_SPIN_ATTEMPTS; i++) {
+ if (swp->cfg.mem_backed) {
+ verb = (uint32_t) (bus_read_4(map, offset) & 0xFFu);
+ if (swp->mr.valid_bit != (verb & DPAA2_SWP_VALID_BIT))
+ goto wait;
+ if (!(verb & ~DPAA2_SWP_VALID_BIT))
+ goto wait;
+ swp->mr.valid_bit ^= DPAA2_SWP_VALID_BIT;
+ } else {
+ ret = bus_read_4(map, offset);
+ verb = ret & ~DPAA2_SWP_VALID_BIT; /* remove valid bit */
+ if (verb == 0u)
+ goto wait;
+ swp->mc.valid_bit ^= DPAA2_SWP_VALID_BIT;
+ }
+ break;
+ wait:
+ DELAY(CMD_SPIN_TIMEOUT);
+ }
+ /* Return an error on expired timeout. */
+ rc = i > CMD_SPIN_ATTEMPTS ? ETIMEDOUT : 0;
+
+ /* Read command response. */
+ for (i = 0; i < DPAA2_SWP_RSP_PARAMS_N; i++)
+ rsp->params[i] = bus_read_8(map, offset + i * sizeof(uint64_t));
+
+ return (rc);
+}
diff --git a/sys/dev/dpaa2/dpaa2_swp.h b/sys/dev/dpaa2/dpaa2_swp.h
new file mode 100644
index 000000000000..53a8b32ef185
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_swp.h
@@ -0,0 +1,504 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_SWP_H
+#define _DPAA2_SWP_H
+
+#include <sys/bus.h>
+
+#include "dpaa2_types.h"
+#include "dpaa2_bp.h"
+
+/*
+ * DPAA2 QBMan software portal.
+ */
+
+/* All QBMan commands and result structures use this "valid bit" encoding. */
+#define DPAA2_SWP_VALID_BIT ((uint32_t) 0x80)
+
+#define DPAA2_SWP_TIMEOUT 100000 /* in us */
+#define DPAA2_SWP_CMD_PARAMS_N 8u
+#define DPAA2_SWP_RSP_PARAMS_N 8u
+
+/*
+ * Maximum number of buffers that can be acquired/released through a single
+ * QBMan command.
+ */
+#define DPAA2_SWP_BUFS_PER_CMD 7u
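+
+/*
+ * Larger sets of buffers are released in chunks of this size; an illustrative
+ * sketch (not part of the driver):
+ *
+ *	for (uint32_t off = 0; off < nbufs; off += DPAA2_SWP_BUFS_PER_CMD) {
+ *		uint32_t n = MIN(nbufs - off, DPAA2_SWP_BUFS_PER_CMD);
+ *		error = dpaa2_swp_release_bufs(swp, bpid, &paddr[off], n);
+ *		if (error != 0)
+ *			break;
+ *	}
+ */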
+
+/*
+ * Number of times to retry DPIO portal operations while waiting for the
+ * portal to finish executing the current command and become available.
+ *
+ * We want to avoid being stuck in a while loop in case hardware becomes
+ * unresponsive, but not give up too easily if the portal really is busy for
+ * valid reasons.
+ */
+#define DPAA2_SWP_BUSY_RETRIES 1000
+
+/* Versions of the QBMan software portals. */
+#define DPAA2_SWP_REV_4000 0x04000000
+#define DPAA2_SWP_REV_4100 0x04010000
+#define DPAA2_SWP_REV_4101 0x04010001
+#define DPAA2_SWP_REV_5000 0x05000000
+
+#define DPAA2_SWP_REV_MASK 0xFFFF0000
+
+/* Registers in the cache-inhibited area of the software portal. */
+#define DPAA2_SWP_CINH_CR 0x600 /* Management Command reg. */
+#define DPAA2_SWP_CINH_EQCR_PI 0x800 /* Enqueue Ring, Producer Index */
+#define DPAA2_SWP_CINH_EQCR_CI 0x840 /* Enqueue Ring, Consumer Index */
+#define DPAA2_SWP_CINH_CR_RT 0x900 /* CR Read Trigger */
+#define DPAA2_SWP_CINH_VDQCR_RT 0x940 /* VDQCR Read Trigger */
+#define DPAA2_SWP_CINH_EQCR_AM_RT 0x980
+#define DPAA2_SWP_CINH_RCR_AM_RT 0x9C0
+#define DPAA2_SWP_CINH_DQPI 0xA00 /* DQRR Producer Index reg. */
+#define DPAA2_SWP_CINH_DQRR_ITR 0xA80 /* DQRR interrupt timeout reg. */
+#define DPAA2_SWP_CINH_DCAP 0xAC0 /* DQRR Consumption Ack. reg. */
+#define DPAA2_SWP_CINH_SDQCR 0xB00 /* Static Dequeue Command reg. */
+#define DPAA2_SWP_CINH_EQCR_AM_RT2 0xB40
+#define DPAA2_SWP_CINH_RCR_PI 0xC00 /* Release Ring, Producer Index */
+#define DPAA2_SWP_CINH_RAR 0xCC0 /* Release Array Allocation reg. */
+#define DPAA2_SWP_CINH_CFG 0xD00
+#define DPAA2_SWP_CINH_ISR 0xE00
+#define DPAA2_SWP_CINH_IER 0xE40
+#define DPAA2_SWP_CINH_ISDR 0xE80
+#define DPAA2_SWP_CINH_IIR 0xEC0
+#define DPAA2_SWP_CINH_ITPR 0xF40
+
+/* Registers in the cache-enabled area of the software portal. */
+#define DPAA2_SWP_CENA_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
+#define DPAA2_SWP_CENA_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
+#define DPAA2_SWP_CENA_RCR(n) (0x400 + ((uint32_t)(n) << 6))
+#define DPAA2_SWP_CENA_CR (0x600) /* Management Command reg. */
+#define DPAA2_SWP_CENA_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
+#define DPAA2_SWP_CENA_VDQCR (0x780)
+#define DPAA2_SWP_CENA_EQCR_CI (0x840)
+
+/* Registers in the cache-enabled area of the software portal (memory-backed). */
+#define DPAA2_SWP_CENA_DQRR_MEM(n) (0x0800 + ((uint32_t)(n) << 6))
+#define DPAA2_SWP_CENA_RCR_MEM(n) (0x1400 + ((uint32_t)(n) << 6))
+#define DPAA2_SWP_CENA_CR_MEM (0x1600) /* Management Command reg. */
+#define DPAA2_SWP_CENA_RR_MEM (0x1680) /* Management Response reg. */
+#define DPAA2_SWP_CENA_VDQCR_MEM (0x1780)
+#define DPAA2_SWP_CENA_EQCR_CI_MEMBACK (0x1840)
+
+/* Shifts in the portal's configuration register. */
+#define DPAA2_SWP_CFG_DQRR_MF_SHIFT 20
+#define DPAA2_SWP_CFG_EST_SHIFT 16
+#define DPAA2_SWP_CFG_CPBS_SHIFT 15
+#define DPAA2_SWP_CFG_WN_SHIFT 14
+#define DPAA2_SWP_CFG_RPM_SHIFT 12
+#define DPAA2_SWP_CFG_DCM_SHIFT 10
+#define DPAA2_SWP_CFG_EPM_SHIFT 8
+#define DPAA2_SWP_CFG_VPM_SHIFT 7
+#define DPAA2_SWP_CFG_CPM_SHIFT 6
+#define DPAA2_SWP_CFG_SD_SHIFT 5
+#define DPAA2_SWP_CFG_SP_SHIFT 4
+#define DPAA2_SWP_CFG_SE_SHIFT 3
+#define DPAA2_SWP_CFG_DP_SHIFT 2
+#define DPAA2_SWP_CFG_DE_SHIFT 1
+#define DPAA2_SWP_CFG_EP_SHIFT 0
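+
+/*
+ * dpaa2_swp_set_cfg() (declared below) composes the portal configuration word
+ * from these shifts, roughly as in this illustrative sketch:
+ *
+ *	cfg = ((uint32_t)max_fill << DPAA2_SWP_CFG_DQRR_MF_SHIFT) |
+ *	    ((uint32_t)est << DPAA2_SWP_CFG_EST_SHIFT) |
+ *	    ((uint32_t)rpm << DPAA2_SWP_CFG_RPM_SHIFT) |
+ *	    ((uint32_t)dcm << DPAA2_SWP_CFG_DCM_SHIFT) |
+ *	    ((uint32_t)epm << DPAA2_SWP_CFG_EPM_SHIFT) |
+ *	    ((wn ? 1u : 0u) << DPAA2_SWP_CFG_WN_SHIFT) |
+ *	    ((de ? 1u : 0u) << DPAA2_SWP_CFG_DE_SHIFT) | ... ;
+ */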
+
+/* Static Dequeue Command Register attribute codes */
+#define DPAA2_SDQCR_FC_SHIFT 29 /* Dequeue Command Frame Count */
+#define DPAA2_SDQCR_FC_MASK 0x1
+#define DPAA2_SDQCR_DCT_SHIFT 24 /* Dequeue Command Type */
+#define DPAA2_SDQCR_DCT_MASK 0x3
+#define DPAA2_SDQCR_TOK_SHIFT 16 /* Dequeue Command Token */
+#define DPAA2_SDQCR_TOK_MASK 0xff
+#define DPAA2_SDQCR_SRC_SHIFT 0 /* Dequeue Source */
+#define DPAA2_SDQCR_SRC_MASK 0xffff
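+
+/*
+ * A static dequeue command is composed from the fields above; an illustrative
+ * sketch (assuming a dequeue sourced from channel "chan_id"):
+ *
+ *	sdq = (DPAA2_SWP_SDQCR_TOKEN << DPAA2_SDQCR_TOK_SHIFT) |
+ *	    ((dct & DPAA2_SDQCR_DCT_MASK) << DPAA2_SDQCR_DCT_SHIFT) |
+ *	    (chan_id & DPAA2_SDQCR_SRC_MASK);
+ */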
+
+/*
+ * The read trigger bit instructs QMan to read a command from memory, so that
+ * software does not have to perform a cache flush to force the command out to
+ * QMan.
+ *
+ * NOTE: Implemented in QBMan 5.0 or above.
+ */
+#define DPAA2_SWP_RT_MODE ((uint32_t)0x100)
+
+/* Interrupt Enable Register bits. */
+#define DPAA2_SWP_INTR_EQRI 0x01
+#define DPAA2_SWP_INTR_EQDI 0x02
+#define DPAA2_SWP_INTR_DQRI 0x04
+#define DPAA2_SWP_INTR_RCRI 0x08
+#define DPAA2_SWP_INTR_RCDI 0x10
+#define DPAA2_SWP_INTR_VDCI 0x20
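+
+/*
+ * For example, a driver interested only in dequeue-ring interrupts could
+ * enable them via dpaa2_swp_set_intr_trigger() (declared below):
+ *
+ *	dpaa2_swp_set_intr_trigger(swp, DPAA2_SWP_INTR_DQRI);
+ */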
+
+/* "Write Enable" bitmask for a command to configure SWP WQ Channel.*/
+#define DPAA2_WQCHAN_WE_EN (0x1u) /* Enable CDAN generation */
+#define DPAA2_WQCHAN_WE_ICD (0x2u) /* Interrupt Coalescing Disable */
+#define DPAA2_WQCHAN_WE_CTX (0x4u)
+
+/* Definitions for parsing DQRR entries. */
+#define DPAA2_DQRR_RESULT_MASK (0x7Fu)
+#define DPAA2_DQRR_RESULT_DQ (0x60u)
+#define DPAA2_DQRR_RESULT_FQRN (0x21u)
+#define DPAA2_DQRR_RESULT_FQRNI (0x22u)
+#define DPAA2_DQRR_RESULT_FQPN (0x24u)
+#define DPAA2_DQRR_RESULT_FQDAN (0x25u)
+#define DPAA2_DQRR_RESULT_CDAN (0x26u)
+#define DPAA2_DQRR_RESULT_CSCN_MEM (0x27u)
+#define DPAA2_DQRR_RESULT_CGCU (0x28u)
+#define DPAA2_DQRR_RESULT_BPSCN (0x29u)
+#define DPAA2_DQRR_RESULT_CSCN_WQ (0x2au)
+
+/* Frame dequeue statuses */
+#define DPAA2_DQ_STAT_FQEMPTY (0x80u) /* FQ is empty */
+#define DPAA2_DQ_STAT_HELDACTIVE (0x40u) /* FQ is held active */
+#define DPAA2_DQ_STAT_FORCEELIGIBLE (0x20u) /* FQ force eligible */
+#define DPAA2_DQ_STAT_VALIDFRAME (0x10u) /* valid frame */
+#define DPAA2_DQ_STAT_ODPVALID (0x04u) /* FQ ODP enable */
+#define DPAA2_DQ_STAT_VOLATILE (0x02u) /* volatile dequeue (VDC) */
+#define DPAA2_DQ_STAT_EXPIRED (0x01u) /* VDC is expired */
+
+/*
+ * Portal flags.
+ *
+ * TODO: Use the same flags for both MC and software portals.
+ */
+#define DPAA2_SWP_DEF 0x0u
+#define DPAA2_SWP_NOWAIT_ALLOC 0x2u /* Do not sleep during init */
+#define DPAA2_SWP_LOCKED 0x4000u /* Wait till portal's unlocked */
+#define DPAA2_SWP_DESTROYED 0x8000u /* Terminate any operations */
+
+/* Command return codes. */
+#define DPAA2_SWP_STAT_OK 0x0
+#define DPAA2_SWP_STAT_NO_MEMORY 0x9 /* No memory available */
+#define DPAA2_SWP_STAT_PORTAL_DISABLED 0xFD /* QBMan portal disabled */
+#define DPAA2_SWP_STAT_EINVAL 0xFE /* Invalid argument */
+#define DPAA2_SWP_STAT_ERR 0xFF /* General error */
+
+/* Opaque token for static dequeues. */
+#define DPAA2_SWP_SDQCR_TOKEN 0xBBu
+/* Opaque token for volatile dequeues. */
+#define DPAA2_SWP_VDQCR_TOKEN 0xCCu
+
+#define DPAA2_SWP_LOCK(__swp, __flags) do { \
+ mtx_assert(&(__swp)->lock, MA_NOTOWNED); \
+ mtx_lock(&(__swp)->lock); \
+ *(__flags) = (__swp)->flags; \
+ (__swp)->flags |= DPAA2_SWP_LOCKED; \
+} while (0)
+
+#define DPAA2_SWP_UNLOCK(__swp) do { \
+ mtx_assert(&(__swp)->lock, MA_OWNED); \
+ (__swp)->flags &= ~DPAA2_SWP_LOCKED; \
+ mtx_unlock(&(__swp)->lock); \
+} while (0)
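+
+/*
+ * Typical locking pattern (an illustrative sketch mirroring
+ * dpaa2_swp_exec_mgmt_command()): check the returned flags for
+ * DPAA2_SWP_DESTROYED right after taking the lock and bail out early.
+ *
+ *	uint16_t flags;
+ *
+ *	DPAA2_SWP_LOCK(swp, &flags);
+ *	if (flags & DPAA2_SWP_DESTROYED) {
+ *		DPAA2_SWP_UNLOCK(swp);
+ *		return (ENOENT);
+ *	}
+ *	... access the portal ...
+ *	DPAA2_SWP_UNLOCK(swp);
+ */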
+
+enum dpaa2_fd_format {
+ DPAA2_FD_SINGLE = 0,
+ DPAA2_FD_LIST,
+ DPAA2_FD_SG
+};
+
+/**
+ * @brief Enqueue command descriptor.
+ *
+ * NOTE: 32 bytes.
+ */
+struct dpaa2_eq_desc {
+ uint8_t verb;
+ uint8_t dca;
+ uint16_t seqnum;
+ uint16_t orpid;
+ uint16_t _reserved;
+ uint32_t tgtid;
+ uint32_t tag;
+ uint16_t qdbin;
+ uint8_t qpri;
+ uint8_t _reserved1[3];
+ uint8_t wae;
+ uint8_t rspid;
+ uint64_t rsp_addr;
+} __packed;
+
+/**
+ * @brief Frame Dequeue Response (FDR) descriptor.
+ *
+ * NOTE: 32 bytes.
+ */
+struct dpaa2_fdr_desc {
+ uint8_t verb;
+ uint8_t stat;
+ uint16_t seqnum;
+ uint16_t oprid;
+ uint8_t _reserved;
+ uint8_t tok;
+ uint32_t fqid;
+ uint32_t _reserved1;
+ uint32_t fq_byte_cnt;
+ uint32_t fq_frm_cnt;
+ uint64_t fqd_ctx;
+} __packed;
+
+/**
+ * @brief State Change Notification Message (SCNM).
+ *
+ * NOTE: 16 bytes.
+ */
+struct dpaa2_scn {
+ uint8_t verb;
+ uint8_t stat;
+ uint8_t state;
+ uint8_t _reserved;
+ uint32_t rid_tok;
+ uint64_t ctx;
+} __packed;
+
+/**
+ * @brief DPAA2 frame descriptor.
+ *
+ * addr: Memory address of the start of the buffer holding the
+ * frame data or the buffer containing the scatter/gather
+ * list.
+ * data_length: Length of the frame data (in bytes).
+ * bpid_ivp_bmt: Buffer pool ID (14 bit + BMT bit + IVP bit)
+ * offset_fmt_sl: Frame data offset, frame format and short-length fields.
+ * frame_ctx: Frame context. This field allows the sender of a frame
+ * to communicate some out-of-band information to the
+ * receiver of the frame.
+ * ctrl: Control bits (ERR, CBMT, ASAL, PTAC, DROPP, SC, DD).
+ * flow_ctx: Frame flow context. Associates the frame with a flow
+ * structure. QMan may use the FLC field for 3 purposes:
+ * stashing control, order definition point identification,
+ * and enqueue replication control.
+ *
+ * NOTE: 32 bytes.
+ */
+struct dpaa2_fd {
+ uint64_t addr;
+ uint32_t data_length;
+ uint16_t bpid_ivp_bmt;
+ uint16_t offset_fmt_sl;
+ uint32_t frame_ctx;
+ uint32_t ctrl;
+ uint64_t flow_ctx;
+} __packed;
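+
+/*
+ * A minimal single-buffer frame might be described as follows (an
+ * illustrative sketch; it assumes the pool ID occupies the low 14 bits of
+ * bpid_ivp_bmt and ignores the remaining packed fields):
+ *
+ *	struct dpaa2_fd fd = {0};
+ *
+ *	fd.addr = buf_paddr;
+ *	fd.data_length = len;
+ *	fd.bpid_ivp_bmt = bpid & 0x3FFFu;
+ */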
+
+/**
+ * @brief DPAA2 scatter/gather entry.
+ *
+ * NOTE: 16 bytes.
+ */
+struct dpaa2_sg_entry {
+ uint64_t addr;
+ uint32_t len;
+ uint16_t bpid;
+ uint16_t offset_fmt;
+} __packed;
+
+/**
+ * @brief Frame Dequeue Response (FDR).
+ *
+ * NOTE: 64 bytes.
+ */
+struct dpaa2_fdr {
+ struct dpaa2_fdr_desc desc;
+ struct dpaa2_fd fd;
+} __packed;
+
+/**
+ * @brief Dequeue Response Message.
+ *
+ * NOTE: 64 bytes.
+ */
+struct dpaa2_dq {
+ union {
+ struct {
+ uint8_t verb;
+ uint8_t _reserved[63];
+ } common;
+ struct dpaa2_fdr fdr; /* Frame Dequeue Response */
+ struct dpaa2_scn scn; /* State Change Notification */
+ };
+} __packed;
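+
+/*
+ * A dequeue response is classified by its VERB byte and, for frame dequeues,
+ * by the status bits; an illustrative sketch:
+ *
+ *	if ((dq->common.verb & DPAA2_DQRR_RESULT_MASK) == DPAA2_DQRR_RESULT_DQ &&
+ *	    (dq->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME) != 0) {
+ *		... process dq->fdr.fd ...
+ *	}
+ */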
+
+/**
+ * @brief Descriptor of the QBMan software portal.
+ *
+ * cena_res: Unmapped cache-enabled part of the portal's I/O memory.
+ * cena_map: Mapped cache-enabled part of the portal's I/O memory.
+ * cinh_res: Unmapped cache-inhibited part of the portal's I/O memory.
+ * cinh_map: Mapped cache-inhibited part of the portal's I/O memory.
+ *
+ * dpio_dev: Device associated with the DPIO object to manage this
+ * portal.
+ * swp_version: Hardware IP version of the software portal.
+ * swp_clk: QBMAN clock frequency value in Hz.
+ * swp_cycles_ratio: Number of QBMan cycles per nanosecond, scaled by 256.
+ * swp_id: Software portal ID.
+ *
+ * has_notif: True if the notification mode is used.
+ * has_8prio: True for a channel with 8 priority WQs. Ignored unless
+ * "has_notif" is true.
+ */
+struct dpaa2_swp_desc {
+ struct resource *cena_res;
+ struct resource_map *cena_map;
+ struct resource *cinh_res;
+ struct resource_map *cinh_map;
+
+ device_t dpio_dev;
+ uint32_t swp_version;
+ uint32_t swp_clk;
+ uint32_t swp_cycles_ratio;
+ uint16_t swp_id;
+
+ bool has_notif;
+ bool has_8prio;
+};
+
+/**
+ * @brief Command holds data to be written to the software portal.
+ */
+struct dpaa2_swp_cmd {
+ uint64_t params[DPAA2_SWP_CMD_PARAMS_N];
+};
+
+/**
+ * @brief Command response holds data received from the software portal.
+ */
+struct dpaa2_swp_rsp {
+ uint64_t params[DPAA2_SWP_RSP_PARAMS_N];
+};
+
+/**
+ * @brief QBMan software portal.
+ *
+ * cena_res: Unmapped cache-enabled part of the portal's I/O memory.
+ * cena_map: Mapped cache-enabled part of the portal's I/O memory.
+ * cinh_res: Unmapped cache-inhibited part of the portal's I/O memory.
+ * cinh_map: Mapped cache-inhibited part of the portal's I/O memory.
+ * lock: Lock to guard access to the portal.
+ * desc: Descriptor of the QBMan software portal.
+ * flags: Current state of the object.
+ * sdq: Push dequeues status.
+ * vdq: Volatile dequeue command state.
+ * cfg: Software portal configuration.
+ * mc: Management commands data.
+ * mr: Management response data.
+ * dqrr: Dequeue Response Ring is used to issue frame dequeue responses
+ * from the QBMan to the driver.
+ * eqcr: Enqueue Command Ring is used to issue frame enqueue commands
+ * from the driver to the QBMan.
+ */
+struct dpaa2_swp {
+ struct resource *cena_res;
+ struct resource_map *cena_map;
+ struct resource *cinh_res;
+ struct resource_map *cinh_map;
+
+ struct mtx lock;
+ struct dpaa2_swp_desc *desc;
+ uint16_t flags;
+
+ /* Static Dequeue Command Register value (to obtain CDANs). */
+ uint32_t sdq;
+
+ /* Volatile Dequeue Command (to obtain frames). */
+ struct {
+ uint32_t valid_bit; /* 0x00 or 0x80 */
+ } vdq;
+
+ struct {
+ bool atomic;
+ bool writes_cinh;
+ bool mem_backed;
+ } cfg; /* Software portal configuration. */
+
+ struct {
+ uint32_t valid_bit; /* 0x00 or 0x80 */
+ } mc;
+
+ struct {
+ uint32_t valid_bit; /* 0x00 or 0x80 */
+ } mr;
+
+ struct {
+ uint32_t next_idx;
+ uint32_t valid_bit;
+ uint8_t ring_size;
+ bool reset_bug; /* dqrr reset workaround */
+ uint32_t irq_threshold;
+ uint32_t irq_itp;
+ } dqrr;
+
+ struct {
+ uint32_t pi; /* producer index */
+ uint32_t pi_vb; /* PI valid bits */
+ uint32_t pi_ring_size;
+ uint32_t pi_ci_mask;
+ uint32_t ci;
+ int available;
+ uint32_t pend;
+ uint32_t no_pfdr;
+ } eqcr;
+};
+
+/* Management routines. */
+int dpaa2_swp_init_portal(struct dpaa2_swp **swp, struct dpaa2_swp_desc *desc,
+ uint16_t flags);
+void dpaa2_swp_free_portal(struct dpaa2_swp *swp);
+uint32_t dpaa2_swp_set_cfg(uint8_t max_fill, uint8_t wn, uint8_t est,
+ uint8_t rpm, uint8_t dcm, uint8_t epm, int sd, int sp, int se, int dp,
+ int de, int ep);
+
+/* Read/write registers of a software portal. */
+void dpaa2_swp_write_reg(struct dpaa2_swp *swp, uint32_t o, uint32_t v);
+uint32_t dpaa2_swp_read_reg(struct dpaa2_swp *swp, uint32_t o);
+
+/* Helper routines. */
+void dpaa2_swp_set_ed_norp(struct dpaa2_eq_desc *ed, bool resp_always);
+void dpaa2_swp_set_ed_fq(struct dpaa2_eq_desc *ed, uint32_t fqid);
+void dpaa2_swp_set_intr_trigger(struct dpaa2_swp *swp, uint32_t mask);
+uint32_t dpaa2_swp_get_intr_trigger(struct dpaa2_swp *swp);
+uint32_t dpaa2_swp_read_intr_status(struct dpaa2_swp *swp);
+void dpaa2_swp_clear_intr_status(struct dpaa2_swp *swp, uint32_t mask);
+void dpaa2_swp_set_push_dequeue(struct dpaa2_swp *swp, uint8_t chan_idx,
+ bool en);
+int dpaa2_swp_set_irq_coalescing(struct dpaa2_swp *swp, uint32_t threshold,
+ uint32_t holdoff);
+
+/* Software portal commands. */
+int dpaa2_swp_conf_wq_channel(struct dpaa2_swp *swp, uint16_t chan_id,
+ uint8_t we_mask, bool cdan_en, uint64_t ctx);
+int dpaa2_swp_query_bp(struct dpaa2_swp *swp, uint16_t bpid,
+ struct dpaa2_bp_conf *conf);
+int dpaa2_swp_release_bufs(struct dpaa2_swp *swp, uint16_t bpid, bus_addr_t *buf,
+ uint32_t buf_num);
+int dpaa2_swp_dqrr_next_locked(struct dpaa2_swp *swp, struct dpaa2_dq *dq,
+ uint32_t *idx);
+int dpaa2_swp_pull(struct dpaa2_swp *swp, uint16_t chan_id,
+ struct dpaa2_buf *buf, uint32_t frames_n);
+int dpaa2_swp_enq(struct dpaa2_swp *swp, struct dpaa2_eq_desc *ed,
+ struct dpaa2_fd *fd);
+int dpaa2_swp_enq_mult(struct dpaa2_swp *swp, struct dpaa2_eq_desc *ed,
+ struct dpaa2_fd *fd, uint32_t *flags, int frames_n);
+
+#endif /* _DPAA2_SWP_H */
diff --git a/sys/dev/dpaa2/dpaa2_swp_if.m b/sys/dev/dpaa2/dpaa2_swp_if.m
new file mode 100644
index 000000000000..09c658eb04d0
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_swp_if.m
@@ -0,0 +1,96 @@
+#-
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright © 2021-2022 Dmitry Salychev
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+
+#include <machine/bus.h>
+#include <dev/dpaa2/dpaa2_mc.h>
+#include <dev/dpaa2/dpaa2_swp.h>
+#include <dev/dpaa2/dpaa2_bp.h>
+
+/**
+ * @brief QBMan software portal interface.
+ *
+ * Software portals are used by data path software executing on a processor core
+ * to communicate with the Queue Manager (QMan), which acts as a central
+ * resource in DPAA2, managing the queueing of data between multiple
+ * processor cores,
+ * network interfaces, and hardware accelerators in a multicore SoC.
+ */
+INTERFACE dpaa2_swp;
+
+/**
+ * @brief Enqueue multiple frames to a frame queue using one Frame Queue ID.
+ *
+ * dev: DPIO device.
+ * fqid: Frame Queue ID.
+ * fd: Frame descriptor to enqueue.
+ * frames_n: Number of frames to enqueue.
+ */
+METHOD int enq_multiple_fq {
+ device_t dev;
+ uint32_t fqid;
+ struct dpaa2_fd *fd;
+ int frames_n;
+}
+
+/**
+ * @brief Configure the channel data availability notification (CDAN)
+ * in a particular WQ channel paired with DPIO.
+ *
+ * dev: DPIO device.
+ * ctx: Context to configure data availability notifications (CDAN).
+ */
+METHOD int conf_wq_channel {
+ device_t dev;
+ struct dpaa2_io_notif_ctx *ctx;
+};
+
+/**
+ * @brief Release one or more buffer pointers to a QBMan buffer pool.
+ *
+ * dev: DPIO device.
+ * bpid: Buffer pool ID.
+ * buf:		Array of buffer physical addresses.
+ * buf_num:	Number of buffers in the array.
+ */
+METHOD int release_bufs {
+ device_t dev;
+ uint16_t bpid;
+ bus_addr_t *buf;
+ uint32_t buf_num;
+};
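+
+/*
+ * Consumers reach these methods through the kobj wrappers generated from this
+ * file, e.g. (an illustrative sketch):
+ *
+ *	error = DPAA2_SWP_RELEASE_BUFS(dpio_dev, bpid, buf, buf_num);
+ */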
+
+/**
+ * @brief Query current configuration/state of the buffer pool.
+ *
+ * dev: DPIO device.
+ * bpid: Buffer pool ID.
+ * conf: Configuration/state of the buffer pool.
+ */
+METHOD int query_bp {
+ device_t dev;
+ uint16_t bpid;
+ struct dpaa2_bp_conf *conf;
+}
diff --git a/sys/dev/dpaa2/dpaa2_types.h b/sys/dev/dpaa2/dpaa2_types.h
new file mode 100644
index 000000000000..b54afa358a28
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_types.h
@@ -0,0 +1,114 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_TYPES_H
+#define _DPAA2_TYPES_H
+
+#include <machine/atomic.h>
+
+/**
+ * @brief Types of the DPAA2 devices.
+ */
+enum dpaa2_dev_type {
+ DPAA2_DEV_MC = 7500, /* Management Complex (firmware bus) */
+ DPAA2_DEV_RC, /* Resource Container (firmware bus) */
+ DPAA2_DEV_IO, /* I/O object (to work with QBMan portal) */
+ DPAA2_DEV_NI, /* Network Interface */
+ DPAA2_DEV_MCP, /* MC portal */
+ DPAA2_DEV_BP, /* Buffer Pool */
+ DPAA2_DEV_CON, /* Concentrator */
+ DPAA2_DEV_MAC, /* MAC object */
+ DPAA2_DEV_MUX, /* MUX (Datacenter bridge) object */
+ DPAA2_DEV_SW, /* Ethernet Switch */
+
+ DPAA2_DEV_NOTYPE /* Shouldn't be assigned to any DPAA2 device. */
+};
+
+/**
+ * @brief Types of the DPAA2 buffers.
+ */
+enum dpaa2_buf_type {
+ DPAA2_BUF_RX = 75, /* Rx buffer */
+ DPAA2_BUF_TX, /* Tx buffer */
+ DPAA2_BUF_STORE /* Channel storage, key configuration */
+};
+
+/**
+ * @brief DMA-mapped buffer (for Rx/Tx buffers, channel storage, etc.).
+ */
+struct dpaa2_buf {
+ enum dpaa2_buf_type type;
+ union {
+ struct {
+ bus_dma_tag_t dmat; /* DMA tag for this buffer */
+ bus_dmamap_t dmap;
+ bus_addr_t paddr;
+ void *vaddr;
+
+ struct mbuf *m; /* associated mbuf */
+ } rx;
+ struct {
+ bus_dma_tag_t dmat; /* DMA tag for this buffer */
+ bus_dmamap_t dmap;
+ bus_addr_t paddr;
+ void *vaddr;
+
+ struct mbuf *m; /* associated mbuf */
+ uint64_t idx;
+
+ /* for scatter/gather table */
+ bus_dma_tag_t sgt_dmat;
+ bus_dmamap_t sgt_dmap;
+ bus_addr_t sgt_paddr;
+ void *sgt_vaddr;
+ } tx;
+ struct {
+ bus_dma_tag_t dmat; /* DMA tag for this buffer */
+ bus_dmamap_t dmap;
+ bus_addr_t paddr;
+ void *vaddr;
+ } store;
+ };
+};
+
+struct dpaa2_atomic {
+ volatile int counter;
+};
+
+/* Handy wrappers over atomic operations. */
+#define DPAA2_ATOMIC_XCHG(a, val) \
+ (atomic_swap_int(&(a)->counter, (val)))
+#define DPAA2_ATOMIC_READ(a) \
+ (atomic_load_acq_int(&(a)->counter))
+#define DPAA2_ATOMIC_ADD(a, val) \
+ (atomic_add_acq_int(&(a)->counter, (val)))
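+
+/*
+ * E.g., maintaining a simple reference counter (an illustrative sketch):
+ *
+ *	struct dpaa2_atomic refs;
+ *
+ *	DPAA2_ATOMIC_ADD(&refs, 1);
+ *	if (DPAA2_ATOMIC_READ(&refs) > 0)
+ *		... resource is still in use ...
+ */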
+
+/* Convert DPAA2 type to/from string. */
+const char *dpaa2_ttos(enum dpaa2_dev_type type);
+enum dpaa2_dev_type dpaa2_stot(const char *str);
+
+#endif /* _DPAA2_TYPES_H */
diff --git a/sys/dev/dpaa2/memac_mdio.h b/sys/dev/dpaa2/memac_mdio.h
new file mode 100644
index 000000000000..02e4c081f87c
--- /dev/null
+++ b/sys/dev/dpaa2/memac_mdio.h
@@ -0,0 +1,64 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __MEMAC_MDIO_H
+#define __MEMAC_MDIO_H
+
+/* -------------------------------------------------------------------------- */
+
+struct memacphy_softc_common {
+ device_t dev;
+ device_t dpnidev;
+ int phy;
+};
+
+int memacphy_miibus_readreg(device_t, int, int);
+int memacphy_miibus_writereg(device_t, int, int, int);
+void memacphy_miibus_statchg(struct memacphy_softc_common *);
+int memacphy_set_ni_dev(struct memacphy_softc_common *, device_t);
+int memacphy_get_phy_loc(struct memacphy_softc_common *, int *);
+
+
+/* -------------------------------------------------------------------------- */
+
+struct memac_mdio_softc_common {
+ device_t dev;
+ struct resource *mem_res;
+ bool is_little_endian;
+};
+
+int memac_miibus_readreg(struct memac_mdio_softc_common *, int, int);
+int memac_miibus_writereg(struct memac_mdio_softc_common *, int, int, int);
+
+ssize_t memac_mdio_get_property(device_t, device_t, const char *,
+ void *, size_t, device_property_type_t);
+int memac_mdio_read_ivar(device_t, device_t, int, uintptr_t *);
+
+int memac_mdio_generic_attach(struct memac_mdio_softc_common *);
+int memac_mdio_generic_detach(struct memac_mdio_softc_common *);
+
+#endif /* __MEMAC_MDIO_H */
diff --git a/sys/dev/dpaa2/memac_mdio_acpi.c b/sys/dev/dpaa2/memac_mdio_acpi.c
new file mode 100644
index 000000000000..ed0c2802f60f
--- /dev/null
+++ b/sys/dev/dpaa2/memac_mdio_acpi.c
@@ -0,0 +1,310 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/endian.h>
+#include <sys/socket.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include "memac_mdio.h"
+#include "memac_mdio_if.h"
+#include "acpi_bus_if.h"
+#include "miibus_if.h"
+
+/* -------------------------------------------------------------------------- */
+
+struct memacphy_softc_acpi {
+ struct memacphy_softc_common scc;
+ int uid;
+ uint64_t phy_channel;
+ char compatible[64];
+};
+
+static void
+memacphy_acpi_miibus_statchg(device_t dev)
+{
+ struct memacphy_softc_acpi *sc;
+
+ sc = device_get_softc(dev);
+ memacphy_miibus_statchg(&sc->scc);
+}
+
+static int
+memacphy_acpi_set_ni_dev(device_t dev, device_t nidev)
+{
+ struct memacphy_softc_acpi *sc;
+
+ sc = device_get_softc(dev);
+ return (memacphy_set_ni_dev(&sc->scc, nidev));
+}
+
+static int
+memacphy_acpi_get_phy_loc(device_t dev, int *phy_loc)
+{
+ struct memacphy_softc_acpi *sc;
+
+ sc = device_get_softc(dev);
+ return (memacphy_get_phy_loc(&sc->scc, phy_loc));
+}
+
+static int
+memacphy_acpi_probe(device_t dev)
+{
+
+ device_set_desc(dev, "MEMAC PHY (acpi)");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+memacphy_acpi_attach(device_t dev)
+{
+ struct memacphy_softc_acpi *sc;
+ ACPI_HANDLE h;
+ ssize_t s;
+
+ sc = device_get_softc(dev);
+ sc->scc.dev = dev;
+ h = acpi_get_handle(dev);
+
+ s = acpi_GetInteger(h, "_UID", &sc->uid);
+ if (ACPI_FAILURE(s)) {
+ device_printf(dev, "Cannot get '_UID' property: %zd\n", s);
+ return (ENXIO);
+ }
+
+ s = device_get_property(dev, "phy-channel",
+ &sc->phy_channel, sizeof(sc->phy_channel), DEVICE_PROP_UINT64);
+ if (s != -1)
+ sc->scc.phy = sc->phy_channel;
+ else
+ sc->scc.phy = -1;
+ s = device_get_property(dev, "compatible",
+ sc->compatible, sizeof(sc->compatible), DEVICE_PROP_ANY);
+
+ if (bootverbose)
+ device_printf(dev, "UID %#04x phy-channel %ju compatible '%s' phy %u\n",
+ sc->uid, sc->phy_channel,
+ sc->compatible[0] != '\0' ? sc->compatible : "", sc->scc.phy);
+
+ if (sc->scc.phy == -1)
+ return (ENXIO);
+ return (0);
+}
+
+static device_method_t memacphy_acpi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, memacphy_acpi_probe),
+ DEVMETHOD(device_attach, memacphy_acpi_attach),
+ DEVMETHOD(device_detach, bus_generic_detach),
+
+ /* MII interface */
+ DEVMETHOD(miibus_readreg, memacphy_miibus_readreg),
+ DEVMETHOD(miibus_writereg, memacphy_miibus_writereg),
+ DEVMETHOD(miibus_statchg, memacphy_acpi_miibus_statchg),
+
+ /* memac */
+ DEVMETHOD(memac_mdio_set_ni_dev, memacphy_acpi_set_ni_dev),
+ DEVMETHOD(memac_mdio_get_phy_loc, memacphy_acpi_get_phy_loc),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(memacphy_acpi, memacphy_acpi_driver, memacphy_acpi_methods,
+ sizeof(struct memacphy_softc_acpi));
+
+EARLY_DRIVER_MODULE(memacphy_acpi, memac_mdio_acpi, memacphy_acpi_driver, 0, 0,
+ BUS_PASS_SUPPORTDEV);
+DRIVER_MODULE(miibus, memacphy_acpi, miibus_driver, 0, 0);
+MODULE_DEPEND(memacphy_acpi, miibus, 1, 1, 1);
+
+/* -------------------------------------------------------------------------- */
+
+struct memac_mdio_softc_acpi {
+ struct memac_mdio_softc_common scc;
+};
+
+static int
+memac_acpi_miibus_readreg(device_t dev, int phy, int reg)
+{
+ struct memac_mdio_softc_acpi *sc;
+
+ sc = device_get_softc(dev);
+ return (memac_miibus_readreg(&sc->scc, phy, reg));
+}
+
+static int
+memac_acpi_miibus_writereg(device_t dev, int phy, int reg, int data)
+{
+ struct memac_mdio_softc_acpi *sc;
+
+ sc = device_get_softc(dev);
+ return (memac_miibus_writereg(&sc->scc, phy, reg, data));
+}
+
+/* Context for walking PHY child devices. */
+struct memac_mdio_walk_ctx {
+ device_t dev;
+ int count;
+ int countok;
+};
+
+static char *memac_mdio_ids[] = {
+ "NXP0006",
+ NULL
+};
+
+static int
+memac_mdio_acpi_probe(device_t dev)
+{
+ int rc;
+
+ if (acpi_disabled("fsl_memac_mdio"))
+ return (ENXIO);
+
+ rc = ACPI_ID_PROBE(device_get_parent(dev), dev, memac_mdio_ids, NULL);
+ if (rc <= 0)
+ device_set_desc(dev, "Freescale XGMAC MDIO Bus");
+
+ return (rc);
+}
+
+static ACPI_STATUS
+memac_mdio_acpi_probe_child(ACPI_HANDLE h, device_t *dev, int level, void *arg)
+{
+ struct memac_mdio_walk_ctx *ctx;
+ struct acpi_device *ad;
+ device_t child;
+ uint32_t adr;
+
+ ctx = (struct memac_mdio_walk_ctx *)arg;
+ ctx->count++;
+
+ if (ACPI_FAILURE(acpi_GetInteger(h, "_ADR", &adr)))
+ return (AE_OK);
+
+ /* Technically M_ACPIDEV */
+ if ((ad = malloc(sizeof(*ad), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL)
+ return (AE_OK);
+
+ child = device_add_child(ctx->dev, "memacphy_acpi", -1);
+ if (child == NULL) {
+ free(ad, M_DEVBUF);
+ return (AE_OK);
+ }
+ ad->ad_handle = h;
+ ad->ad_cls_class = 0xffffff;
+ resource_list_init(&ad->ad_rl);
+ device_set_ivars(child, ad);
+ *dev = child;
+
+ ctx->countok++;
+ return (AE_OK);
+}
+
+static int
+memac_mdio_acpi_attach(device_t dev)
+{
+ struct memac_mdio_softc_acpi *sc;
+ struct memac_mdio_walk_ctx ctx;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->scc.dev = dev;
+
+ error = memac_mdio_generic_attach(&sc->scc);
+ if (error != 0)
+ return (error);
+
+ ctx.dev = dev;
+ ctx.count = 0;
+ ctx.countok = 0;
+ ACPI_SCAN_CHILDREN(device_get_parent(dev), dev, 1,
+ memac_mdio_acpi_probe_child, &ctx);
+ if (ctx.countok > 0) {
+ bus_generic_probe(dev);
+ bus_generic_attach(dev);
+ }
+
+ return (0);
+}
+
+static int
+memac_mdio_acpi_detach(device_t dev)
+{
+ struct memac_mdio_softc_acpi *sc;
+
+ sc = device_get_softc(dev);
+ return (memac_mdio_generic_detach(&sc->scc));
+}
+
+static device_method_t memac_mdio_acpi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, memac_mdio_acpi_probe),
+ DEVMETHOD(device_attach, memac_mdio_acpi_attach),
+ DEVMETHOD(device_detach, memac_mdio_acpi_detach),
+
+ /* MII interface */
+ DEVMETHOD(miibus_readreg, memac_acpi_miibus_readreg),
+ DEVMETHOD(miibus_writereg, memac_acpi_miibus_writereg),
+
+ /* .. */
+ DEVMETHOD(bus_add_child, bus_generic_add_child),
+ DEVMETHOD(bus_read_ivar, memac_mdio_read_ivar),
+ DEVMETHOD(bus_get_property, memac_mdio_get_property),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(memac_mdio_acpi, memac_mdio_acpi_driver, memac_mdio_acpi_methods,
+ sizeof(struct memac_mdio_softc_acpi));
+
+EARLY_DRIVER_MODULE(memac_mdio_acpi, acpi, memac_mdio_acpi_driver, 0, 0,
+ BUS_PASS_SUPPORTDEV);
+
+DRIVER_MODULE(miibus, memac_mdio_acpi, miibus_driver, 0, 0);
+MODULE_DEPEND(memac_mdio_acpi, miibus, 1, 1, 1);
+MODULE_VERSION(memac_mdio_acpi, 1);
diff --git a/sys/dev/dpaa2/memac_mdio_common.c b/sys/dev/dpaa2/memac_mdio_common.c
new file mode 100644
index 000000000000..a004966ed89a
--- /dev/null
+++ b/sys/dev/dpaa2/memac_mdio_common.c
@@ -0,0 +1,306 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/endian.h>
+#include <sys/socket.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include "memac_mdio.h"
+#include "miibus_if.h"
+
+/* #define MEMAC_MDIO_DEBUG */
+
+/* -------------------------------------------------------------------------- */
+
+int
+memacphy_miibus_readreg(device_t dev, int phy, int reg)
+{
+
+ return (MIIBUS_READREG(device_get_parent(dev), phy, reg));
+}
+
+int
+memacphy_miibus_writereg(device_t dev, int phy, int reg, int data)
+{
+
+ return (MIIBUS_WRITEREG(device_get_parent(dev), phy, reg, data));
+}
+
+void
+memacphy_miibus_statchg(struct memacphy_softc_common *sc)
+{
+
+ if (sc->dpnidev != NULL)
+ MIIBUS_STATCHG(sc->dpnidev);
+}
+
+int
+memacphy_set_ni_dev(struct memacphy_softc_common *sc, device_t nidev)
+{
+
+ if (nidev == NULL)
+ return (EINVAL);
+
+#if defined(MEMAC_MDIO_DEBUG)
+ if (bootverbose)
+ device_printf(sc->dev, "setting nidev %p (%s)\n",
+ nidev, device_get_nameunit(nidev));
+#endif
+
+ if (sc->dpnidev != NULL)
+ return (EBUSY);
+
+ sc->dpnidev = nidev;
+ return (0);
+}
+
+int
+memacphy_get_phy_loc(struct memacphy_softc_common *sc, int *phy_loc)
+{
+ int error;
+
+ if (phy_loc == NULL)
+ return (EINVAL);
+
+ if (sc->phy == -1) {
+ *phy_loc = MII_PHY_ANY;
+ error = ENODEV;
+ } else {
+ *phy_loc = sc->phy;
+ error = 0;
+ }
+
+#if defined(MEMAC_MDIO_DEBUG)
+ if (bootverbose)
+ device_printf(sc->dev, "returning phy_loc %d, error %d\n",
+ *phy_loc, error);
+#endif
+
+ return (error);
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * MDIO Ethernet Management Interface Registers (internal PCS MDIO PHY)
+ * 0x0030 MDIO Configuration Register (MDIO_CFG)
+ * 0x0034 MDIO Control Register (MDIO_CTL)
+ * 0x0038 MDIO Data Register (MDIO_DATA)
+ * 0x003c MDIO Register Address Register (MDIO_ADDR)
+ *
+ * External MDIO interfaces
+ * 0x0030 External MDIO Configuration Register (EMDIO_CFG)
+ * 0x0034 External MDIO Control Register (EMDIO_CTL)
+ * 0x0038 External MDIO Data Register (EMDIO_DATA)
+ * 0x003c External MDIO Register Address Register (EMDIO_ADDR)
+ */
+#define MDIO_CFG 0x00030
+#define MDIO_CFG_MDIO_RD_ER (1 << 1)
+#define MDIO_CFG_ENC45 (1 << 6)
+#define MDIO_CFG_BUSY (1 << 31)
+#define MDIO_CTL 0x00034
+#define MDIO_CTL_READ (1 << 15)
+#define MDIO_CTL_PORT_ADDR(_x) (((_x) & 0x1f) << 5)
+#define MDIO_CTL_DEV_ADDR(_x) ((_x) & 0x1f)
+#define MDIO_DATA 0x00038
+#define MDIO_ADDR 0x0003c
+
+static uint32_t
+memac_read_4(struct memac_mdio_softc_common *sc, uint32_t reg)
+{
+ uint32_t v, r;
+
+ v = bus_read_4(sc->mem_res, reg);
+ if (sc->is_little_endian)
+ r = le32toh(v);
+ else
+ r = be32toh(v);
+
+ return (r);
+}
+
+static void
+memac_write_4(struct memac_mdio_softc_common *sc, uint32_t reg, uint32_t val)
+{
+ uint32_t v;
+
+ if (sc->is_little_endian)
+ v = htole32(val);
+ else
+ v = htobe32(val);
+ bus_write_4(sc->mem_res, reg, v);
+}
+
+static uint32_t
+memac_miibus_wait_no_busy(struct memac_mdio_softc_common *sc)
+{
+ uint32_t count, val;
+
+ for (count = 1000; count > 0; count--) {
+ val = memac_read_4(sc, MDIO_CFG);
+ if ((val & MDIO_CFG_BUSY) == 0)
+ break;
+ DELAY(1);
+ }
+
+ if (count == 0)
+ return (0xffff);
+
+ return (0);
+}
+
+int
+memac_miibus_readreg(struct memac_mdio_softc_common *sc, int phy, int reg)
+{
+ uint32_t cfg, ctl, val;
+
+ /* Set proper Clause 45 mode. */
+ cfg = memac_read_4(sc, MDIO_CFG);
+ /* XXX 45 support? */
+ cfg &= ~MDIO_CFG_ENC45; /* Use Clause 22 */
+ memac_write_4(sc, MDIO_CFG, cfg);
+
+ val = memac_miibus_wait_no_busy(sc);
+ if (val != 0)
+ return (0xffff);
+
+	/* Select the PHY and register we want to talk to. */
+ ctl = MDIO_CTL_PORT_ADDR(phy) | MDIO_CTL_DEV_ADDR(reg);
+ /* XXX do we need two writes for this to work reliably? */
+ memac_write_4(sc, MDIO_CTL, ctl | MDIO_CTL_READ);
+
+ val = memac_miibus_wait_no_busy(sc);
+ if (val != 0)
+ return (0xffff);
+
+ cfg = memac_read_4(sc, MDIO_CFG);
+ if (cfg & MDIO_CFG_MDIO_RD_ER)
+ return (0xffff);
+
+ val = memac_read_4(sc, MDIO_DATA);
+ val &= 0xffff;
+
+#if defined(MEMAC_MDIO_DEBUG)
+ device_printf(sc->dev, "phy read %d:%d = %#06x\n", phy, reg, val);
+#endif
+
+ return (val);
+}
+
+int
+memac_miibus_writereg(struct memac_mdio_softc_common *sc, int phy, int reg, int data)
+{
+ uint32_t cfg, ctl, val;
+
+#if defined(MEMAC_MDIO_DEBUG)
+ device_printf(sc->dev, "phy write %d:%d\n", phy, reg);
+#endif
+
+ /* Set proper Clause 45 mode. */
+ cfg = memac_read_4(sc, MDIO_CFG);
+ /* XXX 45 support? */
+ cfg &= ~MDIO_CFG_ENC45; /* Use Clause 22 */
+ memac_write_4(sc, MDIO_CFG, cfg);
+
+ val = memac_miibus_wait_no_busy(sc);
+ if (val != 0)
+ return (0xffff);
+
+	/* Select the PHY and register we want to talk to. */
+ ctl = MDIO_CTL_PORT_ADDR(phy) | MDIO_CTL_DEV_ADDR(reg);
+ memac_write_4(sc, MDIO_CTL, ctl);
+
+ memac_write_4(sc, MDIO_DATA, data & 0xffff);
+
+ val = memac_miibus_wait_no_busy(sc);
+ if (val != 0)
+ return (0xffff);
+
+ return (0);
+}
+
+ssize_t
+memac_mdio_get_property(device_t dev, device_t child, const char *propname,
+ void *propvalue, size_t size, device_property_type_t type)
+{
+
+ return (bus_generic_get_property(dev, child, propname, propvalue, size, type));
+}
+
+int
+memac_mdio_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
+{
+
+ return (BUS_READ_IVAR(device_get_parent(dev), dev, index, result));
+}
+
+
+int
+memac_mdio_generic_attach(struct memac_mdio_softc_common *sc)
+{
+ int rid;
+
+ rid = 0;
+ sc->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
+ &rid, RF_ACTIVE | RF_SHAREABLE);
+ if (sc->mem_res == NULL) {
+ device_printf(sc->dev, "%s: cannot allocate mem resource\n",
+ __func__);
+ return (ENXIO);
+ }
+
+ sc->is_little_endian = device_has_property(sc->dev, "little-endian");
+
+ return (0);
+}
+
+int
+memac_mdio_generic_detach(struct memac_mdio_softc_common *sc)
+{
+
+ if (sc->mem_res != NULL)
+ bus_release_resource(sc->dev, SYS_RES_MEMORY,
+ rman_get_rid(sc->mem_res), sc->mem_res);
+
+ return (0);
+}
diff --git a/sys/dev/dpaa2/memac_mdio_fdt.c b/sys/dev/dpaa2/memac_mdio_fdt.c
new file mode 100644
index 000000000000..8e349566b9e5
--- /dev/null
+++ b/sys/dev/dpaa2/memac_mdio_fdt.c
@@ -0,0 +1,308 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2021-2022 Bjoern A. Zeeb
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/endian.h>
+#include <sys/socket.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/fdt/simplebus.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include "memac_mdio.h"
+#include "memac_mdio_if.h"
+#include "ofw_bus_if.h"
+#include "miibus_if.h"
+
+/* -------------------------------------------------------------------------- */
+
+struct memacphy_softc_fdt {
+ struct memacphy_softc_common scc;
+ uint32_t reg;
+ phandle_t xref;
+};
+
+static void
+memacphy_fdt_miibus_statchg(device_t dev)
+{
+ struct memacphy_softc_fdt *sc;
+
+ sc = device_get_softc(dev);
+ memacphy_miibus_statchg(&sc->scc);
+}
+
+static int
+memacphy_fdt_set_ni_dev(device_t dev, device_t nidev)
+{
+ struct memacphy_softc_fdt *sc;
+
+ sc = device_get_softc(dev);
+ return (memacphy_set_ni_dev(&sc->scc, nidev));
+}
+
+static int
+memacphy_fdt_get_phy_loc(device_t dev, int *phy_loc)
+{
+ struct memacphy_softc_fdt *sc;
+
+ sc = device_get_softc(dev);
+ return (memacphy_get_phy_loc(&sc->scc, phy_loc));
+}
+
+static int
+memacphy_fdt_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ device_set_desc(dev, "MEMAC PHY (fdt)");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+memacphy_fdt_attach(device_t dev)
+{
+ struct memacphy_softc_fdt *sc;
+ phandle_t node;
+ ssize_t s;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->scc.dev = dev;
+ node = ofw_bus_get_node(dev);
+
+ s = device_get_property(dev, "reg", &sc->reg, sizeof(sc->reg),
+ DEVICE_PROP_UINT32);
+ if (s != -1)
+ sc->scc.phy = sc->reg;
+ else
+ sc->scc.phy = -1;
+ sc->xref = OF_xref_from_node(node);
+
+ error = OF_device_register_xref(sc->xref, dev);
+ if (error != 0)
+ device_printf(dev, "Failed to register xref %#x\n", sc->xref);
+
+ if (bootverbose)
+ device_printf(dev, "node %#x '%s': reg %#x xref %#x phy %u\n",
+ node, ofw_bus_get_name(dev), sc->reg, sc->xref, sc->scc.phy);
+
+ if (sc->scc.phy == -1)
+ error = ENXIO;
+ return (error);
+}
+
+static device_method_t memacphy_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, memacphy_fdt_probe),
+ DEVMETHOD(device_attach, memacphy_fdt_attach),
+ DEVMETHOD(device_detach, bus_generic_detach),
+
+ /* MII interface */
+ DEVMETHOD(miibus_readreg, memacphy_miibus_readreg),
+ DEVMETHOD(miibus_writereg, memacphy_miibus_writereg),
+ DEVMETHOD(miibus_statchg, memacphy_fdt_miibus_statchg),
+
+ /* memac */
+ DEVMETHOD(memac_mdio_set_ni_dev, memacphy_fdt_set_ni_dev),
+ DEVMETHOD(memac_mdio_get_phy_loc, memacphy_fdt_get_phy_loc),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(memacphy_fdt, memacphy_fdt_driver, memacphy_fdt_methods,
+ sizeof(struct memacphy_softc_fdt));
+
+EARLY_DRIVER_MODULE(memacphy_fdt, memac_mdio_fdt, memacphy_fdt_driver, 0, 0,
+ BUS_PASS_SUPPORTDEV);
+DRIVER_MODULE(miibus, memacphy_fdt, miibus_driver, 0, 0);
+MODULE_DEPEND(memacphy_fdt, miibus, 1, 1, 1);
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Order in this softc is important; memac_mdio_fdt_attach() calls
+ * simplebus_init() which expects sb_sc at the beginning.
+ */
+struct memac_mdio_softc_fdt {
+ struct simplebus_softc sb_sc; /* Must stay first. */
+ struct memac_mdio_softc_common scc;
+};
+
+static int
+memac_fdt_miibus_readreg(device_t dev, int phy, int reg)
+{
+ struct memac_mdio_softc_fdt *sc;
+
+ sc = device_get_softc(dev);
+ return (memac_miibus_readreg(&sc->scc, phy, reg));
+}
+
+static int
+memac_fdt_miibus_writereg(device_t dev, int phy, int reg, int data)
+{
+ struct memac_mdio_softc_fdt *sc;
+
+ sc = device_get_softc(dev);
+ return (memac_miibus_writereg(&sc->scc, phy, reg, data));
+}
+
+static struct ofw_compat_data compat_data[] = {
+ { "fsl,fman-memac-mdio", 1 },
+ { NULL, 0 }
+};
+
+static int
+memac_mdio_fdt_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
+ return (ENXIO);
+
+ device_set_desc(dev, "Freescale XGMAC MDIO Bus (FDT)");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+memac_mdio_fdt_probe_child(device_t bus, phandle_t child)
+{
+ device_t childdev;
+
+	/* Make sure we do not already have a device. */
+ childdev = ofw_bus_find_child_device_by_phandle(bus, child);
+ if (childdev != NULL)
+ return (0);
+
+ childdev = simplebus_add_device(bus, child, 0, NULL, -1, NULL);
+ if (childdev == NULL)
+ return (ENXIO);
+
+ return (device_probe_and_attach(childdev));
+}
+
+static int
+memac_mdio_fdt_attach(device_t dev)
+{
+ struct memac_mdio_softc_fdt *sc;
+ phandle_t node, child;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->scc.dev = dev;
+
+ error = memac_mdio_generic_attach(&sc->scc);
+ if (error != 0)
+ return (error);
+
+ /* Attach the *phy* children represented in the device tree. */
+ bus_generic_probe(dev);
+ bus_enumerate_hinted_children(dev);
+ node = ofw_bus_get_node(dev);
+ simplebus_init(dev, node);
+ for (child = OF_child(node); child > 0; child = OF_peer(child)) {
+ if (!OF_hasprop(child, "reg"))
+ continue;
+ if (memac_mdio_fdt_probe_child(dev, child) != 0)
+ continue;
+ }
+
+ return (0);
+}
+
+static int
+memac_mdio_fdt_detach(device_t dev)
+{
+ struct memac_mdio_softc_fdt *sc;
+
+ sc = device_get_softc(dev);
+ return (memac_mdio_generic_detach(&sc->scc));
+}
+
+static const struct ofw_bus_devinfo *
+memac_simplebus_get_devinfo(device_t bus, device_t child)
+{
+
+ return (OFW_BUS_GET_DEVINFO(device_get_parent(bus), child));
+}
+
+static device_method_t memac_mdio_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, memac_mdio_fdt_probe),
+ DEVMETHOD(device_attach, memac_mdio_fdt_attach),
+ DEVMETHOD(device_detach, memac_mdio_fdt_detach),
+
+ /* MII interface */
+ DEVMETHOD(miibus_readreg, memac_fdt_miibus_readreg),
+ DEVMETHOD(miibus_writereg, memac_fdt_miibus_writereg),
+
+ /* OFW/simplebus */
+ DEVMETHOD(ofw_bus_get_devinfo, memac_simplebus_get_devinfo),
+ DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat),
+ DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model),
+ DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name),
+ DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node),
+ DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type),
+
+ /* Bus interface */
+ DEVMETHOD(bus_add_child, bus_generic_add_child),
+ DEVMETHOD(bus_read_ivar, memac_mdio_read_ivar),
+ DEVMETHOD(bus_get_property, memac_mdio_get_property),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(memac_mdio_fdt, memac_mdio_fdt_driver, memac_mdio_fdt_methods,
+ sizeof(struct memac_mdio_softc_fdt));
+
+EARLY_DRIVER_MODULE(memac_mdio_fdt, simplebus, memac_mdio_fdt_driver, 0, 0,
+ BUS_PASS_SUPPORTDEV);
+
+DRIVER_MODULE(miibus, memac_mdio_fdt, miibus_driver, 0, 0);
+MODULE_DEPEND(memac_mdio_fdt, miibus, 1, 1, 1);
+MODULE_VERSION(memac_mdio_fdt, 1);
diff --git a/sys/dev/dpaa2/memac_mdio_if.m b/sys/dev/dpaa2/memac_mdio_if.m
new file mode 100644
index 000000000000..d49c95641409
--- /dev/null
+++ b/sys/dev/dpaa2/memac_mdio_if.m
@@ -0,0 +1,42 @@
+#-
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright © 2022, Bjoern A. Zeeb
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $FreeBSD$
+#
+
+#include <machine/bus.h>
+
+INTERFACE memac_mdio;
+
+METHOD int set_ni_dev {
+ device_t dev;
+ device_t nidev;
+};
+
+METHOD int get_phy_loc {
+ device_t dev;
+ int *phy_loc;
+};
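+
+/*
+ * A network interface driver pairs itself with its PHY through the generated
+ * wrappers, e.g. (an illustrative sketch):
+ *
+ *	error = MEMAC_MDIO_SET_NI_DEV(phydev, nidev);
+ *	if (error == 0)
+ *		error = MEMAC_MDIO_GET_PHY_LOC(phydev, &phy_loc);
+ */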