author     Andrew Turner <andrew@FreeBSD.org>	2017-02-15 13:56:04 +0000
committer  Andrew Turner <andrew@FreeBSD.org>	2017-02-15 13:56:04 +0000
commit     9c6d6488faa5a0c98034068ad18597921589e3c3 (patch)
tree       06bcd8cff1faaa526e8238695553d04ed129c48b
parent     44b781cfe0b561909686778153915ec2b0ba5a21 (diff)
download   src-9c6d6488faa5a0c98034068ad18597921589e3c3.tar.gz
           src-9c6d6488faa5a0c98034068ad18597921589e3c3.zip
Port the Linux AMD 10G network driver to FreeBSD as axgbe. It is unlikely
we will import a newer version of the Linux code, so the linuxkpi was not
used. This is still missing 10G support, and multicast has not been tested.

Reviewed by:		gnn
Obtained from:		ABT Systems Ltd
Sponsored by:		SoftIron Inc
Differential Revision:	https://reviews.freebsd.org/D8549
Notes:
    svn path=/head/; revision=313768
-rw-r--r--  sys/arm64/conf/GENERIC      |    1
-rw-r--r--  sys/conf/files.arm64        |    5
-rw-r--r--  sys/dev/axgbe/if_axgbe.c    |  619
-rw-r--r--  sys/dev/axgbe/xgbe-common.h |   36
-rw-r--r--  sys/dev/axgbe/xgbe-desc.c   |  469
-rw-r--r--  sys/dev/axgbe/xgbe-dev.c    |  873
-rw-r--r--  sys/dev/axgbe/xgbe-drv.c    | 1369
-rw-r--r--  sys/dev/axgbe/xgbe-mdio.c   |  257
-rw-r--r--  sys/dev/axgbe/xgbe.h        |  226
-rw-r--r--  sys/dev/axgbe/xgbe_osdep.h  |  188
10 files changed, 1340 insertions, 2703 deletions
diff --git a/sys/arm64/conf/GENERIC b/sys/arm64/conf/GENERIC
index 790c02a2d369..c25304511def 100644
--- a/sys/arm64/conf/GENERIC
+++ b/sys/arm64/conf/GENERIC
@@ -119,6 +119,7 @@ options PCI_IOV # PCI SR-IOV support
device mii
device miibus # MII bus support
device awg # Allwinner EMAC Gigabit Ethernet
+device axgbe # AMD Opteron A1100 integrated NIC
device em # Intel PRO/1000 Gigabit Ethernet Family
device ix # Intel 10Gb Ethernet Family
device msk # Marvell/SysKonnect Yukon II Gigabit Ethernet
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
index 94bc6e390ae5..cb2b84ca522e 100644
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -146,6 +146,11 @@ crypto/blowfish/bf_enc.c optional crypto | ipsec | ipsec_support
crypto/des/des_enc.c optional crypto | ipsec | ipsec_support | netsmb
dev/acpica/acpi_if.m optional acpi
dev/ahci/ahci_generic.c optional ahci
+dev/axgbe/if_axgbe.c optional axgbe
+dev/axgbe/xgbe-desc.c optional axgbe
+dev/axgbe/xgbe-dev.c optional axgbe
+dev/axgbe/xgbe-drv.c optional axgbe
+dev/axgbe/xgbe-mdio.c optional axgbe
dev/cpufreq/cpufreq_dt.c optional cpufreq fdt
dev/hwpmc/hwpmc_arm64.c optional hwpmc
dev/hwpmc/hwpmc_arm64_md.c optional hwpmc
diff --git a/sys/dev/axgbe/if_axgbe.c b/sys/dev/axgbe/if_axgbe.c
new file mode 100644
index 000000000000..9544394fd3e8
--- /dev/null
+++ b/sys/dev/axgbe/if_axgbe.c
@@ -0,0 +1,619 @@
+/*-
+ * Copyright (c) 2016,2017 SoftIron Inc.
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * the sponsorship of SoftIron Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/queue.h>
+#include <sys/rman.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sx.h>
+#include <sys/taskqueue.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <machine/bus.h>
+
+#include "miibus_if.h"
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static device_probe_t axgbe_probe;
+static device_attach_t axgbe_attach;
+
+struct axgbe_softc {
+ /* Must be first */
+ struct xgbe_prv_data prv;
+
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ struct ifmedia media;
+};
+
+static struct ofw_compat_data compat_data[] = {
+ { "amd,xgbe-seattle-v1a", true },
+ { NULL, false }
+};
+
+static struct resource_spec old_phy_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Rx/Tx regs */
+ { SYS_RES_MEMORY, 1, RF_ACTIVE }, /* Integration regs */
+ { SYS_RES_MEMORY, 2, RF_ACTIVE }, /* Integration regs */
+ { SYS_RES_IRQ, 0, RF_ACTIVE }, /* Interrupt */
+ { -1, 0 }
+};
+
+static struct resource_spec old_mac_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* MAC regs */
+ { SYS_RES_MEMORY, 1, RF_ACTIVE }, /* PCS regs */
+ { SYS_RES_IRQ, 0, RF_ACTIVE }, /* Device interrupt */
+ /* Per-channel interrupts */
+ { SYS_RES_IRQ, 1, RF_ACTIVE | RF_OPTIONAL },
+ { SYS_RES_IRQ, 2, RF_ACTIVE | RF_OPTIONAL },
+ { SYS_RES_IRQ, 3, RF_ACTIVE | RF_OPTIONAL },
+ { SYS_RES_IRQ, 4, RF_ACTIVE | RF_OPTIONAL },
+ { -1, 0 }
+};
+
+static struct resource_spec mac_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* MAC regs */
+ { SYS_RES_MEMORY, 1, RF_ACTIVE }, /* PCS regs */
+ { SYS_RES_MEMORY, 2, RF_ACTIVE }, /* Rx/Tx regs */
+ { SYS_RES_MEMORY, 3, RF_ACTIVE }, /* Integration regs */
+ { SYS_RES_MEMORY, 4, RF_ACTIVE }, /* Integration regs */
+ { SYS_RES_IRQ, 0, RF_ACTIVE }, /* Device interrupt */
+ /* Per-channel and auto-negotiation interrupts */
+ { SYS_RES_IRQ, 1, RF_ACTIVE },
+ { SYS_RES_IRQ, 2, RF_ACTIVE | RF_OPTIONAL },
+ { SYS_RES_IRQ, 3, RF_ACTIVE | RF_OPTIONAL },
+ { SYS_RES_IRQ, 4, RF_ACTIVE | RF_OPTIONAL },
+ { SYS_RES_IRQ, 5, RF_ACTIVE | RF_OPTIONAL },
+ { -1, 0 }
+};
+
+MALLOC_DEFINE(M_AXGBE, "axgbe", "axgbe data");
+
+static void
+axgbe_init(void *p)
+{
+ struct axgbe_softc *sc;
+ struct ifnet *ifp;
+
+ sc = p;
+ ifp = sc->prv.netdev;
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ return;
+
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+}
+
+static int
+axgbe_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
+{
+ struct axgbe_softc *sc = ifp->if_softc;
+ struct ifreq *ifr = (struct ifreq *)data;
+ int error;
+
+ switch(command) {
+ case SIOCSIFMTU:
+ if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU_JUMBO)
+ error = EINVAL;
+ else
+ error = xgbe_change_mtu(ifp, ifr->ifr_mtu);
+ break;
+ case SIOCSIFFLAGS:
+ error = 0;
+ break;
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
+ break;
+ default:
+ error = ether_ioctl(ifp, command, data);
+ break;
+ }
+
+ return (error);
+}
+
+static void
+axgbe_qflush(struct ifnet *ifp)
+{
+
+ if_qflush(ifp);
+}
+
+static int
+axgbe_media_change(struct ifnet *ifp)
+{
+ struct axgbe_softc *sc;
+ int cur_media;
+
+ sc = ifp->if_softc;
+
+ sx_xlock(&sc->prv.an_mutex);
+ cur_media = sc->media.ifm_cur->ifm_media;
+
+ switch (IFM_SUBTYPE(cur_media)) {
+ case IFM_10G_KR:
+ sc->prv.phy.speed = SPEED_10000;
+ sc->prv.phy.autoneg = AUTONEG_DISABLE;
+ break;
+ case IFM_2500_KX:
+ sc->prv.phy.speed = SPEED_2500;
+ sc->prv.phy.autoneg = AUTONEG_DISABLE;
+ break;
+ case IFM_1000_KX:
+ sc->prv.phy.speed = SPEED_1000;
+ sc->prv.phy.autoneg = AUTONEG_DISABLE;
+ break;
+ case IFM_AUTO:
+ sc->prv.phy.autoneg = AUTONEG_ENABLE;
+ break;
+ }
+ sx_xunlock(&sc->prv.an_mutex);
+
+ return (-sc->prv.phy_if.phy_config_aneg(&sc->prv));
+}
+
+static void
+axgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ struct axgbe_softc *sc;
+
+ sc = ifp->if_softc;
+
+ ifmr->ifm_status = IFM_AVALID;
+ if (!sc->prv.phy.link)
+ return;
+
+ ifmr->ifm_status |= IFM_ACTIVE;
+ ifmr->ifm_active = IFM_ETHER;
+
+ if (sc->prv.phy.duplex == DUPLEX_FULL)
+ ifmr->ifm_active |= IFM_FDX;
+ else
+ ifmr->ifm_active |= IFM_HDX;
+
+ switch (sc->prv.phy.speed) {
+ case SPEED_10000:
+ ifmr->ifm_active |= IFM_10G_KR;
+ break;
+ case SPEED_2500:
+ ifmr->ifm_active |= IFM_2500_KX;
+ break;
+ case SPEED_1000:
+ ifmr->ifm_active |= IFM_1000_KX;
+ break;
+ }
+}
+
+static uint64_t
+axgbe_get_counter(struct ifnet *ifp, ift_counter c)
+{
+ struct xgbe_prv_data *pdata = ifp->if_softc;
+ struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
+
+ DBGPR("-->%s\n", __func__);
+
+ pdata->hw_if.read_mmc_stats(pdata);
+
+ switch(c) {
+ case IFCOUNTER_IPACKETS:
+ return (pstats->rxframecount_gb);
+ case IFCOUNTER_IERRORS:
+ return (pstats->rxframecount_gb -
+ pstats->rxbroadcastframes_g -
+ pstats->rxmulticastframes_g -
+ pstats->rxunicastframes_g);
+ case IFCOUNTER_OPACKETS:
+ return (pstats->txframecount_gb);
+ case IFCOUNTER_OERRORS:
+ return (pstats->txframecount_gb - pstats->txframecount_g);
+ case IFCOUNTER_IBYTES:
+ return (pstats->rxoctetcount_gb);
+ case IFCOUNTER_OBYTES:
+ return (pstats->txoctetcount_gb);
+ default:
+ return (if_get_counter_default(ifp, c));
+ }
+}
+
+static int
+axgbe_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
+ return (ENXIO);
+
+ device_set_desc(dev, "AMD 10 Gigabit Ethernet");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+axgbe_get_optional_prop(device_t dev, phandle_t node, const char *name,
+ int *data, size_t len)
+{
+
+ if (!OF_hasprop(node, name))
+ return (-1);
+
+ if (OF_getencprop(node, name, data, len) <= 0) {
+ device_printf(dev,"%s property is invalid\n", name);
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static int
+axgbe_attach(device_t dev)
+{
+ struct axgbe_softc *sc;
+ struct ifnet *ifp;
+ pcell_t phy_handle;
+ device_t phydev;
+ phandle_t node, phy_node;
+ struct resource *mac_res[11];
+ struct resource *phy_res[4];
+ ssize_t len;
+ int error, i, j;
+
+ sc = device_get_softc(dev);
+
+ node = ofw_bus_get_node(dev);
+ if (OF_getencprop(node, "phy-handle", &phy_handle,
+ sizeof(phy_handle)) <= 0) {
+ phy_node = node;
+
+ if (bus_alloc_resources(dev, mac_spec, mac_res)) {
+ device_printf(dev,
+ "could not allocate phy resources\n");
+ return (ENXIO);
+ }
+
+ sc->prv.xgmac_res = mac_res[0];
+ sc->prv.xpcs_res = mac_res[1];
+ sc->prv.rxtx_res = mac_res[2];
+ sc->prv.sir0_res = mac_res[3];
+ sc->prv.sir1_res = mac_res[4];
+
+ sc->prv.dev_irq_res = mac_res[5];
+ sc->prv.per_channel_irq = OF_hasprop(node,
+ XGBE_DMA_IRQS_PROPERTY);
+ for (i = 0, j = 6; j < nitems(mac_res) - 1 &&
+ mac_res[j + 1] != NULL; i++, j++) {
+ if (sc->prv.per_channel_irq) {
+ sc->prv.chan_irq_res[i] = mac_res[j];
+ }
+ }
+
+ /* The last entry is the auto-negotiation interrupt */
+ sc->prv.an_irq_res = mac_res[j];
+ } else {
+ phydev = OF_device_from_xref(phy_handle);
+ phy_node = ofw_bus_get_node(phydev);
+
+ if (bus_alloc_resources(phydev, old_phy_spec, phy_res)) {
+ device_printf(dev,
+ "could not allocate phy resources\n");
+ return (ENXIO);
+ }
+
+ if (bus_alloc_resources(dev, old_mac_spec, mac_res)) {
+ device_printf(dev,
+ "could not allocate mac resources\n");
+ return (ENXIO);
+ }
+
+ sc->prv.rxtx_res = phy_res[0];
+ sc->prv.sir0_res = phy_res[1];
+ sc->prv.sir1_res = phy_res[2];
+ sc->prv.an_irq_res = phy_res[3];
+
+ sc->prv.xgmac_res = mac_res[0];
+ sc->prv.xpcs_res = mac_res[1];
+ sc->prv.dev_irq_res = mac_res[2];
+ sc->prv.per_channel_irq = OF_hasprop(node,
+ XGBE_DMA_IRQS_PROPERTY);
+ if (sc->prv.per_channel_irq) {
+ for (i = 0, j = 3; i < nitems(sc->prv.chan_irq_res) &&
+ mac_res[j] != NULL; i++, j++) {
+ sc->prv.chan_irq_res[i] = mac_res[j];
+ }
+ }
+ }
+
+ if ((len = OF_getproplen(node, "mac-address")) < 0) {
+ device_printf(dev, "No mac-address property\n");
+ return (EINVAL);
+ }
+
+ if (len != ETHER_ADDR_LEN)
+ return (EINVAL);
+
+ OF_getprop(node, "mac-address", sc->mac_addr, ETHER_ADDR_LEN);
+
+ sc->prv.netdev = ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ device_printf(dev, "Cannot alloc ifnet\n");
+ return (ENXIO);
+ }
+
+ sc->prv.dev = dev;
+ sc->prv.dmat = bus_get_dma_tag(dev);
+ sc->prv.phy.advertising = ADVERTISED_10000baseKR_Full |
+ ADVERTISED_1000baseKX_Full;
+
+
+ /*
+ * Read the needed properties from the phy node.
+ */
+
+ /* This is documented as optional, but Linux requires it */
+ if (OF_getencprop(phy_node, XGBE_SPEEDSET_PROPERTY, &sc->prv.speed_set,
+ sizeof(sc->prv.speed_set)) <= 0) {
+ device_printf(dev, "%s property is missing\n",
+ XGBE_SPEEDSET_PROPERTY);
+ return (EINVAL);
+ }
+
+ error = axgbe_get_optional_prop(dev, phy_node, XGBE_BLWC_PROPERTY,
+ sc->prv.serdes_blwc, sizeof(sc->prv.serdes_blwc));
+ if (error > 0) {
+ return (error);
+ } else if (error < 0) {
+ sc->prv.serdes_blwc[0] = XGBE_SPEED_1000_BLWC;
+ sc->prv.serdes_blwc[1] = XGBE_SPEED_2500_BLWC;
+ sc->prv.serdes_blwc[2] = XGBE_SPEED_10000_BLWC;
+ }
+
+ error = axgbe_get_optional_prop(dev, phy_node, XGBE_CDR_RATE_PROPERTY,
+ sc->prv.serdes_cdr_rate, sizeof(sc->prv.serdes_cdr_rate));
+ if (error > 0) {
+ return (error);
+ } else if (error < 0) {
+ sc->prv.serdes_cdr_rate[0] = XGBE_SPEED_1000_CDR;
+ sc->prv.serdes_cdr_rate[1] = XGBE_SPEED_2500_CDR;
+ sc->prv.serdes_cdr_rate[2] = XGBE_SPEED_10000_CDR;
+ }
+
+ error = axgbe_get_optional_prop(dev, phy_node, XGBE_PQ_SKEW_PROPERTY,
+ sc->prv.serdes_pq_skew, sizeof(sc->prv.serdes_pq_skew));
+ if (error > 0) {
+ return (error);
+ } else if (error < 0) {
+ sc->prv.serdes_pq_skew[0] = XGBE_SPEED_1000_PQ;
+ sc->prv.serdes_pq_skew[1] = XGBE_SPEED_2500_PQ;
+ sc->prv.serdes_pq_skew[2] = XGBE_SPEED_10000_PQ;
+ }
+
+ error = axgbe_get_optional_prop(dev, phy_node, XGBE_TX_AMP_PROPERTY,
+ sc->prv.serdes_tx_amp, sizeof(sc->prv.serdes_tx_amp));
+ if (error > 0) {
+ return (error);
+ } else if (error < 0) {
+ sc->prv.serdes_tx_amp[0] = XGBE_SPEED_1000_TXAMP;
+ sc->prv.serdes_tx_amp[1] = XGBE_SPEED_2500_TXAMP;
+ sc->prv.serdes_tx_amp[2] = XGBE_SPEED_10000_TXAMP;
+ }
+
+ error = axgbe_get_optional_prop(dev, phy_node, XGBE_DFE_CFG_PROPERTY,
+ sc->prv.serdes_dfe_tap_cfg, sizeof(sc->prv.serdes_dfe_tap_cfg));
+ if (error > 0) {
+ return (error);
+ } else if (error < 0) {
+ sc->prv.serdes_dfe_tap_cfg[0] = XGBE_SPEED_1000_DFE_TAP_CONFIG;
+ sc->prv.serdes_dfe_tap_cfg[1] = XGBE_SPEED_2500_DFE_TAP_CONFIG;
+ sc->prv.serdes_dfe_tap_cfg[2] = XGBE_SPEED_10000_DFE_TAP_CONFIG;
+ }
+
+ error = axgbe_get_optional_prop(dev, phy_node, XGBE_DFE_ENA_PROPERTY,
+ sc->prv.serdes_dfe_tap_ena, sizeof(sc->prv.serdes_dfe_tap_ena));
+ if (error > 0) {
+ return (error);
+ } else if (error < 0) {
+ sc->prv.serdes_dfe_tap_ena[0] = XGBE_SPEED_1000_DFE_TAP_ENABLE;
+ sc->prv.serdes_dfe_tap_ena[1] = XGBE_SPEED_2500_DFE_TAP_ENABLE;
+ sc->prv.serdes_dfe_tap_ena[2] = XGBE_SPEED_10000_DFE_TAP_ENABLE;
+ }
+
+ /* Check if the NIC is DMA coherent */
+ sc->prv.coherent = OF_hasprop(node, "dma-coherent");
+ if (sc->prv.coherent) {
+ sc->prv.axdomain = XGBE_DMA_OS_AXDOMAIN;
+ sc->prv.arcache = XGBE_DMA_OS_ARCACHE;
+ sc->prv.awcache = XGBE_DMA_OS_AWCACHE;
+ } else {
+ sc->prv.axdomain = XGBE_DMA_SYS_AXDOMAIN;
+ sc->prv.arcache = XGBE_DMA_SYS_ARCACHE;
+ sc->prv.awcache = XGBE_DMA_SYS_AWCACHE;
+ }
+
+ /* Create the lock & workqueues */
+ spin_lock_init(&sc->prv.xpcs_lock);
+ sc->prv.dev_workqueue = taskqueue_create("axgbe", M_WAITOK,
+ taskqueue_thread_enqueue, &sc->prv.dev_workqueue);
+ taskqueue_start_threads(&sc->prv.dev_workqueue, 1, PI_NET,
+ "axgbe taskq");
+
+ /* Set the needed pointers */
+ xgbe_init_function_ptrs_phy(&sc->prv.phy_if);
+ xgbe_init_function_ptrs_dev(&sc->prv.hw_if);
+ xgbe_init_function_ptrs_desc(&sc->prv.desc_if);
+
+ /* Reset the hardware */
+ sc->prv.hw_if.exit(&sc->prv);
+
+ /* Read the hardware features */
+ xgbe_get_all_hw_features(&sc->prv);
+
+ /* Set default values */
+ sc->prv.pblx8 = DMA_PBL_X8_ENABLE;
+ sc->prv.tx_desc_count = XGBE_TX_DESC_CNT;
+ sc->prv.tx_sf_mode = MTL_TSF_ENABLE;
+ sc->prv.tx_threshold = MTL_TX_THRESHOLD_64;
+ sc->prv.tx_pbl = DMA_PBL_16;
+ sc->prv.tx_osp_mode = DMA_OSP_ENABLE;
+ sc->prv.rx_desc_count = XGBE_RX_DESC_CNT;
+ sc->prv.rx_sf_mode = MTL_RSF_DISABLE;
+ sc->prv.rx_threshold = MTL_RX_THRESHOLD_64;
+ sc->prv.rx_pbl = DMA_PBL_16;
+ sc->prv.pause_autoneg = 1;
+ sc->prv.tx_pause = 1;
+ sc->prv.rx_pause = 1;
+ sc->prv.phy_speed = SPEED_UNKNOWN;
+ sc->prv.power_down = 0;
+
+ /* TODO: Limit to min(ncpus, hw rings) */
+ sc->prv.tx_ring_count = 1;
+ sc->prv.tx_q_count = 1;
+ sc->prv.rx_ring_count = 1;
+ sc->prv.rx_q_count = sc->prv.hw_feat.rx_q_cnt;
+
+ /* Init the PHY */
+ sc->prv.phy_if.phy_init(&sc->prv);
+
+ /* Set the coalescing */
+ xgbe_init_rx_coalesce(&sc->prv);
+ xgbe_init_tx_coalesce(&sc->prv);
+
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+ ifp->if_init = axgbe_init;
+ ifp->if_softc = sc;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = axgbe_ioctl;
+ ifp->if_transmit = xgbe_xmit;
+ ifp->if_qflush = axgbe_qflush;
+ ifp->if_get_counter = axgbe_get_counter;
+
+ /* TODO: Support HW offload */
+ ifp->if_capabilities = 0;
+ ifp->if_capenable = 0;
+ ifp->if_hwassist = 0;
+
+ ether_ifattach(ifp, sc->mac_addr);
+
+ ifmedia_init(&sc->media, IFM_IMASK, axgbe_media_change,
+ axgbe_media_status);
+#ifdef notyet
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
+#endif
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
+
+ set_bit(XGBE_DOWN, &sc->prv.dev_state);
+
+ if (xgbe_open(ifp) < 0) {
+ device_printf(dev, "ndo_open failed\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static device_method_t axgbe_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, axgbe_probe),
+ DEVMETHOD(device_attach, axgbe_attach),
+
+ { 0, 0 }
+};
+
+static devclass_t axgbe_devclass;
+
+DEFINE_CLASS_0(axgbe, axgbe_driver, axgbe_methods,
+ sizeof(struct axgbe_softc));
+DRIVER_MODULE(axgbe, simplebus, axgbe_driver, axgbe_devclass, 0, 0);
+
+
+static struct ofw_compat_data phy_compat_data[] = {
+ { "amd,xgbe-phy-seattle-v1a", true },
+ { NULL, false }
+};
+
+static int
+axgbephy_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_search_compatible(dev, phy_compat_data)->ocd_data)
+ return (ENXIO);
+
+ device_set_desc(dev, "AMD 10 Gigabit Ethernet");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+axgbephy_attach(device_t dev)
+{
+ phandle_t node;
+
+ node = ofw_bus_get_node(dev);
+ OF_device_register_xref(OF_xref_from_node(node), dev);
+
+ return (0);
+}
+
+static device_method_t axgbephy_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, axgbephy_probe),
+ DEVMETHOD(device_attach, axgbephy_attach),
+
+ { 0, 0 }
+};
+
+static devclass_t axgbephy_devclass;
+
+DEFINE_CLASS_0(axgbephy, axgbephy_driver, axgbephy_methods, 0);
+EARLY_DRIVER_MODULE(axgbephy, simplebus, axgbephy_driver, axgbephy_devclass,
+ 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE);
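
The axgbe/axgbephy split above hinges on the OFW cross-reference API: axgbephy
attaches in an early bus pass (BUS_PASS_RESOURCE) purely to publish its FDT
node, and axgbe_attach later resolves the "phy-handle" property back to that
device_t before allocating the phy's register windows. A minimal sketch of the
handshake, with hypothetical driver names standing in for axgbe/axgbephy:

#include <sys/param.h>
#include <sys/bus.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

/* Provider side: publish this device under its node's xref handle. */
static int
provider_attach(device_t dev)
{
	phandle_t node;

	node = ofw_bus_get_node(dev);
	OF_device_register_xref(OF_xref_from_node(node), dev);
	return (0);
}

/* Consumer side: map a phandle property back to the provider device. */
static device_t
consumer_find_provider(device_t dev)
{
	phandle_t node;
	pcell_t xref;

	node = ofw_bus_get_node(dev);
	if (OF_getencprop(node, "phy-handle", &xref, sizeof(xref)) <= 0)
		return (NULL);
	return (OF_device_from_xref(xref));
}

The early bus pass matters: the provider must have registered its xref before
any consumer's attach runs, which is why axgbephy uses EARLY_DRIVER_MODULE
while axgbe uses the ordinary DRIVER_MODULE.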
diff --git a/sys/dev/axgbe/xgbe-common.h b/sys/dev/axgbe/xgbe-common.h
index bbef95973c27..bc081352bf53 100644
--- a/sys/dev/axgbe/xgbe-common.h
+++ b/sys/dev/axgbe/xgbe-common.h
@@ -112,11 +112,16 @@
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
*/
#ifndef __XGBE_COMMON_H__
#define __XGBE_COMMON_H__
+#include <sys/bus.h>
+#include <sys/rman.h>
+
/* DMA register offsets */
#define DMA_MR 0x3000
#define DMA_SBMR 0x3004
@@ -1123,7 +1128,7 @@ do { \
* register definitions formed using the input names
*/
#define XGMAC_IOREAD(_pdata, _reg) \
- ioread32((_pdata)->xgmac_regs + _reg)
+ bus_read_4((_pdata)->xgmac_res, _reg)
#define XGMAC_IOREAD_BITS(_pdata, _reg, _field) \
GET_BITS(XGMAC_IOREAD((_pdata), _reg), \
@@ -1131,7 +1136,7 @@ do { \
_reg##_##_field##_WIDTH)
#define XGMAC_IOWRITE(_pdata, _reg, _val) \
- iowrite32((_val), (_pdata)->xgmac_regs + _reg)
+ bus_write_4((_pdata)->xgmac_res, _reg, (_val))
#define XGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
do { \
@@ -1147,7 +1152,7 @@ do { \
* base register value is calculated by the queue or traffic class number
*/
#define XGMAC_MTL_IOREAD(_pdata, _n, _reg) \
- ioread32((_pdata)->xgmac_regs + \
+ bus_read_4((_pdata)->xgmac_res, \
MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
#define XGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
@@ -1156,8 +1161,8 @@ do { \
_reg##_##_field##_WIDTH)
#define XGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
- iowrite32((_val), (_pdata)->xgmac_regs + \
- MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
+ bus_write_4((_pdata)->xgmac_res, \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg, (_val))
#define XGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
do { \
@@ -1173,7 +1178,7 @@ do { \
* base register value is obtained from the ring
*/
#define XGMAC_DMA_IOREAD(_channel, _reg) \
- ioread32((_channel)->dma_regs + _reg)
+ bus_space_read_4((_channel)->dma_tag, (_channel)->dma_handle, _reg)
#define XGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
GET_BITS(XGMAC_DMA_IOREAD((_channel), _reg), \
@@ -1181,7 +1186,8 @@ do { \
_reg##_##_field##_WIDTH)
#define XGMAC_DMA_IOWRITE(_channel, _reg, _val) \
- iowrite32((_val), (_channel)->dma_regs + _reg)
+ bus_space_write_4((_channel)->dma_tag, (_channel)->dma_handle, \
+ _reg, (_val))
#define XGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
do { \
@@ -1196,10 +1202,10 @@ do { \
* within the register values of XPCS registers.
*/
#define XPCS_IOWRITE(_pdata, _off, _val) \
- iowrite32(_val, (_pdata)->xpcs_regs + (_off))
+ bus_write_4((_pdata)->xpcs_res, (_off), _val)
#define XPCS_IOREAD(_pdata, _off) \
- ioread32((_pdata)->xpcs_regs + (_off))
+ bus_read_4((_pdata)->xpcs_res, (_off))
/* Macros for building, reading or writing register values or bits
* within the register values of SerDes integration registers.
@@ -1215,7 +1221,7 @@ do { \
_prefix##_##_field##_WIDTH, (_val))
#define XSIR0_IOREAD(_pdata, _reg) \
- ioread16((_pdata)->sir0_regs + _reg)
+ bus_read_2((_pdata)->sir0_res, _reg)
#define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \
GET_BITS(XSIR0_IOREAD((_pdata), _reg), \
@@ -1223,7 +1229,7 @@ do { \
_reg##_##_field##_WIDTH)
#define XSIR0_IOWRITE(_pdata, _reg, _val) \
- iowrite16((_val), (_pdata)->sir0_regs + _reg)
+ bus_write_2((_pdata)->sir0_res, _reg, (_val))
#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \
do { \
@@ -1235,7 +1241,7 @@ do { \
} while (0)
#define XSIR1_IOREAD(_pdata, _reg) \
- ioread16((_pdata)->sir1_regs + _reg)
+ bus_read_2((_pdata)->sir1_res, _reg)
#define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \
GET_BITS(XSIR1_IOREAD((_pdata), _reg), \
@@ -1243,7 +1249,7 @@ do { \
_reg##_##_field##_WIDTH)
#define XSIR1_IOWRITE(_pdata, _reg, _val) \
- iowrite16((_val), (_pdata)->sir1_regs + _reg)
+ bus_write_2((_pdata)->sir1_res, _reg, (_val))
#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \
do { \
@@ -1258,7 +1264,7 @@ do { \
* within the register values of SerDes RxTx registers.
*/
#define XRXTX_IOREAD(_pdata, _reg) \
- ioread16((_pdata)->rxtx_regs + _reg)
+ bus_read_2((_pdata)->rxtx_res, _reg)
#define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \
GET_BITS(XRXTX_IOREAD((_pdata), _reg), \
@@ -1266,7 +1272,7 @@ do { \
_reg##_##_field##_WIDTH)
#define XRXTX_IOWRITE(_pdata, _reg, _val) \
- iowrite16((_val), (_pdata)->rxtx_regs + _reg)
+ bus_write_2((_pdata)->rxtx_res, _reg, (_val))
#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \
do { \
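
The macro rewrites in this header are mechanical but worth spelling out:
Linux's ioread32()/iowrite32() take a pre-mapped pointer plus offset
arithmetic, while FreeBSD's bus_read_4()/bus_write_4() take the struct
resource and an offset, keeping the bus_space tag/handle pair hidden inside
the resource. A short sketch of the idiom, using a hypothetical register
FOO_CR rather than a real XGMAC offset:

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <machine/bus.h>

#define	FOO_CR	0x10	/* hypothetical 32-bit control register */

static uint32_t
foo_read_cr(struct resource *res)
{
	/* Linux equivalent: ioread32(regs + FOO_CR) */
	return (bus_read_4(res, FOO_CR));
}

static void
foo_set_cr_bits(struct resource *res, uint32_t bits)
{
	uint32_t val;

	/* Read-modify-write, as the *_IOWRITE_BITS macros above do. */
	val = bus_read_4(res, FOO_CR);
	bus_write_4(res, FOO_CR, val | bits);
}

The per-channel DMA accessors use the lower-level bus_space_read_4()/
bus_space_write_4() instead, because each channel carves a sub-region out of
the parent resource and so carries its own tag/handle pair.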
diff --git a/sys/dev/axgbe/xgbe-desc.c b/sys/dev/axgbe/xgbe-desc.c
index b3bc87fe3764..a2f1f98881e9 100644
--- a/sys/dev/axgbe/xgbe-desc.c
+++ b/sys/dev/axgbe/xgbe-desc.c
@@ -114,6 +114,9 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
#include "xgbe.h"
#include "xgbe-common.h"
@@ -128,45 +131,29 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
if (!ring)
return;
+ bus_dmamap_destroy(ring->mbuf_dmat, ring->mbuf_map);
+ bus_dma_tag_destroy(ring->mbuf_dmat);
+
+ ring->mbuf_map = NULL;
+ ring->mbuf_dmat = NULL;
+
if (ring->rdata) {
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
xgbe_unmap_rdata(pdata, rdata);
}
- kfree(ring->rdata);
+ free(ring->rdata, M_AXGBE);
ring->rdata = NULL;
}
- if (ring->rx_hdr_pa.pages) {
- dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
- ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
- put_page(ring->rx_hdr_pa.pages);
-
- ring->rx_hdr_pa.pages = NULL;
- ring->rx_hdr_pa.pages_len = 0;
- ring->rx_hdr_pa.pages_offset = 0;
- ring->rx_hdr_pa.pages_dma = 0;
- }
-
- if (ring->rx_buf_pa.pages) {
- dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
- ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
- put_page(ring->rx_buf_pa.pages);
+ bus_dmamap_unload(ring->rdesc_dmat, ring->rdesc_map);
+ bus_dmamem_free(ring->rdesc_dmat, ring->rdesc, ring->rdesc_map);
+ bus_dma_tag_destroy(ring->rdesc_dmat);
- ring->rx_buf_pa.pages = NULL;
- ring->rx_buf_pa.pages_len = 0;
- ring->rx_buf_pa.pages_offset = 0;
- ring->rx_buf_pa.pages_dma = 0;
- }
-
- if (ring->rdesc) {
- dma_free_coherent(pdata->dev,
- (sizeof(struct xgbe_ring_desc) *
- ring->rdesc_count),
- ring->rdesc, ring->rdesc_dma);
- ring->rdesc = NULL;
- }
+ ring->rdesc_map = NULL;
+ ring->rdesc_dmat = NULL;
+ ring->rdesc = NULL;
}
static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
@@ -185,32 +172,71 @@ static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_free_ring_resources\n");
}
+static void xgbe_ring_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg,
+ int error)
+{
+ if (error)
+ return;
+ *(bus_addr_t *) arg = segs->ds_addr;
+}
+
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring, unsigned int rdesc_count)
{
+ bus_size_t len;
+ int err, flags;
+
DBGPR("-->xgbe_init_ring\n");
if (!ring)
return 0;
+ flags = 0;
+ if (pdata->coherent)
+ flags = BUS_DMA_COHERENT;
+
/* Descriptors */
ring->rdesc_count = rdesc_count;
- ring->rdesc = dma_alloc_coherent(pdata->dev,
- (sizeof(struct xgbe_ring_desc) *
- rdesc_count), &ring->rdesc_dma,
- GFP_KERNEL);
- if (!ring->rdesc)
- return -ENOMEM;
+ len = sizeof(struct xgbe_ring_desc) * rdesc_count;
+ err = bus_dma_tag_create(pdata->dmat, 512, 0, BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, flags, NULL, NULL,
+ &ring->rdesc_dmat);
+ if (err != 0) {
+ printf("Unable to create the DMA tag: %d\n", err);
+ return -err;
+ }
+
+ err = bus_dmamem_alloc(ring->rdesc_dmat, (void **)&ring->rdesc,
+ BUS_DMA_WAITOK | BUS_DMA_COHERENT, &ring->rdesc_map);
+ if (err != 0) {
+ bus_dma_tag_destroy(ring->rdesc_dmat);
+ printf("Unable to allocate DMA memory: %d\n", err);
+ return -err;
+ }
+ err = bus_dmamap_load(ring->rdesc_dmat, ring->rdesc_map, ring->rdesc,
+ len, xgbe_ring_dmamap_cb, &ring->rdesc_paddr, 0);
+ if (err != 0) {
+ bus_dmamem_free(ring->rdesc_dmat, ring->rdesc, ring->rdesc_map);
+ bus_dma_tag_destroy(ring->rdesc_dmat);
+ printf("Unable to load DMA memory\n");
+ return -err;
+ }
/* Descriptor information */
- ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
- GFP_KERNEL);
- if (!ring->rdata)
- return -ENOMEM;
+ ring->rdata = malloc(rdesc_count * sizeof(struct xgbe_ring_data),
+ M_AXGBE, M_WAITOK | M_ZERO);
- netif_dbg(pdata, drv, pdata->netdev,
- "rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
- ring->rdesc, &ring->rdesc_dma, ring->rdata);
+ /* Create the space DMA tag for mbufs */
+ err = bus_dma_tag_create(pdata->dmat, 1, 0, BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR, NULL, NULL, XGBE_TX_MAX_BUF_SIZE * rdesc_count,
+ rdesc_count, XGBE_TX_MAX_BUF_SIZE, flags, NULL, NULL,
+ &ring->mbuf_dmat);
+ if (err != 0)
+ return -err;
+
+ err = bus_dmamap_create(ring->mbuf_dmat, 0, &ring->mbuf_map);
+ if (err != 0)
+ return -err;
DBGPR("<--xgbe_init_ring\n");
@@ -227,25 +253,17 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
- netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
- channel->name);
-
ret = xgbe_init_ring(pdata, channel->tx_ring,
pdata->tx_desc_count);
if (ret) {
- netdev_alert(pdata->netdev,
- "error initializing Tx ring\n");
+ printf("error initializing Tx ring\n");
goto err_ring;
}
- netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
- channel->name);
-
ret = xgbe_init_ring(pdata, channel->rx_ring,
pdata->rx_desc_count);
if (ret) {
- netdev_alert(pdata->netdev,
- "error initializing Rx ring\n");
+ printf("error initializing Rx ring\n");
goto err_ring;
}
}
@@ -260,93 +278,58 @@ err_ring:
return ret;
}
-static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
- struct xgbe_page_alloc *pa, gfp_t gfp, int order)
-{
- struct page *pages = NULL;
- dma_addr_t pages_dma;
- int ret;
-
- /* Try to obtain pages, decreasing order if necessary */
- gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
- while (order >= 0) {
- pages = alloc_pages(gfp, order);
- if (pages)
- break;
-
- order--;
- }
- if (!pages)
- return -ENOMEM;
-
- /* Map the pages */
- pages_dma = dma_map_page(pdata->dev, pages, 0,
- PAGE_SIZE << order, DMA_FROM_DEVICE);
- ret = dma_mapping_error(pdata->dev, pages_dma);
- if (ret) {
- put_page(pages);
- return ret;
- }
-
- pa->pages = pages;
- pa->pages_len = PAGE_SIZE << order;
- pa->pages_offset = 0;
- pa->pages_dma = pages_dma;
-
- return 0;
-}
-
-static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
- struct xgbe_page_alloc *pa,
- unsigned int len)
-{
- get_page(pa->pages);
- bd->pa = *pa;
-
- bd->dma_base = pa->pages_dma;
- bd->dma_off = pa->pages_offset;
- bd->dma_len = len;
-
- pa->pages_offset += len;
- if ((pa->pages_offset + len) > pa->pages_len) {
- /* This data descriptor is responsible for unmapping page(s) */
- bd->pa_unmap = *pa;
-
- /* Get a new allocation next time */
- pa->pages = NULL;
- pa->pages_len = 0;
- pa->pages_offset = 0;
- pa->pages_dma = 0;
- }
-}
-
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring,
struct xgbe_ring_data *rdata)
{
- int order, ret;
+ bus_dmamap_t mbuf_map;
+ bus_dma_segment_t segs[2];
+ struct mbuf *m0, *m1;
+ int err, nsegs;
+
+ m0 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES);
+ if (m0 == NULL)
+ return (-ENOBUFS);
+
+ m1 = m_getjcl(M_NOWAIT, MT_DATA, 0, MCLBYTES);
+ if (m1 == NULL) {
+ m_freem(m0);
+ return (-ENOBUFS);
+ }
+
+ m0->m_next = m1;
+ m0->m_flags |= M_PKTHDR;
+ m0->m_len = MHLEN;
+ m0->m_pkthdr.len = MHLEN + MCLBYTES;
- if (!ring->rx_hdr_pa.pages) {
- ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
- if (ret)
- return ret;
+ m1->m_len = MCLBYTES;
+ m1->m_next = NULL;
+ m1->m_pkthdr.len = MCLBYTES;
+
+ err = bus_dmamap_create(ring->mbuf_dmat, 0, &mbuf_map);
+ if (err != 0) {
+ m_freem(m0);
+ return (-err);
}
- if (!ring->rx_buf_pa.pages) {
- order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
- ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
- order);
- if (ret)
- return ret;
+ err = bus_dmamap_load_mbuf_sg(ring->mbuf_dmat, mbuf_map, m0, segs,
+ &nsegs, BUS_DMA_NOWAIT);
+ if (err != 0) {
+ m_freem(m0);
+ bus_dmamap_destroy(ring->mbuf_dmat, mbuf_map);
+ return (-err);
}
- /* Set up the header page info */
- xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
- XGBE_SKB_ALLOC_SIZE);
+ KASSERT(nsegs == 2,
+ ("xgbe_map_rx_buffer: Unable to handle multiple segments %d",
+ nsegs));
- /* Set up the buffer page info */
- xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
- pdata->rx_buf_size);
+ rdata->mb = m0;
+ rdata->mbuf_free = 0;
+ rdata->mbuf_dmat = ring->mbuf_dmat;
+ rdata->mbuf_map = mbuf_map;
+ rdata->mbuf_hdr_paddr = segs[0].ds_addr;
+ rdata->mbuf_data_paddr = segs[1].ds_addr;
return 0;
}
@@ -358,7 +341,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
struct xgbe_ring *ring;
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
- dma_addr_t rdesc_dma;
+ bus_addr_t rdesc_paddr;
unsigned int i, j;
DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
@@ -370,16 +353,16 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
break;
rdesc = ring->rdesc;
- rdesc_dma = ring->rdesc_dma;
+ rdesc_paddr = ring->rdesc_paddr;
for (j = 0; j < ring->rdesc_count; j++) {
rdata = XGBE_GET_DESC_DATA(ring, j);
rdata->rdesc = rdesc;
- rdata->rdesc_dma = rdesc_dma;
+ rdata->rdata_paddr = rdesc_paddr;
rdesc++;
- rdesc_dma += sizeof(struct xgbe_ring_desc);
+ rdesc_paddr += sizeof(struct xgbe_ring_desc);
}
ring->cur = 0;
@@ -399,7 +382,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
struct xgbe_ring *ring;
struct xgbe_ring_desc *rdesc;
struct xgbe_ring_data *rdata;
- dma_addr_t rdesc_dma;
+ bus_addr_t rdesc_paddr;
unsigned int i, j;
DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
@@ -411,19 +394,19 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
break;
rdesc = ring->rdesc;
- rdesc_dma = ring->rdesc_dma;
+ rdesc_paddr = ring->rdesc_paddr;
for (j = 0; j < ring->rdesc_count; j++) {
rdata = XGBE_GET_DESC_DATA(ring, j);
rdata->rdesc = rdesc;
- rdata->rdesc_dma = rdesc_dma;
+ rdata->rdata_paddr = rdesc_paddr;
if (xgbe_map_rx_buffer(pdata, ring, rdata))
break;
rdesc++;
- rdesc_dma += sizeof(struct xgbe_ring_desc);
+ rdesc_paddr += sizeof(struct xgbe_ring_desc);
}
ring->cur = 0;
@@ -431,78 +414,81 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
hw_if->rx_desc_init(channel);
}
-
- DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
struct xgbe_ring_data *rdata)
{
- if (rdata->skb_dma) {
- if (rdata->mapped_as_page) {
- dma_unmap_page(pdata->dev, rdata->skb_dma,
- rdata->skb_dma_len, DMA_TO_DEVICE);
- } else {
- dma_unmap_single(pdata->dev, rdata->skb_dma,
- rdata->skb_dma_len, DMA_TO_DEVICE);
- }
- rdata->skb_dma = 0;
- rdata->skb_dma_len = 0;
- }
-
- if (rdata->skb) {
- dev_kfree_skb_any(rdata->skb);
- rdata->skb = NULL;
- }
- if (rdata->rx.hdr.pa.pages)
- put_page(rdata->rx.hdr.pa.pages);
+ if (rdata->mbuf_map != NULL)
+ bus_dmamap_destroy(rdata->mbuf_dmat, rdata->mbuf_map);
- if (rdata->rx.hdr.pa_unmap.pages) {
- dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
- rdata->rx.hdr.pa_unmap.pages_len,
- DMA_FROM_DEVICE);
- put_page(rdata->rx.hdr.pa_unmap.pages);
- }
+ if (rdata->mbuf_free)
+ m_freem(rdata->mb);
- if (rdata->rx.buf.pa.pages)
- put_page(rdata->rx.buf.pa.pages);
-
- if (rdata->rx.buf.pa_unmap.pages) {
- dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
- rdata->rx.buf.pa_unmap.pages_len,
- DMA_FROM_DEVICE);
- put_page(rdata->rx.buf.pa_unmap.pages);
- }
+ rdata->mb = NULL;
+ rdata->mbuf_free = 0;
+ rdata->mbuf_hdr_paddr = 0;
+ rdata->mbuf_data_paddr = 0;
+ rdata->mbuf_len = 0;
memset(&rdata->tx, 0, sizeof(rdata->tx));
memset(&rdata->rx, 0, sizeof(rdata->rx));
+}
+
+struct xgbe_map_tx_skb_data {
+ struct xgbe_ring *ring;
+ struct xgbe_packet_data *packet;
+ unsigned int cur_index;
+};
+
+static void xgbe_map_tx_skb_cb(void *callback_arg, bus_dma_segment_t *segs,
+ int nseg, bus_size_t mapsize, int error)
+{
+ struct xgbe_map_tx_skb_data *data;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring *ring;
+ int i;
+
+ if (error != 0)
+ return;
+
+ data = callback_arg;
+ ring = data->ring;
+
+ for (i = 0; i < nseg; i++) {
+ rdata = XGBE_GET_DESC_DATA(ring, data->cur_index);
+
+ KASSERT(segs[i].ds_len <= XGBE_TX_MAX_BUF_SIZE,
+ ("%s: Segment size is too large %ld > %d", __func__,
+ segs[i].ds_len, XGBE_TX_MAX_BUF_SIZE));
+
+ if (i == 0) {
+ rdata->mbuf_dmat = ring->mbuf_dmat;
+ bus_dmamap_create(ring->mbuf_dmat, 0, &ring->mbuf_map);
+ }
+
+ rdata->mbuf_hdr_paddr = 0;
+ rdata->mbuf_data_paddr = segs[i].ds_addr;
+ rdata->mbuf_len = segs[i].ds_len;
- rdata->mapped_as_page = 0;
+ data->packet->length += rdata->mbuf_len;
- if (rdata->state_saved) {
- rdata->state_saved = 0;
- rdata->state.skb = NULL;
- rdata->state.len = 0;
- rdata->state.error = 0;
+ data->cur_index++;
}
}
-static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
+static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct mbuf *m)
{
- struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_map_tx_skb_data cbdata;
struct xgbe_ring_data *rdata;
struct xgbe_packet_data *packet;
- struct skb_frag_struct *frag;
- dma_addr_t skb_dma;
unsigned int start_index, cur_index;
- unsigned int offset, tso, vlan, datalen, len;
- unsigned int i;
+ int err;
DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);
- offset = 0;
start_index = ring->cur;
cur_index = ring->cur;
@@ -510,105 +496,24 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
packet->rdesc_count = 0;
packet->length = 0;
- tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- TSO_ENABLE);
- vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- VLAN_CTAG);
-
- /* Save space for a context descriptor if needed */
- if ((tso && (packet->mss != ring->tx.cur_mss)) ||
- (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
- cur_index++;
- rdata = XGBE_GET_DESC_DATA(ring, cur_index);
-
- if (tso) {
- /* Map the TSO header */
- skb_dma = dma_map_single(pdata->dev, skb->data,
- packet->header_len, DMA_TO_DEVICE);
- if (dma_mapping_error(pdata->dev, skb_dma)) {
- netdev_alert(pdata->netdev, "dma_map_single failed\n");
- goto err_out;
- }
- rdata->skb_dma = skb_dma;
- rdata->skb_dma_len = packet->header_len;
- netif_dbg(pdata, tx_queued, pdata->netdev,
- "skb header: index=%u, dma=%pad, len=%u\n",
- cur_index, &skb_dma, packet->header_len);
-
- offset = packet->header_len;
-
- packet->length += packet->header_len;
-
- cur_index++;
- rdata = XGBE_GET_DESC_DATA(ring, cur_index);
- }
-
- /* Map the (remainder of the) packet */
- for (datalen = skb_headlen(skb) - offset; datalen; ) {
- len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);
-
- skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(pdata->dev, skb_dma)) {
- netdev_alert(pdata->netdev, "dma_map_single failed\n");
- goto err_out;
- }
- rdata->skb_dma = skb_dma;
- rdata->skb_dma_len = len;
- netif_dbg(pdata, tx_queued, pdata->netdev,
- "skb data: index=%u, dma=%pad, len=%u\n",
- cur_index, &skb_dma, len);
+ cbdata.ring = ring;
+ cbdata.packet = packet;
+ cbdata.cur_index = cur_index;
- datalen -= len;
- offset += len;
+ err = bus_dmamap_load_mbuf(ring->mbuf_dmat, ring->mbuf_map, m,
+ xgbe_map_tx_skb_cb, &cbdata, BUS_DMA_NOWAIT);
+ if (err != 0) /* TODO: Undo the mapping */
+ return (-err);
- packet->length += len;
+ cur_index = cbdata.cur_index;
- cur_index++;
- rdata = XGBE_GET_DESC_DATA(ring, cur_index);
- }
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- netif_dbg(pdata, tx_queued, pdata->netdev,
- "mapping frag %u\n", i);
-
- frag = &skb_shinfo(skb)->frags[i];
- offset = 0;
-
- for (datalen = skb_frag_size(frag); datalen; ) {
- len = min_t(unsigned int, datalen,
- XGBE_TX_MAX_BUF_SIZE);
-
- skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
- len, DMA_TO_DEVICE);
- if (dma_mapping_error(pdata->dev, skb_dma)) {
- netdev_alert(pdata->netdev,
- "skb_frag_dma_map failed\n");
- goto err_out;
- }
- rdata->skb_dma = skb_dma;
- rdata->skb_dma_len = len;
- rdata->mapped_as_page = 1;
- netif_dbg(pdata, tx_queued, pdata->netdev,
- "skb frag: index=%u, dma=%pad, len=%u\n",
- cur_index, &skb_dma, len);
-
- datalen -= len;
- offset += len;
-
- packet->length += len;
-
- cur_index++;
- rdata = XGBE_GET_DESC_DATA(ring, cur_index);
- }
- }
-
- /* Save the skb address in the last entry. We always have some data
+ /* Save the mbuf address in the last entry. We always have some data
* that has been mapped so rdata is always advanced past the last
* piece of mapped data - use the entry pointed to by cur_index - 1.
*/
rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
- rdata->skb = skb;
+ rdata->mb = m;
+ rdata->mbuf_free = 1;
/* Save the number of descriptor entries used */
packet->rdesc_count = cur_index - start_index;
@@ -616,16 +521,6 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);
return packet->rdesc_count;
-
-err_out:
- while (start_index < cur_index) {
- rdata = XGBE_GET_DESC_DATA(ring, start_index++);
- xgbe_unmap_rdata(pdata, rdata);
- }
-
- DBGPR("<--xgbe_map_tx_skb: count=0\n");
-
- return 0;
}
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
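
The descriptor-ring allocation above is the standard busdma replacement for
Linux's dma_alloc_coherent(): create a tag constrained to a single physically
contiguous segment, allocate and map the memory, then load the map to learn
the bus address. Because bus_dmamap_load() only reports addresses through a
callback, xgbe_init_ring passes xgbe_ring_dmamap_cb to copy the address out.
A minimal, self-contained sketch of the same sequence (illustrative names,
not driver API):

#include <sys/param.h>
#include <sys/bus.h>

#include <machine/bus.h>

static void
ring_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	/* Runs synchronously for wired kernel memory; stash the address. */
	if (error == 0)
		*(bus_addr_t *)arg = segs[0].ds_addr;
}

static int
ring_alloc_coherent(bus_dma_tag_t parent, bus_size_t len, void **vaddr,
    bus_addr_t *paddr, bus_dma_tag_t *tag, bus_dmamap_t *map)
{
	int err;

	/* One segment covering the whole ring, 512-byte aligned. */
	err = bus_dma_tag_create(parent, 512, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag);
	if (err != 0)
		return (err);
	err = bus_dmamem_alloc(*tag, vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
	if (err != 0) {
		bus_dma_tag_destroy(*tag);
		return (err);
	}
	err = bus_dmamap_load(*tag, *map, *vaddr, len, ring_dma_cb, paddr,
	    BUS_DMA_NOWAIT);
	if (err != 0) {
		bus_dmamem_free(*tag, *vaddr, *map);
		bus_dma_tag_destroy(*tag);
	}
	return (err);
}

Teardown mirrors this in reverse, as xgbe_free_ring does: bus_dmamap_unload(),
bus_dmamem_free(), then bus_dma_tag_destroy().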
diff --git a/sys/dev/axgbe/xgbe-dev.c b/sys/dev/axgbe/xgbe-dev.c
index 1babcc11a248..3a0c65cfa7c9 100644
--- a/sys/dev/axgbe/xgbe-dev.c
+++ b/sys/dev/axgbe/xgbe-dev.c
@@ -114,15 +114,18 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/phy.h>
-#include <linux/mdio.h>
-#include <linux/clk.h>
-#include <linux/bitrev.h>
-#include <linux/crc32.h>
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
#include "xgbe.h"
#include "xgbe-common.h"
+#include <net/if_dl.h>
+#include <net/if_var.h>
+
static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
unsigned int usec)
{
@@ -352,118 +355,6 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}
-static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
- unsigned int index, unsigned int val)
-{
- unsigned int wait;
- int ret = 0;
-
- mutex_lock(&pdata->rss_mutex);
-
- if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
- ret = -EBUSY;
- goto unlock;
- }
-
- XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
-
- XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
- XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
- XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
- XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
-
- wait = 1000;
- while (wait--) {
- if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
- goto unlock;
-
- usleep_range(1000, 1500);
- }
-
- ret = -EBUSY;
-
-unlock:
- mutex_unlock(&pdata->rss_mutex);
-
- return ret;
-}
-
-static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
-{
- unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
- unsigned int *key = (unsigned int *)&pdata->rss_key;
- int ret;
-
- while (key_regs--) {
- ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
- key_regs, *key++);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
-{
- unsigned int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
- ret = xgbe_write_rss_reg(pdata,
- XGBE_RSS_LOOKUP_TABLE_TYPE, i,
- pdata->rss_table[i]);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
-{
- memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
-
- return xgbe_write_rss_hash_key(pdata);
-}
-
-static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
- const u32 *table)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
- XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
-
- return xgbe_write_rss_lookup_table(pdata);
-}
-
-static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
-{
- int ret;
-
- if (!pdata->hw_feat.rss)
- return -EOPNOTSUPP;
-
- /* Program the hash key */
- ret = xgbe_write_rss_hash_key(pdata);
- if (ret)
- return ret;
-
- /* Program the lookup table */
- ret = xgbe_write_rss_lookup_table(pdata);
- if (ret)
- return ret;
-
- /* Set the RSS options */
- XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
-
- /* Enable RSS */
- XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
-
- return 0;
-}
-
static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
if (!pdata->hw_feat.rss)
@@ -476,19 +367,11 @@ static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
static void xgbe_config_rss(struct xgbe_prv_data *pdata)
{
- int ret;
if (!pdata->hw_feat.rss)
return;
- if (pdata->netdev->features & NETIF_F_RXHASH)
- ret = xgbe_enable_rss(pdata);
- else
- ret = xgbe_disable_rss(pdata);
-
- if (ret)
- netdev_err(pdata->netdev,
- "error configuring RSS, RSS disabled\n");
+ xgbe_disable_rss(pdata);
}
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
@@ -518,44 +401,13 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
- struct ieee_pfc *pfc = pdata->pfc;
- struct ieee_ets *ets = pdata->ets;
unsigned int max_q_count, q_count;
unsigned int reg, reg_val;
unsigned int i;
/* Set MTL flow control */
for (i = 0; i < pdata->rx_q_count; i++) {
- unsigned int ehfc = 0;
-
- if (pfc && ets) {
- unsigned int prio;
-
- for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
- unsigned int tc;
-
- /* Does this queue handle the priority? */
- if (pdata->prio2q_map[prio] != i)
- continue;
-
- /* Get the Traffic Class for this priority */
- tc = ets->prio_tc[prio];
-
- /* Check if flow control should be enabled */
- if (pfc->pfc_en & (1 << tc)) {
- ehfc = 1;
- break;
- }
- }
- } else {
- ehfc = 1;
- }
-
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
-
- netif_dbg(pdata, drv, pdata->netdev,
- "flow control %s for RXq%u\n",
- ehfc ? "enabled" : "disabled", i);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
}
/* Set MAC flow control */
@@ -594,9 +446,8 @@ static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
- struct ieee_pfc *pfc = pdata->pfc;
- if (pdata->tx_pause || (pfc && pfc->pfc_en))
+ if (pdata->tx_pause)
xgbe_enable_tx_flow_control(pdata);
else
xgbe_disable_tx_flow_control(pdata);
@@ -606,9 +457,8 @@ static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
- struct ieee_pfc *pfc = pdata->pfc;
- if (pdata->rx_pause || (pfc && pfc->pfc_en))
+ if (pdata->rx_pause)
xgbe_enable_rx_flow_control(pdata);
else
xgbe_disable_rx_flow_control(pdata);
@@ -618,13 +468,11 @@ static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
- struct ieee_pfc *pfc = pdata->pfc;
xgbe_config_tx_flow_control(pdata);
xgbe_config_rx_flow_control(pdata);
- XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
- (pfc && pfc->pfc_en) ? 1 : 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
}
static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
@@ -794,47 +642,10 @@ static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
return 0;
}
-static u32 xgbe_vid_crc32_le(__le16 vid_le)
-{
- u32 poly = 0xedb88320; /* CRCPOLY_LE */
- u32 crc = ~0;
- u32 temp = 0;
- unsigned char *data = (unsigned char *)&vid_le;
- unsigned char data_byte = 0;
- int i, bits;
-
- bits = get_bitmask_order(VLAN_VID_MASK);
- for (i = 0; i < bits; i++) {
- if ((i % 8) == 0)
- data_byte = data[i / 8];
-
- temp = ((crc & 1) ^ data_byte) & 1;
- crc >>= 1;
- data_byte >>= 1;
-
- if (temp)
- crc ^= poly;
- }
-
- return crc;
-}
-
static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
- u32 crc;
- u16 vid;
- __le16 vid_le;
u16 vlan_hash_table = 0;
- /* Generate the VLAN Hash Table value */
- for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
- /* Get the CRC32 value of the VLAN ID */
- vid_le = cpu_to_le16(vid);
- crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
-
- vlan_hash_table |= (1 << crc);
- }
-
/* Set the VLAN Hash Table filtering register */
XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
@@ -849,17 +660,10 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
return 0;
- netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
- enable ? "entering" : "leaving");
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
/* Hardware will still perform VLAN filtering in promiscuous mode */
- if (enable) {
- xgbe_disable_rx_vlan_filtering(pdata);
- } else {
- if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
- xgbe_enable_rx_vlan_filtering(pdata);
- }
+ xgbe_disable_rx_vlan_filtering(pdata);
return 0;
}
@@ -872,15 +676,13 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
return 0;
- netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
- enable ? "entering" : "leaving");
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
return 0;
}
static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
- struct netdev_hw_addr *ha, unsigned int *mac_reg)
+ char *addr, unsigned int *mac_reg)
{
unsigned int mac_addr_hi, mac_addr_lo;
u8 *mac_addr;
@@ -888,19 +690,15 @@ static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
mac_addr_lo = 0;
mac_addr_hi = 0;
- if (ha) {
+ if (addr) {
mac_addr = (u8 *)&mac_addr_lo;
- mac_addr[0] = ha->addr[0];
- mac_addr[1] = ha->addr[1];
- mac_addr[2] = ha->addr[2];
- mac_addr[3] = ha->addr[3];
+ mac_addr[0] = addr[0];
+ mac_addr[1] = addr[1];
+ mac_addr[2] = addr[2];
+ mac_addr[3] = addr[3];
mac_addr = (u8 *)&mac_addr_hi;
- mac_addr[0] = ha->addr[4];
- mac_addr[1] = ha->addr[5];
-
- netif_dbg(pdata, drv, pdata->netdev,
- "adding mac address %pM at %#x\n",
- ha->addr, *mac_reg);
+ mac_addr[0] = addr[4];
+ mac_addr[1] = addr[5];
XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
}
@@ -913,78 +711,23 @@ static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
- struct net_device *netdev = pdata->netdev;
- struct netdev_hw_addr *ha;
unsigned int mac_reg;
unsigned int addn_macs;
mac_reg = MAC_MACA1HR;
addn_macs = pdata->hw_feat.addn_mac;
- if (netdev_uc_count(netdev) > addn_macs) {
- xgbe_set_promiscuous_mode(pdata, 1);
- } else {
- netdev_for_each_uc_addr(ha, netdev) {
- xgbe_set_mac_reg(pdata, ha, &mac_reg);
- addn_macs--;
- }
-
- if (netdev_mc_count(netdev) > addn_macs) {
- xgbe_set_all_multicast_mode(pdata, 1);
- } else {
- netdev_for_each_mc_addr(ha, netdev) {
- xgbe_set_mac_reg(pdata, ha, &mac_reg);
- addn_macs--;
- }
- }
- }
+ xgbe_set_mac_reg(pdata, pdata->mac_addr, &mac_reg);
+ addn_macs--;
/* Clear remaining additional MAC address entries */
while (addn_macs--)
xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}
-static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
-{
- struct net_device *netdev = pdata->netdev;
- struct netdev_hw_addr *ha;
- unsigned int hash_reg;
- unsigned int hash_table_shift, hash_table_count;
- u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
- u32 crc;
- unsigned int i;
-
- hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
- hash_table_count = pdata->hw_feat.hash_table_size / 32;
- memset(hash_table, 0, sizeof(hash_table));
-
- /* Build the MAC Hash Table register values */
- netdev_for_each_uc_addr(ha, netdev) {
- crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
- crc >>= hash_table_shift;
- hash_table[crc >> 5] |= (1 << (crc & 0x1f));
- }
-
- netdev_for_each_mc_addr(ha, netdev) {
- crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
- crc >>= hash_table_shift;
- hash_table[crc >> 5] |= (1 << (crc & 0x1f));
- }
-
- /* Set the MAC Hash Table registers */
- hash_reg = MAC_HTR0;
- for (i = 0; i < hash_table_count; i++) {
- XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
- hash_reg += MAC_HTR_INC;
- }
-}
-
static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
- if (pdata->hw_feat.hash_table_size)
- xgbe_set_mac_hash_table(pdata);
- else
- xgbe_set_mac_addn_addrs(pdata);
+ xgbe_set_mac_addn_addrs(pdata);
return 0;
}
@@ -1005,11 +748,11 @@ static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
- struct net_device *netdev = pdata->netdev;
unsigned int pr_mode, am_mode;
- pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
- am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+ /* XXX */
+ pr_mode = 0;
+ am_mode = 0;
xgbe_set_promiscuous_mode(pdata, pr_mode);
xgbe_set_all_multicast_mode(pdata, am_mode);
@@ -1108,8 +851,7 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
rdesc->desc2 = 0;
rdesc->desc3 = 0;
- /* Make sure ownership is written to the descriptor */
- dma_wmb();
+ dsb(sy);
}
static void xgbe_tx_desc_init(struct xgbe_channel *channel)
@@ -1135,9 +877,9 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
/* Update the starting address of descriptor ring */
rdata = XGBE_GET_DESC_DATA(ring, start_index);
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
- upper_32_bits(rdata->rdesc_dma));
+ upper_32_bits(rdata->rdata_paddr));
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
- lower_32_bits(rdata->rdesc_dma));
+ lower_32_bits(rdata->rdata_paddr));
DBGPR("<--tx_desc_init\n");
}
@@ -1146,21 +888,9 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
struct xgbe_ring_data *rdata, unsigned int index)
{
struct xgbe_ring_desc *rdesc = rdata->rdesc;
- unsigned int rx_usecs = pdata->rx_usecs;
- unsigned int rx_frames = pdata->rx_frames;
unsigned int inte;
- dma_addr_t hdr_dma, buf_dma;
- if (!rx_usecs && !rx_frames) {
- /* No coalescing, interrupt for every descriptor */
- inte = 1;
- } else {
- /* Set interrupt based on Rx frame coalescing setting */
- if (rx_frames && !((index + 1) % rx_frames))
- inte = 1;
- else
- inte = 0;
- }
+ inte = 1;
/* Reset the Rx descriptor
* Set buffer 1 (lo) address to header dma address (lo)
@@ -1169,25 +899,18 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
* Set buffer 2 (hi) address to buffer dma address (hi) and
* set control bits OWN and INTE
*/
- hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
- buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
- rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
- rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
- rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->mbuf_hdr_paddr));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->mbuf_hdr_paddr));
+ rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->mbuf_data_paddr));
+ rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->mbuf_data_paddr));
XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
- /* Since the Rx DMA engine is likely running, make sure everything
- * is written to the descriptor(s) before setting the OWN bit
- * for the descriptor
- */
- dma_wmb();
+ dsb(sy);
XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
- /* Make sure ownership is written to the descriptor */
- dma_wmb();
+ dsb(sy);
}
static void xgbe_rx_desc_init(struct xgbe_channel *channel)
@@ -1208,246 +931,37 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
xgbe_rx_desc_reset(pdata, rdata, i);
}
+ bus_dmamap_sync(ring->rdesc_dmat, ring->rdesc_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
/* Update the total number of Rx descriptors */
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
/* Update the starting address of descriptor ring */
rdata = XGBE_GET_DESC_DATA(ring, start_index);
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
- upper_32_bits(rdata->rdesc_dma));
+ upper_32_bits(rdata->rdata_paddr));
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
- lower_32_bits(rdata->rdesc_dma));
+ lower_32_bits(rdata->rdata_paddr));
/* Update the Rx Descriptor Tail Pointer */
rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
- lower_32_bits(rdata->rdesc_dma));
+ lower_32_bits(rdata->rdata_paddr));
DBGPR("<--rx_desc_init\n");
}
-static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
- unsigned int addend)
-{
- /* Set the addend register value and tell the device */
- XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
- XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
-
- /* Wait for addend update to complete */
- while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
- udelay(5);
-}
-
-static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
- unsigned int nsec)
-{
- /* Set the time values and tell the device */
- XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
- XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
- XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
-
- /* Wait for time update to complete */
- while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
- udelay(5);
-}
-
-static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
-{
- u64 nsec;
-
- nsec = XGMAC_IOREAD(pdata, MAC_STSR);
- nsec *= NSEC_PER_SEC;
- nsec += XGMAC_IOREAD(pdata, MAC_STNR);
-
- return nsec;
-}
-
-static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
-{
- unsigned int tx_snr;
- u64 nsec;
-
- tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
- if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
- return 0;
-
- nsec = XGMAC_IOREAD(pdata, MAC_TXSSR);
- nsec *= NSEC_PER_SEC;
- nsec += tx_snr;
-
- return nsec;
-}
-
-static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
- struct xgbe_ring_desc *rdesc)
-{
- u64 nsec;
-
- if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
- !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
- nsec = le32_to_cpu(rdesc->desc1);
- nsec <<= 32;
- nsec |= le32_to_cpu(rdesc->desc0);
- if (nsec != 0xffffffffffffffffULL) {
- packet->rx_tstamp = nsec;
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- RX_TSTAMP, 1);
- }
- }
-}
-
-static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
- unsigned int mac_tscr)
-{
- /* Set one nano-second accuracy */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
-
- /* Set fine timestamp update */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
-
- /* Overwrite earlier timestamps */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
-
- XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
-
- /* Exit if timestamping is not enabled */
- if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
- return 0;
-
- /* Initialize time registers */
- XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
- XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
- xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
- xgbe_set_tstamp_time(pdata, 0, 0);
-
- /* Initialize the timecounter */
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
-
- return 0;
-}
-
-static void xgbe_config_tc(struct xgbe_prv_data *pdata)
-{
- unsigned int offset, queue, prio;
- u8 i;
-
- netdev_reset_tc(pdata->netdev);
- if (!pdata->num_tcs)
- return;
-
- netdev_set_num_tc(pdata->netdev, pdata->num_tcs);
-
- for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
- while ((queue < pdata->tx_q_count) &&
- (pdata->q2tc_map[queue] == i))
- queue++;
-
- netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
- i, offset, queue - 1);
- netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
- offset = queue;
- }
-
- if (!pdata->ets)
- return;
-
- for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
- netdev_set_prio_tc_map(pdata->netdev, prio,
- pdata->ets->prio_tc[prio]);
-}
-
-static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
-{
- struct ieee_ets *ets = pdata->ets;
- unsigned int total_weight, min_weight, weight;
- unsigned int mask, reg, reg_val;
- unsigned int i, prio;
-
- if (!ets)
- return;
-
- /* Set Tx to deficit weighted round robin scheduling algorithm (when
- * traffic class is using ETS algorithm)
- */
- XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
-
- /* Set Traffic Class algorithms */
- total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
- min_weight = total_weight / 100;
- if (!min_weight)
- min_weight = 1;
-
- for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
- /* Map the priorities to the traffic class */
- mask = 0;
- for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
- if (ets->prio_tc[prio] == i)
- mask |= (1 << prio);
- }
- mask &= 0xff;
-
- netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
- i, mask);
- reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
- reg_val = XGMAC_IOREAD(pdata, reg);
-
- reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
- reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));
-
- XGMAC_IOWRITE(pdata, reg, reg_val);
-
- /* Set the traffic class algorithm */
- switch (ets->tc_tsa[i]) {
- case IEEE_8021QAZ_TSA_STRICT:
- netif_dbg(pdata, drv, pdata->netdev,
- "TC%u using SP\n", i);
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
- MTL_TSA_SP);
- break;
- case IEEE_8021QAZ_TSA_ETS:
- weight = total_weight * ets->tc_tx_bw[i] / 100;
- weight = clamp(weight, min_weight, total_weight);
-
- netif_dbg(pdata, drv, pdata->netdev,
- "TC%u using DWRR (weight %u)\n", i, weight);
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
- MTL_TSA_ETS);
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
- weight);
- break;
- }
- }
-
- xgbe_config_tc(pdata);
-}
-
-static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
-{
- xgbe_config_flow_control(pdata);
-}
-
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
struct xgbe_ring *ring)
{
- struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring_data *rdata;
- /* Make sure everything is written before the register write */
- wmb();
-
/* Issue a poll command to Tx DMA by writing address
* of next immediate free descriptor */
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
- lower_32_bits(rdata->rdesc_dma));
-
- /* Start the Tx timer */
- if (pdata->tx_usecs && !channel->tx_timer_active) {
- channel->tx_timer_active = 1;
- mod_timer(&channel->tx_timer,
- jiffies + usecs_to_jiffies(pdata->tx_usecs));
- }
+ lower_32_bits(rdata->rdata_paddr));
ring->tx.xmit_more = 0;
}
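
With the Linux tx_usecs timer gone, every transmit rings the tail-pointer doorbell immediately. XGMAC_DMA_IOWRITE presumably reduces to a 32-bit bus_space store into the per-channel register window that xgbe_alloc_channels() carves out below; a sketch under that assumption:

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

/*
 * Assumed expansion of XGMAC_DMA_IOWRITE: a plain 32-bit store into
 * the channel subregion (dma_tag/dma_handle as set up with
 * bus_space_subregion() in xgbe_alloc_channels()).
 */
static void
dma_ch_write(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t reg, uint32_t val)
{
	bus_space_write_4(tag, handle, reg, val);
}
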
@@ -1459,8 +973,6 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
struct xgbe_packet_data *packet = &ring->packet_data;
- unsigned int csum, tso, vlan;
- unsigned int tso_context, vlan_context;
unsigned int tx_set_ic;
int start_index = ring->cur;
int cur_index = ring->cur;
@@ -1468,23 +980,6 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
DBGPR("-->xgbe_dev_xmit\n");
- csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- CSUM_ENABLE);
- tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- TSO_ENABLE);
- vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- VLAN_CTAG);
-
- if (tso && (packet->mss != ring->tx.cur_mss))
- tso_context = 1;
- else
- tso_context = 0;
-
- if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
- vlan_context = 1;
- else
- vlan_context = 0;
-
/* Determine if an interrupt should be generated for this Tx:
* Interrupt:
* - Tx frame count exceeds the frame count setting
@@ -1505,69 +1000,18 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
tx_set_ic = 1;
else
tx_set_ic = 0;
+ tx_set_ic = 1;
rdata = XGBE_GET_DESC_DATA(ring, cur_index);
rdesc = rdata->rdesc;
- /* Create a context descriptor if this is a TSO packet */
- if (tso_context || vlan_context) {
- if (tso_context) {
- netif_dbg(pdata, tx_queued, pdata->netdev,
- "TSO context descriptor, mss=%u\n",
- packet->mss);
-
- /* Set the MSS size */
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
- MSS, packet->mss);
-
- /* Mark it as a CONTEXT descriptor */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
- CTXT, 1);
-
- /* Indicate this descriptor contains the MSS */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
- TCMSSV, 1);
-
- ring->tx.cur_mss = packet->mss;
- }
-
- if (vlan_context) {
- netif_dbg(pdata, tx_queued, pdata->netdev,
- "VLAN context descriptor, ctag=%u\n",
- packet->vlan_ctag);
-
- /* Mark it as a CONTEXT descriptor */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
- CTXT, 1);
-
- /* Set the VLAN tag */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
- VT, packet->vlan_ctag);
-
- /* Indicate this descriptor contains the VLAN tag */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
- VLTV, 1);
-
- ring->tx.cur_vlan_ctag = packet->vlan_ctag;
- }
-
- cur_index++;
- rdata = XGBE_GET_DESC_DATA(ring, cur_index);
- rdesc = rdata->rdesc;
- }
-
/* Update buffer address (for TSO this is the header) */
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->mbuf_data_paddr));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->mbuf_data_paddr));
/* Update the buffer length */
XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
- rdata->skb_dma_len);
-
- /* VLAN tag insertion check */
- if (vlan)
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
- TX_NORMAL_DESC2_VLAN_INSERT);
+ rdata->mbuf_len);
/* Timestamp enablement check */
if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
@@ -1583,28 +1027,12 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
if (cur_index != start_index)
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
- if (tso) {
- /* Enable TSO */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
- packet->tcp_payload_len);
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
- packet->tcp_header_len / 4);
-
- pdata->ext_stats.tx_tso_packets++;
- } else {
- /* Enable CRC and Pad Insertion */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
-
- /* Enable HW CSUM */
- if (csum)
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
- CIC, 0x3);
+ /* Enable CRC and Pad Insertion */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
- /* Set the total length to be transmitted */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
- packet->length);
- }
+ /* Set the total length to be transmitted */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
+ packet->length);
for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
cur_index++;
@@ -1612,23 +1040,18 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
rdesc = rdata->rdesc;
/* Update buffer address */
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->mbuf_data_paddr));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->mbuf_data_paddr));
/* Update the buffer length */
XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
- rdata->skb_dma_len);
+ rdata->mbuf_len);
/* Set OWN bit */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
/* Mark it as NORMAL descriptor */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
-
- /* Enable HW CSUM */
- if (csum)
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
- CIC, 0x3);
}
/* Set LAST bit for the last descriptor */
@@ -1642,31 +1065,28 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
rdata->tx.packets = packet->tx_packets;
rdata->tx.bytes = packet->tx_bytes;
+ /* Sync the DMA buffers */
+ bus_dmamap_sync(ring->rdesc_dmat, ring->rdesc_map,
+ BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(ring->mbuf_dmat, ring->mbuf_map,
+ BUS_DMASYNC_PREWRITE);
+
/* In case the Tx DMA engine is running, make sure everything
* is written to the descriptor(s) before setting the OWN bit
* for the first descriptor
*/
- dma_wmb();
/* Set OWN bit for the first descriptor */
rdata = XGBE_GET_DESC_DATA(ring, start_index);
rdesc = rdata->rdesc;
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
- if (netif_msg_tx_queued(pdata))
- xgbe_dump_tx_desc(pdata, ring, start_index,
- packet->rdesc_count, 1);
-
- /* Make sure ownership is written to the descriptor */
- smp_wmb();
+ /* Sync so the device sees the OWN bit */
+ bus_dmamap_sync(ring->rdesc_dmat, ring->rdesc_map,
+ BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
ring->cur = cur_index + 1;
- if (!packet->skb->xmit_more ||
- netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
- channel->queue_index)))
- xgbe_tx_start_xmit(channel, ring);
- else
- ring->tx.xmit_more = 1;
+ xgbe_tx_start_xmit(channel, ring);
DBGPR(" %s: descriptors %u to %u written\n",
channel->name, start_index & (ring->rdesc_count - 1),
@@ -1677,39 +1097,27 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
static int xgbe_dev_read(struct xgbe_channel *channel)
{
- struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
struct xgbe_packet_data *packet = &ring->packet_data;
- struct net_device *netdev = pdata->netdev;
- unsigned int err, etlt, l34t;
+ unsigned int err, etlt;
DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdesc = rdata->rdesc;
+ bus_dmamap_sync(ring->rdesc_dmat, ring->rdesc_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ dsb(sy);
+
/* Check for data availability */
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
return 1;
- /* Make sure descriptor fields are read after reading the OWN bit */
- dma_rmb();
-
- if (netif_msg_rx_status(pdata))
- xgbe_dump_rx_desc(pdata, ring, ring->cur);
-
- if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
- /* Timestamp Context Descriptor */
- xgbe_get_rx_tstamp(packet, rdesc);
-
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- CONTEXT, 1);
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- CONTEXT_NEXT, 0);
- return 0;
- }
+ dsb(sy);
/* Normal Descriptor, be sure Context Descriptor bit is off */
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);
@@ -1723,28 +1131,6 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
RX_NORMAL_DESC2, HL);
- if (rdata->rx.hdr_len)
- pdata->ext_stats.rx_split_header_packets++;
- }
-
- /* Get the RSS hash */
- if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- RSS_HASH, 1);
-
- packet->rss_hash = le32_to_cpu(rdesc->desc1);
-
- l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
- switch (l34t) {
- case RX_DESC3_L34T_IPV4_TCP:
- case RX_DESC3_L34T_IPV4_UDP:
- case RX_DESC3_L34T_IPV6_TCP:
- case RX_DESC3_L34T_IPV6_UDP:
- packet->rss_hash_type = PKT_HASH_TYPE_L4;
- break;
- default:
- packet->rss_hash_type = PKT_HASH_TYPE_L3;
- }
}
/* Get the packet length */
@@ -1761,29 +1147,11 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
INCOMPLETE, 0);
- /* Set checksum done indicator as appropriate */
- if (netdev->features & NETIF_F_RXCSUM)
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- CSUM_DONE, 1);
-
/* Check for errors (only valid in last descriptor) */
err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
- netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
- if (!err || !etlt) {
- /* No error if err is 0 or etlt is 0 */
- if ((etlt == 0x09) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- VLAN_CTAG, 1);
- packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
- RX_NORMAL_DESC0,
- OVT);
- netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
- packet->vlan_ctag);
- }
- } else {
+ if (err && etlt) {
if ((etlt == 0x05) || (etlt == 0x06))
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
CSUM_DONE, 0);
@@ -1792,6 +1160,9 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
FRAME, 1);
}
+ bus_dmamap_sync(ring->mbuf_dmat, rdata->mbuf_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
ring->cur & (ring->rdesc_count - 1), ring->cur);
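
The early return of 1 above is the standard OWN-bit handshake: while OWN is set, the descriptor still belongs to the DMA engine, so a non-zero return means "no completed packet yet" rather than an error. A standalone illustration of the convention (the bit position and structure are stand-ins, not the hardware layout):

#include <stdint.h>

#define DEMO_DESC_OWN	(1U << 31)	/* stand-in bit position */

struct demo_desc {
	volatile uint32_t desc3;
};

/* Mirrors xgbe_dev_read()'s convention: 1 while the device still
 * owns the descriptor, 0 once the CPU may consume it. */
static int
demo_dev_read(struct demo_desc *d)
{
	if (d->desc3 & DEMO_DESC_OWN)
		return (1);
	/* ... descriptor fields are now safe to parse ... */
	return (0);
}
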
@@ -1909,11 +1280,11 @@ static int xgbe_exit(struct xgbe_prv_data *pdata)
/* Issue a software reset */
XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
- usleep_range(10, 15);
+ DELAY(10);
/* Poll Until Poll Condition */
while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
- usleep_range(500, 600);
+ DELAY(500);
if (!count)
return -EBUSY;
@@ -1938,7 +1309,7 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
count = 2000;
while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
MTL_Q_TQOMR, FTQ))
- usleep_range(500, 600);
+ DELAY(500);
if (!count)
return -EBUSY;
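
usleep_range() maps onto DELAY(9), a busy wait, so both reset paths (SWR above, FTQ here) spin in 500us slices against a fixed retry budget instead of sleeping. The shape of that bounded poll, with done and arg as illustrative stand-ins:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>

/* Spin in fixed DELAY() slices until the condition holds or the
 * retry budget is exhausted; returns EBUSY on timeout. */
static int
poll_until(int (*done)(void *), void *arg, int count, int step_us)
{
	while (--count && !done(arg))
		DELAY(step_us);
	return (count ? 0 : EBUSY);
}
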
@@ -2035,10 +1406,6 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
-
- netif_info(pdata, drv, pdata->netdev,
- "%d Tx hardware queues, %d byte fifo per queue\n",
- pdata->tx_q_count, ((fifo_size + 1) * 256));
}
static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
@@ -2051,10 +1418,6 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
-
- netif_info(pdata, drv, pdata->netdev,
- "%d Rx hardware queues, %d byte fifo per queue\n",
- pdata->rx_q_count, ((fifo_size + 1) * 256));
}
static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
@@ -2073,16 +1436,12 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
for (j = 0; j < qptc; j++) {
- netif_dbg(pdata, drv, pdata->netdev,
- "TXq%u mapped to TC%u\n", queue, i);
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
Q2TCMAP, i);
pdata->q2tc_map[queue++] = i;
}
if (i < qptc_extra) {
- netif_dbg(pdata, drv, pdata->netdev,
- "TXq%u mapped to TC%u\n", queue, i);
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
Q2TCMAP, i);
pdata->q2tc_map[queue++] = i;
@@ -2100,15 +1459,11 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
for (i = 0, prio = 0; i < prio_queues;) {
mask = 0;
for (j = 0; j < ppq; j++) {
- netif_dbg(pdata, drv, pdata->netdev,
- "PRIO%u mapped to RXq%u\n", prio, i);
mask |= (1 << prio);
pdata->prio2q_map[prio++] = i;
}
if (i < ppq_extra) {
- netif_dbg(pdata, drv, pdata->netdev,
- "PRIO%u mapped to RXq%u\n", prio, i);
mask |= (1 << prio);
pdata->prio2q_map[prio++] = i;
}
@@ -2154,21 +1509,15 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
{
- xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
- /* Filtering is done using perfect filtering and hash filtering */
- if (pdata->hw_feat.hash_table_size) {
- XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
- XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
- XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
- }
+ xgbe_set_mac_address(pdata, IF_LLADDR(pdata->netdev));
}
static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
unsigned int val;
- val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
+ val = (if_getmtu(pdata->netdev) > XGMAC_STD_PACKET_MTU) ? 1 : 0;
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}
@@ -2187,12 +1536,16 @@ static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
case SPEED_1000:
xgbe_set_gmii_speed(pdata);
break;
+ case SPEED_UNKNOWN:
+ break;
+ default:
+ panic("TODO %s:%d\n", __FILE__, __LINE__);
}
}
static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
- if (pdata->netdev->features & NETIF_F_RXCSUM)
+ if ((if_getcapenable(pdata->netdev) & IFCAP_RXCSUM) != 0)
xgbe_enable_rx_csum(pdata);
else
xgbe_disable_rx_csum(pdata);
@@ -2207,15 +1560,8 @@ static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
/* Set the current VLAN Hash Table register value */
xgbe_update_vlan_hash_table(pdata);
- if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
- xgbe_enable_rx_vlan_filtering(pdata);
- else
- xgbe_disable_rx_vlan_filtering(pdata);
-
- if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
- xgbe_enable_rx_vlan_stripping(pdata);
- else
- xgbe_disable_rx_vlan_stripping(pdata);
+ xgbe_disable_rx_vlan_filtering(pdata);
+ xgbe_disable_rx_vlan_stripping(pdata);
}
static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
@@ -2587,21 +1933,16 @@ static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
* descriptors. Wait for the Tx engine to enter the stopped or
* suspended state. Don't wait forever though...
*/
- tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
- while (time_before(jiffies, tx_timeout)) {
+ tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
+ while (ticks < tx_timeout) {
tx_status = XGMAC_IOREAD(pdata, tx_dsr);
tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
if ((tx_status == DMA_TPS_STOPPED) ||
(tx_status == DMA_TPS_SUSPENDED))
break;
- usleep_range(500, 1000);
+ DELAY(500);
}
-
- if (!time_before(jiffies, tx_timeout))
- netdev_info(pdata->netdev,
- "timed out waiting for Tx DMA channel %u to stop\n",
- channel->queue_index);
}
static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
@@ -2668,20 +2009,15 @@ static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
* packets. Wait for the Rx queue to empty the Rx fifo. Don't
* wait forever though...
*/
- rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
- while (time_before(jiffies, rx_timeout)) {
+ rx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
+ while (ticks < rx_timeout) {
rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
(XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
break;
- usleep_range(500, 1000);
+ DELAY(500);
}
-
- if (!time_before(jiffies, rx_timeout))
- netdev_info(pdata->netdev,
- "timed out waiting for Rx queue %u to empty\n",
- queue);
}
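
Both stop loops compare the wrapping ticks counter directly against a deadline, which only behaves while ticks stays short of INT_MAX; the wrap-tolerant form is the signed-difference idiom behind the time_before() calls being replaced. A standalone sketch of that idiom, assuming timeouts far shorter than half the counter range:

#include <stdio.h>

/* Wrap-safe "a is before b" for a free-running int tick counter. */
static int
ticks_before(int a, int b)
{
	return ((int)((unsigned int)a - (unsigned int)b) < 0);
}

int
main(void)
{
	int now = 0x7ffffff0;				/* near the wrap */
	int deadline = (int)((unsigned int)now + 100);	/* wraps negative */

	/* A direct compare says the deadline already passed; the
	 * signed difference still sees 100 ticks of headroom. */
	printf("direct=%d wrap-safe=%d\n",
	    now < deadline, ticks_before(now, deadline));
	return (0);
}
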
static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
@@ -2860,8 +2196,6 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
/*TODO: Error Packet and undersized good Packet forwarding enable
(FEP and FUP)
*/
- xgbe_config_dcb_tc(pdata);
- xgbe_config_dcb_pfc(pdata);
xgbe_enable_mtl_interrupts(pdata);
/*
@@ -2966,23 +2300,8 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->rx_mmc_int = xgbe_rx_mmc_int;
hw_if->read_mmc_stats = xgbe_read_mmc_stats;
- /* For PTP config */
- hw_if->config_tstamp = xgbe_config_tstamp;
- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
-
- /* For Data Center Bridging config */
- hw_if->config_tc = xgbe_config_tc;
- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
-
/* For Receive Side Scaling */
- hw_if->enable_rss = xgbe_enable_rss;
hw_if->disable_rss = xgbe_disable_rss;
- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
DBGPR("<--xgbe_init_function_ptrs\n");
}
diff --git a/sys/dev/axgbe/xgbe-drv.c b/sys/dev/axgbe/xgbe-drv.c
index ebf9224b2d31..32a6cfe239c1 100644
--- a/sys/dev/axgbe/xgbe-drv.c
+++ b/sys/dev/axgbe/xgbe-drv.c
@@ -114,21 +114,17 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/tcp.h>
-#include <linux/if_vlan.h>
-#include <net/busy_poll.h>
-#include <linux/clk.h>
-#include <linux/if_ether.h>
-#include <linux/net_tstamp.h>
-#include <linux/phy.h>
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
#include "xgbe.h"
#include "xgbe-common.h"
-static int xgbe_one_poll(struct napi_struct *, int);
-static int xgbe_all_poll(struct napi_struct *, int);
+static int xgbe_one_poll(struct xgbe_channel *channel, int budget);
+static int xgbe_all_poll(struct xgbe_prv_data *pdata, int budget);
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
@@ -139,38 +135,28 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
- channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
- if (!channel_mem)
- goto err_channel;
-
- tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
- GFP_KERNEL);
- if (!tx_ring)
- goto err_tx_ring;
-
- rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
- GFP_KERNEL);
- if (!rx_ring)
- goto err_rx_ring;
+ channel_mem = malloc(count * sizeof(struct xgbe_channel), M_AXGBE,
+ M_WAITOK | M_ZERO);
+ tx_ring = malloc(pdata->tx_ring_count * sizeof(struct xgbe_ring),
+ M_AXGBE, M_WAITOK | M_ZERO);
+ rx_ring = malloc(pdata->rx_ring_count * sizeof(struct xgbe_ring),
+ M_AXGBE, M_WAITOK | M_ZERO);
for (i = 0, channel = channel_mem; i < count; i++, channel++) {
snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
channel->pdata = pdata;
channel->queue_index = i;
- channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
- (DMA_CH_INC * i);
+ channel->dma_tag = rman_get_bustag(pdata->xgmac_res);
+ bus_space_subregion(channel->dma_tag,
+ rman_get_bushandle(pdata->xgmac_res),
+ DMA_CH_BASE + (DMA_CH_INC * i), DMA_CH_INC,
+ &channel->dma_handle);
if (pdata->per_channel_irq) {
- /* Get the DMA interrupt (offset 1) */
- ret = platform_get_irq(pdata->pdev, i + 1);
- if (ret < 0) {
- netdev_err(pdata->netdev,
- "platform_get_irq %u failed\n",
- i + 1);
+ if (pdata->chan_irq_res[i] == NULL)
goto err_irq;
- }
- channel->dma_irq = ret;
+ channel->dma_irq_res = pdata->chan_irq_res[i];
}
if (i < pdata->tx_ring_count) {
@@ -182,11 +168,6 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
spin_lock_init(&rx_ring->lock);
channel->rx_ring = rx_ring++;
}
-
- netif_dbg(pdata, drv, pdata->netdev,
- "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
- channel->name, channel->dma_regs, channel->dma_irq,
- channel->tx_ring, channel->rx_ring);
}
pdata->channel = channel_mem;
@@ -195,15 +176,10 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
return 0;
err_irq:
- kfree(rx_ring);
+ free(rx_ring, M_AXGBE);
+ free(tx_ring, M_AXGBE);
+ free(channel_mem, M_AXGBE);
-err_rx_ring:
- kfree(tx_ring);
-
-err_tx_ring:
- kfree(channel_mem);
-
-err_channel:
return ret;
}
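
kcalloc(..., GFP_KERNEL) becomes malloc(9) with M_WAITOK | M_ZERO, which sleeps rather than returning NULL, so the per-allocation error unwinding collapses into the single err_irq path. The port presumably declares M_AXGBE elsewhere via MALLOC_DEFINE(9); a minimal sketch of the idiom:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

static MALLOC_DEFINE(M_DEMO, "demo", "demo allocations");

static void *
alloc_array(size_t nmemb, size_t size)
{
	/* M_WAITOK sleeps until memory is available, so the result
	 * is never NULL; M_ZERO matches kcalloc's zeroing. */
	return (malloc(nmemb * size, M_DEMO, M_WAITOK | M_ZERO));
}
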
@@ -212,9 +188,9 @@ static void xgbe_free_channels(struct xgbe_prv_data *pdata)
if (!pdata->channel)
return;
- kfree(pdata->channel->rx_ring);
- kfree(pdata->channel->tx_ring);
- kfree(pdata->channel);
+ free(pdata->channel->rx_ring, M_AXGBE);
+ free(pdata->channel->tx_ring, M_AXGBE);
+ free(pdata->channel, M_AXGBE);
pdata->channel = NULL;
pdata->channel_count = 0;
@@ -236,34 +212,28 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
struct xgbe_prv_data *pdata = channel->pdata;
if (count > xgbe_tx_avail_desc(ring)) {
- netif_info(pdata, drv, pdata->netdev,
- "Tx queue stopped, not enough descriptors available\n");
- netif_stop_subqueue(pdata->netdev, channel->queue_index);
- ring->tx.queue_stopped = 1;
-
/* If we haven't notified the hardware because of xmit_more
* support, tell it now
*/
if (ring->tx.xmit_more)
pdata->hw_if.tx_start_xmit(channel, ring);
- return NETDEV_TX_BUSY;
+ return EFBIG;
}
return 0;
}
-static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
+static int xgbe_calc_rx_buf_size(struct ifnet *netdev, unsigned int mtu)
{
unsigned int rx_buf_size;
if (mtu > XGMAC_JUMBO_PACKET_MTU) {
- netdev_alert(netdev, "MTU exceeds maximum supported value\n");
return -EINVAL;
}
rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
+ rx_buf_size = MAX(rx_buf_size, XGBE_RX_MIN_BUF_SIZE);
+ rx_buf_size = MIN(rx_buf_size, PAGE_SIZE);
rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
~(XGBE_RX_BUF_ALIGN - 1);
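
The buffer size is clamped into [XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE] and then rounded up to the buffer alignment with the usual power-of-two mask. A standalone run of that arithmetic (the constants are stand-ins for the driver's values):

#include <stdio.h>

#define DEMO_RX_MIN_BUF_SIZE	1522	/* stand-ins for the driver's */
#define DEMO_RX_BUF_ALIGN	64	/* XGBE_* constants */
#define DEMO_PAGE_SIZE		4096

int
main(void)
{
	unsigned int mtu = 1500;
	unsigned int sz = mtu + 14 + 4 + 4;	/* ETH_HLEN + FCS + VLAN */

	/* Clamp into [min, PAGE_SIZE]... */
	if (sz < DEMO_RX_MIN_BUF_SIZE)
		sz = DEMO_RX_MIN_BUF_SIZE;
	if (sz > DEMO_PAGE_SIZE)
		sz = DEMO_PAGE_SIZE;

	/* ...then align up (mask form works for powers of two). */
	sz = (sz + DEMO_RX_BUF_ALIGN - 1) & ~(DEMO_RX_BUF_ALIGN - 1);

	printf("rx_buf_size = %u\n", sz);	/* 1536 */
	return (0);
}
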
@@ -293,35 +263,13 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
}
}
-static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
-{
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_channel *channel;
- enum xgbe_int int_id;
- unsigned int i;
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (channel->tx_ring && channel->rx_ring)
- int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
- else if (channel->tx_ring)
- int_id = XGMAC_INT_DMA_CH_SR_TI;
- else if (channel->rx_ring)
- int_id = XGMAC_INT_DMA_CH_SR_RI;
- else
- continue;
-
- hw_if->disable_int(channel, int_id);
- }
-}
-
-static irqreturn_t xgbe_isr(int irq, void *data)
+static void xgbe_isr(void *data)
{
struct xgbe_prv_data *pdata = data;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
unsigned int dma_isr, dma_ch_isr;
- unsigned int mac_isr, mac_tssr;
+ unsigned int mac_isr;
unsigned int i;
/* The DMA interrupt status register also reports MAC and MTL
@@ -330,9 +278,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
*/
dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
if (!dma_isr)
- goto isr_done;
-
- netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
+ return;
for (i = 0; i < pdata->channel_count; i++) {
if (!(dma_isr & (1 << i)))
@@ -341,8 +287,6 @@ static irqreturn_t xgbe_isr(int irq, void *data)
channel = pdata->channel + i;
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
- netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
- i, dma_ch_isr);
/* The TI or RI interrupt bits may still be set even if using
* per channel DMA interrupts. Check to be sure those are not
@@ -351,13 +295,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (!pdata->per_channel_irq &&
(XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
- if (napi_schedule_prep(&pdata->napi)) {
- /* Disable Tx and Rx interrupts */
- xgbe_disable_rx_tx_ints(pdata);
-
- /* Turn on polling */
- __napi_schedule_irqoff(&pdata->napi);
- }
+ xgbe_all_poll(pdata, 16);
}
if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
@@ -365,7 +303,8 @@ static irqreturn_t xgbe_isr(int irq, void *data)
/* Restart the device on a Fatal Bus Error */
if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
- schedule_work(&pdata->restart_work);
+ taskqueue_enqueue(taskqueue_thread,
+ &pdata->restart_work);
/* Clear all interrupt signals */
XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
@@ -379,123 +318,49 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
hw_if->rx_mmc_int(pdata);
-
- if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
- mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);
-
- if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
- /* Read Tx Timestamp to clear interrupt */
- pdata->tx_tstamp =
- hw_if->get_tx_tstamp(pdata);
- queue_work(pdata->dev_workqueue,
- &pdata->tx_tstamp_work);
- }
- }
}
-
-isr_done:
- return IRQ_HANDLED;
}
-static irqreturn_t xgbe_dma_isr(int irq, void *data)
+static void xgbe_dma_isr(void *data)
{
struct xgbe_channel *channel = data;
- /* Per channel DMA interrupts are enabled, so we use the per
- * channel napi structure and not the private data napi structure
- */
- if (napi_schedule_prep(&channel->napi)) {
- /* Disable Tx and Rx interrupts */
- disable_irq_nosync(channel->dma_irq);
-
- /* Turn on polling */
- __napi_schedule_irqoff(&channel->napi);
- }
-
- return IRQ_HANDLED;
-}
-
-static void xgbe_tx_timer(unsigned long data)
-{
- struct xgbe_channel *channel = (struct xgbe_channel *)data;
- struct xgbe_prv_data *pdata = channel->pdata;
- struct napi_struct *napi;
-
- DBGPR("-->xgbe_tx_timer\n");
-
- napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
-
- if (napi_schedule_prep(napi)) {
- /* Disable Tx and Rx interrupts */
- if (pdata->per_channel_irq)
- disable_irq_nosync(channel->dma_irq);
- else
- xgbe_disable_rx_tx_ints(pdata);
-
- /* Turn on polling */
- __napi_schedule(napi);
- }
-
- channel->tx_timer_active = 0;
-
- DBGPR("<--xgbe_tx_timer\n");
+ xgbe_one_poll(channel, 16);
}
-static void xgbe_service(struct work_struct *work)
+static void xgbe_service(void *ctx, int pending)
{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- service_work);
+ struct xgbe_prv_data *pdata = ctx;
pdata->phy_if.phy_status(pdata);
}
-static void xgbe_service_timer(unsigned long data)
+static void xgbe_service_timer(void *data)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_prv_data *pdata = data;
- queue_work(pdata->dev_workqueue, &pdata->service_work);
+ DBGPR("--> xgbe_service_timer\n");
+ taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work);
- mod_timer(&pdata->service_timer, jiffies + HZ);
+ callout_reset(&pdata->service_timer, hz, xgbe_service_timer, pdata);
+ DBGPR("<-- xgbe_service_timer\n");
}
static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
- unsigned int i;
- setup_timer(&pdata->service_timer, xgbe_service_timer,
- (unsigned long)pdata);
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
- break;
-
- setup_timer(&channel->tx_timer, xgbe_tx_timer,
- (unsigned long)channel);
- }
+ callout_init(&pdata->service_timer, 1);
}
static void xgbe_start_timers(struct xgbe_prv_data *pdata)
{
- mod_timer(&pdata->service_timer, jiffies + HZ);
+ callout_reset(&pdata->service_timer, hz, xgbe_service_timer, pdata);
}
static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
- unsigned int i;
-
- del_timer_sync(&pdata->service_timer);
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
- break;
- del_timer_sync(&channel->tx_timer);
- }
+ callout_drain(&pdata->service_timer);
}
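
The Linux timer API maps onto callout(9): initialize once, re-arm from the handler itself for a periodic tick (as xgbe_service_timer does above), and drain on stop so a handler still in flight finishes before teardown. A minimal sketch:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>

static struct callout demo_co;

static void
demo_tick(void *arg)
{
	/* ... periodic work ... */

	/* Re-arm one second out, like the service timer. */
	callout_reset(&demo_co, hz, demo_tick, arg);
}

static void
demo_start(void *arg)
{
	callout_init(&demo_co, 1);	/* 1 = MPSAFE */
	callout_reset(&demo_co, hz, demo_tick, arg);
}

static void
demo_stop(void)
{
	/* Blocks until any in-flight handler has returned. */
	callout_drain(&demo_co);
}
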
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
@@ -598,62 +463,16 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_get_all_hw_features\n");
}
-static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
-{
- struct xgbe_channel *channel;
- unsigned int i;
-
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (add)
- netif_napi_add(pdata->netdev, &channel->napi,
- xgbe_one_poll, NAPI_POLL_WEIGHT);
-
- napi_enable(&channel->napi);
- }
- } else {
- if (add)
- netif_napi_add(pdata->netdev, &pdata->napi,
- xgbe_all_poll, NAPI_POLL_WEIGHT);
-
- napi_enable(&pdata->napi);
- }
-}
-
-static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
-{
- struct xgbe_channel *channel;
- unsigned int i;
-
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- napi_disable(&channel->napi);
-
- if (del)
- netif_napi_del(&channel->napi);
- }
- } else {
- napi_disable(&pdata->napi);
-
- if (del)
- netif_napi_del(&pdata->napi);
- }
-}
-
static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel;
- struct net_device *netdev = pdata->netdev;
unsigned int i;
int ret;
- ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
- netdev->name, pdata);
+ ret = bus_setup_intr(pdata->dev, pdata->dev_irq_res,
+ INTR_MPSAFE | INTR_TYPE_NET, NULL, xgbe_isr, pdata,
+ &pdata->dev_irq_tag);
if (ret) {
- netdev_alert(netdev, "error requesting irq %d\n",
- pdata->dev_irq);
return ret;
}
@@ -662,17 +481,10 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
- snprintf(channel->dma_irq_name,
- sizeof(channel->dma_irq_name) - 1,
- "%s-TxRx-%u", netdev_name(netdev),
- channel->queue_index);
-
- ret = devm_request_irq(pdata->dev, channel->dma_irq,
- xgbe_dma_isr, 0,
- channel->dma_irq_name, channel);
- if (ret) {
- netdev_alert(netdev, "error requesting irq %d\n",
- channel->dma_irq);
+ ret = bus_setup_intr(pdata->dev, channel->dma_irq_res,
+ INTR_MPSAFE | INTR_TYPE_NET, NULL, xgbe_dma_isr, channel,
+ &channel->dma_irq_tag);
+ if (ret != 0) {
goto err_irq;
}
}
@@ -682,11 +494,12 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
err_irq:
/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
for (i--, channel--; i < pdata->channel_count; i--, channel--)
- devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ bus_teardown_intr(pdata->dev, channel->dma_irq_res,
+ channel->dma_irq_tag);
- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+ bus_teardown_intr(pdata->dev, pdata->dev_irq_res, pdata->dev_irq_tag);
- return ret;
+ return -ret;
}
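
devm_request_irq() becomes bus_setup_intr(9): a NULL filter installs the function as an ithread handler, and the cookie returned through the final argument is what bus_teardown_intr() needs at detach. A sketch, assuming the IRQ resource was already obtained with bus_alloc_resource_any():

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rman.h>

/* Attach an ithread handler to an already-allocated IRQ resource. */
static int
hook_irq(device_t dev, struct resource *irq_res, driver_intr_t *isr,
    void *arg, void **tagp)
{
	return (bus_setup_intr(dev, irq_res, INTR_MPSAFE | INTR_TYPE_NET,
	    NULL, isr, arg, tagp));
}
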
static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
@@ -694,14 +507,15 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
struct xgbe_channel *channel;
unsigned int i;
- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+ bus_teardown_intr(pdata->dev, pdata->dev_irq_res, pdata->dev_irq_tag);
if (!pdata->per_channel_irq)
return;
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++)
- devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ bus_teardown_intr(pdata->dev, channel->dma_irq_res,
+ channel->dma_irq_tag);
}
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
@@ -791,88 +605,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
return pdata->phy_if.phy_reset(pdata);
}
-int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned long flags;
-
- DBGPR("-->xgbe_powerdown\n");
-
- if (!netif_running(netdev) ||
- (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
- netdev_alert(netdev, "Device is already powered down\n");
- DBGPR("<--xgbe_powerdown\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&pdata->lock, flags);
-
- if (caller == XGMAC_DRIVER_CONTEXT)
- netif_device_detach(netdev);
-
- netif_tx_stop_all_queues(netdev);
-
- xgbe_stop_timers(pdata);
- flush_workqueue(pdata->dev_workqueue);
-
- hw_if->powerdown_tx(pdata);
- hw_if->powerdown_rx(pdata);
-
- xgbe_napi_disable(pdata, 0);
-
- pdata->power_down = 1;
-
- spin_unlock_irqrestore(&pdata->lock, flags);
-
- DBGPR("<--xgbe_powerdown\n");
-
- return 0;
-}
-
-int xgbe_powerup(struct net_device *netdev, unsigned int caller)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned long flags;
-
- DBGPR("-->xgbe_powerup\n");
-
- if (!netif_running(netdev) ||
- (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
- netdev_alert(netdev, "Device is already powered up\n");
- DBGPR("<--xgbe_powerup\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&pdata->lock, flags);
-
- pdata->power_down = 0;
-
- xgbe_napi_enable(pdata, 0);
-
- hw_if->powerup_tx(pdata);
- hw_if->powerup_rx(pdata);
-
- if (caller == XGMAC_DRIVER_CONTEXT)
- netif_device_attach(netdev);
-
- netif_tx_start_all_queues(netdev);
-
- xgbe_start_timers(pdata);
-
- spin_unlock_irqrestore(&pdata->lock, flags);
-
- DBGPR("<--xgbe_powerup\n");
-
- return 0;
-}
-
static int xgbe_start(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_phy_if *phy_if = &pdata->phy_if;
- struct net_device *netdev = pdata->netdev;
int ret;
DBGPR("-->xgbe_start\n");
@@ -883,8 +619,6 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
if (ret)
goto err_phy;
- xgbe_napi_enable(pdata, 1);
-
ret = xgbe_request_irqs(pdata);
if (ret)
goto err_napi;
@@ -892,18 +626,16 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
hw_if->enable_tx(pdata);
hw_if->enable_rx(pdata);
- netif_tx_start_all_queues(netdev);
+ xgbe_enable_rx_tx_ints(pdata);
xgbe_start_timers(pdata);
- queue_work(pdata->dev_workqueue, &pdata->service_work);
+ taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work);
DBGPR("<--xgbe_start\n");
return 0;
err_napi:
- xgbe_napi_disable(pdata, 1);
-
phy_if->phy_stop(pdata);
err_phy:
@@ -916,38 +648,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_phy_if *phy_if = &pdata->phy_if;
- struct xgbe_channel *channel;
- struct net_device *netdev = pdata->netdev;
- struct netdev_queue *txq;
- unsigned int i;
DBGPR("-->xgbe_stop\n");
- netif_tx_stop_all_queues(netdev);
-
xgbe_stop_timers(pdata);
- flush_workqueue(pdata->dev_workqueue);
+ taskqueue_drain_all(pdata->dev_workqueue);
hw_if->disable_tx(pdata);
hw_if->disable_rx(pdata);
xgbe_free_irqs(pdata);
- xgbe_napi_disable(pdata, 1);
-
phy_if->phy_stop(pdata);
hw_if->exit(pdata);
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
- continue;
-
- txq = netdev_get_tx_queue(netdev, channel->queue_index);
- netdev_tx_reset_queue(txq);
- }
-
DBGPR("<--xgbe_stop\n");
}
@@ -956,7 +671,7 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_restart_dev\n");
/* If not running, "restart" will happen on open */
- if (!netif_running(pdata->netdev))
+ if ((pdata->netdev->if_drv_flags & IFF_DRV_RUNNING) == 0)
return;
xgbe_stop(pdata);
@@ -969,329 +684,38 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_restart_dev\n");
}
-static void xgbe_restart(struct work_struct *work)
+static void xgbe_restart(void *ctx, int pending)
{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- restart_work);
-
- rtnl_lock();
+ struct xgbe_prv_data *pdata = ctx;
xgbe_restart_dev(pdata);
-
- rtnl_unlock();
-}
-
-static void xgbe_tx_tstamp(struct work_struct *work)
-{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- tx_tstamp_work);
- struct skb_shared_hwtstamps hwtstamps;
- u64 nsec;
- unsigned long flags;
-
- if (pdata->tx_tstamp) {
- nsec = timecounter_cyc2time(&pdata->tstamp_tc,
- pdata->tx_tstamp);
-
- memset(&hwtstamps, 0, sizeof(hwtstamps));
- hwtstamps.hwtstamp = ns_to_ktime(nsec);
- skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
- }
-
- dev_kfree_skb_any(pdata->tx_tstamp_skb);
-
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- pdata->tx_tstamp_skb = NULL;
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
-}
-
-static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
- struct ifreq *ifreq)
-{
- if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
- sizeof(pdata->tstamp_config)))
- return -EFAULT;
-
- return 0;
-}
-
-static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
- struct ifreq *ifreq)
-{
- struct hwtstamp_config config;
- unsigned int mac_tscr;
-
- if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- if (config.flags)
- return -EINVAL;
-
- mac_tscr = 0;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- break;
-
- case HWTSTAMP_TX_ON:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
-
- case HWTSTAMP_FILTER_ALL:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* PTP v1, UDP, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* PTP v1, UDP, Sync packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* PTP v1, UDP, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- default:
- return -ERANGE;
- }
-
- pdata->hw_if.config_tstamp(pdata, mac_tscr);
-
- memcpy(&pdata->tstamp_config, &config, sizeof(config));
-
- return 0;
-}
-
-static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
- struct sk_buff *skb,
- struct xgbe_packet_data *packet)
-{
- unsigned long flags;
-
- if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- if (pdata->tx_tstamp_skb) {
- /* Another timestamp in progress, ignore this one */
- XGMAC_SET_BITS(packet->attributes,
- TX_PACKET_ATTRIBUTES, PTP, 0);
- } else {
- pdata->tx_tstamp_skb = skb_get(skb);
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- }
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
- }
-
- if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
- skb_tx_timestamp(skb);
-}
-
-static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
-{
- if (skb_vlan_tag_present(skb))
- packet->vlan_ctag = skb_vlan_tag_get(skb);
-}
-
-static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
-{
- int ret;
-
- if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- TSO_ENABLE))
- return 0;
-
- ret = skb_cow_head(skb, 0);
- if (ret)
- return ret;
-
- packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- packet->tcp_header_len = tcp_hdrlen(skb);
- packet->tcp_payload_len = skb->len - packet->header_len;
- packet->mss = skb_shinfo(skb)->gso_size;
- DBGPR(" packet->header_len=%u\n", packet->header_len);
- DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
- packet->tcp_header_len, packet->tcp_payload_len);
- DBGPR(" packet->mss=%u\n", packet->mss);
-
- /* Update the number of packets that will ultimately be transmitted
- * along with the extra bytes for each extra packet
- */
- packet->tx_packets = skb_shinfo(skb)->gso_segs;
- packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;
-
- return 0;
-}
-
-static int xgbe_is_tso(struct sk_buff *skb)
-{
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
-
- if (!skb_is_gso(skb))
- return 0;
-
- DBGPR(" TSO packet to be processed\n");
-
- return 1;
}
static void xgbe_packet_info(struct xgbe_prv_data *pdata,
- struct xgbe_ring *ring, struct sk_buff *skb,
+ struct xgbe_ring *ring, struct mbuf *m0,
struct xgbe_packet_data *packet)
{
- struct skb_frag_struct *frag;
- unsigned int context_desc;
+ struct mbuf *m;
unsigned int len;
- unsigned int i;
- packet->skb = skb;
+ packet->m = m0;
- context_desc = 0;
packet->rdesc_count = 0;
packet->tx_packets = 1;
- packet->tx_bytes = skb->len;
-
- if (xgbe_is_tso(skb)) {
- /* TSO requires an extra descriptor if mss is different */
- if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
- context_desc = 1;
- packet->rdesc_count++;
- }
-
- /* TSO requires an extra descriptor for TSO header */
- packet->rdesc_count++;
-
- XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- TSO_ENABLE, 1);
- XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- CSUM_ENABLE, 1);
- } else if (skb->ip_summed == CHECKSUM_PARTIAL)
- XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- CSUM_ENABLE, 1);
-
- if (skb_vlan_tag_present(skb)) {
- /* VLAN requires an extra descriptor if tag is different */
- if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
- /* We can share with the TSO context descriptor */
- if (!context_desc) {
- context_desc = 1;
- packet->rdesc_count++;
- }
-
- XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- VLAN_CTAG, 1);
- }
-
- if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
- XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- PTP, 1);
-
- for (len = skb_headlen(skb); len;) {
- packet->rdesc_count++;
- len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
- }
+ packet->tx_bytes = m_length(m0, NULL);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- for (len = skb_frag_size(frag); len; ) {
+ for (m = m0; m != NULL; m = m->m_next) {
+ for (len = m->m_len; len != 0;) {
packet->rdesc_count++;
- len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
+ len -= MIN(len, XGBE_TX_MAX_BUF_SIZE);
}
}
}
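
xgbe_packet_info() sizes the transmit slot by walking the mbuf chain and charging one descriptor per XGBE_TX_MAX_BUF_SIZE-sized slice of each segment. A standalone re-creation of that count (the structure and constant are stand-ins for the kernel's):

#include <stdio.h>

#define DEMO_TX_MAX_BUF_SIZE	16320	/* stand-in for XGBE_TX_MAX_BUF_SIZE */

struct demo_mbuf {
	unsigned int m_len;
	struct demo_mbuf *m_next;
};

/* Same counting loop as xgbe_packet_info(). */
static unsigned int
count_descs(struct demo_mbuf *m0)
{
	struct demo_mbuf *m;
	unsigned int len, count = 0;

	for (m = m0; m != NULL; m = m->m_next) {
		for (len = m->m_len; len != 0;) {
			count++;
			len -= (len < DEMO_TX_MAX_BUF_SIZE) ?
			    len : DEMO_TX_MAX_BUF_SIZE;
		}
	}
	return (count);
}

int
main(void)
{
	struct demo_mbuf b = { 20000, NULL };
	struct demo_mbuf a = { 100, &b };

	printf("descriptors needed: %u\n", count_descs(&a));	/* 3 */
	return (0);
}
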
-static int xgbe_open(struct net_device *netdev)
+int xgbe_open(struct ifnet *netdev)
{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_prv_data *pdata = netdev->if_softc;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
int ret;
@@ -1302,38 +726,29 @@ static int xgbe_open(struct net_device *netdev)
if (ret)
return ret;
- /* Enable the clocks */
- ret = clk_prepare_enable(pdata->sysclk);
- if (ret) {
- netdev_alert(netdev, "dma clk_prepare_enable failed\n");
- return ret;
- }
-
- ret = clk_prepare_enable(pdata->ptpclk);
- if (ret) {
- netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
- goto err_sysclk;
- }
-
/* Calculate the Rx buffer size before allocating rings */
- ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
- if (ret < 0)
+ ret = xgbe_calc_rx_buf_size(netdev, if_getmtu(netdev));
+ if (ret < 0) {
goto err_ptpclk;
+ }
pdata->rx_buf_size = ret;
/* Allocate the channel and ring structures */
ret = xgbe_alloc_channels(pdata);
- if (ret)
+ if (ret) {
+ printf("xgbe_alloc_channels failed\n");
goto err_ptpclk;
+ }
/* Allocate the ring descriptors and buffers */
ret = desc_if->alloc_ring_resources(pdata);
- if (ret)
+ if (ret) {
+ printf("desc_if->alloc_ring_resources failed\n");
goto err_channels;
+ }
- INIT_WORK(&pdata->service_work, xgbe_service);
- INIT_WORK(&pdata->restart_work, xgbe_restart);
- INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+ TASK_INIT(&pdata->service_work, 0, xgbe_service, pdata);
+ TASK_INIT(&pdata->restart_work, 0, xgbe_restart, pdata);
xgbe_init_timers(pdata);
ret = xgbe_start(pdata);
@@ -1353,17 +768,13 @@ err_channels:
xgbe_free_channels(pdata);
err_ptpclk:
- clk_disable_unprepare(pdata->ptpclk);
-
-err_sysclk:
- clk_disable_unprepare(pdata->sysclk);
return ret;
}
-static int xgbe_close(struct net_device *netdev)
+int xgbe_close(struct ifnet *netdev)
{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_prv_data *pdata = netdev->if_softc;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
DBGPR("-->xgbe_close\n");
@@ -1377,10 +788,6 @@ static int xgbe_close(struct net_device *netdev)
/* Free the channel and ring structures */
xgbe_free_channels(pdata);
- /* Disable the clocks */
- clk_disable_unprepare(pdata->ptpclk);
- clk_disable_unprepare(pdata->sysclk);
-
set_bit(XGBE_DOWN, &pdata->dev_state);
DBGPR("<--xgbe_close\n");
@@ -1388,142 +795,66 @@ static int xgbe_close(struct net_device *netdev)
return 0;
}
-static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+int xgbe_xmit(struct ifnet *ifp, struct mbuf *m)
{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_prv_data *pdata = ifp->if_softc;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
struct xgbe_channel *channel;
struct xgbe_ring *ring;
struct xgbe_packet_data *packet;
- struct netdev_queue *txq;
int ret;
- DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
+ M_ASSERTPKTHDR(m);
+ MPASS(m->m_nextpkt == NULL);
- channel = pdata->channel + skb->queue_mapping;
- txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ if (__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state) ||
+ !pdata->phy.link)) {
+ m_freem(m);
+ return (ENETDOWN);
+ }
+
+ channel = pdata->channel;
ring = channel->tx_ring;
packet = &ring->packet_data;
- ret = NETDEV_TX_OK;
-
- if (skb->len == 0) {
- netif_err(pdata, tx_err, netdev,
- "empty skb received from stack\n");
- dev_kfree_skb_any(skb);
- goto tx_netdev_return;
- }
-
/* Calculate preliminary packet info */
memset(packet, 0, sizeof(*packet));
- xgbe_packet_info(pdata, ring, skb, packet);
+ xgbe_packet_info(pdata, ring, m, packet);
/* Check that there are enough descriptors available */
ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
if (ret)
goto tx_netdev_return;
- ret = xgbe_prep_tso(skb, packet);
- if (ret) {
- netif_err(pdata, tx_err, netdev,
- "error processing TSO packet\n");
- dev_kfree_skb_any(skb);
- goto tx_netdev_return;
- }
- xgbe_prep_vlan(skb, packet);
-
- if (!desc_if->map_tx_skb(channel, skb)) {
- dev_kfree_skb_any(skb);
+ if (!desc_if->map_tx_skb(channel, m)) {
goto tx_netdev_return;
}
- xgbe_prep_tx_tstamp(pdata, skb, packet);
-
- /* Report on the actual number of bytes (to be) sent */
- netdev_tx_sent_queue(txq, packet->tx_bytes);
-
/* Configure required descriptor fields for transmission */
hw_if->dev_xmit(channel);
- if (netif_msg_pktdata(pdata))
- xgbe_print_pkt(netdev, skb, true);
-
- /* Stop the queue in advance if there may not be enough descriptors */
- xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
-
- ret = NETDEV_TX_OK;
+ return 0;
tx_netdev_return:
- return ret;
-}
-
-static void xgbe_set_rx_mode(struct net_device *netdev)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
-
- DBGPR("-->xgbe_set_rx_mode\n");
-
- hw_if->config_rx_mode(pdata);
-
- DBGPR("<--xgbe_set_rx_mode\n");
-}
-
-static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct sockaddr *saddr = addr;
-
- DBGPR("-->xgbe_set_mac_address\n");
-
- if (!is_valid_ether_addr(saddr->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
-
- hw_if->set_mac_address(pdata, netdev->dev_addr);
-
- DBGPR("<--xgbe_set_mac_address\n");
+ m_freem(m);
return 0;
}
-static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- int ret;
-
- switch (cmd) {
- case SIOCGHWTSTAMP:
- ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
- break;
-
- case SIOCSHWTSTAMP:
- ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
- break;
-
- default:
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-static int xgbe_change_mtu(struct net_device *netdev, int mtu)
+int xgbe_change_mtu(struct ifnet *netdev, int mtu)
{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_prv_data *pdata = netdev->if_softc;
int ret;
DBGPR("-->xgbe_change_mtu\n");
ret = xgbe_calc_rx_buf_size(netdev, mtu);
if (ret < 0)
- return ret;
+ return -ret;
pdata->rx_buf_size = ret;
- netdev->mtu = mtu;
+ netdev->if_mtu = mtu;
xgbe_restart_dev(pdata);
@@ -1532,187 +863,6 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
return 0;
}
-static void xgbe_tx_timeout(struct net_device *netdev)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
- netdev_warn(netdev, "tx timeout, device restarting\n");
- schedule_work(&pdata->restart_work);
-}
-
-static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *s)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
-
- DBGPR("-->%s\n", __func__);
-
- pdata->hw_if.read_mmc_stats(pdata);
-
- s->rx_packets = pstats->rxframecount_gb;
- s->rx_bytes = pstats->rxoctetcount_gb;
- s->rx_errors = pstats->rxframecount_gb -
- pstats->rxbroadcastframes_g -
- pstats->rxmulticastframes_g -
- pstats->rxunicastframes_g;
- s->multicast = pstats->rxmulticastframes_g;
- s->rx_length_errors = pstats->rxlengtherror;
- s->rx_crc_errors = pstats->rxcrcerror;
- s->rx_fifo_errors = pstats->rxfifooverflow;
-
- s->tx_packets = pstats->txframecount_gb;
- s->tx_bytes = pstats->txoctetcount_gb;
- s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
- s->tx_dropped = netdev->stats.tx_dropped;
-
- DBGPR("<--%s\n", __func__);
-
- return s;
-}
-
-static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
- u16 vid)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
-
- DBGPR("-->%s\n", __func__);
-
- set_bit(vid, pdata->active_vlans);
- hw_if->update_vlan_hash_table(pdata);
-
- DBGPR("<--%s\n", __func__);
-
- return 0;
-}
-
-static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
- u16 vid)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
-
- DBGPR("-->%s\n", __func__);
-
- clear_bit(vid, pdata->active_vlans);
- hw_if->update_vlan_hash_table(pdata);
-
- DBGPR("<--%s\n", __func__);
-
- return 0;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void xgbe_poll_controller(struct net_device *netdev)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_channel *channel;
- unsigned int i;
-
- DBGPR("-->xgbe_poll_controller\n");
-
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- xgbe_dma_isr(channel->dma_irq, channel);
- } else {
- disable_irq(pdata->dev_irq);
- xgbe_isr(pdata->dev_irq, pdata);
- enable_irq(pdata->dev_irq);
- }
-
- DBGPR("<--xgbe_poll_controller\n");
-}
-#endif /* End CONFIG_NET_POLL_CONTROLLER */
-
-static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc_to_netdev)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- u8 tc;
-
- if (tc_to_netdev->type != TC_SETUP_MQPRIO)
- return -EINVAL;
-
- tc = tc_to_netdev->tc;
-
- if (tc > pdata->hw_feat.tc_cnt)
- return -EINVAL;
-
- pdata->num_tcs = tc;
- pdata->hw_if.config_tc(pdata);
-
- return 0;
-}
-
-static int xgbe_set_features(struct net_device *netdev,
- netdev_features_t features)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
- int ret = 0;
-
- rxhash = pdata->netdev_features & NETIF_F_RXHASH;
- rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
- rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
- rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
-
- if ((features & NETIF_F_RXHASH) && !rxhash)
- ret = hw_if->enable_rss(pdata);
- else if (!(features & NETIF_F_RXHASH) && rxhash)
- ret = hw_if->disable_rss(pdata);
- if (ret)
- return ret;
-
- if ((features & NETIF_F_RXCSUM) && !rxcsum)
- hw_if->enable_rx_csum(pdata);
- else if (!(features & NETIF_F_RXCSUM) && rxcsum)
- hw_if->disable_rx_csum(pdata);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
- hw_if->enable_rx_vlan_stripping(pdata);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
- hw_if->disable_rx_vlan_stripping(pdata);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
- hw_if->enable_rx_vlan_filtering(pdata);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
- hw_if->disable_rx_vlan_filtering(pdata);
-
- pdata->netdev_features = features;
-
- DBGPR("<--xgbe_set_features\n");
-
- return 0;
-}
-
-static const struct net_device_ops xgbe_netdev_ops = {
- .ndo_open = xgbe_open,
- .ndo_stop = xgbe_close,
- .ndo_start_xmit = xgbe_xmit,
- .ndo_set_rx_mode = xgbe_set_rx_mode,
- .ndo_set_mac_address = xgbe_set_mac_address,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = xgbe_ioctl,
- .ndo_change_mtu = xgbe_change_mtu,
- .ndo_tx_timeout = xgbe_tx_timeout,
- .ndo_get_stats64 = xgbe_get_stats64,
- .ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = xgbe_poll_controller,
-#endif
- .ndo_setup_tc = xgbe_setup_tc,
- .ndo_set_features = xgbe_set_features,
-};
-
-struct net_device_ops *xgbe_get_netdev_ops(void)
-{
- return (struct net_device_ops *)&xgbe_netdev_ops;
-}
-
static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
@@ -1736,59 +886,13 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
}
/* Make sure everything is written before the register write */
- wmb();
+ dsb(sy);
/* Update the Rx Tail Pointer Register with address of
* the last cleaned entry */
rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
- lower_32_bits(rdata->rdesc_dma));
-}
-
-static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
- struct napi_struct *napi,
- struct xgbe_ring_data *rdata,
- unsigned int len)
-{
- struct sk_buff *skb;
- u8 *packet;
- unsigned int copy_len;
-
- skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
- if (!skb)
- return NULL;
-
- /* Start with the header buffer which may contain just the header
- * or the header plus data
- */
- dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
- rdata->rx.hdr.dma_off,
- rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
-
- packet = page_address(rdata->rx.hdr.pa.pages) +
- rdata->rx.hdr.pa.pages_offset;
- copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
- copy_len = min(rdata->rx.hdr.dma_len, copy_len);
- skb_copy_to_linear_data(skb, packet, copy_len);
- skb_put(skb, copy_len);
-
- len -= copy_len;
- if (len) {
- /* Add the remaining data as a frag */
- dma_sync_single_range_for_cpu(pdata->dev,
- rdata->rx.buf.dma_base,
- rdata->rx.buf.dma_off,
- rdata->rx.buf.dma_len,
- DMA_FROM_DEVICE);
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rdata->rx.buf.pa.pages,
- rdata->rx.buf.pa.pages_offset,
- len, rdata->rx.buf.dma_len);
- rdata->rx.buf.pa.pages = NULL;
- }
-
- return skb;
+ lower_32_bits(rdata->rdata_paddr));
}
static int xgbe_tx_poll(struct xgbe_channel *channel)
@@ -1799,10 +903,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
- struct net_device *netdev = pdata->netdev;
- struct netdev_queue *txq;
int processed = 0;
- unsigned int tx_packets = 0, tx_bytes = 0;
unsigned int cur;
DBGPR("-->xgbe_tx_poll\n");
@@ -1814,9 +915,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
cur = ring->cur;
/* Be sure we get ring->cur before accessing descriptor data */
- smp_rmb();
-
- txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ dsb(sy);
while ((processed < XGBE_TX_DESC_MAX_PROC) &&
(ring->dirty != cur)) {
@@ -1828,15 +927,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
/* Make sure descriptor fields are read after reading the OWN
* bit */
- dma_rmb();
-
- if (netif_msg_tx_done(pdata))
- xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
-
- if (hw_if->is_last_desc(rdesc)) {
- tx_packets += rdata->tx.packets;
- tx_bytes += rdata->tx.bytes;
- }
+ dsb(sy);
/* Free the SKB and reset the descriptor for re-use */
desc_if->unmap_rdata(pdata, rdata);
@@ -1849,14 +940,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
if (!processed)
return 0;
- netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
-
- if ((ring->tx.queue_stopped == 1) &&
- (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
- ring->tx.queue_stopped = 0;
- netif_tx_wake_queue(txq);
- }
-
DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
return processed;
@@ -1869,12 +952,9 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_packet_data *packet;
- struct net_device *netdev = pdata->netdev;
- struct napi_struct *napi;
- struct sk_buff *skb;
- struct skb_shared_hwtstamps *hwtstamps;
- unsigned int incomplete, error, context_next, context;
- unsigned int len, rdesc_len, max_len;
+ struct ifnet *ifp = pdata->netdev;
+ struct mbuf *m;
+ unsigned int incomplete, context_next, context;
unsigned int received = 0;
int packet_count = 0;
@@ -1887,25 +967,11 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
incomplete = 0;
context_next = 0;
- napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
-
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
packet = &ring->packet_data;
while (packet_count < budget) {
DBGPR(" cur = %d\n", ring->cur);
- /* First time in loop see if we need to restore state */
- if (!received && rdata->state_saved) {
- skb = rdata->state.skb;
- error = rdata->state.error;
- len = rdata->state.len;
- } else {
- memset(packet, 0, sizeof(*packet));
- skb = NULL;
- error = 0;
- len = 0;
- }
-
read_again:
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
@@ -1915,6 +981,8 @@ read_again:
if (hw_if->dev_read(channel))
break;
+ m = rdata->mb;
+
received++;
ring->cur++;
@@ -1929,118 +997,41 @@ read_again:
CONTEXT);
	/* Packet incomplete or a context descriptor follows, keep reading */
- if ((incomplete || context_next) && error)
+ if (incomplete || context_next) {
goto read_again;
-
- if (error || packet->errors) {
- if (packet->errors)
- netif_err(pdata, rx_err, netdev,
- "error in received packet\n");
- dev_kfree_skb(skb);
- goto next_packet;
- }
-
- if (!context) {
- /* Length is cumulative, get this descriptor's length */
- rdesc_len = rdata->rx.len - len;
- len += rdesc_len;
-
- if (rdesc_len && !skb) {
- skb = xgbe_create_skb(pdata, napi, rdata,
- rdesc_len);
- if (!skb)
- error = 1;
- } else if (rdesc_len) {
- dma_sync_single_range_for_cpu(pdata->dev,
- rdata->rx.buf.dma_base,
- rdata->rx.buf.dma_off,
- rdata->rx.buf.dma_len,
- DMA_FROM_DEVICE);
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rdata->rx.buf.pa.pages,
- rdata->rx.buf.pa.pages_offset,
- rdesc_len,
- rdata->rx.buf.dma_len);
- rdata->rx.buf.pa.pages = NULL;
- }
}
- if (incomplete || context_next)
- goto read_again;
-
- if (!skb)
- goto next_packet;
-
- /* Be sure we don't exceed the configured MTU */
- max_len = netdev->mtu + ETH_HLEN;
- if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (skb->protocol == htons(ETH_P_8021Q)))
- max_len += VLAN_HLEN;
-
- if (skb->len > max_len) {
- netif_err(pdata, rx_err, netdev,
- "packet length exceeds configured MTU\n");
- dev_kfree_skb(skb);
+ if (packet->errors) {
+ rdata->mbuf_free = 1;
goto next_packet;
}
+ rdata->mb = NULL;
- if (netif_msg_pktdata(pdata))
- xgbe_print_pkt(netdev, skb, false);
-
- skb_checksum_none_assert(skb);
- if (XGMAC_GET_BITS(packet->attributes,
- RX_PACKET_ATTRIBUTES, CSUM_DONE))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- if (XGMAC_GET_BITS(packet->attributes,
- RX_PACKET_ATTRIBUTES, VLAN_CTAG))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- packet->vlan_ctag);
-
- if (XGMAC_GET_BITS(packet->attributes,
- RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
- u64 nsec;
-
- nsec = timecounter_cyc2time(&pdata->tstamp_tc,
- packet->rx_tstamp);
- hwtstamps = skb_hwtstamps(skb);
- hwtstamps->hwtstamp = ns_to_ktime(nsec);
+ m->m_pkthdr.len = rdata->rx.hdr_len + rdata->rx.len;
+ if (rdata->rx.hdr_len != 0) {
+ m->m_len = rdata->rx.hdr_len;
+ m->m_next->m_len = rdata->rx.len;
+ } else {
+ m->m_len = rdata->rx.len;
+ m_freem(m->m_next);
+ m->m_next = NULL;
}
+ if_setrcvif(m, ifp);
+ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
- if (XGMAC_GET_BITS(packet->attributes,
- RX_PACKET_ATTRIBUTES, RSS_HASH))
- skb_set_hash(skb, packet->rss_hash,
- packet->rss_hash_type);
-
- skb->dev = netdev;
- skb->protocol = eth_type_trans(skb, netdev);
- skb_record_rx_queue(skb, channel->queue_index);
-
- napi_gro_receive(napi, skb);
+ ifp->if_input(ifp, m);
next_packet:
packet_count++;
}
- /* Check if we need to save state before leaving */
- if (received && (incomplete || context_next)) {
- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
- rdata->state_saved = 1;
- rdata->state.skb = skb;
- rdata->state.len = len;
- rdata->state.error = error;
- }
-
DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
return packet_count;
}
-static int xgbe_one_poll(struct napi_struct *napi, int budget)
+static int xgbe_one_poll(struct xgbe_channel *channel, int budget)
{
- struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
- napi);
int processed = 0;
DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
@@ -2051,24 +1042,13 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
/* Process Rx ring next */
processed = xgbe_rx_poll(channel, budget);
- /* If we processed everything, we are done */
- if (processed < budget) {
- /* Turn off polling */
- napi_complete_done(napi, processed);
-
- /* Enable Tx and Rx interrupts */
- enable_irq(channel->dma_irq);
- }
-
DBGPR("<--xgbe_one_poll: received = %d\n", processed);
return processed;
}
-static int xgbe_all_poll(struct napi_struct *napi, int budget)
+static int xgbe_all_poll(struct xgbe_prv_data *pdata, int budget)
{
- struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
- napi);
struct xgbe_channel *channel;
int ring_budget;
int processed, last_processed;
@@ -2093,86 +1073,7 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
}
} while ((processed < budget) && (processed != last_processed));
- /* If we processed everything, we are done */
- if (processed < budget) {
- /* Turn off polling */
- napi_complete_done(napi, processed);
-
- /* Enable Tx and Rx interrupts */
- xgbe_enable_rx_tx_ints(pdata);
- }
-
DBGPR("<--xgbe_all_poll: received = %d\n", processed);
return processed;
}
-
-void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
- unsigned int idx, unsigned int count, unsigned int flag)
-{
- struct xgbe_ring_data *rdata;
- struct xgbe_ring_desc *rdesc;
-
- while (count--) {
- rdata = XGBE_GET_DESC_DATA(ring, idx);
- rdesc = rdata->rdesc;
- netdev_dbg(pdata->netdev,
- "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
- (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
- le32_to_cpu(rdesc->desc0),
- le32_to_cpu(rdesc->desc1),
- le32_to_cpu(rdesc->desc2),
- le32_to_cpu(rdesc->desc3));
- idx++;
- }
-}
-
-void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
- unsigned int idx)
-{
- struct xgbe_ring_data *rdata;
- struct xgbe_ring_desc *rdesc;
-
- rdata = XGBE_GET_DESC_DATA(ring, idx);
- rdesc = rdata->rdesc;
- netdev_dbg(pdata->netdev,
- "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
- idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
- le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
-}
-
-void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
-{
- struct ethhdr *eth = (struct ethhdr *)skb->data;
- unsigned char *buf = skb->data;
- unsigned char buffer[128];
- unsigned int i, j;
-
- netdev_dbg(netdev, "\n************** SKB dump ****************\n");
-
- netdev_dbg(netdev, "%s packet of %d bytes\n",
- (tx_rx ? "TX" : "RX"), skb->len);
-
- netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
- netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
- netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
-
- for (i = 0, j = 0; i < skb->len;) {
- j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
- buf[i++]);
-
- if ((i % 32) == 0) {
- netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer);
- j = 0;
- } else if ((i % 16) == 0) {
- buffer[j++] = ' ';
- buffer[j++] = ' ';
- } else if ((i % 4) == 0) {
- buffer[j++] = ' ';
- }
- }
- if (i % 32)
- netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer);
-
- netdev_dbg(netdev, "\n************** SKB dump ****************\n");
-}
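
The receive path above replaces the skb/napi machinery with a direct mbuf hand-off to if_input(). A minimal sketch of that pattern, mirroring the hunk's mbuf-chain fixups (the function name is illustrative; error handling is elided):

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>

/*
 * Hand a received packet to the stack.  The hardware may split the
 * header and payload across two mbufs (split-header receive), so
 * either fix up both lengths or free the unused second mbuf.
 */
static void
rx_input_sketch(struct ifnet *ifp, struct mbuf *m, u_int hdr_len, u_int len)
{
	m->m_pkthdr.len = hdr_len + len;
	if (hdr_len != 0) {
		/* Header in the first mbuf, payload in the second. */
		m->m_len = hdr_len;
		m->m_next->m_len = len;
	} else {
		/* Whole packet in the first mbuf; drop the unused one. */
		m->m_len = len;
		m_freem(m->m_next);
		m->m_next = NULL;
	}
	if_setrcvif(m, ifp);
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	ifp->if_input(ifp, m);	/* no GRO equivalent; straight to the stack */
}
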
diff --git a/sys/dev/axgbe/xgbe-mdio.c b/sys/dev/axgbe/xgbe-mdio.c
index 84c5d296d13e..6967d54da521 100644
--- a/sys/dev/axgbe/xgbe-mdio.c
+++ b/sys/dev/axgbe/xgbe-mdio.c
@@ -114,17 +114,17 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/module.h>
-#include <linux/kmod.h>
-#include <linux/mdio.h>
-#include <linux/phy.h>
-#include <linux/of.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
#include "xgbe.h"
#include "xgbe-common.h"
+static void xgbe_an_state_machine(struct xgbe_prv_data *pdata);
+
static void xgbe_an_enable_kr_training(struct xgbe_prv_data *pdata)
{
unsigned int reg;
@@ -154,7 +154,7 @@ static void xgbe_pcs_power_cycle(struct xgbe_prv_data *pdata)
reg |= MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
- usleep_range(75, 100);
+ DELAY(75);
reg &= ~MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
@@ -177,7 +177,7 @@ static void xgbe_serdes_complete_ratechange(struct xgbe_prv_data *pdata)
/* Wait for Rx and Tx ready */
wait = XGBE_RATECHANGE_COUNT;
while (wait--) {
- usleep_range(50, 75);
+ DELAY(50);
status = XSIR0_IOREAD(pdata, SIR0_STATUS);
if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
@@ -185,9 +185,6 @@ static void xgbe_serdes_complete_ratechange(struct xgbe_prv_data *pdata)
goto rx_reset;
}
- netif_dbg(pdata, link, pdata->netdev, "SerDes rx/tx not ready (%#hx)\n",
- status);
-
rx_reset:
/* Perform Rx reset for the DFE changes */
XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
@@ -238,8 +235,6 @@ static void xgbe_xgmii_mode(struct xgbe_prv_data *pdata)
pdata->serdes_dfe_tap_ena[XGBE_SPEED_10000]);
xgbe_serdes_complete_ratechange(pdata);
-
- netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
}
static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata)
@@ -286,8 +281,6 @@ static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata)
pdata->serdes_dfe_tap_ena[XGBE_SPEED_2500]);
xgbe_serdes_complete_ratechange(pdata);
-
- netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
}
static void xgbe_gmii_mode(struct xgbe_prv_data *pdata)
@@ -334,8 +327,6 @@ static void xgbe_gmii_mode(struct xgbe_prv_data *pdata)
pdata->serdes_dfe_tap_ena[XGBE_SPEED_1000]);
xgbe_serdes_complete_ratechange(pdata);
-
- netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
}
static void xgbe_cur_mode(struct xgbe_prv_data *pdata,
@@ -440,15 +431,11 @@ static void xgbe_set_an(struct xgbe_prv_data *pdata, bool enable, bool restart)
static void xgbe_restart_an(struct xgbe_prv_data *pdata)
{
xgbe_set_an(pdata, true, true);
-
- netif_dbg(pdata, link, pdata->netdev, "AN enabled/restarted\n");
}
static void xgbe_disable_an(struct xgbe_prv_data *pdata)
{
xgbe_set_an(pdata, false, false);
-
- netif_dbg(pdata, link, pdata->netdev, "AN disabled\n");
}
static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
@@ -483,9 +470,6 @@ static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
reg);
XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
-
- netif_dbg(pdata, link, pdata->netdev,
- "KR training initiated\n");
}
return XGBE_AN_PAGE_RECEIVED;
@@ -554,19 +538,16 @@ static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
enum xgbe_an ret;
if (!pdata->an_start) {
- pdata->an_start = jiffies;
+ pdata->an_start = ticks;
} else {
an_timeout = pdata->an_start +
- msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
- if (time_after(jiffies, an_timeout)) {
+ ((uint64_t)XGBE_AN_MS_TIMEOUT * (uint64_t)hz) / 1000ull;
+ if ((int)(ticks - an_timeout) > 0) {
/* Auto-negotiation timed out, reset state */
pdata->kr_state = XGBE_RX_BPA;
pdata->kx_state = XGBE_RX_BPA;
- pdata->an_start = jiffies;
-
- netif_dbg(pdata, link, pdata->netdev,
- "AN timed out, resetting state\n");
+ pdata->an_start = ticks;
}
}
@@ -620,12 +601,10 @@ static enum xgbe_an xgbe_an_incompat_link(struct xgbe_prv_data *pdata)
return XGBE_AN_INCOMPAT_LINK;
}
-static irqreturn_t xgbe_an_isr(int irq, void *data)
+static void xgbe_an_isr(void *data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
- netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
-
/* Disable AN interrupts */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
@@ -636,57 +615,19 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
/* Clear the interrupt(s) that fired and process them */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int);
- queue_work(pdata->an_workqueue, &pdata->an_irq_work);
+ xgbe_an_state_machine(pdata);
} else {
/* Enable AN interrupts */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
XGBE_AN_INT_MASK);
}
-
- return IRQ_HANDLED;
-}
-
-static void xgbe_an_irq_work(struct work_struct *work)
-{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- an_irq_work);
-
- /* Avoid a race between enabling the IRQ and exiting the work by
- * waiting for the work to finish and then queueing it
- */
- flush_work(&pdata->an_work);
- queue_work(pdata->an_workqueue, &pdata->an_work);
}
-static const char *xgbe_state_as_string(enum xgbe_an state)
+static void xgbe_an_state_machine(struct xgbe_prv_data *pdata)
{
- switch (state) {
- case XGBE_AN_READY:
- return "Ready";
- case XGBE_AN_PAGE_RECEIVED:
- return "Page-Received";
- case XGBE_AN_INCOMPAT_LINK:
- return "Incompatible-Link";
- case XGBE_AN_COMPLETE:
- return "Complete";
- case XGBE_AN_NO_LINK:
- return "No-Link";
- case XGBE_AN_ERROR:
- return "Error";
- default:
- return "Undefined";
- }
-}
-
-static void xgbe_an_state_machine(struct work_struct *work)
-{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- an_work);
enum xgbe_an cur_state = pdata->an_state;
- mutex_lock(&pdata->an_mutex);
+ sx_xlock(&pdata->an_mutex);
if (!pdata->an_int)
goto out;
@@ -708,9 +649,6 @@ next_int:
pdata->an_result = pdata->an_state;
again:
- netif_dbg(pdata, link, pdata->netdev, "AN %s\n",
- xgbe_state_as_string(pdata->an_state));
-
cur_state = pdata->an_state;
switch (pdata->an_state) {
@@ -731,9 +669,6 @@ again:
case XGBE_AN_COMPLETE:
pdata->parallel_detect = pdata->an_supported ? 0 : 1;
- netif_dbg(pdata, link, pdata->netdev, "%s successful\n",
- pdata->an_supported ? "Auto negotiation"
- : "Parallel detection");
break;
case XGBE_AN_NO_LINK:
@@ -747,10 +682,6 @@ again:
pdata->an_int = 0;
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
} else if (pdata->an_state == XGBE_AN_ERROR) {
- netdev_err(pdata->netdev,
- "error during auto-negotiation, state=%u\n",
- cur_state);
-
pdata->an_int = 0;
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
}
@@ -761,9 +692,6 @@ again:
pdata->kr_state = XGBE_RX_BPA;
pdata->kx_state = XGBE_RX_BPA;
pdata->an_start = 0;
-
- netif_dbg(pdata, link, pdata->netdev, "AN result: %s\n",
- xgbe_state_as_string(pdata->an_result));
}
if (cur_state != pdata->an_state)
@@ -776,7 +704,7 @@ out:
/* Enable AN interrupts on the way out */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_INT_MASK);
- mutex_unlock(&pdata->an_mutex);
+ sx_xunlock(&pdata->an_mutex);
}
static void xgbe_an_init(struct xgbe_prv_data *pdata)
@@ -785,10 +713,7 @@ static void xgbe_an_init(struct xgbe_prv_data *pdata)
/* Set up Advertisement register 3 first */
reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
- if (pdata->phy.advertising & ADVERTISED_10000baseR_FEC)
- reg |= 0xc000;
- else
- reg &= ~0xc000;
+ reg &= ~0xc000;
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg);
@@ -823,48 +748,6 @@ static void xgbe_an_init(struct xgbe_prv_data *pdata)
reg &= ~XGBE_XNP_NP_EXCHANGE;
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
-
- netif_dbg(pdata, link, pdata->netdev, "AN initialized\n");
-}
-
-static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
-{
- if (pdata->tx_pause && pdata->rx_pause)
- return "rx/tx";
- else if (pdata->rx_pause)
- return "rx";
- else if (pdata->tx_pause)
- return "tx";
- else
- return "off";
-}
-
-static const char *xgbe_phy_speed_string(int speed)
-{
- switch (speed) {
- case SPEED_1000:
- return "1Gbps";
- case SPEED_2500:
- return "2.5Gbps";
- case SPEED_10000:
- return "10Gbps";
- case SPEED_UNKNOWN:
- return "Unknown";
- default:
- return "Unsupported";
- }
-}
-
-static void xgbe_phy_print_status(struct xgbe_prv_data *pdata)
-{
- if (pdata->phy.link)
- netdev_info(pdata->netdev,
- "Link is Up - %s/%s - flow control %s\n",
- xgbe_phy_speed_string(pdata->phy.speed),
- pdata->phy.duplex == DUPLEX_FULL ? "Full" : "Half",
- xgbe_phy_fc_string(pdata));
- else
- netdev_info(pdata->netdev, "Link is Down\n");
}
static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
@@ -902,14 +785,10 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
pdata->phy_link = 0;
pdata->phy_speed = SPEED_UNKNOWN;
}
-
- if (new_state && netif_msg_link(pdata))
- xgbe_phy_print_status(pdata);
}
static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
{
- netif_dbg(pdata, link, pdata->netdev, "fixed PHY configuration\n");
/* Disable auto-negotiation */
xgbe_disable_an(pdata);
@@ -939,15 +818,16 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
{
set_bit(XGBE_LINK_INIT, &pdata->dev_state);
- pdata->link_check = jiffies;
+ pdata->link_check = ticks;
if (pdata->phy.autoneg != AUTONEG_ENABLE)
return xgbe_phy_config_fixed(pdata);
- netif_dbg(pdata, link, pdata->netdev, "AN PHY configuration\n");
-
/* Disable auto-negotiation interrupt */
- disable_irq(pdata->an_irq);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+
+ /* Clear any auto-negotiation interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
/* Start auto-negotiation in a supported mode */
if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) {
@@ -956,7 +836,7 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
(pdata->phy.advertising & ADVERTISED_2500baseX_Full)) {
xgbe_set_mode(pdata, XGBE_MODE_KX);
} else {
- enable_irq(pdata->an_irq);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
return -EINVAL;
}
@@ -972,7 +852,7 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
pdata->kx_state = XGBE_RX_BPA;
/* Re-enable auto-negotiation interrupt */
- enable_irq(pdata->an_irq);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
/* Set up advertisement registers based on current settings */
xgbe_an_init(pdata);
@@ -987,7 +867,7 @@ static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
{
int ret;
- mutex_lock(&pdata->an_mutex);
+ sx_xlock(&pdata->an_mutex);
ret = __xgbe_phy_config_aneg(pdata);
if (ret)
@@ -995,7 +875,7 @@ static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
else
clear_bit(XGBE_LINK_ERR, &pdata->dev_state);
- mutex_unlock(&pdata->an_mutex);
+ sx_unlock(&pdata->an_mutex);
return ret;
}
@@ -1009,9 +889,8 @@ static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
{
unsigned long link_timeout;
- link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
- if (time_after(jiffies, link_timeout)) {
- netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
+ link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * hz);
+ if ((int)(ticks - link_timeout) >= 0) {
xgbe_phy_config_aneg(pdata);
}
}
@@ -1109,10 +988,6 @@ static void xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
/* Compare Advertisement and Link Partner register 3 */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
- if (lp_reg & 0xc000)
- pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
-
- pdata->phy.duplex = DUPLEX_FULL;
}
static void xgbe_phy_status(struct xgbe_prv_data *pdata)
@@ -1120,8 +995,6 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
unsigned int reg, link_aneg;
if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
- netif_carrier_off(pdata->netdev);
-
pdata->phy.link = 0;
goto adjust_link;
}
@@ -1145,8 +1018,6 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
-
- netif_carrier_on(pdata->netdev);
} else {
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
xgbe_check_link_timeout(pdata);
@@ -1156,8 +1027,6 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
}
xgbe_phy_status_aneg(pdata);
-
- netif_carrier_off(pdata->netdev);
}
adjust_link:
@@ -1166,7 +1035,6 @@ adjust_link:
static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
{
- netif_dbg(pdata, link, pdata->netdev, "stopping PHY\n");
/* Disable auto-negotiation */
xgbe_disable_an(pdata);
@@ -1174,27 +1042,22 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
/* Disable auto-negotiation interrupts */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
- devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+ bus_teardown_intr(pdata->dev, pdata->an_irq_res, pdata->an_irq_tag);
pdata->phy.link = 0;
- netif_carrier_off(pdata->netdev);
xgbe_phy_adjust_link(pdata);
}
static int xgbe_phy_start(struct xgbe_prv_data *pdata)
{
- struct net_device *netdev = pdata->netdev;
int ret;
- netif_dbg(pdata, link, pdata->netdev, "starting PHY\n");
-
- ret = devm_request_irq(pdata->dev, pdata->an_irq,
- xgbe_an_isr, 0, pdata->an_name,
- pdata);
+ ret = bus_setup_intr(pdata->dev, pdata->an_irq_res,
+ INTR_MPSAFE | INTR_TYPE_NET, NULL, xgbe_an_isr, pdata,
+ &pdata->an_irq_tag);
if (ret) {
- netdev_err(netdev, "phy irq request failed\n");
- return ret;
+ return -ret;
}
/* Set initial mode - call the mode setting routines
@@ -1220,7 +1083,7 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
return xgbe_phy_config_aneg(pdata);
err_irq:
- devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+ bus_teardown_intr(pdata->dev, pdata->an_irq_res, pdata->an_irq_tag);
return ret;
}
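
The devm_request_irq()/devm_free_irq() pair maps onto bus_setup_intr(9) and bus_teardown_intr(9). A sketch of the hookup with the same flags the hunk uses (the handler and function names are illustrative):

#include <sys/param.h>
#include <sys/bus.h>

static void an_handler_sketch(void *);	/* stands in for xgbe_an_isr */

static int
an_intr_setup_sketch(device_t dev, struct resource *irq_res, void *softc,
    void **tagp)
{
	int error;

	/* NULL filter: the handler runs in an ithread, like xgbe_an_isr. */
	error = bus_setup_intr(dev, irq_res, INTR_MPSAFE | INTR_TYPE_NET,
	    NULL, an_handler_sketch, softc, tagp);
	if (error != 0)
		device_printf(dev, "failed to set up AN interrupt: %d\n",
		    error);
	return (error);
}
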
@@ -1235,7 +1098,7 @@ static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
count = 50;
do {
- msleep(20);
+ DELAY(20);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
} while ((reg & MDIO_CTRL1_RESET) && --count);
@@ -1251,50 +1114,9 @@ static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
return 0;
}
-static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
-{
- struct device *dev = pdata->dev;
-
- dev_dbg(dev, "\n************* PHY Reg dump **********************\n");
-
- dev_dbg(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
- dev_dbg(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
- dev_dbg(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
- dev_dbg(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
- dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
- dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
-
- dev_dbg(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
- dev_dbg(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
- dev_dbg(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
- MDIO_AN_ADVERTISE,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
- dev_dbg(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
- MDIO_AN_ADVERTISE + 1,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
- dev_dbg(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
- MDIO_AN_ADVERTISE + 2,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
- dev_dbg(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
- MDIO_AN_COMP_STAT,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
-
- dev_dbg(dev, "\n*************************************************\n");
-}
-
static void xgbe_phy_init(struct xgbe_prv_data *pdata)
{
- mutex_init(&pdata->an_mutex);
- INIT_WORK(&pdata->an_irq_work, xgbe_an_irq_work);
- INIT_WORK(&pdata->an_work, xgbe_an_state_machine);
+ sx_init(&pdata->an_mutex, "axgbe AN lock");
pdata->mdio_mmd = MDIO_MMD_PCS;
/* Initialize supported features */
@@ -1343,9 +1165,6 @@ static void xgbe_phy_init(struct xgbe_prv_data *pdata)
if (pdata->tx_pause)
pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
-
- if (netif_msg_drv(pdata))
- xgbe_dump_phy_registers(pdata);
}
void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
diff --git a/sys/dev/axgbe/xgbe.h b/sys/dev/axgbe/xgbe.h
index 98d9d63c4353..ee55ef8f0a16 100644
--- a/sys/dev/axgbe/xgbe.h
+++ b/sys/dev/axgbe/xgbe.h
@@ -112,21 +112,17 @@
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
*/
#ifndef __XGBE_H__
#define __XGBE_H__
-#include <linux/dma-mapping.h>
-#include <linux/netdevice.h>
-#include <linux/workqueue.h>
-#include <linux/phy.h>
-#include <linux/if_vlan.h>
-#include <linux/bitops.h>
-#include <linux/ptp_clock_kernel.h>
-#include <linux/timecounter.h>
-#include <linux/net_tstamp.h>
-#include <net/dcbnl.h>
+#include "xgbe_osdep.h"
+
+/* From linux/dcbnl.h */
+#define IEEE_8021QAZ_MAX_TCS 8
#define XGBE_DRV_NAME "amd-xgbe"
#define XGBE_DRV_VERSION "1.0.2"
@@ -151,7 +147,7 @@
*/
#define XGBE_TX_MAX_DESCS (MAX_SKB_FRAGS + XGBE_TX_MAX_SPLIT + 2)
-#define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define XGBE_RX_MIN_BUF_SIZE 1522
#define XGBE_RX_BUF_ALIGN 64
#define XGBE_SKB_ALLOC_SIZE 256
#define XGBE_SPH_HDSMS_SIZE 2 /* Keep in sync with SKB_ALLOC_SIZE */
@@ -288,7 +284,7 @@
struct xgbe_prv_data;
struct xgbe_packet_data {
- struct sk_buff *skb;
+ struct mbuf *m;
unsigned int attributes;
@@ -297,18 +293,8 @@ struct xgbe_packet_data {
unsigned int rdesc_count;
unsigned int length;
- unsigned int header_len;
- unsigned int tcp_header_len;
- unsigned int tcp_payload_len;
- unsigned short mss;
-
- unsigned short vlan_ctag;
-
u64 rx_tstamp;
- u32 rss_hash;
- enum pkt_hash_types rss_hash_type;
-
unsigned int tx_packets;
unsigned int tx_bytes;
};
@@ -321,25 +307,6 @@ struct xgbe_ring_desc {
__le32 desc3;
};
-/* Page allocation related values */
-struct xgbe_page_alloc {
- struct page *pages;
- unsigned int pages_len;
- unsigned int pages_offset;
-
- dma_addr_t pages_dma;
-};
-
-/* Ring entry buffer data */
-struct xgbe_buffer_data {
- struct xgbe_page_alloc pa;
- struct xgbe_page_alloc pa_unmap;
-
- dma_addr_t dma_base;
- unsigned long dma_off;
- unsigned int dma_len;
-};
-
/* Tx-related ring data */
struct xgbe_tx_ring_data {
unsigned int packets; /* BQL packet count */
@@ -348,9 +315,6 @@ struct xgbe_tx_ring_data {
/* Rx-related ring data */
struct xgbe_rx_ring_data {
- struct xgbe_buffer_data hdr; /* Header locations */
- struct xgbe_buffer_data buf; /* Payload locations */
-
unsigned short hdr_len; /* Length of received header */
unsigned short len; /* Length of received packet */
};
@@ -361,28 +325,19 @@ struct xgbe_rx_ring_data {
*/
struct xgbe_ring_data {
struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
- dma_addr_t rdesc_dma; /* DMA address of descriptor */
+ bus_addr_t rdata_paddr;
- struct sk_buff *skb; /* Virtual address of SKB */
- dma_addr_t skb_dma; /* DMA address of SKB data */
- unsigned int skb_dma_len; /* Length of SKB DMA area */
+ bus_dma_tag_t mbuf_dmat;
+ bus_dmamap_t mbuf_map;
+ bus_addr_t mbuf_hdr_paddr;
+ bus_addr_t mbuf_data_paddr;
+ bus_size_t mbuf_len;
+
+ int mbuf_free;
+ struct mbuf *mb;
struct xgbe_tx_ring_data tx; /* Tx-related data */
struct xgbe_rx_ring_data rx; /* Rx-related data */
-
- unsigned int mapped_as_page;
-
- /* Incomplete receive save location. If the budget is exhausted
- * or the last descriptor (last normal descriptor or a following
- * context descriptor) has not been DMA'd yet the current state
- * of the receive processing needs to be saved.
- */
- unsigned int state_saved;
- struct {
- struct sk_buff *skb;
- unsigned int len;
- unsigned int error;
- } state;
};
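
The dma_addr_t descriptor addresses give way to bus_dma(9) tag/map/address triples. A sketch of how such a ring is typically set up (constraints such as the 512-byte alignment are illustrative; error unwinding is elided):

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

static void
ring_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error == 0)
		*(bus_addr_t *)arg = segs[0].ds_addr;
}

static int
ring_alloc_sketch(bus_dma_tag_t parent, struct xgbe_ring *ring,
    bus_size_t size)
{
	int err;

	/* One contiguous segment holding the whole descriptor ring. */
	err = bus_dma_tag_create(parent, 512, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL,
	    &ring->rdesc_dmat);
	if (err != 0)
		return (err);
	err = bus_dmamem_alloc(ring->rdesc_dmat, (void **)&ring->rdesc,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &ring->rdesc_map);
	if (err != 0)
		return (err);
	return (bus_dmamap_load(ring->rdesc_dmat, ring->rdesc_map,
	    ring->rdesc, size, ring_load_cb, &ring->rdesc_paddr, 0));
}
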
struct xgbe_ring {
@@ -394,18 +349,19 @@ struct xgbe_ring {
/* Virtual/DMA addresses and count of allocated descriptor memory */
struct xgbe_ring_desc *rdesc;
- dma_addr_t rdesc_dma;
+ bus_dmamap_t rdesc_map;
+ bus_dma_tag_t rdesc_dmat;
+ bus_addr_t rdesc_paddr;
unsigned int rdesc_count;
+ bus_dma_tag_t mbuf_dmat;
+ bus_dmamap_t mbuf_map;
+
/* Array of descriptor data corresponding the descriptor memory
* (always use the XGBE_GET_DESC_DATA macro to access this data)
*/
struct xgbe_ring_data *rdata;
- /* Page allocation for RX buffers */
- struct xgbe_page_alloc rx_hdr_pa;
- struct xgbe_page_alloc rx_buf_pa;
-
/* Ring index values
* cur - Tx: index of descriptor to be used for current transfer
* Rx: index of descriptor to check for packet availability
@@ -426,7 +382,7 @@ struct xgbe_ring {
unsigned short cur_vlan_ctag;
} tx;
};
-} ____cacheline_aligned;
+} __aligned(CACHE_LINE_SIZE);
/* Structure used to describe the descriptor rings associated with
* a DMA channel.
@@ -439,23 +395,18 @@ struct xgbe_channel {
/* Queue index and base address of queue's DMA registers */
unsigned int queue_index;
- void __iomem *dma_regs;
+ bus_space_tag_t dma_tag;
+ bus_space_handle_t dma_handle;
/* Per channel interrupt irq number */
- int dma_irq;
- char dma_irq_name[IFNAMSIZ + 32];
-
- /* Netdev related settings */
- struct napi_struct napi;
+ struct resource *dma_irq_res;
+ void *dma_irq_tag;
unsigned int saved_ier;
- unsigned int tx_timer_active;
- struct timer_list tx_timer;
-
struct xgbe_ring *tx_ring;
struct xgbe_ring *rx_ring;
-} ____cacheline_aligned;
+} __aligned(CACHE_LINE_SIZE);
enum xgbe_state {
XGBE_DOWN,
@@ -664,24 +615,8 @@ struct xgbe_hw_if {
void (*tx_mmc_int)(struct xgbe_prv_data *);
void (*read_mmc_stats)(struct xgbe_prv_data *);
- /* For Timestamp config */
- int (*config_tstamp)(struct xgbe_prv_data *, unsigned int);
- void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int);
- void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec,
- unsigned int nsec);
- u64 (*get_tstamp_time)(struct xgbe_prv_data *);
- u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
-
- /* For Data Center Bridging config */
- void (*config_tc)(struct xgbe_prv_data *);
- void (*config_dcb_tc)(struct xgbe_prv_data *);
- void (*config_dcb_pfc)(struct xgbe_prv_data *);
-
/* For Receive Side Scaling */
- int (*enable_rss)(struct xgbe_prv_data *);
int (*disable_rss)(struct xgbe_prv_data *);
- int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *);
- int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
};
struct xgbe_phy_if {
@@ -701,7 +636,7 @@ struct xgbe_phy_if {
struct xgbe_desc_if {
int (*alloc_ring_resources)(struct xgbe_prv_data *);
void (*free_ring_resources)(struct xgbe_prv_data *);
- int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
+ int (*map_tx_skb)(struct xgbe_channel *, struct mbuf *);
int (*map_rx_buffer)(struct xgbe_prv_data *, struct xgbe_ring *,
struct xgbe_ring_data *);
void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
@@ -756,34 +691,33 @@ struct xgbe_hw_features {
};
struct xgbe_prv_data {
- struct net_device *netdev;
+ struct ifnet *netdev;
struct platform_device *pdev;
struct acpi_device *adev;
- struct device *dev;
+ device_t dev;
/* ACPI or DT flag */
unsigned int use_acpi;
/* XGMAC/XPCS related mmio registers */
- void __iomem *xgmac_regs; /* XGMAC CSRs */
- void __iomem *xpcs_regs; /* XPCS MMD registers */
- void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
- void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
- void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
+ struct resource *xgmac_res; /* XGMAC CSRs */
+ struct resource *xpcs_res; /* XPCS MMD registers */
+ struct resource *rxtx_res; /* SerDes Rx/Tx CSRs */
+ struct resource *sir0_res; /* SerDes integration registers (1/2) */
+ struct resource *sir1_res; /* SerDes integration registers (2/2) */
- /* Overall device lock */
- spinlock_t lock;
+ /* DMA tag */
+ bus_dma_tag_t dmat;
/* XPCS indirect addressing lock */
spinlock_t xpcs_lock;
- /* RSS addressing mutex */
- struct mutex rss_mutex;
-
/* Flags representing xgbe_state */
unsigned long dev_state;
- int dev_irq;
+ struct resource *dev_irq_res;
+ struct resource *chan_irq_res[4];
+ void *dev_irq_tag;
unsigned int per_channel_irq;
struct xgbe_hw_if hw_if;
@@ -797,9 +731,9 @@ struct xgbe_prv_data {
unsigned int awcache;
/* Service routine support */
- struct workqueue_struct *dev_workqueue;
- struct work_struct service_work;
- struct timer_list service_timer;
+ struct taskqueue *dev_workqueue;
+ struct task service_work;
+ struct callout service_timer;
/* Rings for Tx/Rx on a DMA channel */
struct xgbe_channel *channel;
@@ -850,35 +784,16 @@ struct xgbe_prv_data {
/* Netdev related settings */
unsigned char mac_addr[ETH_ALEN];
- netdev_features_t netdev_features;
- struct napi_struct napi;
struct xgbe_mmc_stats mmc_stats;
struct xgbe_ext_stats ext_stats;
- /* Filtering support */
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-
/* Device clocks */
struct clk *sysclk;
unsigned long sysclk_rate;
struct clk *ptpclk;
unsigned long ptpclk_rate;
- /* Timestamp support */
- spinlock_t tstamp_lock;
- struct ptp_clock_info ptp_clock_info;
- struct ptp_clock *ptp_clock;
- struct hwtstamp_config tstamp_config;
- struct cyclecounter tstamp_cc;
- struct timecounter tstamp_tc;
- unsigned int tstamp_addend;
- struct work_struct tx_tstamp_work;
- struct sk_buff *tx_tstamp_skb;
- u64 tx_tstamp;
-
/* DCB support */
- struct ieee_ets *ets;
- struct ieee_pfc *pfc;
unsigned int q2tc_map[XGBE_MAX_QUEUES];
unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS];
u8 num_tcs;
@@ -887,7 +802,7 @@ struct xgbe_prv_data {
struct xgbe_hw_features hw_feat;
/* Device restart work structure */
- struct work_struct restart_work;
+ struct task restart_work;
/* Keeps track of power mode */
unsigned int power_down;
@@ -896,7 +811,6 @@ struct xgbe_prv_data {
u32 msg_enable;
/* Current PHY settings */
- phy_interface_t phy_mode;
int phy_link;
int phy_speed;
@@ -906,10 +820,9 @@ struct xgbe_prv_data {
unsigned long link_check;
char an_name[IFNAMSIZ + 32];
- struct workqueue_struct *an_workqueue;
- int an_irq;
- struct work_struct an_irq_work;
+ struct resource *an_irq_res;
+ void *an_irq_tag;
unsigned int speed_set;
@@ -928,61 +841,32 @@ struct xgbe_prv_data {
/* Auto-negotiation state machine support */
unsigned int an_int;
- struct mutex an_mutex;
+ struct sx an_mutex;
enum xgbe_an an_result;
enum xgbe_an an_state;
enum xgbe_rx kr_state;
enum xgbe_rx kx_state;
- struct work_struct an_work;
unsigned int an_supported;
unsigned int parallel_detect;
unsigned int fec_ability;
unsigned long an_start;
unsigned int lpm_ctrl; /* CTRL1 for resume */
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *xgbe_debugfs;
-
- unsigned int debugfs_xgmac_reg;
-
- unsigned int debugfs_xpcs_mmd;
- unsigned int debugfs_xpcs_reg;
-#endif
};
/* Function prototypes */
+int xgbe_open(struct ifnet *);
+int xgbe_close(struct ifnet *);
+int xgbe_xmit(struct ifnet *, struct mbuf *);
+int xgbe_change_mtu(struct ifnet *, int);
void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
-struct net_device_ops *xgbe_get_netdev_ops(void);
-struct ethtool_ops *xgbe_get_ethtool_ops(void);
-#ifdef CONFIG_AMD_XGBE_DCB
-const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
-#endif
-
-void xgbe_ptp_register(struct xgbe_prv_data *);
-void xgbe_ptp_unregister(struct xgbe_prv_data *);
-void xgbe_dump_tx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
- unsigned int, unsigned int, unsigned int);
-void xgbe_dump_rx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
- unsigned int);
-void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
void xgbe_get_all_hw_features(struct xgbe_prv_data *);
-int xgbe_powerup(struct net_device *, unsigned int);
-int xgbe_powerdown(struct net_device *, unsigned int);
void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
-#ifdef CONFIG_DEBUG_FS
-void xgbe_debugfs_init(struct xgbe_prv_data *);
-void xgbe_debugfs_exit(struct xgbe_prv_data *);
-#else
-static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
-static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
-#endif /* CONFIG_DEBUG_FS */
-
/* NOTE: Uncomment for function trace log messages in KERNEL LOG */
#if 0
#define YDEBUG
@@ -991,13 +875,13 @@ static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
/* For debug prints */
#ifdef YDEBUG
-#define DBGPR(x...) pr_alert(x)
+#define DBGPR(x...) printf(x)
#else
#define DBGPR(x...) do { } while (0)
#endif
#ifdef YDEBUG_MDIO
-#define DBGPR_MDIO(x...) pr_alert(x)
+#define DBGPR_MDIO(x...) printf(x)
#else
#define DBGPR_MDIO(x...) do { } while (0)
#endif
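
The service path declared above swaps the Linux workqueue/timer pair for a taskqueue(9) task driven by a callout(9). A sketch of the rescheduling pattern, assuming the dev_workqueue, service_work, and service_timer fields (function names illustrative):

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/taskqueue.h>

static void service_task_sketch(void *arg, int pending);

static void
service_timer_sketch(void *arg)
{
	struct xgbe_prv_data *pdata = arg;

	/* Defer the work to the taskqueue, then re-arm one second out. */
	taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work);
	callout_reset(&pdata->service_timer, hz, service_timer_sketch, pdata);
}

static void
service_init_sketch(struct xgbe_prv_data *pdata)
{
	TASK_INIT(&pdata->service_work, 0, service_task_sketch, pdata);
	callout_init(&pdata->service_timer, 1);	/* MPSAFE */
	callout_reset(&pdata->service_timer, hz, service_timer_sketch, pdata);
}
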
diff --git a/sys/dev/axgbe/xgbe_osdep.h b/sys/dev/axgbe/xgbe_osdep.h
new file mode 100644
index 000000000000..e6d793ecc43a
--- /dev/null
+++ b/sys/dev/axgbe/xgbe_osdep.h
@@ -0,0 +1,188 @@
+/*-
+ * Copyright (c) 2016,2017 SoftIron Inc.
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * the sponsorship of SoftIron Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _XGBE_OSDEP_H_
+#define _XGBE_OSDEP_H_
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/endian.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+#include <sys/socket.h>
+#include <sys/sx.h>
+#include <sys/taskqueue.h>
+
+#include <machine/bus.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_var.h>
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t __le32;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+typedef struct {
+ struct mtx lock;
+} spinlock_t;
+
+static inline void
+spin_lock_init(spinlock_t *spinlock)
+{
+
+ mtx_init(&spinlock->lock, "axgbe_spin", NULL, MTX_DEF);
+}
+
+#define spin_lock_irqsave(spinlock, flags) \
+do { \
+ (flags) = intr_disable(); \
+ mtx_lock(&(spinlock)->lock); \
+} while (0)
+
+#define spin_unlock_irqrestore(spinlock, flags) \
+do { \
+ mtx_unlock(&(spinlock)->lock); \
+ intr_restore(flags); \
+} while (0)
+
+#define BIT(pos) (1ul << (pos))
+
+static inline void
+clear_bit(int pos, unsigned long *p)
+{
+
+ atomic_clear_long(p, 1ul << pos);
+}
+
+static inline int
+test_bit(int pos, unsigned long *p)
+{
+ unsigned long val;
+
+ val = *p;
+ return ((val & 1ul << pos) != 0);
+}
+
+static inline void
+set_bit(int pos, unsigned long *p)
+{
+
+ atomic_set_long(p, 1ul << pos);
+}
+
+#define lower_32_bits(x) ((x) & 0xffffffffu)
+#define upper_32_bits(x) (((x) >> 32) & 0xffffffffu)
+#define cpu_to_le32(x) htole32(x)
+#define le32_to_cpu(x) le32toh(x)
+
+MALLOC_DECLARE(M_AXGBE);
+
+#define ADVERTISED_Pause 0x01
+#define ADVERTISED_Asym_Pause 0x02
+#define ADVERTISED_Autoneg 0x04
+#define ADVERTISED_Backplane 0x08
+#define ADVERTISED_10000baseKR_Full 0x10
+#define ADVERTISED_2500baseX_Full 0x20
+#define ADVERTISED_1000baseKX_Full 0x40
+
+#define AUTONEG_DISABLE 0
+#define AUTONEG_ENABLE 1
+
+#define DUPLEX_UNKNOWN 1
+#define DUPLEX_FULL 2
+
+#define SPEED_UNKNOWN 1
+#define SPEED_10000 2
+#define SPEED_2500 3
+#define SPEED_1000 4
+
+#define SUPPORTED_Autoneg 0x01
+#define SUPPORTED_Pause 0x02
+#define SUPPORTED_Asym_Pause 0x04
+#define SUPPORTED_Backplane 0x08
+#define SUPPORTED_10000baseKR_Full 0x10
+#define SUPPORTED_1000baseKX_Full 0x20
+#define SUPPORTED_2500baseX_Full 0x40
+#define SUPPORTED_10000baseR_FEC 0x80
+
+#define BMCR_SPEED100 0x2000
+
+#define MDIO_MMD_PMAPMD 1
+#define MDIO_MMD_PCS 3
+#define MDIO_MMD_AN 7
+#define MDIO_PMA_10GBR_FECABLE 170
+#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001
+#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002
+#define MII_ADDR_C45 (1<<30)
+
+#define MDIO_CTRL1 0x00 /* MII_BMCR */
+#define MDIO_CTRL1_RESET 0x8000 /* BMCR_RESET */
+#define MDIO_CTRL1_SPEEDSELEXT 0x2040 /* BMCR_SPEED1000 | BMCR_SPEED100 */
+#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x3c)
+#define MDIO_AN_CTRL1_ENABLE 0x1000 /* BMCR_AUTOEN */
+#define MDIO_CTRL1_LPOWER 0x0800 /* BMCR_PDOWN */
+#define MDIO_AN_CTRL1_RESTART 0x0200 /* BMCR_STARTNEG */
+
+#define MDIO_CTRL1_SPEED10G (MDIO_CTRL1_SPEEDSELEXT | 0x00)
+
+#define MDIO_STAT1 1 /* MII_BMSR */
+#define MDIO_STAT1_LSTATUS 0x0004 /* BMSR_LINK */
+
+#define MDIO_CTRL2 0x07
+#define MDIO_PCS_CTRL2_10GBR 0x0000
+#define MDIO_PCS_CTRL2_10GBX 0x0001
+#define MDIO_PCS_CTRL2_TYPE 0x0003
+
+#define MDIO_AN_ADVERTISE 16
+
+#define MDIO_AN_LPA 19
+
+#define ETH_ALEN ETHER_ADDR_LEN
+#define ETH_HLEN ETHER_HDR_LEN
+#define ETH_FCS_LEN 4
+#define VLAN_HLEN ETHER_VLAN_ENCAP_LEN
+
+#define ARRAY_SIZE(x) nitems(x)
+
+#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
+#define BITS_TO_LONGS(n) howmany((n), BITS_PER_LONG)
+
+#define NSEC_PER_SEC 1000000000ul
+
+#define min_t(t, a, b) MIN((t)(a), (t)(b))
+#define max_t(t, a, b) MAX((t)(a), (t)(b))
+
+#endif /* _XGBE_OSDEP_H_ */
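
A brief usage sketch of the shims above (the caller is hypothetical): the Linux-style spinlock and bit-operation calls in the shared .c files compile unchanged, backed by mtx(9), intr_disable(), and atomic(9):

static unsigned long example_state;	/* hypothetical flag word */
static spinlock_t example_lock;

static void
osdep_usage_sketch(void)
{
	register_t flags;

	spin_lock_init(&example_lock);
	spin_lock_irqsave(&example_lock, flags);
	set_bit(0, &example_state);	/* atomic_set_long underneath */
	spin_unlock_irqrestore(&example_lock, flags);

	if (test_bit(0, &example_state))
		clear_bit(0, &example_state);
}
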