aboutsummaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
Diffstat (limited to 'sys')
-rw-r--r--sys/amd64/conf/GENERIC1
-rw-r--r--sys/amd64/conf/NOTES1
-rw-r--r--sys/arm64/conf/GENERIC2
-rw-r--r--sys/arm64/conf/NOTES2
-rw-r--r--sys/conf/files.amd6410
-rw-r--r--sys/conf/files.arm6414
-rw-r--r--sys/dev/axgbe/if_axgbe.c47
-rw-r--r--sys/dev/axgbe/if_axgbe_pci.c2339
-rw-r--r--sys/dev/axgbe/xgbe-common.h534
-rw-r--r--sys/dev/axgbe/xgbe-dcb.c272
-rw-r--r--sys/dev/axgbe/xgbe-desc.c360
-rw-r--r--sys/dev/axgbe/xgbe-dev.c2158
-rw-r--r--sys/dev/axgbe/xgbe-drv.c963
-rw-r--r--sys/dev/axgbe/xgbe-i2c.c532
-rw-r--r--sys/dev/axgbe/xgbe-mdio.c1466
-rw-r--r--sys/dev/axgbe/xgbe-phy-v1.c707
-rw-r--r--sys/dev/axgbe/xgbe-phy-v2.c3771
-rw-r--r--sys/dev/axgbe/xgbe-ptp.c276
-rw-r--r--sys/dev/axgbe/xgbe-sysctl.c1715
-rw-r--r--sys/dev/axgbe/xgbe-txrx.c777
-rw-r--r--sys/dev/axgbe/xgbe.h732
-rw-r--r--sys/dev/axgbe/xgbe_osdep.c47
-rw-r--r--sys/dev/axgbe/xgbe_osdep.h286
-rw-r--r--sys/modules/Makefile1
-rw-r--r--sys/modules/axgbe/Makefile9
-rw-r--r--sys/modules/axgbe/if_axa/Makefile12
-rw-r--r--sys/modules/axgbe/if_axp/Makefile12
27 files changed, 14256 insertions, 2790 deletions
diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC
index 0400b2208b95..78a9b5f88eb1 100644
--- a/sys/amd64/conf/GENERIC
+++ b/sys/amd64/conf/GENERIC
@@ -248,6 +248,7 @@ device ixl # Intel 700 Series Physical Function
device iavf # Intel Adaptive Virtual Function
device ice # Intel 800 Series Physical Function
device vmx # VMware VMXNET3 Ethernet
+device axp # AMD EPYC integrated NIC
# PCI Ethernet NICs.
device bxe # Broadcom NetXtreme II BCM5771X/BCM578XX 10GbE
diff --git a/sys/amd64/conf/NOTES b/sys/amd64/conf/NOTES
index 7cdded7007c8..9bfb0cfb15e6 100644
--- a/sys/amd64/conf/NOTES
+++ b/sys/amd64/conf/NOTES
@@ -328,6 +328,7 @@ device nfe # nVidia nForce MCP on-board Ethernet
device sfxge # Solarflare SFC9000 10Gb Ethernet
device vmx # VMware VMXNET3 Ethernet
device wpi # Intel 3945ABG wireless NICs.
+device axp # AMD EPYC integrated NIC
# IEEE 802.11 adapter firmware modules
diff --git a/sys/arm64/conf/GENERIC b/sys/arm64/conf/GENERIC
index 2807284b01f8..fcaf54129c94 100644
--- a/sys/arm64/conf/GENERIC
+++ b/sys/arm64/conf/GENERIC
@@ -167,7 +167,7 @@ device mdio
device mii
device miibus # MII bus support
device awg # Allwinner EMAC Gigabit Ethernet
-device axgbe # AMD Opteron A1100 integrated NIC
+device axa # AMD Opteron A1100 integrated NIC
device msk # Marvell/SysKonnect Yukon II Gigabit Ethernet
device neta # Marvell Armada 370/38x/XP/3700 NIC
device smc # SMSC LAN91C111
diff --git a/sys/arm64/conf/NOTES b/sys/arm64/conf/NOTES
index 2bf2337d610f..c966ab436221 100644
--- a/sys/arm64/conf/NOTES
+++ b/sys/arm64/conf/NOTES
@@ -76,7 +76,7 @@ options PCI_IOV # PCI SR-IOV support
# Ethernet NICs
device mdio
device awg # Allwinner EMAC Gigabit Ethernet
-device axgbe # AMD Opteron A1100 integrated NIC
+device axa # AMD Opteron A1100 integrated NIC
device neta # Marvell Armada 370/38x/XP/3700 NIC
device smc # SMSC LAN91C111
device vnic # Cavium ThunderX NIC
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index 8815e9ad3355..79ad9dfd5a32 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -142,6 +142,16 @@ dev/agp/agp_amd64.c optional agp
dev/agp/agp_i810.c optional agp
dev/agp/agp_via.c optional agp
dev/amdgpio/amdgpio.c optional amdgpio
+dev/axgbe/if_axgbe_pci.c optional axp
+dev/axgbe/xgbe-desc.c optional axp
+dev/axgbe/xgbe-dev.c optional axp
+dev/axgbe/xgbe-drv.c optional axp
+dev/axgbe/xgbe-mdio.c optional axp
+dev/axgbe/xgbe-sysctl.c optional axp
+dev/axgbe/xgbe-txrx.c optional axp
+dev/axgbe/xgbe_osdep.c optional axp
+dev/axgbe/xgbe-i2c.c optional axp
+dev/axgbe/xgbe-phy-v2.c optional axp
dev/hyperv/vmbus/amd64/hyperv_machdep.c optional hyperv
dev/hyperv/vmbus/amd64/vmbus_vector.S optional hyperv
dev/ice/if_ice_iflib.c optional ice pci \
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
index ab35a133f85c..6a112168533e 100644
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -249,11 +249,15 @@ dev/acpica/acpi_pxm.c optional acpi
dev/ahci/ahci_fsl_fdt.c optional SOC_NXP_LS ahci fdt
dev/ahci/ahci_generic.c optional ahci
dev/altera/dwc/if_dwc_socfpga.c optional fdt dwc_socfpga
-dev/axgbe/if_axgbe.c optional axgbe
-dev/axgbe/xgbe-desc.c optional axgbe
-dev/axgbe/xgbe-dev.c optional axgbe
-dev/axgbe/xgbe-drv.c optional axgbe
-dev/axgbe/xgbe-mdio.c optional axgbe
+dev/axgbe/if_axgbe.c optional axa
+dev/axgbe/xgbe-desc.c optional axa
+dev/axgbe/xgbe-dev.c optional axa
+dev/axgbe/xgbe-drv.c optional axa
+dev/axgbe/xgbe-mdio.c optional axa
+dev/axgbe/xgbe-sysctl.c optional axa
+dev/axgbe/xgbe-txrx.c optional axa
+dev/axgbe/xgbe_osdep.c optional axa
+dev/axgbe/xgbe-phy-v1.c optional axa
dev/cpufreq/cpufreq_dt.c optional cpufreq fdt
dev/gpio/pl061.c optional pl061 gpio
dev/gpio/pl061_acpi.c optional pl061 gpio acpi
diff --git a/sys/dev/axgbe/if_axgbe.c b/sys/dev/axgbe/if_axgbe.c
index c76cd316a7ff..415c4016e3a9 100644
--- a/sys/dev/axgbe/if_axgbe.c
+++ b/sys/dev/axgbe/if_axgbe.c
@@ -1,6 +1,8 @@
/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
* Copyright (c) 2016,2017 SoftIron Inc.
- * All rights reserved.
+ * Copyright (c) 2020 Advanced Micro Devices, Inc.
*
* This software was developed by Andrew Turner under
* the sponsorship of SoftIron Inc.
@@ -114,6 +116,14 @@ static struct resource_spec mac_spec[] = {
{ -1, 0 }
};
+static struct xgbe_version_data xgbe_v1 = {
+ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v1,
+ .xpcs_access = XGBE_XPCS_ACCESS_V1,
+ .tx_max_fifo_size = 81920,
+ .rx_max_fifo_size = 81920,
+ .tx_tstamp_workaround = 1,
+};
+
MALLOC_DEFINE(M_AXGBE, "axgbe", "axgbe data");
static void
@@ -135,14 +145,13 @@ axgbe_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
{
struct axgbe_softc *sc = ifp->if_softc;
struct ifreq *ifr = (struct ifreq *)data;
- int error;
+ int error = 0;
switch(command) {
case SIOCSIFMTU:
if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU_JUMBO)
error = EINVAL;
- else
- error = xgbe_change_mtu(ifp, ifr->ifr_mtu);
+ /* TODO - change it to iflib way */
break;
case SIOCSIFFLAGS:
error = 0;
@@ -307,6 +316,7 @@ axgbe_attach(device_t dev)
sc = device_get_softc(dev);
+ sc->prv.vdata = &xgbe_v1;
node = ofw_bus_get_node(dev);
if (OF_getencprop(node, "phy-handle", &phy_handle,
sizeof(phy_handle)) <= 0) {
@@ -391,6 +401,7 @@ axgbe_attach(device_t dev)
sc->prv.phy.advertising = ADVERTISED_10000baseKR_Full |
ADVERTISED_1000baseKX_Full;
+
/*
* Read the needed properties from the phy node.
*/
@@ -466,13 +477,11 @@ axgbe_attach(device_t dev)
/* Check if the NIC is DMA coherent */
sc->prv.coherent = OF_hasprop(node, "dma-coherent");
if (sc->prv.coherent) {
- sc->prv.axdomain = XGBE_DMA_OS_AXDOMAIN;
- sc->prv.arcache = XGBE_DMA_OS_ARCACHE;
- sc->prv.awcache = XGBE_DMA_OS_AWCACHE;
+ sc->prv.arcr = XGBE_DMA_OS_ARCR;
+ sc->prv.awcr = XGBE_DMA_OS_AWCR;
} else {
- sc->prv.axdomain = XGBE_DMA_SYS_AXDOMAIN;
- sc->prv.arcache = XGBE_DMA_SYS_ARCACHE;
- sc->prv.awcache = XGBE_DMA_SYS_AWCACHE;
+ sc->prv.arcr = XGBE_DMA_SYS_ARCR;
+ sc->prv.awcr = XGBE_DMA_SYS_AWCR;
}
/* Create the lock & workqueues */
@@ -486,6 +495,7 @@ axgbe_attach(device_t dev)
xgbe_init_function_ptrs_phy(&sc->prv.phy_if);
xgbe_init_function_ptrs_dev(&sc->prv.hw_if);
xgbe_init_function_ptrs_desc(&sc->prv.desc_if);
+ sc->prv.vdata->init_function_ptrs_phy_impl(&sc->prv.phy_if);
/* Reset the hardware */
sc->prv.hw_if.exit(&sc->prv);
@@ -494,16 +504,14 @@ axgbe_attach(device_t dev)
xgbe_get_all_hw_features(&sc->prv);
/* Set default values */
- sc->prv.pblx8 = DMA_PBL_X8_ENABLE;
sc->prv.tx_desc_count = XGBE_TX_DESC_CNT;
sc->prv.tx_sf_mode = MTL_TSF_ENABLE;
sc->prv.tx_threshold = MTL_TX_THRESHOLD_64;
- sc->prv.tx_pbl = DMA_PBL_16;
sc->prv.tx_osp_mode = DMA_OSP_ENABLE;
sc->prv.rx_desc_count = XGBE_RX_DESC_CNT;
sc->prv.rx_sf_mode = MTL_RSF_DISABLE;
sc->prv.rx_threshold = MTL_RX_THRESHOLD_64;
- sc->prv.rx_pbl = DMA_PBL_16;
+ sc->prv.pbl = DMA_PBL_128;
sc->prv.pause_autoneg = 1;
sc->prv.tx_pause = 1;
sc->prv.rx_pause = 1;
@@ -528,7 +536,7 @@ axgbe_attach(device_t dev)
ifp->if_softc = sc;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = axgbe_ioctl;
- ifp->if_transmit = xgbe_xmit;
+ /* TODO - change it to iflib way */
ifp->if_qflush = axgbe_qflush;
ifp->if_get_counter = axgbe_get_counter;
@@ -550,11 +558,7 @@ axgbe_attach(device_t dev)
set_bit(XGBE_DOWN, &sc->prv.dev_state);
- if (xgbe_open(ifp) < 0) {
- device_printf(dev, "ndo_open failed\n");
- return (ENXIO);
- }
-
+ /* TODO - change it to iflib way */
return (0);
}
@@ -562,6 +566,7 @@ static device_method_t axgbe_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, axgbe_probe),
DEVMETHOD(device_attach, axgbe_attach),
+
{ 0, 0 }
};
@@ -569,7 +574,8 @@ static devclass_t axgbe_devclass;
DEFINE_CLASS_0(axgbe, axgbe_driver, axgbe_methods,
sizeof(struct axgbe_softc));
-DRIVER_MODULE(axgbe, simplebus, axgbe_driver, axgbe_devclass, 0, 0);
+DRIVER_MODULE(axa, simplebus, axgbe_driver, axgbe_devclass, 0, 0);
+
static struct ofw_compat_data phy_compat_data[] = {
{ "amd,xgbe-phy-seattle-v1a", true },
@@ -605,6 +611,7 @@ static device_method_t axgbephy_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, axgbephy_probe),
DEVMETHOD(device_attach, axgbephy_attach),
+
{ 0, 0 }
};
diff --git a/sys/dev/axgbe/if_axgbe_pci.c b/sys/dev/axgbe/if_axgbe_pci.c
new file mode 100644
index 000000000000..4c4883e1cb4f
--- /dev/null
+++ b/sys/dev/axgbe/if_axgbe_pci.c
@@ -0,0 +1,2339 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Advanced Micro Devices, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Contact Information :
+ * Rajesh Kumar <rajesh1.kumar@amd.com>
+ * Shreyank Amartya <Shreyank.Amartya@amd.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/if_media.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+#include "miibus_if.h"
+#include "ifdi_if.h"
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
+MALLOC_DEFINE(M_AXGBE, "axgbe", "axgbe data");
+
+extern struct if_txrx axgbe_txrx;
+
+/* Function prototypes */
+static void *axgbe_register(device_t);
+static int axgbe_if_attach_pre(if_ctx_t);
+static int axgbe_if_attach_post(if_ctx_t);
+static int axgbe_if_detach(if_ctx_t);
+static void axgbe_if_stop(if_ctx_t);
+static void axgbe_if_init(if_ctx_t);
+
+/* Queue related routines */
+static int axgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
+static int axgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
+static int axgbe_alloc_channels(if_ctx_t);
+static void axgbe_if_queues_free(if_ctx_t);
+static int axgbe_if_tx_queue_intr_enable(if_ctx_t, uint16_t);
+static int axgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
+
+/* Interrupt related routines */
+static void axgbe_if_disable_intr(if_ctx_t);
+static void axgbe_if_enable_intr(if_ctx_t);
+static int axgbe_if_msix_intr_assign(if_ctx_t, int);
+static void xgbe_free_intr(struct xgbe_prv_data *, struct resource *, void *, int);
+
+/* Init and Iflib routines */
+static void axgbe_pci_init(struct xgbe_prv_data *);
+static void axgbe_pci_stop(if_ctx_t);
+static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *, struct xgbe_channel *);
+static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *);
+static int axgbe_if_mtu_set(if_ctx_t, uint32_t);
+static void axgbe_if_update_admin_status(if_ctx_t);
+static void axgbe_if_media_status(if_ctx_t, struct ifmediareq *);
+static int axgbe_if_media_change(if_ctx_t);
+static int axgbe_if_promisc_set(if_ctx_t, int);
+static uint64_t axgbe_if_get_counter(if_ctx_t, ift_counter);
+static void axgbe_if_vlan_register(if_ctx_t, uint16_t);
+static void axgbe_if_vlan_unregister(if_ctx_t, uint16_t);
+#if __FreeBSD_version >= 1300000
+static bool axgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
+#endif
+static void axgbe_set_counts(if_ctx_t);
+static void axgbe_init_iflib_softc_ctx(struct axgbe_if_softc *);
+
+/* MII interface registered functions */
+static int axgbe_miibus_readreg(device_t, int, int);
+static int axgbe_miibus_writereg(device_t, int, int, int);
+static void axgbe_miibus_statchg(device_t);
+
+/* ISR routines */
+static int axgbe_dev_isr(void *);
+static void axgbe_ecc_isr(void *);
+static void axgbe_i2c_isr(void *);
+static void axgbe_an_isr(void *);
+static int axgbe_msix_que(void *);
+
+/* Timer routines */
+static void xgbe_service(void *, int);
+static void xgbe_service_timer(void *);
+static void xgbe_init_timers(struct xgbe_prv_data *);
+static void xgbe_stop_timers(struct xgbe_prv_data *);
+
+/* Dump routines */
+static void xgbe_dump_prop_registers(struct xgbe_prv_data *);
+
+/*
+ * Allocate only for MAC (BAR0) and PCS (BAR1) registers, and just point the
+ * MSI-X table bar (BAR5) to iflib. iflib will do the allocation for MSI-X
+ * table.
+ */
+static struct resource_spec axgbe_pci_mac_spec[] = {
+ { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, /* MAC regs */
+ { SYS_RES_MEMORY, PCIR_BAR(1), RF_ACTIVE }, /* PCS regs */
+ { -1, 0 }
+};
+
+static pci_vendor_info_t axgbe_vendor_info_array[] =
+{
+ PVID(0x1022, 0x1458, "AMD 10 Gigabit Ethernet Driver"),
+ PVID(0x1022, 0x1459, "AMD 10 Gigabit Ethernet Driver"),
+ PVID_END
+};
+
+static struct xgbe_version_data xgbe_v2a = {
+ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = XGBE_XPCS_ACCESS_V2,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 229376,
+ .rx_max_fifo_size = 229376,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .irq_reissue_support = 1,
+ .tx_desc_prefetch = 5,
+ .rx_desc_prefetch = 5,
+ .an_cdr_workaround = 1,
+};
+
+static struct xgbe_version_data xgbe_v2b = {
+ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = XGBE_XPCS_ACCESS_V2,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 65536,
+ .rx_max_fifo_size = 65536,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .irq_reissue_support = 1,
+ .tx_desc_prefetch = 5,
+ .rx_desc_prefetch = 5,
+ .an_cdr_workaround = 1,
+};
+
+/* Device Interface */
+static device_method_t ax_methods[] = {
+ DEVMETHOD(device_register, axgbe_register),
+ DEVMETHOD(device_probe, iflib_device_probe),
+ DEVMETHOD(device_attach, iflib_device_attach),
+ DEVMETHOD(device_detach, iflib_device_detach),
+
+ /* MII interface */
+ DEVMETHOD(miibus_readreg, axgbe_miibus_readreg),
+ DEVMETHOD(miibus_writereg, axgbe_miibus_writereg),
+ DEVMETHOD(miibus_statchg, axgbe_miibus_statchg),
+
+ DEVMETHOD_END
+};
+
+static driver_t ax_driver = {
+ "ax", ax_methods, sizeof(struct axgbe_if_softc),
+};
+
+devclass_t ax_devclass;
+DRIVER_MODULE(axp, pci, ax_driver, ax_devclass, 0, 0);
+DRIVER_MODULE(miibus, ax, miibus_driver, miibus_devclass, 0, 0);
+IFLIB_PNP_INFO(pci, ax_driver, axgbe_vendor_info_array);
+
+MODULE_DEPEND(ax, pci, 1, 1, 1);
+MODULE_DEPEND(ax, ether, 1, 1, 1);
+MODULE_DEPEND(ax, iflib, 1, 1, 1);
+MODULE_DEPEND(ax, miibus, 1, 1, 1);
+
+/* Iflib Interface */
+static device_method_t axgbe_if_methods[] = {
+ DEVMETHOD(ifdi_attach_pre, axgbe_if_attach_pre),
+ DEVMETHOD(ifdi_attach_post, axgbe_if_attach_post),
+ DEVMETHOD(ifdi_detach, axgbe_if_detach),
+ DEVMETHOD(ifdi_init, axgbe_if_init),
+ DEVMETHOD(ifdi_stop, axgbe_if_stop),
+ DEVMETHOD(ifdi_msix_intr_assign, axgbe_if_msix_intr_assign),
+ DEVMETHOD(ifdi_intr_enable, axgbe_if_enable_intr),
+ DEVMETHOD(ifdi_intr_disable, axgbe_if_disable_intr),
+ DEVMETHOD(ifdi_tx_queue_intr_enable, axgbe_if_tx_queue_intr_enable),
+ DEVMETHOD(ifdi_rx_queue_intr_enable, axgbe_if_rx_queue_intr_enable),
+ DEVMETHOD(ifdi_tx_queues_alloc, axgbe_if_tx_queues_alloc),
+ DEVMETHOD(ifdi_rx_queues_alloc, axgbe_if_rx_queues_alloc),
+ DEVMETHOD(ifdi_queues_free, axgbe_if_queues_free),
+ DEVMETHOD(ifdi_update_admin_status, axgbe_if_update_admin_status),
+ DEVMETHOD(ifdi_mtu_set, axgbe_if_mtu_set),
+ DEVMETHOD(ifdi_media_status, axgbe_if_media_status),
+ DEVMETHOD(ifdi_media_change, axgbe_if_media_change),
+ DEVMETHOD(ifdi_promisc_set, axgbe_if_promisc_set),
+ DEVMETHOD(ifdi_get_counter, axgbe_if_get_counter),
+ DEVMETHOD(ifdi_vlan_register, axgbe_if_vlan_register),
+ DEVMETHOD(ifdi_vlan_unregister, axgbe_if_vlan_unregister),
+#if __FreeBSD_version >= 1300000
+ DEVMETHOD(ifdi_needs_restart, axgbe_if_needs_restart),
+#endif
+ DEVMETHOD_END
+};
+
+static driver_t axgbe_if_driver = {
+ "axgbe_if", axgbe_if_methods, sizeof(struct axgbe_if_softc)
+};
+
+/* Iflib Shared Context */
+static struct if_shared_ctx axgbe_sctx_init = {
+ .isc_magic = IFLIB_MAGIC,
+ .isc_driver = &axgbe_if_driver,
+ .isc_q_align = PAGE_SIZE,
+ .isc_tx_maxsize = XGBE_TSO_MAX_SIZE + sizeof(struct ether_vlan_header),
+ .isc_tx_maxsegsize = PAGE_SIZE,
+ .isc_tso_maxsize = XGBE_TSO_MAX_SIZE + sizeof(struct ether_vlan_header),
+ .isc_tso_maxsegsize = PAGE_SIZE,
+ .isc_rx_maxsize = MJUM9BYTES,
+ .isc_rx_maxsegsize = MJUM9BYTES,
+ .isc_rx_nsegments = 1,
+ .isc_admin_intrcnt = 4,
+
+ .isc_vendor_info = axgbe_vendor_info_array,
+ .isc_driver_version = XGBE_DRV_VERSION,
+
+ .isc_nrxd_min = {XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MIN},
+ .isc_nrxd_default = {XGBE_RX_DESC_CNT_DEFAULT, XGBE_RX_DESC_CNT_DEFAULT},
+ .isc_nrxd_max = {XGBE_RX_DESC_CNT_MAX, XGBE_RX_DESC_CNT_MAX},
+ .isc_ntxd_min = {XGBE_TX_DESC_CNT_MIN},
+ .isc_ntxd_default = {XGBE_TX_DESC_CNT_DEFAULT},
+ .isc_ntxd_max = {XGBE_TX_DESC_CNT_MAX},
+
+ .isc_nfl = 2,
+ .isc_ntxqs = 1,
+ .isc_nrxqs = 2,
+ .isc_flags = IFLIB_TSO_INIT_IP | IFLIB_NEED_SCRATCH |
+ IFLIB_NEED_ZERO_CSUM | IFLIB_NEED_ETHER_PAD,
+};
+
+static void *
+axgbe_register(device_t dev)
+{
+ return (&axgbe_sctx_init);
+}
+
+/* MII Interface Functions */
+static int
+axgbe_miibus_readreg(device_t dev, int phy, int reg)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev));
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ int val;
+
+ axgbe_printf(3, "%s: phy %d reg %d\n", __func__, phy, reg);
+
+ val = xgbe_phy_mii_read(pdata, phy, reg);
+
+ axgbe_printf(2, "%s: val 0x%x\n", __func__, val);
+ return (val & 0xFFFF);
+}
+
+static int
+axgbe_miibus_writereg(device_t dev, int phy, int reg, int val)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev));
+ struct xgbe_prv_data *pdata = &sc->pdata;
+
+ axgbe_printf(3, "%s: phy %d reg %d val 0x%x\n", __func__, phy, reg, val);
+
+ xgbe_phy_mii_write(pdata, phy, reg, val);
+
+ return(0);
+}
+
+static void
+axgbe_miibus_statchg(device_t dev)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev));
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ struct mii_data *mii = device_get_softc(pdata->axgbe_miibus);
+ struct ifnet *ifp = pdata->netdev;
+ int bmsr;
+
+ axgbe_printf(2, "%s: Link %d/%d\n", __func__, pdata->phy.link,
+ pdata->phy_link);
+
+ if (mii == NULL || ifp == NULL ||
+ (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
+ (IFM_ACTIVE | IFM_AVALID)) {
+
+ switch (IFM_SUBTYPE(mii->mii_media_active)) {
+ case IFM_10_T:
+ case IFM_100_TX:
+ pdata->phy.link = 1;
+ break;
+ case IFM_1000_T:
+ case IFM_1000_SX:
+ case IFM_2500_SX:
+ pdata->phy.link = 1;
+ break;
+ default:
+ pdata->phy.link = 0;
+ break;
+ }
+ } else
+ pdata->phy_link = 0;
+
+ bmsr = axgbe_miibus_readreg(pdata->dev, pdata->mdio_addr, MII_BMSR);
+ if (bmsr & BMSR_ANEG) {
+
+ axgbe_printf(2, "%s: Autoneg Done\n", __func__);
+
+ /* Raise AN Interrupt */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
+ XGBE_AN_CL73_INT_MASK);
+ }
+}
+
+static int
+axgbe_if_attach_pre(if_ctx_t ctx)
+{
+ struct axgbe_if_softc *sc;
+ struct xgbe_prv_data *pdata;
+ struct resource *mac_res[2];
+ if_softc_ctx_t scctx;
+ if_shared_ctx_t sctx;
+ device_t dev;
+ unsigned int ma_lo, ma_hi;
+ unsigned int reg;
+
+ sc = iflib_get_softc(ctx);
+ sc->pdata.dev = dev = iflib_get_dev(ctx);
+ sc->sctx = sctx = iflib_get_sctx(ctx);
+ sc->scctx = scctx = iflib_get_softc_ctx(ctx);
+ sc->media = iflib_get_media(ctx);
+ sc->ctx = ctx;
+ sc->link_status = LINK_STATE_DOWN;
+ pdata = &sc->pdata;
+ pdata->netdev = iflib_get_ifp(ctx);
+
+ spin_lock_init(&pdata->xpcs_lock);
+
+ /* Initialize locks */
+ mtx_init(&pdata->rss_mutex, "xgbe rss mutex lock", NULL, MTX_DEF);
+ mtx_init(&pdata->mdio_mutex, "xgbe MDIO mutex lock", NULL, MTX_SPIN);
+
+ /* Allocate VLAN bitmap */
+ pdata->active_vlans = bit_alloc(VLAN_NVID, M_AXGBE, M_WAITOK|M_ZERO);
+ pdata->num_active_vlans = 0;
+
+ /* Get the version data */
+ DBGPR("%s: Device ID: 0x%x\n", __func__, pci_get_device(dev));
+ if (pci_get_device(dev) == 0x1458)
+ sc->pdata.vdata = &xgbe_v2a;
+ else if (pci_get_device(dev) == 0x1459)
+ sc->pdata.vdata = &xgbe_v2b;
+
+ /* PCI setup */
+ if (bus_alloc_resources(dev, axgbe_pci_mac_spec, mac_res))
+ return (ENXIO);
+
+ sc->pdata.xgmac_res = mac_res[0];
+ sc->pdata.xpcs_res = mac_res[1];
+
+ /* Set the PCS indirect addressing definition registers*/
+ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+
+ /* Configure the PCS indirect addressing support */
+ reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+ pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
+ pdata->xpcs_window <<= 6;
+ pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
+ pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
+ pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
+ DBGPR("xpcs window def : %#010x\n",
+ pdata->xpcs_window_def_reg);
+ DBGPR("xpcs window sel : %#010x\n",
+ pdata->xpcs_window_sel_reg);
+ DBGPR("xpcs window : %#010x\n",
+ pdata->xpcs_window);
+ DBGPR("xpcs window size : %#010x\n",
+ pdata->xpcs_window_size);
+ DBGPR("xpcs window mask : %#010x\n",
+ pdata->xpcs_window_mask);
+
+ /* Enable all interrupts in the hardware */
+ XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
+
+ /* Retrieve the MAC address */
+ ma_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
+ ma_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
+ pdata->mac_addr[0] = ma_lo & 0xff;
+ pdata->mac_addr[1] = (ma_lo >> 8) & 0xff;
+ pdata->mac_addr[2] = (ma_lo >>16) & 0xff;
+ pdata->mac_addr[3] = (ma_lo >> 24) & 0xff;
+ pdata->mac_addr[4] = ma_hi & 0xff;
+ pdata->mac_addr[5] = (ma_hi >> 8) & 0xff;
+ if (!XP_GET_BITS(ma_hi, XP_MAC_ADDR_HI, VALID)) {
+ axgbe_error("Invalid mac address\n");
+ return (EINVAL);
+ }
+ iflib_set_mac(ctx, pdata->mac_addr);
+
+ /* Clock settings */
+ pdata->sysclk_rate = XGBE_V2_DMA_CLOCK_FREQ;
+ pdata->ptpclk_rate = XGBE_V2_PTP_CLOCK_FREQ;
+
+ /* Set the DMA coherency values */
+ pdata->coherent = 1;
+ pdata->arcr = XGBE_DMA_PCI_ARCR;
+ pdata->awcr = XGBE_DMA_PCI_AWCR;
+ pdata->awarcr = XGBE_DMA_PCI_AWARCR;
+
+ /* Read the port property registers */
+ pdata->pp0 = XP_IOREAD(pdata, XP_PROP_0);
+ pdata->pp1 = XP_IOREAD(pdata, XP_PROP_1);
+ pdata->pp2 = XP_IOREAD(pdata, XP_PROP_2);
+ pdata->pp3 = XP_IOREAD(pdata, XP_PROP_3);
+ pdata->pp4 = XP_IOREAD(pdata, XP_PROP_4);
+ DBGPR("port property 0 = %#010x\n", pdata->pp0);
+ DBGPR("port property 1 = %#010x\n", pdata->pp1);
+ DBGPR("port property 2 = %#010x\n", pdata->pp2);
+ DBGPR("port property 3 = %#010x\n", pdata->pp3);
+ DBGPR("port property 4 = %#010x\n", pdata->pp4);
+
+ /* Set the maximum channels and queues */
+ pdata->tx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
+ MAX_TX_DMA);
+ pdata->rx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
+ MAX_RX_DMA);
+ pdata->tx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
+ MAX_TX_QUEUES);
+ pdata->rx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
+ MAX_RX_QUEUES);
+ DBGPR("max tx/rx channel count = %u/%u\n",
+ pdata->tx_max_channel_count, pdata->rx_max_channel_count);
+ DBGPR("max tx/rx hw queue count = %u/%u\n",
+ pdata->tx_max_q_count, pdata->rx_max_q_count);
+
+ axgbe_set_counts(ctx);
+
+ /* Set the maximum fifo amounts */
+ pdata->tx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
+ TX_FIFO_SIZE);
+ pdata->tx_max_fifo_size *= 16384;
+ pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size,
+ pdata->vdata->tx_max_fifo_size);
+ pdata->rx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
+ RX_FIFO_SIZE);
+ pdata->rx_max_fifo_size *= 16384;
+ pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size,
+ pdata->vdata->rx_max_fifo_size);
+ DBGPR("max tx/rx max fifo size = %u/%u\n",
+ pdata->tx_max_fifo_size, pdata->rx_max_fifo_size);
+
+ /* Initialize IFLIB if_softc_ctx_t */
+ axgbe_init_iflib_softc_ctx(sc);
+
+ /* Alloc channels */
+ if (axgbe_alloc_channels(ctx)) {
+ axgbe_error("Unable to allocate channel memory\n");
+ return (ENOMEM);
+ }
+
+ TASK_INIT(&pdata->service_work, 0, xgbe_service, pdata);
+
+ /* create the workqueue */
+ pdata->dev_workqueue = taskqueue_create("axgbe", M_WAITOK,
+ taskqueue_thread_enqueue, &pdata->dev_workqueue);
+ taskqueue_start_threads(&pdata->dev_workqueue, 1, PI_NET,
+ "axgbe dev taskq");
+
+ /* Init timers */
+ xgbe_init_timers(pdata);
+
+ return (0);
+} /* axgbe_if_attach_pre */
+
+static void
+xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
+{
+ xgbe_init_function_ptrs_dev(&pdata->hw_if);
+ xgbe_init_function_ptrs_phy(&pdata->phy_if);
+ xgbe_init_function_ptrs_i2c(&pdata->i2c_if);
+ xgbe_init_function_ptrs_desc(&pdata->desc_if);
+
+ pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
+}
+
+static void
+axgbe_set_counts(if_ctx_t ctx)
+{
+	struct axgbe_if_softc	*sc = iflib_get_softc(ctx);
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ cpuset_t lcpus;
+ int cpu_count, err;
+ size_t len;
+
+ /* Set all function pointers */
+ xgbe_init_all_fptrs(pdata);
+
+ /* Populate the hardware features */
+ xgbe_get_all_hw_features(pdata);
+
+ if (!pdata->tx_max_channel_count)
+ pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
+ if (!pdata->rx_max_channel_count)
+ pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
+
+ if (!pdata->tx_max_q_count)
+ pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
+ if (!pdata->rx_max_q_count)
+ pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
+
+ /*
+ * Calculate the number of Tx and Rx rings to be created
+ * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
+ * the number of Tx queues to the number of Tx channels
+ * enabled
+ * -Rx (DMA) Channels do not map 1-to-1 so use the actual
+ * number of Rx queues or maximum allowed
+ */
+
+ /* Get cpu count from sysctl */
+ len = sizeof(cpu_count);
+ err = kernel_sysctlbyname(curthread, "hw.ncpu", &cpu_count, &len, NULL,
+ 0, NULL, 0);
+ if (err) {
+ axgbe_error("Unable to fetch number of cpus\n");
+ cpu_count = 1;
+ }
+
+ if (bus_get_cpus(pdata->dev, INTR_CPUS, sizeof(lcpus), &lcpus) != 0) {
+ axgbe_error("Unable to fetch CPU list\n");
+ /* TODO - handle CPU_COPY(&all_cpus, &lcpus); */
+ }
+
+ DBGPR("ncpu %d intrcpu %d\n", cpu_count, CPU_COUNT(&lcpus));
+
+ pdata->tx_ring_count = min(CPU_COUNT(&lcpus), pdata->hw_feat.tx_ch_cnt);
+ pdata->tx_ring_count = min(pdata->tx_ring_count,
+ pdata->tx_max_channel_count);
+ pdata->tx_ring_count = min(pdata->tx_ring_count, pdata->tx_max_q_count);
+
+ pdata->tx_q_count = pdata->tx_ring_count;
+
+ pdata->rx_ring_count = min(CPU_COUNT(&lcpus), pdata->hw_feat.rx_ch_cnt);
+ pdata->rx_ring_count = min(pdata->rx_ring_count,
+ pdata->rx_max_channel_count);
+
+ pdata->rx_q_count = min(pdata->hw_feat.rx_q_cnt, pdata->rx_max_q_count);
+
+ DBGPR("TX/RX max channel count = %u/%u\n",
+ pdata->tx_max_channel_count, pdata->rx_max_channel_count);
+ DBGPR("TX/RX max queue count = %u/%u\n",
+ pdata->tx_max_q_count, pdata->rx_max_q_count);
+ DBGPR("TX/RX DMA ring count = %u/%u\n",
+ pdata->tx_ring_count, pdata->rx_ring_count);
+ DBGPR("TX/RX hardware queue count = %u/%u\n",
+ pdata->tx_q_count, pdata->rx_q_count);
+} /* axgbe_set_counts */
+
+static void
+axgbe_init_iflib_softc_ctx(struct axgbe_if_softc *sc)
+{
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ if_softc_ctx_t scctx = sc->scctx;
+ if_shared_ctx_t sctx = sc->sctx;
+ int i;
+
+ scctx->isc_nrxqsets = pdata->rx_q_count;
+ scctx->isc_ntxqsets = pdata->tx_q_count;
+ scctx->isc_msix_bar = pci_msix_table_bar(pdata->dev);
+ scctx->isc_tx_nsegments = 32;
+
+ for (i = 0; i < sctx->isc_ntxqs; i++) {
+ scctx->isc_txqsizes[i] =
+ roundup2(scctx->isc_ntxd[i] * sizeof(struct xgbe_ring_desc),
+ 128);
+ scctx->isc_txd_size[i] = sizeof(struct xgbe_ring_desc);
+ }
+
+ for (i = 0; i < sctx->isc_nrxqs; i++) {
+ scctx->isc_rxqsizes[i] =
+ roundup2(scctx->isc_nrxd[i] * sizeof(struct xgbe_ring_desc),
+ 128);
+ scctx->isc_rxd_size[i] = sizeof(struct xgbe_ring_desc);
+ }
+
+ scctx->isc_tx_tso_segments_max = 32;
+ scctx->isc_tx_tso_size_max = XGBE_TSO_MAX_SIZE;
+ scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
+
+ /*
+ * Set capabilities
+ * 1) IFLIB automatically adds IFCAP_HWSTATS, so need to set explicitly
+ * 2) isc_tx_csum_flags is mandatory if IFCAP_TXCSUM (included in
+ * IFCAP_HWCSUM) is set
+ */
+ scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP |
+ CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6 |
+ CSUM_TSO);
+ scctx->isc_capenable = (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
+ IFCAP_JUMBO_MTU |
+ IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER |
+ IFCAP_VLAN_HWCSUM |
+ IFCAP_TSO | IFCAP_VLAN_HWTSO);
+ scctx->isc_capabilities = scctx->isc_capenable;
+
+ /*
+ * Set rss_table_size alone when adding RSS support. rss_table_mask
+ * will be set by IFLIB based on rss_table_size
+ */
+ scctx->isc_rss_table_size = XGBE_RSS_MAX_TABLE_SIZE;
+
+ scctx->isc_ntxqsets_max = XGBE_MAX_QUEUES;
+ scctx->isc_nrxqsets_max = XGBE_MAX_QUEUES;
+
+ scctx->isc_txrx = &axgbe_txrx;
+}
+
+static int
+axgbe_alloc_channels(if_ctx_t ctx)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ struct xgbe_channel *channel;
+ int i, j, count;
+
+ DBGPR("%s: txqs %d rxqs %d\n", __func__, pdata->tx_ring_count,
+ pdata->rx_ring_count);
+
+	/* Iflib sets based on isc_ntxqsets/nrxqsets */
+ count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+ /* Allocate channel memory */
+ for (i = 0; i < count ; i++) {
+ channel = (struct xgbe_channel*)malloc(sizeof(struct xgbe_channel),
+ M_AXGBE, M_NOWAIT | M_ZERO);
+
+ if (channel == NULL) {
+ for (j = 0; j < i; j++) {
+ free(pdata->channel[j], M_AXGBE);
+ pdata->channel[j] = NULL;
+ }
+ return (ENOMEM);
+ }
+
+ pdata->channel[i] = channel;
+ }
+
+ pdata->total_channel_count = count;
+ DBGPR("Channel count set to: %u\n", pdata->total_channel_count);
+
+ for (i = 0; i < count; i++) {
+
+ channel = pdata->channel[i];
+ snprintf(channel->name, sizeof(channel->name), "channel-%d",i);
+
+ channel->pdata = pdata;
+ channel->queue_index = i;
+ channel->dma_tag = rman_get_bustag(pdata->xgmac_res);
+ bus_space_subregion(channel->dma_tag,
+ rman_get_bushandle(pdata->xgmac_res),
+ DMA_CH_BASE + (DMA_CH_INC * i), DMA_CH_INC,
+ &channel->dma_handle);
+ channel->tx_ring = NULL;
+ channel->rx_ring = NULL;
+ }
+
+ return (0);
+} /* axgbe_alloc_channels */
+
+/*
+ * Service task: poll the PHY for link state and, on a change, push the
+ * new admin status to the stack via iflib.  Runs on pdata->dev_workqueue
+ * ('pending' is the standard taskqueue argument and is unused).
+ * Re-arms the one-second service timer when done.
+ */
+static void
+xgbe_service(void *ctx, int pending)
+{
+	struct xgbe_prv_data *pdata = ctx;
+	/*
+	 * NOTE(review): this cast assumes pdata is the first member of
+	 * struct axgbe_if_softc -- confirm against the softc layout.
+	 */
+	struct axgbe_if_softc *sc = (struct axgbe_if_softc *)pdata;
+	bool prev_state = false;
+
+	/* Get previous link status */
+	prev_state = pdata->phy.link;
+
+	pdata->phy_if.phy_status(pdata);
+
+	if (prev_state != pdata->phy.link) {
+		pdata->phy_link = pdata->phy.link;
+		axgbe_if_update_admin_status(sc->ctx);
+	}
+
+	/* Re-arm the periodic service timer (fires once per second). */
+	callout_reset(&pdata->service_timer, 1*hz, xgbe_service_timer, pdata);
+}
+
+/*
+ * Callout handler for the service timer: defer the real work (PHY
+ * polling in xgbe_service()) to the driver taskqueue, since callout
+ * handlers must not sleep.
+ */
+static void
+xgbe_service_timer(void *data)
+{
+	struct xgbe_prv_data *pdata = data;
+
+	taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work);
+}
+
+/*
+ * Initialize the periodic service callout.  The second argument of
+ * callout_init(9) is the MPSAFE flag (non-zero means the handler may
+ * run without Giant), not a period: the previous "1*hz" only worked
+ * because any non-zero value is treated as true.  Pass 1 explicitly.
+ */
+static void
+xgbe_init_timers(struct xgbe_prv_data *pdata)
+{
+	callout_init(&pdata->service_timer, 1);
+}
+
+/*
+ * Arm the service timer to fire in one second; xgbe_service_timer()
+ * then queues the service task, which re-arms it.
+ */
+static void
+xgbe_start_timers(struct xgbe_prv_data *pdata)
+{
+	callout_reset(&pdata->service_timer, 1*hz, xgbe_service_timer, pdata);
+}
+
+/*
+ * Stop the service timer.  callout_drain(9) both cancels the callout
+ * and waits for any in-flight handler to complete, so the trailing
+ * callout_stop() that used to follow it was redundant and has been
+ * removed.
+ */
+static void
+xgbe_stop_timers(struct xgbe_prv_data *pdata)
+{
+	callout_drain(&pdata->service_timer);
+}
+
+static void
+xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
+{
+ axgbe_printf(1, "\n************* PHY Reg dump *********************\n");
+
+ axgbe_printf(1, "PCS Control Reg (%#06x) = %#06x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
+ axgbe_printf(1, "PCS Status Reg (%#06x) = %#06x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
+ axgbe_printf(1, "Phy Id (PHYS ID 1 %#06x)= %#06x\n", MDIO_DEVID1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
+ axgbe_printf(1, "Phy Id (PHYS ID 2 %#06x)= %#06x\n", MDIO_DEVID2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
+ axgbe_printf(1, "Devices in Package (%#06x)= %#06x\n", MDIO_DEVS1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
+ axgbe_printf(1, "Devices in Package (%#06x)= %#06x\n", MDIO_DEVS2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
+ axgbe_printf(1, "Auto-Neg Control Reg (%#06x) = %#06x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
+ axgbe_printf(1, "Auto-Neg Status Reg (%#06x) = %#06x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
+ axgbe_printf(1, "Auto-Neg Ad Reg 1 (%#06x) = %#06x\n",
+ MDIO_AN_ADVERTISE,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
+ axgbe_printf(1, "Auto-Neg Ad Reg 2 (%#06x) = %#06x\n",
+ MDIO_AN_ADVERTISE + 1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
+ axgbe_printf(1, "Auto-Neg Ad Reg 3 (%#06x) = %#06x\n",
+ MDIO_AN_ADVERTISE + 2,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
+ axgbe_printf(1, "Auto-Neg Completion Reg (%#06x) = %#06x\n",
+ MDIO_AN_COMP_STAT,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
+
+ axgbe_printf(1, "\n************************************************\n");
+}
+
+/*
+ * Debug dump of the XP_PROP property registers.  The 38 property words
+ * are laid out contiguously starting at XP_PROP_0, 4 bytes apart.
+ */
+static void
+xgbe_dump_prop_registers(struct xgbe_prv_data *pdata)
+{
+	unsigned int reg;
+	int i;
+
+	axgbe_printf(1, "\n************* PROP Reg dump ********************\n");
+
+	for (i = 0; i < 38; i++) {
+		reg = XP_PROP_0 + (i * 4);
+		axgbe_printf(1, "PROP Offset 0x%08x = %08x\n", reg,
+		    XP_IOREAD(pdata, reg));
+	}
+}
+
+/*
+ * Debug dump of the global DMA registers plus the per-channel DMA
+ * register block of every channel, or of a single channel when
+ * 'ch' >= 0 (pass a negative value to dump all).
+ *
+ * NOTE(review): the loop walks a fixed 8 channel slots -- assumes
+ * pdata->channel[] always holds 8 valid pointers; confirm against
+ * axgbe_alloc_channels() before dumping with fewer channels.
+ */
+static void
+xgbe_dump_dma_registers(struct xgbe_prv_data *pdata, int ch)
+{
+	struct xgbe_channel *channel;
+	int i;
+
+	axgbe_printf(1, "\n************* DMA Reg dump *********************\n");
+
+	axgbe_printf(1, "DMA MR Reg (%08x) = %08x\n", DMA_MR,
+	    XGMAC_IOREAD(pdata, DMA_MR));
+	axgbe_printf(1, "DMA SBMR Reg (%08x) = %08x\n", DMA_SBMR,
+	    XGMAC_IOREAD(pdata, DMA_SBMR));
+	axgbe_printf(1, "DMA ISR Reg (%08x) = %08x\n", DMA_ISR,
+	    XGMAC_IOREAD(pdata, DMA_ISR));
+	axgbe_printf(1, "DMA AXIARCR Reg (%08x) = %08x\n", DMA_AXIARCR,
+	    XGMAC_IOREAD(pdata, DMA_AXIARCR));
+	axgbe_printf(1, "DMA AXIAWCR Reg (%08x) = %08x\n", DMA_AXIAWCR,
+	    XGMAC_IOREAD(pdata, DMA_AXIAWCR));
+	axgbe_printf(1, "DMA AXIAWARCR Reg (%08x) = %08x\n", DMA_AXIAWARCR,
+	    XGMAC_IOREAD(pdata, DMA_AXIAWARCR));
+	axgbe_printf(1, "DMA DSR0 Reg (%08x) = %08x\n", DMA_DSR0,
+	    XGMAC_IOREAD(pdata, DMA_DSR0));
+	axgbe_printf(1, "DMA DSR1 Reg (%08x) = %08x\n", DMA_DSR1,
+	    XGMAC_IOREAD(pdata, DMA_DSR1));
+	axgbe_printf(1, "DMA DSR2 Reg (%08x) = %08x\n", DMA_DSR2,
+	    XGMAC_IOREAD(pdata, DMA_DSR2));
+	axgbe_printf(1, "DMA DSR3 Reg (%08x) = %08x\n", DMA_DSR3,
+	    XGMAC_IOREAD(pdata, DMA_DSR3));
+	axgbe_printf(1, "DMA DSR4 Reg (%08x) = %08x\n", DMA_DSR4,
+	    XGMAC_IOREAD(pdata, DMA_DSR4));
+	axgbe_printf(1, "DMA TXEDMACR Reg (%08x) = %08x\n", DMA_TXEDMACR,
+	    XGMAC_IOREAD(pdata, DMA_TXEDMACR));
+	axgbe_printf(1, "DMA RXEDMACR Reg (%08x) = %08x\n", DMA_RXEDMACR,
+	    XGMAC_IOREAD(pdata, DMA_RXEDMACR));
+
+	for (i = 0 ; i < 8 ; i++ ) {
+
+		/* When a specific channel was requested, skip the rest. */
+		if (ch >= 0) {
+			if (i != ch)
+				continue;
+		}
+
+		channel = pdata->channel[i];
+
+		axgbe_printf(1, "\n************* DMA CH %d dump ****************\n", i);
+
+		axgbe_printf(1, "DMA_CH_CR Reg (%08x) = %08x\n",
+		    DMA_CH_CR, XGMAC_DMA_IOREAD(channel, DMA_CH_CR));
+		axgbe_printf(1, "DMA_CH_TCR Reg (%08x) = %08x\n",
+		    DMA_CH_TCR, XGMAC_DMA_IOREAD(channel, DMA_CH_TCR));
+		axgbe_printf(1, "DMA_CH_RCR Reg (%08x) = %08x\n",
+		    DMA_CH_RCR, XGMAC_DMA_IOREAD(channel, DMA_CH_RCR));
+		axgbe_printf(1, "DMA_CH_TDLR_HI Reg (%08x) = %08x\n",
+		    DMA_CH_TDLR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_TDLR_HI));
+		axgbe_printf(1, "DMA_CH_TDLR_LO Reg (%08x) = %08x\n",
+		    DMA_CH_TDLR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDLR_LO));
+		axgbe_printf(1, "DMA_CH_RDLR_HI Reg (%08x) = %08x\n",
+		    DMA_CH_RDLR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_RDLR_HI));
+		axgbe_printf(1, "DMA_CH_RDLR_LO Reg (%08x) = %08x\n",
+		    DMA_CH_RDLR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDLR_LO));
+		axgbe_printf(1, "DMA_CH_TDTR_LO Reg (%08x) = %08x\n",
+		    DMA_CH_TDTR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDTR_LO));
+		axgbe_printf(1, "DMA_CH_RDTR_LO Reg (%08x) = %08x\n",
+		    DMA_CH_RDTR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDTR_LO));
+		axgbe_printf(1, "DMA_CH_TDRLR Reg (%08x) = %08x\n",
+		    DMA_CH_TDRLR, XGMAC_DMA_IOREAD(channel, DMA_CH_TDRLR));
+		axgbe_printf(1, "DMA_CH_RDRLR Reg (%08x) = %08x\n",
+		    DMA_CH_RDRLR, XGMAC_DMA_IOREAD(channel, DMA_CH_RDRLR));
+		axgbe_printf(1, "DMA_CH_IER Reg (%08x) = %08x\n",
+		    DMA_CH_IER, XGMAC_DMA_IOREAD(channel, DMA_CH_IER));
+		axgbe_printf(1, "DMA_CH_RIWT Reg (%08x) = %08x\n",
+		    DMA_CH_RIWT, XGMAC_DMA_IOREAD(channel, DMA_CH_RIWT));
+		axgbe_printf(1, "DMA_CH_CATDR_LO Reg (%08x) = %08x\n",
+		    DMA_CH_CATDR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CATDR_LO));
+		axgbe_printf(1, "DMA_CH_CARDR_LO Reg (%08x) = %08x\n",
+		    DMA_CH_CARDR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CARDR_LO));
+		axgbe_printf(1, "DMA_CH_CATBR_HI Reg (%08x) = %08x\n",
+		    DMA_CH_CATBR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_CATBR_HI));
+		axgbe_printf(1, "DMA_CH_CATBR_LO Reg (%08x) = %08x\n",
+		    DMA_CH_CATBR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CATBR_LO));
+		axgbe_printf(1, "DMA_CH_CARBR_HI Reg (%08x) = %08x\n",
+		    DMA_CH_CARBR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_CARBR_HI));
+		axgbe_printf(1, "DMA_CH_CARBR_LO Reg (%08x) = %08x\n",
+		    DMA_CH_CARBR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CARBR_LO));
+		axgbe_printf(1, "DMA_CH_SR Reg (%08x) = %08x\n",
+		    DMA_CH_SR, XGMAC_DMA_IOREAD(channel, DMA_CH_SR));
+		axgbe_printf(1, "DMA_CH_DSR Reg (%08x) = %08x\n",
+		    DMA_CH_DSR, XGMAC_DMA_IOREAD(channel, DMA_CH_DSR));
+		axgbe_printf(1, "DMA_CH_DCFL Reg (%08x) = %08x\n",
+		    DMA_CH_DCFL, XGMAC_DMA_IOREAD(channel, DMA_CH_DCFL));
+		axgbe_printf(1, "DMA_CH_MFC Reg (%08x) = %08x\n",
+		    DMA_CH_MFC, XGMAC_DMA_IOREAD(channel, DMA_CH_MFC));
+		axgbe_printf(1, "DMA_CH_TDTRO Reg (%08x) = %08x\n",
+		    DMA_CH_TDTRO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDTRO));
+		axgbe_printf(1, "DMA_CH_RDTRO Reg (%08x) = %08x\n",
+		    DMA_CH_RDTRO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDTRO));
+		axgbe_printf(1, "DMA_CH_TDWRO Reg (%08x) = %08x\n",
+		    DMA_CH_TDWRO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDWRO));
+		axgbe_printf(1, "DMA_CH_RDWRO Reg (%08x) = %08x\n",
+		    DMA_CH_RDWRO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDWRO));
+	}
+}
+
+/*
+ * Debug dump of the global MTL (MAC Translation Layer) registers and,
+ * for each of the 8 MTL queues, the per-queue TX/RX registers.
+ */
+static void
+xgbe_dump_mtl_registers(struct xgbe_prv_data *pdata)
+{
+	int i;
+
+	axgbe_printf(1, "\n************* MTL Reg dump *********************\n");
+
+	axgbe_printf(1, "MTL OMR Reg (%08x) = %08x\n", MTL_OMR,
+	    XGMAC_IOREAD(pdata, MTL_OMR));
+	axgbe_printf(1, "MTL FDCR Reg (%08x) = %08x\n", MTL_FDCR,
+	    XGMAC_IOREAD(pdata, MTL_FDCR));
+	axgbe_printf(1, "MTL FDSR Reg (%08x) = %08x\n", MTL_FDSR,
+	    XGMAC_IOREAD(pdata, MTL_FDSR));
+	axgbe_printf(1, "MTL FDDR Reg (%08x) = %08x\n", MTL_FDDR,
+	    XGMAC_IOREAD(pdata, MTL_FDDR));
+	axgbe_printf(1, "MTL ISR Reg (%08x) = %08x\n", MTL_ISR,
+	    XGMAC_IOREAD(pdata, MTL_ISR));
+	axgbe_printf(1, "MTL RQDCM0R Reg (%08x) = %08x\n", MTL_RQDCM0R,
+	    XGMAC_IOREAD(pdata, MTL_RQDCM0R));
+	axgbe_printf(1, "MTL RQDCM1R Reg (%08x) = %08x\n", MTL_RQDCM1R,
+	    XGMAC_IOREAD(pdata, MTL_RQDCM1R));
+	axgbe_printf(1, "MTL RQDCM2R Reg (%08x) = %08x\n", MTL_RQDCM2R,
+	    XGMAC_IOREAD(pdata, MTL_RQDCM2R));
+	axgbe_printf(1, "MTL TCPM0R Reg (%08x) = %08x\n", MTL_TCPM0R,
+	    XGMAC_IOREAD(pdata, MTL_TCPM0R));
+	axgbe_printf(1, "MTL TCPM1R Reg (%08x) = %08x\n", MTL_TCPM1R,
+	    XGMAC_IOREAD(pdata, MTL_TCPM1R));
+
+	for (i = 0 ; i < 8 ; i++ ) {
+
+		axgbe_printf(1, "\n************* MTL CH %d dump ****************\n", i);
+
+		axgbe_printf(1, "MTL_Q_TQOMR Reg (%08x) = %08x\n",
+		    MTL_Q_TQOMR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQOMR));
+		axgbe_printf(1, "MTL_Q_TQUR Reg (%08x) = %08x\n",
+		    MTL_Q_TQUR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQUR));
+		axgbe_printf(1, "MTL_Q_TQDR Reg (%08x) = %08x\n",
+		    MTL_Q_TQDR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQDR));
+		axgbe_printf(1, "MTL_Q_TC0ETSCR Reg (%08x) = %08x\n",
+		    MTL_Q_TC0ETSCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0ETSCR));
+		axgbe_printf(1, "MTL_Q_TC0ETSSR Reg (%08x) = %08x\n",
+		    MTL_Q_TC0ETSSR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0ETSSR));
+		axgbe_printf(1, "MTL_Q_TC0QWR Reg (%08x) = %08x\n",
+		    MTL_Q_TC0QWR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0QWR));
+
+		axgbe_printf(1, "MTL_Q_RQOMR Reg (%08x) = %08x\n",
+		    MTL_Q_RQOMR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQOMR));
+		axgbe_printf(1, "MTL_Q_RQMPOCR Reg (%08x) = %08x\n",
+		    MTL_Q_RQMPOCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQMPOCR));
+		axgbe_printf(1, "MTL_Q_RQDR Reg (%08x) = %08x\n",
+		    MTL_Q_RQDR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQDR));
+		axgbe_printf(1, "MTL_Q_RQCR Reg (%08x) = %08x\n",
+		    MTL_Q_RQCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQCR));
+		axgbe_printf(1, "MTL_Q_RQFCR Reg (%08x) = %08x\n",
+		    MTL_Q_RQFCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQFCR));
+		axgbe_printf(1, "MTL_Q_IER Reg (%08x) = %08x\n",
+		    MTL_Q_IER, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_IER));
+		axgbe_printf(1, "MTL_Q_ISR Reg (%08x) = %08x\n",
+		    MTL_Q_ISR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR));
+	}
+}
+
+/*
+ * Debug dump of the XGMAC core registers.
+ *
+ * Fix: the MACA0LR line previously printed MAC_TCR as the register
+ * offset (copy/paste error) while reading MAC_MACA0LR; it now prints
+ * the correct MAC_MACA0LR offset.
+ */
+static void
+xgbe_dump_mac_registers(struct xgbe_prv_data *pdata)
+{
+	axgbe_printf(1, "\n************* MAC Reg dump **********************\n");
+
+	axgbe_printf(1, "MAC TCR Reg (%08x) = %08x\n", MAC_TCR,
+	    XGMAC_IOREAD(pdata, MAC_TCR));
+	axgbe_printf(1, "MAC RCR Reg (%08x) = %08x\n", MAC_RCR,
+	    XGMAC_IOREAD(pdata, MAC_RCR));
+	axgbe_printf(1, "MAC PFR Reg (%08x) = %08x\n", MAC_PFR,
+	    XGMAC_IOREAD(pdata, MAC_PFR));
+	axgbe_printf(1, "MAC WTR Reg (%08x) = %08x\n", MAC_WTR,
+	    XGMAC_IOREAD(pdata, MAC_WTR));
+	axgbe_printf(1, "MAC HTR0 Reg (%08x) = %08x\n", MAC_HTR0,
+	    XGMAC_IOREAD(pdata, MAC_HTR0));
+	axgbe_printf(1, "MAC HTR1 Reg (%08x) = %08x\n", MAC_HTR1,
+	    XGMAC_IOREAD(pdata, MAC_HTR1));
+	axgbe_printf(1, "MAC HTR2 Reg (%08x) = %08x\n", MAC_HTR2,
+	    XGMAC_IOREAD(pdata, MAC_HTR2));
+	axgbe_printf(1, "MAC HTR3 Reg (%08x) = %08x\n", MAC_HTR3,
+	    XGMAC_IOREAD(pdata, MAC_HTR3));
+	axgbe_printf(1, "MAC HTR4 Reg (%08x) = %08x\n", MAC_HTR4,
+	    XGMAC_IOREAD(pdata, MAC_HTR4));
+	axgbe_printf(1, "MAC HTR5 Reg (%08x) = %08x\n", MAC_HTR5,
+	    XGMAC_IOREAD(pdata, MAC_HTR5));
+	axgbe_printf(1, "MAC HTR6 Reg (%08x) = %08x\n", MAC_HTR6,
+	    XGMAC_IOREAD(pdata, MAC_HTR6));
+	axgbe_printf(1, "MAC HTR7 Reg (%08x) = %08x\n", MAC_HTR7,
+	    XGMAC_IOREAD(pdata, MAC_HTR7));
+	axgbe_printf(1, "MAC VLANTR Reg (%08x) = %08x\n", MAC_VLANTR,
+	    XGMAC_IOREAD(pdata, MAC_VLANTR));
+	axgbe_printf(1, "MAC VLANHTR Reg (%08x) = %08x\n", MAC_VLANHTR,
+	    XGMAC_IOREAD(pdata, MAC_VLANHTR));
+	axgbe_printf(1, "MAC VLANIR Reg (%08x) = %08x\n", MAC_VLANIR,
+	    XGMAC_IOREAD(pdata, MAC_VLANIR));
+	axgbe_printf(1, "MAC IVLANIR Reg (%08x) = %08x\n", MAC_IVLANIR,
+	    XGMAC_IOREAD(pdata, MAC_IVLANIR));
+	axgbe_printf(1, "MAC RETMR Reg (%08x) = %08x\n", MAC_RETMR,
+	    XGMAC_IOREAD(pdata, MAC_RETMR));
+	axgbe_printf(1, "MAC Q0TFCR Reg (%08x) = %08x\n", MAC_Q0TFCR,
+	    XGMAC_IOREAD(pdata, MAC_Q0TFCR));
+	axgbe_printf(1, "MAC Q1TFCR Reg (%08x) = %08x\n", MAC_Q1TFCR,
+	    XGMAC_IOREAD(pdata, MAC_Q1TFCR));
+	axgbe_printf(1, "MAC Q2TFCR Reg (%08x) = %08x\n", MAC_Q2TFCR,
+	    XGMAC_IOREAD(pdata, MAC_Q2TFCR));
+	axgbe_printf(1, "MAC Q3TFCR Reg (%08x) = %08x\n", MAC_Q3TFCR,
+	    XGMAC_IOREAD(pdata, MAC_Q3TFCR));
+	axgbe_printf(1, "MAC Q4TFCR Reg (%08x) = %08x\n", MAC_Q4TFCR,
+	    XGMAC_IOREAD(pdata, MAC_Q4TFCR));
+	axgbe_printf(1, "MAC Q5TFCR Reg (%08x) = %08x\n", MAC_Q5TFCR,
+	    XGMAC_IOREAD(pdata, MAC_Q5TFCR));
+	axgbe_printf(1, "MAC Q6TFCR Reg (%08x) = %08x\n", MAC_Q6TFCR,
+	    XGMAC_IOREAD(pdata, MAC_Q6TFCR));
+	axgbe_printf(1, "MAC Q7TFCR Reg (%08x) = %08x\n", MAC_Q7TFCR,
+	    XGMAC_IOREAD(pdata, MAC_Q7TFCR));
+	axgbe_printf(1, "MAC RFCR Reg (%08x) = %08x\n", MAC_RFCR,
+	    XGMAC_IOREAD(pdata, MAC_RFCR));
+	axgbe_printf(1, "MAC RQC0R Reg (%08x) = %08x\n", MAC_RQC0R,
+	    XGMAC_IOREAD(pdata, MAC_RQC0R));
+	axgbe_printf(1, "MAC RQC1R Reg (%08x) = %08x\n", MAC_RQC1R,
+	    XGMAC_IOREAD(pdata, MAC_RQC1R));
+	axgbe_printf(1, "MAC RQC2R Reg (%08x) = %08x\n", MAC_RQC2R,
+	    XGMAC_IOREAD(pdata, MAC_RQC2R));
+	axgbe_printf(1, "MAC RQC3R Reg (%08x) = %08x\n", MAC_RQC3R,
+	    XGMAC_IOREAD(pdata, MAC_RQC3R));
+	axgbe_printf(1, "MAC ISR Reg (%08x) = %08x\n", MAC_ISR,
+	    XGMAC_IOREAD(pdata, MAC_ISR));
+	axgbe_printf(1, "MAC IER Reg (%08x) = %08x\n", MAC_IER,
+	    XGMAC_IOREAD(pdata, MAC_IER));
+	axgbe_printf(1, "MAC RTSR Reg (%08x) = %08x\n", MAC_RTSR,
+	    XGMAC_IOREAD(pdata, MAC_RTSR));
+	axgbe_printf(1, "MAC PMTCSR Reg (%08x) = %08x\n", MAC_PMTCSR,
+	    XGMAC_IOREAD(pdata, MAC_PMTCSR));
+	axgbe_printf(1, "MAC RWKPFR Reg (%08x) = %08x\n", MAC_RWKPFR,
+	    XGMAC_IOREAD(pdata, MAC_RWKPFR));
+	axgbe_printf(1, "MAC LPICSR Reg (%08x) = %08x\n", MAC_LPICSR,
+	    XGMAC_IOREAD(pdata, MAC_LPICSR));
+	axgbe_printf(1, "MAC LPITCR Reg (%08x) = %08x\n", MAC_LPITCR,
+	    XGMAC_IOREAD(pdata, MAC_LPITCR));
+	axgbe_printf(1, "MAC TIR Reg (%08x) = %08x\n", MAC_TIR,
+	    XGMAC_IOREAD(pdata, MAC_TIR));
+	axgbe_printf(1, "MAC VR Reg (%08x) = %08x\n", MAC_VR,
+	    XGMAC_IOREAD(pdata, MAC_VR));
+	axgbe_printf(1, "MAC DR Reg (%08x) = %08x\n", MAC_DR,
+	    XGMAC_IOREAD(pdata, MAC_DR));
+	axgbe_printf(1, "MAC HWF0R Reg (%08x) = %08x\n", MAC_HWF0R,
+	    XGMAC_IOREAD(pdata, MAC_HWF0R));
+	axgbe_printf(1, "MAC HWF1R Reg (%08x) = %08x\n", MAC_HWF1R,
+	    XGMAC_IOREAD(pdata, MAC_HWF1R));
+	axgbe_printf(1, "MAC HWF2R Reg (%08x) = %08x\n", MAC_HWF2R,
+	    XGMAC_IOREAD(pdata, MAC_HWF2R));
+	axgbe_printf(1, "MAC MDIOSCAR Reg (%08x) = %08x\n", MAC_MDIOSCAR,
+	    XGMAC_IOREAD(pdata, MAC_MDIOSCAR));
+	axgbe_printf(1, "MAC MDIOSCCDR Reg (%08x) = %08x\n", MAC_MDIOSCCDR,
+	    XGMAC_IOREAD(pdata, MAC_MDIOSCCDR));
+	axgbe_printf(1, "MAC MDIOISR Reg (%08x) = %08x\n", MAC_MDIOISR,
+	    XGMAC_IOREAD(pdata, MAC_MDIOISR));
+	axgbe_printf(1, "MAC MDIOIER Reg (%08x) = %08x\n", MAC_MDIOIER,
+	    XGMAC_IOREAD(pdata, MAC_MDIOIER));
+	axgbe_printf(1, "MAC MDIOCL22R Reg (%08x) = %08x\n", MAC_MDIOCL22R,
+	    XGMAC_IOREAD(pdata, MAC_MDIOCL22R));
+	axgbe_printf(1, "MAC GPIOCR Reg (%08x) = %08x\n", MAC_GPIOCR,
+	    XGMAC_IOREAD(pdata, MAC_GPIOCR));
+	axgbe_printf(1, "MAC GPIOSR Reg (%08x) = %08x\n", MAC_GPIOSR,
+	    XGMAC_IOREAD(pdata, MAC_GPIOSR));
+	axgbe_printf(1, "MAC MACA0HR Reg (%08x) = %08x\n", MAC_MACA0HR,
+	    XGMAC_IOREAD(pdata, MAC_MACA0HR));
+	axgbe_printf(1, "MAC MACA0LR Reg (%08x) = %08x\n", MAC_MACA0LR,
+	    XGMAC_IOREAD(pdata, MAC_MACA0LR));
+	axgbe_printf(1, "MAC MACA1HR Reg (%08x) = %08x\n", MAC_MACA1HR,
+	    XGMAC_IOREAD(pdata, MAC_MACA1HR));
+	axgbe_printf(1, "MAC MACA1LR Reg (%08x) = %08x\n", MAC_MACA1LR,
+	    XGMAC_IOREAD(pdata, MAC_MACA1LR));
+	axgbe_printf(1, "MAC RSSCR Reg (%08x) = %08x\n", MAC_RSSCR,
+	    XGMAC_IOREAD(pdata, MAC_RSSCR));
+	axgbe_printf(1, "MAC RSSDR Reg (%08x) = %08x\n", MAC_RSSDR,
+	    XGMAC_IOREAD(pdata, MAC_RSSDR));
+	axgbe_printf(1, "MAC RSSAR Reg (%08x) = %08x\n", MAC_RSSAR,
+	    XGMAC_IOREAD(pdata, MAC_RSSAR));
+	axgbe_printf(1, "MAC TSCR Reg (%08x) = %08x\n", MAC_TSCR,
+	    XGMAC_IOREAD(pdata, MAC_TSCR));
+	axgbe_printf(1, "MAC SSIR Reg (%08x) = %08x\n", MAC_SSIR,
+	    XGMAC_IOREAD(pdata, MAC_SSIR));
+	axgbe_printf(1, "MAC STSR Reg (%08x) = %08x\n", MAC_STSR,
+	    XGMAC_IOREAD(pdata, MAC_STSR));
+	axgbe_printf(1, "MAC STNR Reg (%08x) = %08x\n", MAC_STNR,
+	    XGMAC_IOREAD(pdata, MAC_STNR));
+	axgbe_printf(1, "MAC STSUR Reg (%08x) = %08x\n", MAC_STSUR,
+	    XGMAC_IOREAD(pdata, MAC_STSUR));
+	axgbe_printf(1, "MAC STNUR Reg (%08x) = %08x\n", MAC_STNUR,
+	    XGMAC_IOREAD(pdata, MAC_STNUR));
+	axgbe_printf(1, "MAC TSAR Reg (%08x) = %08x\n", MAC_TSAR,
+	    XGMAC_IOREAD(pdata, MAC_TSAR));
+	axgbe_printf(1, "MAC TSSR Reg (%08x) = %08x\n", MAC_TSSR,
+	    XGMAC_IOREAD(pdata, MAC_TSSR));
+	axgbe_printf(1, "MAC TXSNR Reg (%08x) = %08x\n", MAC_TXSNR,
+	    XGMAC_IOREAD(pdata, MAC_TXSNR));
+	axgbe_printf(1, "MAC TXSSR Reg (%08x) = %08x\n", MAC_TXSSR,
+	    XGMAC_IOREAD(pdata, MAC_TXSSR));
+}
+
+/*
+ * Debug dump of the RMON/MMC hardware statistics.  The counters are
+ * first refreshed from hardware via hw_if.read_mmc_stats() and then
+ * printed from the cached pdata->mmc_stats copy.
+ */
+static void
+xgbe_dump_rmon_counters(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+
+	axgbe_printf(1, "\n************* RMON counters dump ***************\n");
+
+	pdata->hw_if.read_mmc_stats(pdata);
+
+	axgbe_printf(1, "rmon txoctetcount_gb (%08x) = %08lx\n",
+	    MMC_TXOCTETCOUNT_GB_LO, stats->txoctetcount_gb);
+	axgbe_printf(1, "rmon txframecount_gb (%08x) = %08lx\n",
+	    MMC_TXFRAMECOUNT_GB_LO, stats->txframecount_gb);
+	axgbe_printf(1, "rmon txbroadcastframes_g (%08x) = %08lx\n",
+	    MMC_TXBROADCASTFRAMES_G_LO, stats->txbroadcastframes_g);
+	axgbe_printf(1, "rmon txmulticastframes_g (%08x) = %08lx\n",
+	    MMC_TXMULTICASTFRAMES_G_LO, stats->txmulticastframes_g);
+	axgbe_printf(1, "rmon tx64octets_gb (%08x) = %08lx\n",
+	    MMC_TX64OCTETS_GB_LO, stats->tx64octets_gb);
+	axgbe_printf(1, "rmon tx65to127octets_gb (%08x) = %08lx\n",
+	    MMC_TX65TO127OCTETS_GB_LO, stats->tx65to127octets_gb);
+	axgbe_printf(1, "rmon tx128to255octets_gb (%08x) = %08lx\n",
+	    MMC_TX128TO255OCTETS_GB_LO, stats->tx128to255octets_gb);
+	axgbe_printf(1, "rmon tx256to511octets_gb (%08x) = %08lx\n",
+	    MMC_TX256TO511OCTETS_GB_LO, stats->tx256to511octets_gb);
+	axgbe_printf(1, "rmon tx512to1023octets_gb (%08x) = %08lx\n",
+	    MMC_TX512TO1023OCTETS_GB_LO, stats->tx512to1023octets_gb);
+	axgbe_printf(1, "rmon tx1024tomaxoctets_gb (%08x) = %08lx\n",
+	    MMC_TX1024TOMAXOCTETS_GB_LO, stats->tx1024tomaxoctets_gb);
+	axgbe_printf(1, "rmon txunicastframes_gb (%08x) = %08lx\n",
+	    MMC_TXUNICASTFRAMES_GB_LO, stats->txunicastframes_gb);
+	axgbe_printf(1, "rmon txmulticastframes_gb (%08x) = %08lx\n",
+	    MMC_TXMULTICASTFRAMES_GB_LO, stats->txmulticastframes_gb);
+	axgbe_printf(1, "rmon txbroadcastframes_gb (%08x) = %08lx\n",
+	    MMC_TXBROADCASTFRAMES_GB_LO, stats->txbroadcastframes_gb);
+	axgbe_printf(1, "rmon txunderflowerror (%08x) = %08lx\n",
+	    MMC_TXUNDERFLOWERROR_LO, stats->txunderflowerror);
+	axgbe_printf(1, "rmon txoctetcount_g (%08x) = %08lx\n",
+	    MMC_TXOCTETCOUNT_G_LO, stats->txoctetcount_g);
+	axgbe_printf(1, "rmon txframecount_g (%08x) = %08lx\n",
+	    MMC_TXFRAMECOUNT_G_LO, stats->txframecount_g);
+	axgbe_printf(1, "rmon txpauseframes (%08x) = %08lx\n",
+	    MMC_TXPAUSEFRAMES_LO, stats->txpauseframes);
+	axgbe_printf(1, "rmon txvlanframes_g (%08x) = %08lx\n",
+	    MMC_TXVLANFRAMES_G_LO, stats->txvlanframes_g);
+	axgbe_printf(1, "rmon rxframecount_gb (%08x) = %08lx\n",
+	    MMC_RXFRAMECOUNT_GB_LO, stats->rxframecount_gb);
+	axgbe_printf(1, "rmon rxoctetcount_gb (%08x) = %08lx\n",
+	    MMC_RXOCTETCOUNT_GB_LO, stats->rxoctetcount_gb);
+	axgbe_printf(1, "rmon rxoctetcount_g (%08x) = %08lx\n",
+	    MMC_RXOCTETCOUNT_G_LO, stats->rxoctetcount_g);
+	axgbe_printf(1, "rmon rxbroadcastframes_g (%08x) = %08lx\n",
+	    MMC_RXBROADCASTFRAMES_G_LO, stats->rxbroadcastframes_g);
+	axgbe_printf(1, "rmon rxmulticastframes_g (%08x) = %08lx\n",
+	    MMC_RXMULTICASTFRAMES_G_LO, stats->rxmulticastframes_g);
+	axgbe_printf(1, "rmon rxcrcerror (%08x) = %08lx\n",
+	    MMC_RXCRCERROR_LO, stats->rxcrcerror);
+	axgbe_printf(1, "rmon rxrunterror (%08x) = %08lx\n",
+	    MMC_RXRUNTERROR, stats->rxrunterror);
+	axgbe_printf(1, "rmon rxjabbererror (%08x) = %08lx\n",
+	    MMC_RXJABBERERROR, stats->rxjabbererror);
+	axgbe_printf(1, "rmon rxundersize_g (%08x) = %08lx\n",
+	    MMC_RXUNDERSIZE_G, stats->rxundersize_g);
+	axgbe_printf(1, "rmon rxoversize_g (%08x) = %08lx\n",
+	    MMC_RXOVERSIZE_G, stats->rxoversize_g);
+	axgbe_printf(1, "rmon rx64octets_gb (%08x) = %08lx\n",
+	    MMC_RX64OCTETS_GB_LO, stats->rx64octets_gb);
+	axgbe_printf(1, "rmon rx65to127octets_gb (%08x) = %08lx\n",
+	    MMC_RX65TO127OCTETS_GB_LO, stats->rx65to127octets_gb);
+	axgbe_printf(1, "rmon rx128to255octets_gb (%08x) = %08lx\n",
+	    MMC_RX128TO255OCTETS_GB_LO, stats->rx128to255octets_gb);
+	axgbe_printf(1, "rmon rx256to511octets_gb (%08x) = %08lx\n",
+	    MMC_RX256TO511OCTETS_GB_LO, stats->rx256to511octets_gb);
+	axgbe_printf(1, "rmon rx512to1023octets_gb (%08x) = %08lx\n",
+	    MMC_RX512TO1023OCTETS_GB_LO, stats->rx512to1023octets_gb);
+	axgbe_printf(1, "rmon rx1024tomaxoctets_gb (%08x) = %08lx\n",
+	    MMC_RX1024TOMAXOCTETS_GB_LO, stats->rx1024tomaxoctets_gb);
+	axgbe_printf(1, "rmon rxunicastframes_g (%08x) = %08lx\n",
+	    MMC_RXUNICASTFRAMES_G_LO, stats->rxunicastframes_g);
+	axgbe_printf(1, "rmon rxlengtherror (%08x) = %08lx\n",
+	    MMC_RXLENGTHERROR_LO, stats->rxlengtherror);
+	axgbe_printf(1, "rmon rxoutofrangetype (%08x) = %08lx\n",
+	    MMC_RXOUTOFRANGETYPE_LO, stats->rxoutofrangetype);
+	axgbe_printf(1, "rmon rxpauseframes (%08x) = %08lx\n",
+	    MMC_RXPAUSEFRAMES_LO, stats->rxpauseframes);
+	axgbe_printf(1, "rmon rxfifooverflow (%08x) = %08lx\n",
+	    MMC_RXFIFOOVERFLOW_LO, stats->rxfifooverflow);
+	axgbe_printf(1, "rmon rxvlanframes_gb (%08x) = %08lx\n",
+	    MMC_RXVLANFRAMES_GB_LO, stats->rxvlanframes_gb);
+	axgbe_printf(1, "rmon rxwatchdogerror (%08x) = %08lx\n",
+	    MMC_RXWATCHDOGERROR, stats->rxwatchdogerror);
+}
+
+/*
+ * Debug dump of the I2C controller registers.
+ * NOTE(review): the raw hex offsets appear to be the standard
+ * Synopsys DesignWare APB I2C register map (IC_CON at 0x00, etc.) --
+ * confirm against xgbe-i2c.c / the IP databook.
+ */
+void
+xgbe_dump_i2c_registers(struct xgbe_prv_data *pdata)
+{
+	axgbe_printf(1, "*************** I2C Registers **************\n");
+	axgbe_printf(1, "  IC_CON             : %010x\n",
+	    XI2C_IOREAD(pdata, 0x00));
+	axgbe_printf(1, "  IC_TAR             : %010x\n",
+	    XI2C_IOREAD(pdata, 0x04));
+	axgbe_printf(1, "  IC_HS_MADDR        : %010x\n",
+	    XI2C_IOREAD(pdata, 0x0c));
+	axgbe_printf(1, "  IC_INTR_STAT       : %010x\n",
+	    XI2C_IOREAD(pdata, 0x2c));
+	axgbe_printf(1, "  IC_INTR_MASK       : %010x\n",
+	    XI2C_IOREAD(pdata, 0x30));
+	axgbe_printf(1, "  IC_RAW_INTR_STAT   : %010x\n",
+	    XI2C_IOREAD(pdata, 0x34));
+	axgbe_printf(1, "  IC_RX_TL           : %010x\n",
+	    XI2C_IOREAD(pdata, 0x38));
+	axgbe_printf(1, "  IC_TX_TL           : %010x\n",
+	    XI2C_IOREAD(pdata, 0x3c));
+	axgbe_printf(1, "  IC_ENABLE          : %010x\n",
+	    XI2C_IOREAD(pdata, 0x6c));
+	axgbe_printf(1, "  IC_STATUS          : %010x\n",
+	    XI2C_IOREAD(pdata, 0x70));
+	axgbe_printf(1, "  IC_TXFLR           : %010x\n",
+	    XI2C_IOREAD(pdata, 0x74));
+	axgbe_printf(1, "  IC_RXFLR           : %010x\n",
+	    XI2C_IOREAD(pdata, 0x78));
+	axgbe_printf(1, "  IC_ENABLE_STATUS   : %010x\n",
+	    XI2C_IOREAD(pdata, 0x9c));
+	axgbe_printf(1, "  IC_COMP_PARAM1     : %010x\n",
+	    XI2C_IOREAD(pdata, 0xf4));
+}
+
+/*
+ * Print the active-VLAN bitmap, eight bitmap words per output line.
+ */
+static void
+xgbe_dump_active_vlans(struct xgbe_prv_data *pdata)
+{
+	int word;
+
+	for (word = 0; word < BITS_TO_LONGS(VLAN_NVID); word++) {
+		/* Break the line before every eighth word (not the first). */
+		if (word != 0 && (word % 8) == 0)
+			axgbe_printf(1, "\n");
+		axgbe_printf(1, "vlans[%d]: 0x%08lx ", word,
+		    pdata->active_vlans[word]);
+	}
+	axgbe_printf(1, "\n");
+}
+
+/*
+ * Apply the driver's default hardware configuration: DMA bus/burst
+ * settings, MTL store-and-forward and threshold modes, flow control
+ * defaults, and RSS enable.  Called before per-system tuning.
+ */
+static void
+xgbe_default_config(struct xgbe_prv_data *pdata)
+{
+	/* DMA bus settings: burst length, PBL, address-aligned beats,
+	 * and read/write outstanding-request limits. */
+	pdata->blen = DMA_SBMR_BLEN_64;
+	pdata->pbl = DMA_PBL_128;
+	pdata->aal = 1;
+	pdata->rd_osr_limit = 8;
+	pdata->wr_osr_limit = 8;
+	/* TX: store-and-forward; RX: threshold mode. */
+	pdata->tx_sf_mode = MTL_TSF_ENABLE;
+	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
+	pdata->tx_osp_mode = DMA_OSP_ENABLE;
+	pdata->rx_sf_mode = MTL_RSF_DISABLE;
+	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
+	/* Pause frames negotiated and enabled in both directions. */
+	pdata->pause_autoneg = 1;
+	pdata->tx_pause = 1;
+	pdata->rx_pause = 1;
+	pdata->phy_speed = SPEED_UNKNOWN;
+	pdata->power_down = 0;
+	pdata->enable_rss = 1;
+}
+
+/*
+ * Placeholder for attaching driver sysctl nodes under the device's
+ * sysctl tree.  Currently only looks up the context/tree; no nodes are
+ * added yet, so the locals are effectively unused (expect set-but-
+ * unused warnings until nodes are registered here).
+ */
+static void
+axgbe_setup_sysctl(struct xgbe_prv_data *pdata)
+{
+	struct sysctl_ctx_list *clist;
+	struct sysctl_oid *parent;
+	struct sysctl_oid_list *top;
+
+	clist = device_get_sysctl_ctx(pdata->dev);
+	parent = device_get_sysctl_tree(pdata->dev);
+	top = SYSCTL_CHILDREN(parent);
+}
+
+/*
+ * iflib attach-post handler: runs after queues/interrupts are set up.
+ * Resets the MAC, applies default configuration, sizes rings and
+ * FIFOs, seeds the RSS key and lookup table, initializes and resets
+ * the PHY, registers the supported media types, and leaves the device
+ * marked XGBE_DOWN until it is first brought up.
+ */
+static int
+axgbe_if_attach_post(if_ctx_t ctx)
+{
+	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+	struct xgbe_prv_data *pdata = &sc->pdata;
+	struct ifnet *ifp = pdata->netdev;
+	struct xgbe_phy_if *phy_if = &pdata->phy_if;
+	struct xgbe_hw_if *hw_if = &pdata->hw_if;
+	if_softc_ctx_t scctx = sc->scctx;
+	int i, ret;
+
+	/* Initialize ECC timestamps */
+	pdata->tx_sec_period = ticks;
+	pdata->tx_ded_period = ticks;
+	pdata->rx_sec_period = ticks;
+	pdata->rx_ded_period = ticks;
+	pdata->desc_sec_period = ticks;
+	pdata->desc_ded_period = ticks;
+
+	/* Reset the hardware */
+	ret = hw_if->exit(&sc->pdata);
+	if (ret)
+		axgbe_error("%s: exit error %d\n", __func__, ret);
+
+	/* Configure the defaults */
+	xgbe_default_config(pdata);
+
+	/* Set default max values if not provided */
+	if (!pdata->tx_max_fifo_size)
+		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
+	if (!pdata->rx_max_fifo_size)
+		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
+
+	DBGPR("%s: tx fifo 0x%x rx fifo 0x%x\n", __func__,
+	    pdata->tx_max_fifo_size, pdata->rx_max_fifo_size);
+
+	/* Set and validate the number of descriptors for a ring */
+	MPASS(powerof2(XGBE_TX_DESC_CNT));
+	pdata->tx_desc_count = XGBE_TX_DESC_CNT;
+	MPASS(powerof2(XGBE_RX_DESC_CNT));
+	pdata->rx_desc_count = XGBE_RX_DESC_CNT;
+
+	/* Adjust the number of queues based on interrupts assigned */
+	if (pdata->channel_irq_count) {
+		pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
+		    pdata->channel_irq_count);
+		pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
+		    pdata->channel_irq_count);
+
+		DBGPR("adjusted TX %u/%u RX %u/%u\n",
+		    pdata->tx_ring_count, pdata->tx_q_count,
+		    pdata->rx_ring_count, pdata->rx_q_count);
+	}
+
+	/* Set channel count based on interrupts assigned */
+	pdata->channel_count = max_t(unsigned int, scctx->isc_ntxqsets,
+	    scctx->isc_nrxqsets);
+	DBGPR("Channel count set to: %u\n", pdata->channel_count);
+
+	/* Get RSS key */
+#ifdef	RSS
+	rss_getkey((uint8_t *)pdata->rss_key);
+#else
+	/*
+	 * NOTE(review): ARRAY_SIZE yields an element count; this is only
+	 * a byte count if rss_key is a byte array -- confirm in xgbe.h.
+	 */
+	arc4rand(&pdata->rss_key, ARRAY_SIZE(pdata->rss_key), 0);
+#endif
+	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
+
+	/* Initialize the PHY device */
+	pdata->sysctl_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
+	phy_if->phy_init(pdata);
+
+	/* Set the coalescing */
+	xgbe_init_rx_coalesce(&sc->pdata);
+	xgbe_init_tx_coalesce(&sc->pdata);
+
+	/* Advertise every link mode the PHY layer can select. */
+	ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
+	ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
+	ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
+	ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
+	ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
+	ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
+	ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
+	ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+	ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SGMII, 0, NULL);
+	ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
+	ifmedia_add(sc->media, IFM_ETHER | IFM_100_SGMII, 0, NULL);
+	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
+
+	/* Initialize the phy */
+	pdata->phy_link = -1;
+	pdata->phy_speed = SPEED_UNKNOWN;
+	ret = phy_if->phy_reset(pdata);
+	if (ret)
+		return (ret);
+
+	/* Calculate the Rx buffer size before allocating rings */
+	ret = xgbe_calc_rx_buf_size(pdata->netdev, if_getmtu(pdata->netdev));
+	pdata->rx_buf_size = ret;
+	DBGPR("%s: rx_buf_size %d\n", __func__, ret);
+
+	/* Setup RSS lookup table */
+	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
+		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+		    i % pdata->rx_ring_count);
+
+	/*
+	 * Mark the device down until it is initialized, which happens
+	 * when the device is accessed first (for configuring the iface,
+	 * eg: setting IP)
+	 */
+	set_bit(XGBE_DOWN, &pdata->dev_state);
+
+	DBGPR("mtu %d\n", ifp->if_mtu);
+	/* NOTE(review): "+ 18" is presumably Ethernet header (14) plus
+	 * FCS (4) -- confirm whether VLAN headroom is handled elsewhere. */
+	scctx->isc_max_frame_size = ifp->if_mtu + 18;
+	scctx->isc_min_frame_size = XGMAC_MIN_PACKET;
+
+	axgbe_setup_sysctl(pdata);
+
+	axgbe_sysctl_init(pdata);
+
+	return (0);
+} /* axgbe_if_attach_post */
+
+/*
+ * Tear down and release one interrupt resource pair.  Either the
+ * handler tag or the resource may be NULL (never set up), in which
+ * case that step is skipped.
+ */
+static void
+xgbe_free_intr(struct xgbe_prv_data *pdata, struct resource *res, void *tag,
+    int rid)
+{
+	if (tag)
+		bus_teardown_intr(pdata->dev, res, tag);
+
+	if (res)
+		bus_release_resource(pdata->dev, SYS_RES_IRQ, rid, res);
+}
+
+/*
+ * Release every interrupt the driver owns: the iflib-managed device
+ * interrupt, the directly-allocated ECC/I2C/AN interrupts, and one
+ * per-channel DMA interrupt per RX queue set.
+ */
+static void
+axgbe_interrupts_free(if_ctx_t ctx)
+{
+	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+	struct xgbe_prv_data *pdata = &sc->pdata;
+	if_softc_ctx_t scctx = sc->scctx;
+	struct xgbe_channel *channel;
+	struct if_irq irq;
+	int i;
+
+	axgbe_printf(2, "%s: mode %d\n", __func__, scctx->isc_intr);
+
+	/* Free dev_irq */
+	iflib_irq_free(ctx, &pdata->dev_irq);
+
+	/* Free ecc_irq */
+	xgbe_free_intr(pdata, pdata->ecc_irq_res, pdata->ecc_irq_tag,
+	    pdata->ecc_rid);
+
+	/* Free i2c_irq */
+	xgbe_free_intr(pdata, pdata->i2c_irq_res, pdata->i2c_irq_tag,
+	    pdata->i2c_rid);
+
+	/* Free an_irq */
+	xgbe_free_intr(pdata, pdata->an_irq_res, pdata->an_irq_tag,
+	    pdata->an_rid);
+
+	for (i = 0; i < scctx->isc_nrxqsets; i++) {
+
+		channel = pdata->channel[i];
+		axgbe_printf(2, "%s: rid %d\n", __func__, channel->dma_irq_rid);
+		/* Build a temporary if_irq so iflib can free the
+		 * channel's MSI-X resource/tag pair. */
+		irq.ii_res = channel->dma_irq_res;
+		irq.ii_tag = channel->dma_irq_tag;
+		iflib_irq_free(ctx, &irq);
+	}
+}
+
+/*
+ * iflib detach handler: shut down the PHY layer, free interrupts and
+ * the driver taskqueue, release the XGMAC/XPCS memory BARs, and free
+ * the VLAN bitmap and sysctl state.  Always returns 0.
+ */
+static int
+axgbe_if_detach(if_ctx_t ctx)
+{
+	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+	struct xgbe_prv_data *pdata = &sc->pdata;
+	struct xgbe_phy_if *phy_if = &pdata->phy_if;
+	struct resource *mac_res[2];
+
+	/* Order must match axgbe_pci_mac_spec for the release below. */
+	mac_res[0] = pdata->xgmac_res;
+	mac_res[1] = pdata->xpcs_res;
+
+	phy_if->phy_exit(pdata);
+
+	/* Free Interrupts */
+	axgbe_interrupts_free(ctx);
+
+	/* Free workqueues */
+	taskqueue_free(pdata->dev_workqueue);
+
+	/* Release bus resources */
+	bus_release_resources(iflib_get_dev(ctx), axgbe_pci_mac_spec, mac_res);
+
+	/* Free VLAN bitmap */
+	free(pdata->active_vlans, M_AXGBE);
+
+	axgbe_sysctl_exit(pdata);
+
+	return (0);
+} /* axgbe_if_detach */
+
+/*
+ * Bring the hardware up: initialize the MAC, start the PHY, enable
+ * TX/RX and the service timer, then clear XGBE_DOWN.  If the PHY
+ * fails to start, the MAC is reset again and the device stays down.
+ * The register dumps at the end emit output only at a sufficient
+ * axgbe_printf debug level.
+ */
+static void
+axgbe_pci_init(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_if *phy_if = &pdata->phy_if;
+	struct xgbe_hw_if *hw_if = &pdata->hw_if;
+	int ret = 0;
+
+	hw_if->init(pdata);
+
+	ret = phy_if->phy_start(pdata);
+	if (ret) {
+		axgbe_error("%s: phy start %d\n", __func__, ret);
+		/* Undo hw_if->init() so the device is left quiesced. */
+		ret = hw_if->exit(pdata);
+		if (ret)
+			axgbe_error("%s: exit error %d\n", __func__, ret);
+		return;
+	}
+
+	hw_if->enable_tx(pdata);
+	hw_if->enable_rx(pdata);
+
+	xgbe_start_timers(pdata);
+
+	clear_bit(XGBE_DOWN, &pdata->dev_state);
+
+	xgbe_dump_phy_registers(pdata);
+	xgbe_dump_prop_registers(pdata);
+	xgbe_dump_dma_registers(pdata, -1);
+	xgbe_dump_mtl_registers(pdata);
+	xgbe_dump_mac_registers(pdata);
+	xgbe_dump_rmon_counters(pdata);
+}
+
+/*
+ * iflib init handler: bring the hardware up via the common PCI
+ * initialization path.
+ */
+static void
+axgbe_if_init(if_ctx_t ctx)
+{
+	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+
+	axgbe_pci_init(&sc->pdata);
+}
+
+/*
+ * Bring the hardware down: stop the service timer, drain queued work,
+ * disable TX/RX, stop the PHY, reset the MAC, and mark the device
+ * XGBE_DOWN.  A no-op if the device is already down.
+ */
+static void
+axgbe_pci_stop(if_ctx_t ctx)
+{
+	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+	struct xgbe_prv_data *pdata = &sc->pdata;
+	struct xgbe_phy_if *phy_if = &pdata->phy_if;
+	struct xgbe_hw_if *hw_if = &pdata->hw_if;
+	int ret;
+
+	if (__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state))) {
+		axgbe_printf(1, "%s: Stopping when XGBE_DOWN\n", __func__);
+		return;
+	}
+
+	/* Stop the timer first so no new service work gets queued,
+	 * then wait out anything already on the taskqueue. */
+	xgbe_stop_timers(pdata);
+	taskqueue_drain_all(pdata->dev_workqueue);
+
+	hw_if->disable_tx(pdata);
+	hw_if->disable_rx(pdata);
+
+	phy_if->phy_stop(pdata);
+
+	ret = hw_if->exit(pdata);
+	if (ret)
+		axgbe_error("%s: exit error %d\n", __func__, ret);
+
+	set_bit(XGBE_DOWN, &pdata->dev_state);
+}
+
+static void
+axgbe_if_stop(if_ctx_t ctx)
+{
+ axgbe_pci_stop(ctx);
+}
+
+static void
+axgbe_if_disable_intr(if_ctx_t ctx)
+{
+ /* TODO - implement */
+}
+
+static void
+axgbe_if_enable_intr(if_ctx_t ctx)
+{
+ /* TODO - implement */
+}
+
+static int
+axgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *va, uint64_t *pa, int ntxqs,
+ int ntxqsets)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ if_softc_ctx_t scctx = sc->scctx;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *tx_ring;
+ int i, j, k;
+
+ MPASS(scctx->isc_ntxqsets > 0);
+ MPASS(scctx->isc_ntxqsets == ntxqsets);
+ MPASS(ntxqs == 1);
+
+ axgbe_printf(1, "%s: txqsets %d/%d txqs %d\n", __func__,
+ scctx->isc_ntxqsets, ntxqsets, ntxqs);
+
+ for (i = 0 ; i < ntxqsets; i++) {
+
+ channel = pdata->channel[i];
+
+ tx_ring = (struct xgbe_ring*)malloc(ntxqs *
+ sizeof(struct xgbe_ring), M_AXGBE, M_NOWAIT | M_ZERO);
+
+ if (tx_ring == NULL) {
+ axgbe_error("Unable to allocate TX ring memory\n");
+ goto tx_ring_fail;
+ }
+
+ channel->tx_ring = tx_ring;
+
+ for (j = 0; j < ntxqs; j++, tx_ring++) {
+ tx_ring->rdata =
+ (struct xgbe_ring_data*)malloc(scctx->isc_ntxd[j] *
+ sizeof(struct xgbe_ring_data), M_AXGBE, M_NOWAIT);
+
+ /* Get the virtual & physical address of hw queues */
+ tx_ring->rdesc = (struct xgbe_ring_desc *)va[i*ntxqs + j];
+ tx_ring->rdesc_paddr = pa[i*ntxqs + j];
+ tx_ring->rdesc_count = scctx->isc_ntxd[j];
+ spin_lock_init(&tx_ring->lock);
+ }
+ }
+
+ axgbe_printf(1, "allocated for %d tx queues\n", scctx->isc_ntxqsets);
+
+ return (0);
+
+tx_ring_fail:
+
+ for (j = 0; j < i ; j++) {
+
+ channel = pdata->channel[j];
+
+ tx_ring = channel->tx_ring;
+ for (k = 0; k < ntxqs ; k++, tx_ring++) {
+ if (tx_ring && tx_ring->rdata)
+ free(tx_ring->rdata, M_AXGBE);
+ }
+ free(channel->tx_ring, M_AXGBE);
+
+ channel->tx_ring = NULL;
+ }
+
+ return (ENOMEM);
+
+} /* axgbe_if_tx_queues_alloc */
+
+static int
+axgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *va, uint64_t *pa, int nrxqs,
+ int nrxqsets)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ if_softc_ctx_t scctx = sc->scctx;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *rx_ring;
+ int i, j, k;
+
+ MPASS(scctx->isc_nrxqsets > 0);
+ MPASS(scctx->isc_nrxqsets == nrxqsets);
+ MPASS(nrxqs == 2);
+
+ axgbe_printf(1, "%s: rxqsets %d/%d rxqs %d\n", __func__,
+ scctx->isc_nrxqsets, nrxqsets, nrxqs);
+
+ for (i = 0 ; i < nrxqsets; i++) {
+
+ channel = pdata->channel[i];
+
+ rx_ring = (struct xgbe_ring*)malloc(nrxqs *
+ sizeof(struct xgbe_ring), M_AXGBE, M_NOWAIT | M_ZERO);
+
+ if (rx_ring == NULL) {
+ axgbe_error("Unable to allocate RX ring memory\n");
+ goto rx_ring_fail;
+ }
+
+ channel->rx_ring = rx_ring;
+
+ for (j = 0; j < nrxqs; j++, rx_ring++) {
+ rx_ring->rdata =
+ (struct xgbe_ring_data*)malloc(scctx->isc_nrxd[j] *
+ sizeof(struct xgbe_ring_data), M_AXGBE, M_NOWAIT);
+
+ /* Get the virtual and physical address of the hw queues */
+ rx_ring->rdesc = (struct xgbe_ring_desc *)va[i*nrxqs + j];
+ rx_ring->rdesc_paddr = pa[i*nrxqs + j];
+ rx_ring->rdesc_count = scctx->isc_nrxd[j];
+ spin_lock_init(&rx_ring->lock);
+ }
+ }
+
+ axgbe_printf(2, "allocated for %d rx queues\n", scctx->isc_nrxqsets);
+
+ return (0);
+
+rx_ring_fail:
+
+ for (j = 0 ; j < i ; j++) {
+
+ channel = pdata->channel[j];
+
+ rx_ring = channel->rx_ring;
+ for (k = 0; k < nrxqs ; k++, rx_ring++) {
+ if (rx_ring && rx_ring->rdata)
+ free(rx_ring->rdata, M_AXGBE);
+ }
+ free(channel->rx_ring, M_AXGBE);
+
+ channel->rx_ring = NULL;
+ }
+
+ return (ENOMEM);
+
+} /* axgbe_if_rx_queues_alloc */
+
+static void
+axgbe_if_queues_free(if_ctx_t ctx)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ if_softc_ctx_t scctx = sc->scctx;
+ if_shared_ctx_t sctx = sc->sctx;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *tx_ring;
+ struct xgbe_ring *rx_ring;
+ int i, j;
+
+ for (i = 0 ; i < scctx->isc_ntxqsets; i++) {
+
+ channel = pdata->channel[i];
+
+ tx_ring = channel->tx_ring;
+ for (j = 0; j < sctx->isc_ntxqs ; j++, tx_ring++) {
+ if (tx_ring && tx_ring->rdata)
+ free(tx_ring->rdata, M_AXGBE);
+ }
+ free(channel->tx_ring, M_AXGBE);
+ channel->tx_ring = NULL;
+ }
+
+ for (i = 0 ; i < scctx->isc_nrxqsets; i++) {
+
+ channel = pdata->channel[i];
+
+ rx_ring = channel->rx_ring;
+ for (j = 0; j < sctx->isc_nrxqs ; j++, rx_ring++) {
+ if (rx_ring && rx_ring->rdata)
+ free(rx_ring->rdata, M_AXGBE);
+ }
+ free(channel->rx_ring, M_AXGBE);
+ channel->rx_ring = NULL;
+ }
+
+ /* Free Channels */
+ for (i = 0; i < pdata->total_channel_count ; i++) {
+ free(pdata->channel[i], M_AXGBE);
+ pdata->channel[i] = NULL;
+ }
+
+ pdata->total_channel_count = 0;
+ pdata->channel_count = 0;
+} /* axgbe_if_queues_free */
+
+static void
+axgbe_if_vlan_register(if_ctx_t ctx, uint16_t vtag)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ if (!bit_test(pdata->active_vlans, vtag)) {
+ axgbe_printf(0, "Registering VLAN %d\n", vtag);
+
+ bit_set(pdata->active_vlans, vtag);
+ hw_if->update_vlan_hash_table(pdata);
+ pdata->num_active_vlans++;
+
+ axgbe_printf(1, "Total active vlans: %d\n",
+ pdata->num_active_vlans);
+ } else
+ axgbe_printf(0, "VLAN %d already registered\n", vtag);
+
+ xgbe_dump_active_vlans(pdata);
+}
+
+static void
+axgbe_if_vlan_unregister(if_ctx_t ctx, uint16_t vtag)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ if (pdata->num_active_vlans == 0) {
+ axgbe_printf(1, "No active VLANs to unregister\n");
+ return;
+ }
+
+ if (bit_test(pdata->active_vlans, vtag)){
+ axgbe_printf(0, "Un-Registering VLAN %d\n", vtag);
+
+ bit_clear(pdata->active_vlans, vtag);
+ hw_if->update_vlan_hash_table(pdata);
+ pdata->num_active_vlans--;
+
+ axgbe_printf(1, "Total active vlans: %d\n",
+ pdata->num_active_vlans);
+ } else
+ axgbe_printf(0, "VLAN %d already unregistered\n", vtag);
+
+ xgbe_dump_active_vlans(pdata);
+}
+
+#if __FreeBSD_version >= 1300000
+static bool
+axgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
+{
+ switch (event) {
+ case IFLIB_RESTART_VLAN_CONFIG:
+ default:
+ return (true);
+ }
+}
+#endif
+
+static int
+axgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ if_softc_ctx_t scctx = sc->scctx;
+ struct xgbe_channel *channel;
+ struct if_irq irq;
+ int i, error, rid = 0, flags;
+ char buf[16];
+
+ MPASS(scctx->isc_intr != IFLIB_INTR_LEGACY);
+
+ pdata->isr_as_tasklet = 1;
+
+ if (scctx->isc_intr == IFLIB_INTR_MSI) {
+ pdata->irq_count = 1;
+ pdata->channel_irq_count = 1;
+ return (0);
+ }
+
+ axgbe_printf(1, "%s: msix %d txqsets %d rxqsets %d\n", __func__, msix,
+ scctx->isc_ntxqsets, scctx->isc_nrxqsets);
+
+ flags = RF_ACTIVE;
+
+ /* DEV INTR SETUP */
+ rid++;
+ error = iflib_irq_alloc_generic(ctx, &pdata->dev_irq, rid,
+ IFLIB_INTR_ADMIN, axgbe_dev_isr, sc, 0, "dev_irq");
+ if (error) {
+ axgbe_error("Failed to register device interrupt rid %d name %s\n",
+ rid, "dev_irq");
+ return (error);
+ }
+
+ /* ECC INTR SETUP */
+ rid++;
+ pdata->ecc_rid = rid;
+ pdata->ecc_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ,
+ &rid, flags);
+ if (!pdata->ecc_irq_res) {
+ axgbe_error("failed to allocate IRQ for rid %d, name %s.\n",
+ rid, "ecc_irq");
+ return (ENOMEM);
+ }
+
+ error = bus_setup_intr(pdata->dev, pdata->ecc_irq_res, INTR_MPSAFE |
+ INTR_TYPE_NET, NULL, axgbe_ecc_isr, sc, &pdata->ecc_irq_tag);
+ if (error) {
+ axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n",
+ rid, "ecc_irq", error);
+ return (error);
+ }
+
+ /* I2C INTR SETUP */
+ rid++;
+ pdata->i2c_rid = rid;
+ pdata->i2c_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ,
+ &rid, flags);
+ if (!pdata->i2c_irq_res) {
+ axgbe_error("failed to allocate IRQ for rid %d, name %s.\n",
+ rid, "i2c_irq");
+ return (ENOMEM);
+ }
+
+ error = bus_setup_intr(pdata->dev, pdata->i2c_irq_res, INTR_MPSAFE |
+ INTR_TYPE_NET, NULL, axgbe_i2c_isr, sc, &pdata->i2c_irq_tag);
+ if (error) {
+ axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n",
+ rid, "i2c_irq", error);
+ return (error);
+ }
+
+ /* AN INTR SETUP */
+ rid++;
+ pdata->an_rid = rid;
+ pdata->an_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ,
+ &rid, flags);
+ if (!pdata->an_irq_res) {
+ axgbe_error("failed to allocate IRQ for rid %d, name %s.\n",
+ rid, "an_irq");
+ return (ENOMEM);
+ }
+
+ error = bus_setup_intr(pdata->dev, pdata->an_irq_res, INTR_MPSAFE |
+ INTR_TYPE_NET, NULL, axgbe_an_isr, sc, &pdata->an_irq_tag);
+ if (error) {
+ axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n",
+ rid, "an_irq", error);
+ return (error);
+ }
+
+ pdata->per_channel_irq = 1;
+ pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;
+ rid++;
+ for (i = 0; i < scctx->isc_nrxqsets; i++, rid++) {
+
+ channel = pdata->channel[i];
+
+ snprintf(buf, sizeof(buf), "rxq%d", i);
+ error = iflib_irq_alloc_generic(ctx, &irq, rid, IFLIB_INTR_RX,
+ axgbe_msix_que, channel, channel->queue_index, buf);
+
+ if (error) {
+ axgbe_error("Failed to allocated que int %d err: %d\n",
+ i, error);
+ return (error);
+ }
+
+ channel->dma_irq_rid = rid;
+ channel->dma_irq_res = irq.ii_res;
+ channel->dma_irq_tag = irq.ii_tag;
+ axgbe_printf(1, "%s: channel count %d idx %d irq %d\n",
+ __func__, scctx->isc_nrxqsets, i, rid);
+ }
+ pdata->irq_count = msix;
+ pdata->channel_irq_count = scctx->isc_nrxqsets;
+
+ for (i = 0; i < scctx->isc_ntxqsets; i++) {
+
+ channel = pdata->channel[i];
+
+ snprintf(buf, sizeof(buf), "txq%d", i);
+ irq.ii_res = channel->dma_irq_res;
+ iflib_softirq_alloc_generic(ctx, &irq, IFLIB_INTR_TX, channel,
+ channel->queue_index, buf);
+ }
+
+ return (0);
+} /* axgbe_if_msix_intr_assign */
+
+static int
+xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata, struct xgbe_channel *channel)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ enum xgbe_int int_id;
+
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_RI;
+ else
+ return (-1);
+
+ axgbe_printf(1, "%s channel: %d rx_tx interrupt enabled %d\n",
+ __func__, channel->queue_index, int_id);
+ return (hw_if->enable_int(channel, int_id));
+}
+
+static void
+xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata, struct xgbe_channel *channel)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ enum xgbe_int int_id;
+
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_RI;
+ else
+ return;
+
+ axgbe_printf(1, "%s channel: %d rx_tx interrupt disabled %d\n",
+ __func__, channel->queue_index, int_id);
+ hw_if->disable_int(channel, int_id);
+}
+
+static void
+xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->channel_count; i++)
+ xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
+}
+
+static int
+axgbe_msix_que(void *arg)
+{
+ struct xgbe_channel *channel = (struct xgbe_channel *)arg;
+ struct xgbe_prv_data *pdata = channel->pdata;
+ unsigned int dma_ch_isr, dma_status;
+
+ axgbe_printf(1, "%s: Channel: %d SR 0x%04x DSR 0x%04x IER:0x%04x D_ISR:0x%04x M_ISR:0x%04x\n",
+ __func__, channel->queue_index,
+ XGMAC_DMA_IOREAD(channel, DMA_CH_SR),
+ XGMAC_DMA_IOREAD(channel, DMA_CH_DSR),
+ XGMAC_DMA_IOREAD(channel, DMA_CH_IER),
+ XGMAC_IOREAD(pdata, DMA_ISR),
+ XGMAC_IOREAD(pdata, MAC_ISR));
+
+ dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+
+ /* Disable Tx and Rx channel interrupts */
+ xgbe_disable_rx_tx_int(pdata, channel);
+
+ /* Clear the interrupts */
+ dma_status = 0;
+ XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
+ XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);
+
+ return (FILTER_SCHEDULE_THREAD);
+}
+
+static int
+axgbe_dev_isr(void *arg)
+{
+ struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg;
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ struct xgbe_channel *channel;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int i, dma_isr, dma_ch_isr;
+ unsigned int mac_isr, mac_mdioisr;
+ int ret = FILTER_HANDLED;
+
+ dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
+ axgbe_printf(2, "%s DMA ISR: 0x%x\n", __func__, dma_isr);
+
+ if (!dma_isr)
+ return (FILTER_HANDLED);
+
+ for (i = 0; i < pdata->channel_count; i++) {
+
+ if (!(dma_isr & (1 << i)))
+ continue;
+
+ channel = pdata->channel[i];
+
+ dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+ axgbe_printf(2, "%s: channel %d SR 0x%x DSR 0x%x\n", __func__,
+ channel->queue_index, dma_ch_isr, XGMAC_DMA_IOREAD(channel,
+ DMA_CH_DSR));
+
+ /*
+ * The TI or RI interrupt bits may still be set even if using
+ * per channel DMA interrupts. Check to be sure those are not
+ * enabled before using the private data napi structure.
+ */
+ if (!pdata->per_channel_irq &&
+ (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
+ XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
+
+ /* Disable Tx and Rx interrupts */
+ xgbe_disable_rx_tx_ints(pdata);
+ } else {
+
+ /*
+ * Don't clear Rx/Tx status if doing per channel DMA
+ * interrupts, these will be cleared by the ISR for
+ * per channel DMA interrupts
+ */
+ XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
+ XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
+ }
+
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
+ pdata->ext_stats.rx_buffer_unavailable++;
+
+ /* Restart the device on a Fatal Bus Error */
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
+ axgbe_error("%s: Fatal bus error reported 0x%x\n",
+ __func__, dma_ch_isr);
+
+ /* Clear all interrupt signals */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
+
+ ret = FILTER_SCHEDULE_THREAD;
+ }
+
+ if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
+
+ mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
+ axgbe_printf(2, "%s MAC ISR: 0x%x\n", __func__, mac_isr);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
+ hw_if->tx_mmc_int(pdata);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
+ hw_if->rx_mmc_int(pdata);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
+ mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);
+
+ if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
+ SNGLCOMPINT))
+ wakeup_one(pdata);
+ }
+
+ }
+
+ return (ret);
+} /* axgbe_dev_isr */
+
+static void
+axgbe_i2c_isr(void *arg)
+{
+ struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg;
+
+ sc->pdata.i2c_if.i2c_isr(&sc->pdata);
+}
+
+static void
+axgbe_ecc_isr(void *arg)
+{
+ /* TODO - implement */
+}
+
+static void
+axgbe_an_isr(void *arg)
+{
+ struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg;
+
+ sc->pdata.phy_if.an_isr(&sc->pdata);
+}
+
+static int
+axgbe_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ int ret;
+
+ if (qid < pdata->tx_q_count) {
+ ret = xgbe_enable_rx_tx_int(pdata, pdata->channel[qid]);
+ if (ret) {
+ axgbe_error("Enable TX INT failed\n");
+ return (ret);
+ }
+ } else
+ axgbe_error("Queue ID exceed channel count\n");
+
+ return (0);
+}
+
+static int
+axgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ int ret;
+
+ if (qid < pdata->rx_q_count) {
+ ret = xgbe_enable_rx_tx_int(pdata, pdata->channel[qid]);
+ if (ret) {
+ axgbe_error("Enable RX INT failed\n");
+ return (ret);
+ }
+ } else
+ axgbe_error("Queue ID exceed channel count\n");
+
+ return (0);
+}
+
+static void
+axgbe_if_update_admin_status(if_ctx_t ctx)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+ struct xgbe_prv_data *pdata = &sc->pdata;
+
+ axgbe_printf(1, "%s: phy_link %d status %d speed %d\n", __func__,
+ pdata->phy_link, sc->link_status, pdata->phy.speed);
+
+ if (pdata->phy_link < 0)
+ return;
+
+ if (pdata->phy_link) {
+ if (sc->link_status == LINK_STATE_DOWN) {
+ sc->link_status = LINK_STATE_UP;
+ if (pdata->phy.speed & SPEED_10000)
+ iflib_link_state_change(ctx, LINK_STATE_UP,
+ IF_Gbps(10));
+ else if (pdata->phy.speed & SPEED_2500)
+ iflib_link_state_change(ctx, LINK_STATE_UP,
+ IF_Gbps(2.5));
+ else if (pdata->phy.speed & SPEED_1000)
+ iflib_link_state_change(ctx, LINK_STATE_UP,
+ IF_Gbps(1));
+ else if (pdata->phy.speed & SPEED_100)
+ iflib_link_state_change(ctx, LINK_STATE_UP,
+ IF_Mbps(100));
+ else if (pdata->phy.speed & SPEED_10)
+ iflib_link_state_change(ctx, LINK_STATE_UP,
+ IF_Mbps(10));
+ }
+ } else {
+ if (sc->link_status == LINK_STATE_UP) {
+ sc->link_status = LINK_STATE_DOWN;
+ iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
+ }
+ }
+}
+
+static int
+axgbe_if_media_change(if_ctx_t ctx)
+{
+	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+	struct ifmedia *ifm = iflib_get_media(ctx);
+
+	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+		return (EINVAL);	/* validate before locking; avoids leaking an_mutex */
+
+	sx_xlock(&sc->pdata.an_mutex);
+	switch (IFM_SUBTYPE(ifm->ifm_media)) {
+	case IFM_10G_KR:
+		sc->pdata.phy.speed = SPEED_10000;
+		sc->pdata.phy.autoneg = AUTONEG_DISABLE;
+		break;
+	case IFM_2500_KX:
+		sc->pdata.phy.speed = SPEED_2500;
+		sc->pdata.phy.autoneg = AUTONEG_DISABLE;
+		break;
+	case IFM_1000_KX:
+		sc->pdata.phy.speed = SPEED_1000;
+		sc->pdata.phy.autoneg = AUTONEG_DISABLE;
+		break;
+	case IFM_100_TX:
+		sc->pdata.phy.speed = SPEED_100;
+		sc->pdata.phy.autoneg = AUTONEG_DISABLE;
+		break;
+	case IFM_AUTO:
+		sc->pdata.phy.autoneg = AUTONEG_ENABLE;
+		break;
+	}
+	sx_xunlock(&sc->pdata.an_mutex);
+
+	return (-sc->pdata.phy_if.phy_config_aneg(&sc->pdata));
+}
+
+static int
+axgbe_if_promisc_set(if_ctx_t ctx, int flags)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+
+ if (XGMAC_IOREAD_BITS(&sc->pdata, MAC_PFR, PR) == 1)
+ return (0);
+
+ XGMAC_IOWRITE_BITS(&sc->pdata, MAC_PFR, PR, 1);
+ XGMAC_IOWRITE_BITS(&sc->pdata, MAC_PFR, VTFE, 0);
+
+ return (0);
+}
+
+static uint64_t
+axgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
+
+ pdata->hw_if.read_mmc_stats(pdata);
+
+ switch(cnt) {
+ case IFCOUNTER_IPACKETS:
+ return (pstats->rxframecount_gb);
+ case IFCOUNTER_IERRORS:
+ return (pstats->rxframecount_gb - pstats->rxbroadcastframes_g -
+ pstats->rxmulticastframes_g - pstats->rxunicastframes_g);
+ case IFCOUNTER_OPACKETS:
+ return (pstats->txframecount_gb);
+ case IFCOUNTER_OERRORS:
+ return (pstats->txframecount_gb - pstats->txframecount_g);
+ case IFCOUNTER_IBYTES:
+ return (pstats->rxoctetcount_gb);
+ case IFCOUNTER_OBYTES:
+ return (pstats->txoctetcount_gb);
+ default:
+ return (if_get_counter_default(ifp, cnt));
+ }
+}
+
+static int
+axgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ int ret;
+
+ if (mtu > XGMAC_JUMBO_PACKET_MTU)
+ return (EINVAL);
+
+ ret = xgbe_calc_rx_buf_size(pdata->netdev, mtu);
+ pdata->rx_buf_size = ret;
+ axgbe_printf(1, "%s: rx_buf_size %d\n", __func__, ret);
+
+ sc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ return (0);
+}
+
+static void
+axgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
+{
+ struct axgbe_if_softc *sc = iflib_get_softc(ctx);
+ struct xgbe_prv_data *pdata = &sc->pdata;
+
+ ifmr->ifm_status = IFM_AVALID;
+ if (!sc->pdata.phy.link)
+ return;
+
+ ifmr->ifm_active = IFM_ETHER;
+ ifmr->ifm_status |= IFM_ACTIVE;
+
+ axgbe_printf(1, "Speed 0x%x Mode %d\n", sc->pdata.phy.speed,
+ pdata->phy_if.phy_impl.cur_mode(pdata));
+ pdata->phy_if.phy_impl.get_type(pdata, ifmr);
+
+ ifmr->ifm_active |= IFM_FDX;
+ ifmr->ifm_active |= IFM_ETH_TXPAUSE;
+ ifmr->ifm_active |= IFM_ETH_RXPAUSE;
+}
diff --git a/sys/dev/axgbe/xgbe-common.h b/sys/dev/axgbe/xgbe-common.h
index bc081352bf53..dc13310dd4a3 100644
--- a/sys/dev/axgbe/xgbe-common.h
+++ b/sys/dev/axgbe/xgbe-common.h
@@ -1,13 +1,13 @@
/*
* AMD 10Gb Ethernet driver
*
+ * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc.
+ *
* This file is available to you under your choice of the following two
* licenses:
*
* License 1: GPLv2
*
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or (at
@@ -56,9 +56,6 @@
*
* License 2: Modified BSD
*
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
@@ -128,52 +125,49 @@
#define DMA_ISR 0x3008
#define DMA_AXIARCR 0x3010
#define DMA_AXIAWCR 0x3018
+#define DMA_AXIAWARCR 0x301c
#define DMA_DSR0 0x3020
#define DMA_DSR1 0x3024
+#define DMA_DSR2 0x3028
+#define DMA_DSR3 0x302C
+#define DMA_DSR4 0x3030
+#define DMA_TXEDMACR 0x3040
+#define DMA_RXEDMACR 0x3044
/* DMA register entry bit positions and sizes */
-#define DMA_AXIARCR_DRC_INDEX 0
-#define DMA_AXIARCR_DRC_WIDTH 4
-#define DMA_AXIARCR_DRD_INDEX 4
-#define DMA_AXIARCR_DRD_WIDTH 2
-#define DMA_AXIARCR_TEC_INDEX 8
-#define DMA_AXIARCR_TEC_WIDTH 4
-#define DMA_AXIARCR_TED_INDEX 12
-#define DMA_AXIARCR_TED_WIDTH 2
-#define DMA_AXIARCR_THC_INDEX 16
-#define DMA_AXIARCR_THC_WIDTH 4
-#define DMA_AXIARCR_THD_INDEX 20
-#define DMA_AXIARCR_THD_WIDTH 2
-#define DMA_AXIAWCR_DWC_INDEX 0
-#define DMA_AXIAWCR_DWC_WIDTH 4
-#define DMA_AXIAWCR_DWD_INDEX 4
-#define DMA_AXIAWCR_DWD_WIDTH 2
-#define DMA_AXIAWCR_RPC_INDEX 8
-#define DMA_AXIAWCR_RPC_WIDTH 4
-#define DMA_AXIAWCR_RPD_INDEX 12
-#define DMA_AXIAWCR_RPD_WIDTH 2
-#define DMA_AXIAWCR_RHC_INDEX 16
-#define DMA_AXIAWCR_RHC_WIDTH 4
-#define DMA_AXIAWCR_RHD_INDEX 20
-#define DMA_AXIAWCR_RHD_WIDTH 2
-#define DMA_AXIAWCR_TDC_INDEX 24
-#define DMA_AXIAWCR_TDC_WIDTH 4
-#define DMA_AXIAWCR_TDD_INDEX 28
-#define DMA_AXIAWCR_TDD_WIDTH 2
#define DMA_ISR_MACIS_INDEX 17
#define DMA_ISR_MACIS_WIDTH 1
#define DMA_ISR_MTLIS_INDEX 16
#define DMA_ISR_MTLIS_WIDTH 1
+#define DMA_MR_INTM_INDEX 12
+#define DMA_MR_INTM_WIDTH 2
#define DMA_MR_SWR_INDEX 0
#define DMA_MR_SWR_WIDTH 1
+#define DMA_RXEDMACR_RDPS_INDEX 0
+#define DMA_RXEDMACR_RDPS_WIDTH 3
+#define DMA_SBMR_AAL_INDEX 12
+#define DMA_SBMR_AAL_WIDTH 1
#define DMA_SBMR_EAME_INDEX 11
#define DMA_SBMR_EAME_WIDTH 1
-#define DMA_SBMR_BLEN_256_INDEX 7
-#define DMA_SBMR_BLEN_256_WIDTH 1
+#define DMA_SBMR_BLEN_INDEX 1
+#define DMA_SBMR_BLEN_WIDTH 7
+#define DMA_SBMR_RD_OSR_LMT_INDEX 16
+#define DMA_SBMR_RD_OSR_LMT_WIDTH 6
#define DMA_SBMR_UNDEF_INDEX 0
#define DMA_SBMR_UNDEF_WIDTH 1
+#define DMA_SBMR_WR_OSR_LMT_INDEX 24
+#define DMA_SBMR_WR_OSR_LMT_WIDTH 6
+#define DMA_TXEDMACR_TDPS_INDEX 0
+#define DMA_TXEDMACR_TDPS_WIDTH 3
/* DMA register values */
+#define DMA_SBMR_BLEN_256 256
+#define DMA_SBMR_BLEN_128 128
+#define DMA_SBMR_BLEN_64 64
+#define DMA_SBMR_BLEN_32 32
+#define DMA_SBMR_BLEN_16 16
+#define DMA_SBMR_BLEN_8 8
+#define DMA_SBMR_BLEN_4 4
#define DMA_DSR_RPS_WIDTH 4
#define DMA_DSR_TPS_WIDTH 4
#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH)
@@ -215,17 +209,28 @@
#define DMA_CH_CARBR_HI 0x58
#define DMA_CH_CARBR_LO 0x5c
#define DMA_CH_SR 0x60
+#define DMA_CH_DSR 0x64
+#define DMA_CH_DCFL 0x68
+#define DMA_CH_MFC 0x6c
+#define DMA_CH_TDTRO 0x70
+#define DMA_CH_RDTRO 0x74
+#define DMA_CH_TDWRO 0x78
+#define DMA_CH_RDWRO 0x7C
/* DMA channel register entry bit positions and sizes */
#define DMA_CH_CR_PBLX8_INDEX 16
#define DMA_CH_CR_PBLX8_WIDTH 1
#define DMA_CH_CR_SPH_INDEX 24
#define DMA_CH_CR_SPH_WIDTH 1
-#define DMA_CH_IER_AIE_INDEX 15
+#define DMA_CH_IER_AIE20_INDEX 15
+#define DMA_CH_IER_AIE20_WIDTH 1
+#define DMA_CH_IER_AIE_INDEX 14
#define DMA_CH_IER_AIE_WIDTH 1
#define DMA_CH_IER_FBEE_INDEX 12
#define DMA_CH_IER_FBEE_WIDTH 1
-#define DMA_CH_IER_NIE_INDEX 16
+#define DMA_CH_IER_NIE20_INDEX 16
+#define DMA_CH_IER_NIE20_WIDTH 1
+#define DMA_CH_IER_NIE_INDEX 15
#define DMA_CH_IER_NIE_WIDTH 1
#define DMA_CH_IER_RBUE_INDEX 7
#define DMA_CH_IER_RBUE_WIDTH 1
@@ -291,12 +296,26 @@
#define MAC_PFR 0x0008
#define MAC_WTR 0x000c
#define MAC_HTR0 0x0010
+#define MAC_HTR1 0x0014
+#define MAC_HTR2 0x0018
+#define MAC_HTR3 0x001c
+#define MAC_HTR4 0x0020
+#define MAC_HTR5 0x0024
+#define MAC_HTR6 0x0028
+#define MAC_HTR7 0x002c
#define MAC_VLANTR 0x0050
#define MAC_VLANHTR 0x0058
#define MAC_VLANIR 0x0060
#define MAC_IVLANIR 0x0064
#define MAC_RETMR 0x006c
#define MAC_Q0TFCR 0x0070
+#define MAC_Q1TFCR 0x0074
+#define MAC_Q2TFCR 0x0078
+#define MAC_Q3TFCR 0x007c
+#define MAC_Q4TFCR 0x0080
+#define MAC_Q5TFCR 0x0084
+#define MAC_Q6TFCR 0x0088
+#define MAC_Q7TFCR 0x008c
#define MAC_RFCR 0x0090
#define MAC_RQC0R 0x00a0
#define MAC_RQC1R 0x00a4
@@ -309,11 +328,17 @@
#define MAC_RWKPFR 0x00c4
#define MAC_LPICSR 0x00d0
#define MAC_LPITCR 0x00d4
+#define MAC_TIR 0x00e0
#define MAC_VR 0x0110
#define MAC_DR 0x0114
#define MAC_HWF0R 0x011c
#define MAC_HWF1R 0x0120
#define MAC_HWF2R 0x0124
+#define MAC_MDIOSCAR 0x0200
+#define MAC_MDIOSCCDR 0x0204
+#define MAC_MDIOISR 0x0214
+#define MAC_MDIOIER 0x0218
+#define MAC_MDIOCL22R 0x0220
#define MAC_GPIOCR 0x0278
#define MAC_GPIOSR 0x027c
#define MAC_MACA0HR 0x0300
@@ -370,6 +395,8 @@
#define MAC_HWF0R_TXCOESEL_WIDTH 1
#define MAC_HWF0R_VLHASH_INDEX 4
#define MAC_HWF0R_VLHASH_WIDTH 1
+#define MAC_HWF0R_VXN_INDEX 29
+#define MAC_HWF0R_VXN_WIDTH 1
#define MAC_HWF1R_ADDR64_INDEX 14
#define MAC_HWF1R_ADDR64_WIDTH 2
#define MAC_HWF1R_ADVTHWORD_INDEX 13
@@ -414,10 +441,32 @@
#define MAC_ISR_MMCTXIS_WIDTH 1
#define MAC_ISR_PMTIS_INDEX 4
#define MAC_ISR_PMTIS_WIDTH 1
+#define MAC_ISR_SMI_INDEX 1
+#define MAC_ISR_SMI_WIDTH 1
#define MAC_ISR_TSIS_INDEX 12
#define MAC_ISR_TSIS_WIDTH 1
#define MAC_MACA1HR_AE_INDEX 31
#define MAC_MACA1HR_AE_WIDTH 1
+#define MAC_MDIOIER_SNGLCOMPIE_INDEX 12
+#define MAC_MDIOIER_SNGLCOMPIE_WIDTH 1
+#define MAC_MDIOISR_SNGLCOMPINT_INDEX 12
+#define MAC_MDIOISR_SNGLCOMPINT_WIDTH 1
+#define MAC_MDIOSCAR_DA_INDEX 21
+#define MAC_MDIOSCAR_DA_WIDTH 5
+#define MAC_MDIOSCAR_PA_INDEX 16
+#define MAC_MDIOSCAR_PA_WIDTH 5
+#define MAC_MDIOSCAR_RA_INDEX 0
+#define MAC_MDIOSCAR_RA_WIDTH 16
+#define MAC_MDIOSCCDR_BUSY_INDEX 22
+#define MAC_MDIOSCCDR_BUSY_WIDTH 1
+#define MAC_MDIOSCCDR_CMD_INDEX 16
+#define MAC_MDIOSCCDR_CMD_WIDTH 2
+#define MAC_MDIOSCCDR_CR_INDEX 19
+#define MAC_MDIOSCCDR_CR_WIDTH 3
+#define MAC_MDIOSCCDR_DATA_INDEX 0
+#define MAC_MDIOSCCDR_DATA_WIDTH 16
+#define MAC_MDIOSCCDR_SADDR_INDEX 18
+#define MAC_MDIOSCCDR_SADDR_WIDTH 1
#define MAC_PFR_HMC_INDEX 2
#define MAC_PFR_HMC_WIDTH 1
#define MAC_PFR_HPF_INDEX 10
@@ -430,6 +479,8 @@
#define MAC_PFR_PR_WIDTH 1
#define MAC_PFR_VTFE_INDEX 16
#define MAC_PFR_VTFE_WIDTH 1
+#define MAC_PFR_VUCC_INDEX 22
+#define MAC_PFR_VUCC_WIDTH 1
#define MAC_PMTCSR_MGKPKTEN_INDEX 1
#define MAC_PMTCSR_MGKPKTEN_WIDTH 1
#define MAC_PMTCSR_PWRDWN_INDEX 0
@@ -458,6 +509,8 @@
#define MAC_RCR_LM_WIDTH 1
#define MAC_RCR_RE_INDEX 0
#define MAC_RCR_RE_WIDTH 1
+#define MAC_RCR_ARPEN_INDEX 31
+#define MAC_RCR_ARPEN_WIDTH 1
#define MAC_RFCR_PFCE_INDEX 8
#define MAC_RFCR_PFCE_WIDTH 1
#define MAC_RFCR_RFE_INDEX 0
@@ -492,6 +545,12 @@
#define MAC_TCR_SS_WIDTH 2
#define MAC_TCR_TE_INDEX 0
#define MAC_TCR_TE_WIDTH 1
+#define MAC_TCR_VNE_INDEX 24
+#define MAC_TCR_VNE_WIDTH 1
+#define MAC_TCR_VNM_INDEX 25
+#define MAC_TCR_VNM_WIDTH 1
+#define MAC_TIR_TNID_INDEX 0
+#define MAC_TIR_TNID_WIDTH 16
#define MAC_TSCR_AV8021ASMEN_INDEX 28
#define MAC_TSCR_AV8021ASMEN_WIDTH 1
#define MAC_TSCR_SNAPTYPSEL_INDEX 16
@@ -746,6 +805,8 @@
#define MTL_FDDR 0x1010
#define MTL_ISR 0x1020
#define MTL_RQDCM0R 0x1030
+#define MTL_RQDCM1R 0x1034
+#define MTL_RQDCM2R 0x1038
#define MTL_TCPM0R 0x1040
#define MTL_TCPM1R 0x1044
@@ -771,9 +832,13 @@
#define MTL_Q_TQOMR 0x00
#define MTL_Q_TQUR 0x04
#define MTL_Q_TQDR 0x08
+#define MTL_Q_TC0ETSCR 0x10
+#define MTL_Q_TC0ETSSR 0x14
+#define MTL_Q_TC0QWR 0x18
#define MTL_Q_RQOMR 0x40
#define MTL_Q_RQMPOCR 0x44
#define MTL_Q_RQDR 0x48
+#define MTL_Q_RQCR 0x4c
#define MTL_Q_RQFCR 0x50
#define MTL_Q_IER 0x70
#define MTL_Q_ISR 0x74
@@ -795,6 +860,10 @@
#define MTL_Q_RQOMR_RSF_WIDTH 1
#define MTL_Q_RQOMR_RTC_INDEX 0
#define MTL_Q_RQOMR_RTC_WIDTH 2
+#define MTL_Q_TQDR_TRCSTS_INDEX 1
+#define MTL_Q_TQDR_TRCSTS_WIDTH 2
+#define MTL_Q_TQDR_TXQSTS_INDEX 4
+#define MTL_Q_TQDR_TXQSTS_WIDTH 1
#define MTL_Q_TQOMR_FTQ_INDEX 0
#define MTL_Q_TQOMR_FTQ_WIDTH 1
#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8
@@ -864,7 +933,17 @@
* an address phase and a data phase. The address phases requires
* writing an address selection value to the MMD select regiesters.
*/
-#define PCS_MMD_SELECT 0xff
+#define PCS_V1_WINDOW_SELECT 0x03fc
+#define PCS_V2_WINDOW_DEF 0x9060
+#define PCS_V2_WINDOW_SELECT 0x9064
+#define PCS_V2_RV_WINDOW_DEF 0x1060
+#define PCS_V2_RV_WINDOW_SELECT 0x1064
+
+/* PCS register entry bit positions and sizes */
+#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
+#define PCS_V2_WINDOW_DEF_OFFSET_WIDTH 14
+#define PCS_V2_WINDOW_DEF_SIZE_INDEX 2
+#define PCS_V2_WINDOW_DEF_SIZE_WIDTH 4
/* SerDes integration register offsets */
#define SIR0_KR_RT_1 0x002c
@@ -908,6 +987,199 @@
#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
+/* MAC Control register offsets */
+#define XP_PROP_0 0x0000
+#define XP_PROP_1 0x0004
+#define XP_PROP_2 0x0008
+#define XP_PROP_3 0x000c
+#define XP_PROP_4 0x0010
+#define XP_PROP_5 0x0014
+#define XP_MAC_ADDR_LO 0x0020
+#define XP_MAC_ADDR_HI 0x0024
+#define XP_ECC_ISR 0x0030
+#define XP_ECC_IER 0x0034
+#define XP_ECC_CNT0 0x003c
+#define XP_ECC_CNT1 0x0040
+#define XP_DRIVER_INT_REQ 0x0060
+#define XP_DRIVER_INT_RO 0x0064
+#define XP_DRIVER_SCRATCH_0 0x0068
+#define XP_DRIVER_SCRATCH_1 0x006c
+#define XP_INT_REISSUE_EN 0x0074
+#define XP_INT_EN 0x0078
+#define XP_I2C_MUTEX 0x0080
+#define XP_MDIO_MUTEX 0x0084
+
+/* MAC Control register entry bit positions and sizes */
+#define XP_DRIVER_INT_REQ_REQUEST_INDEX 0
+#define XP_DRIVER_INT_REQ_REQUEST_WIDTH 1
+#define XP_DRIVER_INT_RO_STATUS_INDEX 0
+#define XP_DRIVER_INT_RO_STATUS_WIDTH 1
+#define XP_DRIVER_SCRATCH_0_COMMAND_INDEX 0
+#define XP_DRIVER_SCRATCH_0_COMMAND_WIDTH 8
+#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_INDEX 8
+#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_WIDTH 8
+#define XP_ECC_CNT0_RX_DED_INDEX 24
+#define XP_ECC_CNT0_RX_DED_WIDTH 8
+#define XP_ECC_CNT0_RX_SEC_INDEX 16
+#define XP_ECC_CNT0_RX_SEC_WIDTH 8
+#define XP_ECC_CNT0_TX_DED_INDEX 8
+#define XP_ECC_CNT0_TX_DED_WIDTH 8
+#define XP_ECC_CNT0_TX_SEC_INDEX 0
+#define XP_ECC_CNT0_TX_SEC_WIDTH 8
+#define XP_ECC_CNT1_DESC_DED_INDEX 8
+#define XP_ECC_CNT1_DESC_DED_WIDTH 8
+#define XP_ECC_CNT1_DESC_SEC_INDEX 0
+#define XP_ECC_CNT1_DESC_SEC_WIDTH 8
+#define XP_ECC_IER_DESC_DED_INDEX 5
+#define XP_ECC_IER_DESC_DED_WIDTH 1
+#define XP_ECC_IER_DESC_SEC_INDEX 4
+#define XP_ECC_IER_DESC_SEC_WIDTH 1
+#define XP_ECC_IER_RX_DED_INDEX 3
+#define XP_ECC_IER_RX_DED_WIDTH 1
+#define XP_ECC_IER_RX_SEC_INDEX 2
+#define XP_ECC_IER_RX_SEC_WIDTH 1
+#define XP_ECC_IER_TX_DED_INDEX 1
+#define XP_ECC_IER_TX_DED_WIDTH 1
+#define XP_ECC_IER_TX_SEC_INDEX 0
+#define XP_ECC_IER_TX_SEC_WIDTH 1
+#define XP_ECC_ISR_DESC_DED_INDEX 5
+#define XP_ECC_ISR_DESC_DED_WIDTH 1
+#define XP_ECC_ISR_DESC_SEC_INDEX 4
+#define XP_ECC_ISR_DESC_SEC_WIDTH 1
+#define XP_ECC_ISR_RX_DED_INDEX 3
+#define XP_ECC_ISR_RX_DED_WIDTH 1
+#define XP_ECC_ISR_RX_SEC_INDEX 2
+#define XP_ECC_ISR_RX_SEC_WIDTH 1
+#define XP_ECC_ISR_TX_DED_INDEX 1
+#define XP_ECC_ISR_TX_DED_WIDTH 1
+#define XP_ECC_ISR_TX_SEC_INDEX 0
+#define XP_ECC_ISR_TX_SEC_WIDTH 1
+#define XP_I2C_MUTEX_BUSY_INDEX 31
+#define XP_I2C_MUTEX_BUSY_WIDTH 1
+#define XP_I2C_MUTEX_ID_INDEX 29
+#define XP_I2C_MUTEX_ID_WIDTH 2
+#define XP_I2C_MUTEX_ACTIVE_INDEX 0
+#define XP_I2C_MUTEX_ACTIVE_WIDTH 1
+#define XP_MAC_ADDR_HI_VALID_INDEX 31
+#define XP_MAC_ADDR_HI_VALID_WIDTH 1
+#define XP_PROP_0_CONN_TYPE_INDEX 28
+#define XP_PROP_0_CONN_TYPE_WIDTH 3
+#define XP_PROP_0_MDIO_ADDR_INDEX 16
+#define XP_PROP_0_MDIO_ADDR_WIDTH 5
+#define XP_PROP_0_PORT_ID_INDEX 0
+#define XP_PROP_0_PORT_ID_WIDTH 8
+#define XP_PROP_0_PORT_MODE_INDEX 8
+#define XP_PROP_0_PORT_MODE_WIDTH 4
+#define XP_PROP_0_PORT_SPEEDS_INDEX 23
+#define XP_PROP_0_PORT_SPEEDS_WIDTH 4
+#define XP_PROP_1_MAX_RX_DMA_INDEX 24
+#define XP_PROP_1_MAX_RX_DMA_WIDTH 5
+#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8
+#define XP_PROP_1_MAX_RX_QUEUES_WIDTH 5
+#define XP_PROP_1_MAX_TX_DMA_INDEX 16
+#define XP_PROP_1_MAX_TX_DMA_WIDTH 5
+#define XP_PROP_1_MAX_TX_QUEUES_INDEX 0
+#define XP_PROP_1_MAX_TX_QUEUES_WIDTH 5
+#define XP_PROP_2_RX_FIFO_SIZE_INDEX 16
+#define XP_PROP_2_RX_FIFO_SIZE_WIDTH 16
+#define XP_PROP_2_TX_FIFO_SIZE_INDEX 0
+#define XP_PROP_2_TX_FIFO_SIZE_WIDTH 16
+#define XP_PROP_3_GPIO_MASK_INDEX 28
+#define XP_PROP_3_GPIO_MASK_WIDTH 4
+#define XP_PROP_3_GPIO_MOD_ABS_INDEX 20
+#define XP_PROP_3_GPIO_MOD_ABS_WIDTH 4
+#define XP_PROP_3_GPIO_RATE_SELECT_INDEX 16
+#define XP_PROP_3_GPIO_RATE_SELECT_WIDTH 4
+#define XP_PROP_3_GPIO_RX_LOS_INDEX 24
+#define XP_PROP_3_GPIO_RX_LOS_WIDTH 4
+#define XP_PROP_3_GPIO_TX_FAULT_INDEX 12
+#define XP_PROP_3_GPIO_TX_FAULT_WIDTH 4
+#define XP_PROP_3_GPIO_ADDR_INDEX 8
+#define XP_PROP_3_GPIO_ADDR_WIDTH 3
+#define XP_PROP_3_MDIO_RESET_INDEX 0
+#define XP_PROP_3_MDIO_RESET_WIDTH 2
+#define XP_PROP_3_MDIO_RESET_I2C_ADDR_INDEX 8
+#define XP_PROP_3_MDIO_RESET_I2C_ADDR_WIDTH 3
+#define XP_PROP_3_MDIO_RESET_I2C_GPIO_INDEX 12
+#define XP_PROP_3_MDIO_RESET_I2C_GPIO_WIDTH 4
+#define XP_PROP_3_MDIO_RESET_INT_GPIO_INDEX 4
+#define XP_PROP_3_MDIO_RESET_INT_GPIO_WIDTH 2
+#define XP_PROP_4_MUX_ADDR_HI_INDEX 8
+#define XP_PROP_4_MUX_ADDR_HI_WIDTH 5
+#define XP_PROP_4_MUX_ADDR_LO_INDEX 0
+#define XP_PROP_4_MUX_ADDR_LO_WIDTH 3
+#define XP_PROP_4_MUX_CHAN_INDEX 4
+#define XP_PROP_4_MUX_CHAN_WIDTH 3
+#define XP_PROP_4_REDRV_ADDR_INDEX 16
+#define XP_PROP_4_REDRV_ADDR_WIDTH 7
+#define XP_PROP_4_REDRV_IF_INDEX 23
+#define XP_PROP_4_REDRV_IF_WIDTH 1
+#define XP_PROP_4_REDRV_LANE_INDEX 24
+#define XP_PROP_4_REDRV_LANE_WIDTH 3
+#define XP_PROP_4_REDRV_MODEL_INDEX 28
+#define XP_PROP_4_REDRV_MODEL_WIDTH 3
+#define XP_PROP_4_REDRV_PRESENT_INDEX 31
+#define XP_PROP_4_REDRV_PRESENT_WIDTH 1
+
+/* I2C Control register offsets */
+#define IC_CON 0x0000
+#define IC_TAR 0x0004
+#define IC_DATA_CMD 0x0010
+#define IC_INTR_STAT 0x002c
+#define IC_INTR_MASK 0x0030
+#define IC_RAW_INTR_STAT 0x0034
+#define IC_CLR_INTR 0x0040
+#define IC_CLR_TX_ABRT 0x0054
+#define IC_CLR_STOP_DET 0x0060
+#define IC_ENABLE 0x006c
+#define IC_TXFLR 0x0074
+#define IC_RXFLR 0x0078
+#define IC_TX_ABRT_SOURCE 0x0080
+#define IC_ENABLE_STATUS 0x009c
+#define IC_COMP_PARAM_1 0x00f4
+
+/* I2C Control register entry bit positions and sizes */
+#define IC_COMP_PARAM_1_MAX_SPEED_MODE_INDEX 2
+#define IC_COMP_PARAM_1_MAX_SPEED_MODE_WIDTH 2
+#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_INDEX 8
+#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_WIDTH 8
+#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_INDEX 16
+#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_WIDTH 8
+#define IC_CON_MASTER_MODE_INDEX 0
+#define IC_CON_MASTER_MODE_WIDTH 1
+#define IC_CON_RESTART_EN_INDEX 5
+#define IC_CON_RESTART_EN_WIDTH 1
+#define IC_CON_RX_FIFO_FULL_HOLD_INDEX 9
+#define IC_CON_RX_FIFO_FULL_HOLD_WIDTH 1
+#define IC_CON_SLAVE_DISABLE_INDEX 6
+#define IC_CON_SLAVE_DISABLE_WIDTH 1
+#define IC_CON_SPEED_INDEX 1
+#define IC_CON_SPEED_WIDTH 2
+#define IC_DATA_CMD_CMD_INDEX 8
+#define IC_DATA_CMD_CMD_WIDTH 1
+#define IC_DATA_CMD_STOP_INDEX 9
+#define IC_DATA_CMD_STOP_WIDTH 1
+#define IC_ENABLE_ABORT_INDEX 1
+#define IC_ENABLE_ABORT_WIDTH 1
+#define IC_ENABLE_EN_INDEX 0
+#define IC_ENABLE_EN_WIDTH 1
+#define IC_ENABLE_STATUS_EN_INDEX 0
+#define IC_ENABLE_STATUS_EN_WIDTH 1
+#define IC_INTR_MASK_TX_EMPTY_INDEX 4
+#define IC_INTR_MASK_TX_EMPTY_WIDTH 1
+#define IC_RAW_INTR_STAT_RX_FULL_INDEX 2
+#define IC_RAW_INTR_STAT_RX_FULL_WIDTH 1
+#define IC_RAW_INTR_STAT_STOP_DET_INDEX 9
+#define IC_RAW_INTR_STAT_STOP_DET_WIDTH 1
+#define IC_RAW_INTR_STAT_TX_ABRT_INDEX 6
+#define IC_RAW_INTR_STAT_TX_ABRT_WIDTH 1
+#define IC_RAW_INTR_STAT_TX_EMPTY_INDEX 4
+#define IC_RAW_INTR_STAT_TX_EMPTY_WIDTH 1
+
+/* I2C Control register value */
+#define IC_TX_ABRT_7B_ADDR_NOACK 0x0001
+#define IC_TX_ABRT_ARB_LOST 0x1000
+
/* Descriptor/Packet entry bit positions and sizes */
#define RX_PACKET_ERRORS_CRC_INDEX 2
#define RX_PACKET_ERRORS_CRC_WIDTH 1
@@ -922,8 +1194,8 @@
#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_LAST_INDEX 2
+#define RX_PACKET_ATTRIBUTES_LAST_WIDTH 1
#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
@@ -932,11 +1204,21 @@
#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7
+#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_TNP_INDEX 8
+#define RX_PACKET_ATTRIBUTES_TNP_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_TNPCSUM_DONE_INDEX 9
+#define RX_PACKET_ATTRIBUTES_TNPCSUM_DONE_WIDTH 1
#define RX_NORMAL_DESC0_OVT_INDEX 0
#define RX_NORMAL_DESC0_OVT_WIDTH 16
#define RX_NORMAL_DESC2_HL_INDEX 0
#define RX_NORMAL_DESC2_HL_WIDTH 10
+#define RX_NORMAL_DESC2_TNP_INDEX 11
+#define RX_NORMAL_DESC2_TNP_WIDTH 1
+#define RX_NORMAL_DESC2_RPNG_INDEX 14
+#define RX_NORMAL_DESC2_RPNG_WIDTH 1
#define RX_NORMAL_DESC3_CDA_INDEX 27
#define RX_NORMAL_DESC3_CDA_WIDTH 1
#define RX_NORMAL_DESC3_CTXT_INDEX 30
@@ -963,9 +1245,11 @@
#define RX_DESC3_L34T_IPV4_TCP 1
#define RX_DESC3_L34T_IPV4_UDP 2
#define RX_DESC3_L34T_IPV4_ICMP 3
+#define RX_DESC3_L34T_IPV4_UNKNOWN 7
#define RX_DESC3_L34T_IPV6_TCP 9
#define RX_DESC3_L34T_IPV6_UDP 10
#define RX_DESC3_L34T_IPV6_ICMP 11
+#define RX_DESC3_L34T_IPV6_UNKNOWN 15
#define RX_CONTEXT_DESC3_TSA_INDEX 4
#define RX_CONTEXT_DESC3_TSA_WIDTH 1
@@ -980,6 +1264,8 @@
#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
#define TX_PACKET_ATTRIBUTES_PTP_INDEX 3
#define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_VXLAN_INDEX 4
+#define TX_PACKET_ATTRIBUTES_VXLAN_WIDTH 1
#define TX_CONTEXT_DESC2_MSS_INDEX 0
#define TX_CONTEXT_DESC2_MSS_WIDTH 15
@@ -1020,8 +1306,11 @@
#define TX_NORMAL_DESC3_TCPPL_WIDTH 18
#define TX_NORMAL_DESC3_TSE_INDEX 18
#define TX_NORMAL_DESC3_TSE_WIDTH 1
+#define TX_NORMAL_DESC3_VNP_INDEX 23
+#define TX_NORMAL_DESC3_VNP_WIDTH 3
#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
+#define TX_NORMAL_DESC3_VXLAN_PACKET 0x3
/* MDIO undefined or vendor specific registers */
#ifndef MDIO_PMA_10GBR_PMD_CTRL
@@ -1032,6 +1321,10 @@
#define MDIO_PMA_10GBR_FECCTRL 0x00ab
#endif
+#ifndef MDIO_PCS_DIG_CTRL
+#define MDIO_PCS_DIG_CTRL 0x8000
+#endif
+
#ifndef MDIO_AN_XNP
#define MDIO_AN_XNP 0x0016
#endif
@@ -1052,11 +1345,52 @@
#define MDIO_AN_INT 0x8002
#endif
+#ifndef MDIO_VEND2_AN_ADVERTISE
+#define MDIO_VEND2_AN_ADVERTISE 0x0004
+#endif
+
+#ifndef MDIO_VEND2_AN_LP_ABILITY
+#define MDIO_VEND2_AN_LP_ABILITY 0x0005
+#endif
+
+#ifndef MDIO_VEND2_AN_CTRL
+#define MDIO_VEND2_AN_CTRL 0x8001
+#endif
+
+#ifndef MDIO_VEND2_AN_STAT
+#define MDIO_VEND2_AN_STAT 0x8002
+#endif
+
+#ifndef MDIO_VEND2_PMA_CDR_CONTROL
+#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
+#endif
+
#ifndef MDIO_CTRL1_SPEED1G
#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
#endif
+#ifndef MDIO_VEND2_CTRL1_AN_ENABLE
+#define MDIO_VEND2_CTRL1_AN_ENABLE BIT(12)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_AN_RESTART
+#define MDIO_VEND2_CTRL1_AN_RESTART BIT(9)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_SS6
+#define MDIO_VEND2_CTRL1_SS6 BIT(6)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_SS13
+#define MDIO_VEND2_CTRL1_SS13 BIT(13)
+#endif
+
/* MDIO mask values */
+#define XGBE_AN_CL73_INT_CMPLT BIT(0)
+#define XGBE_AN_CL73_INC_LINK BIT(1)
+#define XGBE_AN_CL73_PG_RCV BIT(2)
+#define XGBE_AN_CL73_INT_MASK 0x07
+
#define XGBE_XNP_MCF_NULL_MESSAGE 0x001
#define XGBE_XNP_ACK_PROCESSED BIT(12)
#define XGBE_XNP_MP_FORMATTED BIT(13)
@@ -1065,6 +1399,24 @@
#define XGBE_KR_TRAINING_START BIT(0)
#define XGBE_KR_TRAINING_ENABLE BIT(1)
+#define XGBE_PCS_CL37_BP BIT(12)
+
+#define XGBE_AN_CL37_INT_CMPLT BIT(0)
+#define XGBE_AN_CL37_INT_MASK 0x01
+
+#define XGBE_AN_CL37_HD_MASK 0x40
+#define XGBE_AN_CL37_FD_MASK 0x20
+
+#define XGBE_AN_CL37_PCS_MODE_MASK 0x06
+#define XGBE_AN_CL37_PCS_MODE_BASEX 0x00
+#define XGBE_AN_CL37_PCS_MODE_SGMII 0x04
+#define XGBE_AN_CL37_TX_CONFIG_MASK 0x08
+#define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100
+
+#define XGBE_PMA_CDR_TRACK_EN_MASK 0x01
+#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00
+#define XGBE_PMA_CDR_TRACK_EN_ON 0x01
+
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
@@ -1140,7 +1492,7 @@ do { \
#define XGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
do { \
- u32 reg_val = XGMAC_IOREAD((_pdata), _reg); \
+ uint32_t reg_val = XGMAC_IOREAD((_pdata), _reg); \
SET_BITS(reg_val, \
_reg##_##_field##_INDEX, \
_reg##_##_field##_WIDTH, (_val)); \
@@ -1166,7 +1518,7 @@ do { \
#define XGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
do { \
- u32 reg_val = XGMAC_MTL_IOREAD((_pdata), (_n), _reg); \
+ uint32_t reg_val = XGMAC_MTL_IOREAD((_pdata), (_n), _reg); \
SET_BITS(reg_val, \
_reg##_##_field##_INDEX, \
_reg##_##_field##_WIDTH, (_val)); \
@@ -1191,7 +1543,7 @@ do { \
#define XGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
do { \
- u32 reg_val = XGMAC_DMA_IOREAD((_channel), _reg); \
+ uint32_t reg_val = XGMAC_DMA_IOREAD((_channel), _reg); \
SET_BITS(reg_val, \
_reg##_##_field##_INDEX, \
_reg##_##_field##_WIDTH, (_val)); \
@@ -1201,12 +1553,28 @@ do { \
/* Macros for building, reading or writing register values or bits
* within the register values of XPCS registers.
*/
-#define XPCS_IOWRITE(_pdata, _off, _val) \
+#define XPCS_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XPCS_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XPCS32_IOWRITE(_pdata, _off, _val) \
bus_write_4((_pdata)->xpcs_res, (_off), _val)
-#define XPCS_IOREAD(_pdata, _off) \
+#define XPCS32_IOREAD(_pdata, _off) \
bus_read_4((_pdata)->xpcs_res, (_off))
+#define XPCS16_IOWRITE(_pdata, _off, _val) \
+ bus_write_2((_pdata)->xpcs_res, (_off), _val)
+
+#define XPCS16_IOREAD(_pdata, _off) \
+ bus_read_2((_pdata)->xpcs_res, (_off))
+
/* Macros for building, reading or writing register values or bits
* within the register values of SerDes integration registers.
*/
@@ -1233,7 +1601,7 @@ do { \
#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \
do { \
- u16 reg_val = XSIR0_IOREAD((_pdata), _reg); \
+ uint16_t reg_val = XSIR0_IOREAD((_pdata), _reg); \
SET_BITS(reg_val, \
_reg##_##_field##_INDEX, \
_reg##_##_field##_WIDTH, (_val)); \
@@ -1253,7 +1621,7 @@ do { \
#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \
do { \
- u16 reg_val = XSIR1_IOREAD((_pdata), _reg); \
+ uint16_t reg_val = XSIR1_IOREAD((_pdata), _reg); \
SET_BITS(reg_val, \
_reg##_##_field##_INDEX, \
_reg##_##_field##_WIDTH, (_val)); \
@@ -1276,7 +1644,7 @@ do { \
#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \
do { \
- u16 reg_val = XRXTX_IOREAD((_pdata), _reg); \
+ uint16_t reg_val = XRXTX_IOREAD((_pdata), _reg); \
SET_BITS(reg_val, \
_reg##_##_field##_INDEX, \
_reg##_##_field##_WIDTH, (_val)); \
@@ -1284,6 +1652,74 @@ do { \
} while (0)
/* Macros for building, reading or writing register values or bits
+ * within the register values of MAC Control registers.
+ */
+#define XP_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XP_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XP_IOREAD(_pdata, _reg) \
+ bus_read_4((_pdata)->xgmac_res, _reg + XGBE_MAC_PROP_OFFSET)
+
+#define XP_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XP_IOREAD((_pdata), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XP_IOWRITE(_pdata, _reg, _val) \
+ bus_write_4((_pdata)->xgmac_res, _reg + XGBE_MAC_PROP_OFFSET, \
+ (_val))
+
+#define XP_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ uint32_t reg_val = XP_IOREAD((_pdata), (_reg)); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XP_IOWRITE((_pdata), (_reg), reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of I2C Control registers.
+ */
+#define XI2C_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XI2C_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XI2C_IOREAD(_pdata, _reg) \
+ bus_read_4((_pdata)->xgmac_res, _reg + XGBE_I2C_CTRL_OFFSET)
+
+#define XI2C_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XI2C_IOREAD((_pdata), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XI2C_IOWRITE(_pdata, _reg, _val) \
+ bus_write_4((_pdata)->xgmac_res, _reg + XGBE_I2C_CTRL_OFFSET, \
+ (_val))
+
+#define XI2C_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ uint32_t reg_val = XI2C_IOREAD((_pdata), (_reg)); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XI2C_IOWRITE((_pdata), (_reg), reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
* using MDIO. Different from above because of the use of standardized
* Linux include values. No shifting is performed with the bit
* operations, everything works on mask values.
@@ -1301,7 +1737,7 @@ do { \
#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
do { \
- u32 mmd_val = XMDIO_READ((_pdata), _mmd, _reg); \
+ uint32_t mmd_val = XMDIO_READ((_pdata), _mmd, _reg); \
mmd_val &= ~_mask; \
mmd_val |= (_val); \
XMDIO_WRITE((_pdata), _mmd, _reg, mmd_val); \
diff --git a/sys/dev/axgbe/xgbe-dcb.c b/sys/dev/axgbe/xgbe-dcb.c
new file mode 100644
index 000000000000..c64a31f0696f
--- /dev/null
+++ b/sys/dev/axgbe/xgbe-dcb.c
@@ -0,0 +1,272 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "xgbe.h"
+
+#if 0
+static int xgbe_dcb_ieee_getets(struct xgbe_prv_data *pdata,
+ struct ieee_ets *ets)
+{
+ /* Set number of supported traffic classes */
+ ets->ets_cap = pdata->hw_feat.tc_cnt;
+
+ if (pdata->ets) {
+ ets->cbs = pdata->ets->cbs;
+ memcpy(ets->tc_tx_bw, pdata->ets->tc_tx_bw,
+ sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_tsa, pdata->ets->tc_tsa,
+ sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, pdata->ets->prio_tc,
+ sizeof(ets->prio_tc));
+ }
+
+ return (0);
+}
+
+static int xgbe_dcb_ieee_setets(struct xgbe_prv_data *pdata,
+ struct ieee_ets *ets)
+{
+ unsigned int i, tc_ets, tc_ets_weight;
+ u8 max_tc = 0;
+
+ tc_ets = 0;
+ tc_ets_weight = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ axgbe_printf(1,
+ "TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
+ ets->tc_tx_bw[i], ets->tc_rx_bw[i],
+ ets->tc_tsa[i]);
+ axgbe_printf(1, "PRIO%u: TC=%hhu\n", i,
+ ets->prio_tc[i]);
+
+ max_tc = max_t(u8, max_tc, ets->prio_tc[i]);
+ if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]))
+ max_tc = max_t(u8, max_tc, i);
+
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ tc_ets = 1;
+ tc_ets_weight += ets->tc_tx_bw[i];
+ break;
+ default:
+ axgbe_error(
+ "unsupported TSA algorithm (%hhu)\n",
+ ets->tc_tsa[i]);
+ return (-EINVAL);
+ }
+ }
+
+ /* Check maximum traffic class requested */
+ if (max_tc >= pdata->hw_feat.tc_cnt) {
+ axgbe_error(
+ "exceeded number of supported traffic classes\n");
+ return (-EINVAL);
+ }
+
+ /* Weights must add up to 100% */
+ if (tc_ets && (tc_ets_weight != 100)) {
+ axgbe_error(
+ "sum of ETS algorithm weights is not 100 (%u)\n",
+ tc_ets_weight);
+ return (-EINVAL);
+ }
+
+ if (!pdata->ets) {
+ pdata->ets = (struct ieee_ets *)malloc(sizeof(struct ieee_ets),
+ M_AXGBE, M_NOWAIT); //TODO - when to free?
+
+ if (!pdata->ets)
+ return (-ENOMEM);
+ }
+
+ pdata->num_tcs = max_tc + 1;
+ memcpy(pdata->ets, ets, sizeof(*pdata->ets));
+
+ pdata->hw_if.config_dcb_tc(pdata);
+
+ return (0);
+}
+
+static int xgbe_dcb_ieee_getpfc(struct xgbe_prv_data *pdata,
+ struct ieee_pfc *pfc)
+{
+
+ /* Set number of supported PFC traffic classes */
+ pfc->pfc_cap = pdata->hw_feat.tc_cnt;
+
+ if (pdata->pfc) {
+ pfc->pfc_en = pdata->pfc->pfc_en;
+ pfc->mbc = pdata->pfc->mbc;
+ pfc->delay = pdata->pfc->delay;
+ }
+
+ return (0);
+}
+
+static int xgbe_dcb_ieee_setpfc(struct xgbe_prv_data *pdata,
+ struct ieee_pfc *pfc)
+{
+
+ axgbe_printf(1,
+ "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%d\n",
+ pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
+
+ /* Check PFC for supported number of traffic classes */
+ if (pfc->pfc_en & ~((1 << pdata->hw_feat.tc_cnt) - 1)) {
+ axgbe_error(
+ "PFC requested for unsupported traffic class\n");
+ return (-EINVAL);
+ }
+
+ if (!pdata->pfc) {
+ pdata->pfc = (struct ieee_pfc *)malloc(sizeof(struct ieee_pfc),
+ M_AXGBE, M_NOWAIT); //TODO - when to free?
+
+ if (!pdata->pfc)
+ return (-ENOMEM);
+ }
+
+ memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
+
+ pdata->hw_if.config_dcb_pfc(pdata);
+
+ return (0);
+}
+
+static u8 xgbe_dcb_getdcbx(struct xgbe_prv_data *pdata)
+{
+ return (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE);
+}
+
+static u8 xgbe_dcb_setdcbx(struct xgbe_prv_data *pdata, u8 dcbx)
+{
+ u8 support = xgbe_dcb_getdcbx(pdata);
+
+ axgbe_printf(1, "DCBX=%#hhx\n", dcbx);
+
+ if (dcbx & ~support)
+ return (1);
+
+ if ((dcbx & support) != support)
+ return (1);
+
+ return (0);
+}
+#endif
diff --git a/sys/dev/axgbe/xgbe-desc.c b/sys/dev/axgbe/xgbe-desc.c
index a2f1f98881e9..f74d600b301f 100644
--- a/sys/dev/axgbe/xgbe-desc.c
+++ b/sys/dev/axgbe/xgbe-desc.c
@@ -1,13 +1,13 @@
/*
* AMD 10Gb Ethernet driver
*
+ * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc.
+ *
* This file is available to you under your choice of the following two
* licenses:
*
* License 1: GPLv2
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or (at
@@ -56,9 +56,6 @@
*
* License 2: Modified BSD
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
@@ -120,221 +117,8 @@ __FBSDID("$FreeBSD$");
#include "xgbe.h"
#include "xgbe-common.h"
-static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
-
-static void xgbe_free_ring(struct xgbe_prv_data *pdata,
- struct xgbe_ring *ring)
-{
- struct xgbe_ring_data *rdata;
- unsigned int i;
-
- if (!ring)
- return;
-
- bus_dmamap_destroy(ring->mbuf_dmat, ring->mbuf_map);
- bus_dma_tag_destroy(ring->mbuf_dmat);
-
- ring->mbuf_map = NULL;
- ring->mbuf_dmat = NULL;
-
- if (ring->rdata) {
- for (i = 0; i < ring->rdesc_count; i++) {
- rdata = XGBE_GET_DESC_DATA(ring, i);
- xgbe_unmap_rdata(pdata, rdata);
- }
-
- free(ring->rdata, M_AXGBE);
- ring->rdata = NULL;
- }
-
- bus_dmamap_unload(ring->rdesc_dmat, ring->rdesc_map);
- bus_dmamem_free(ring->rdesc_dmat, ring->rdesc, ring->rdesc_map);
- bus_dma_tag_destroy(ring->rdesc_dmat);
-
- ring->rdesc_map = NULL;
- ring->rdesc_dmat = NULL;
- ring->rdesc = NULL;
-}
-
-static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
-{
- struct xgbe_channel *channel;
- unsigned int i;
-
- DBGPR("-->xgbe_free_ring_resources\n");
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- xgbe_free_ring(pdata, channel->tx_ring);
- xgbe_free_ring(pdata, channel->rx_ring);
- }
-
- DBGPR("<--xgbe_free_ring_resources\n");
-}
-
-static void xgbe_ring_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg,
- int error)
-{
- if (error)
- return;
- *(bus_addr_t *) arg = segs->ds_addr;
-}
-
-static int xgbe_init_ring(struct xgbe_prv_data *pdata,
- struct xgbe_ring *ring, unsigned int rdesc_count)
-{
- bus_size_t len;
- int err, flags;
-
- DBGPR("-->xgbe_init_ring\n");
-
- if (!ring)
- return 0;
-
- flags = 0;
- if (pdata->coherent)
- flags = BUS_DMA_COHERENT;
-
- /* Descriptors */
- ring->rdesc_count = rdesc_count;
- len = sizeof(struct xgbe_ring_desc) * rdesc_count;
- err = bus_dma_tag_create(pdata->dmat, 512, 0, BUS_SPACE_MAXADDR,
- BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, flags, NULL, NULL,
- &ring->rdesc_dmat);
- if (err != 0) {
- printf("Unable to create the DMA tag: %d\n", err);
- return -err;
- }
-
- err = bus_dmamem_alloc(ring->rdesc_dmat, (void **)&ring->rdesc,
- BUS_DMA_WAITOK | BUS_DMA_COHERENT, &ring->rdesc_map);
- if (err != 0) {
- bus_dma_tag_destroy(ring->rdesc_dmat);
- printf("Unable to allocate DMA memory: %d\n", err);
- return -err;
- }
- err = bus_dmamap_load(ring->rdesc_dmat, ring->rdesc_map, ring->rdesc,
- len, xgbe_ring_dmamap_cb, &ring->rdesc_paddr, 0);
- if (err != 0) {
- bus_dmamem_free(ring->rdesc_dmat, ring->rdesc, ring->rdesc_map);
- bus_dma_tag_destroy(ring->rdesc_dmat);
- printf("Unable to load DMA memory\n");
- return -err;
- }
-
- /* Descriptor information */
- ring->rdata = malloc(rdesc_count * sizeof(struct xgbe_ring_data),
- M_AXGBE, M_WAITOK | M_ZERO);
-
- /* Create the space DMA tag for mbufs */
- err = bus_dma_tag_create(pdata->dmat, 1, 0, BUS_SPACE_MAXADDR,
- BUS_SPACE_MAXADDR, NULL, NULL, XGBE_TX_MAX_BUF_SIZE * rdesc_count,
- rdesc_count, XGBE_TX_MAX_BUF_SIZE, flags, NULL, NULL,
- &ring->mbuf_dmat);
- if (err != 0)
- return -err;
-
- err = bus_dmamap_create(ring->mbuf_dmat, 0, &ring->mbuf_map);
- if (err != 0)
- return -err;
-
- DBGPR("<--xgbe_init_ring\n");
-
- return 0;
-}
-
-static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
-{
- struct xgbe_channel *channel;
- unsigned int i;
- int ret;
-
- DBGPR("-->xgbe_alloc_ring_resources\n");
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- ret = xgbe_init_ring(pdata, channel->tx_ring,
- pdata->tx_desc_count);
- if (ret) {
- printf("error initializing Tx ring\n");
- goto err_ring;
- }
-
- ret = xgbe_init_ring(pdata, channel->rx_ring,
- pdata->rx_desc_count);
- if (ret) {
- printf("error initializing Rx ring\n");
- goto err_ring;
- }
- }
-
- DBGPR("<--xgbe_alloc_ring_resources\n");
-
- return 0;
-
-err_ring:
- xgbe_free_ring_resources(pdata);
-
- return ret;
-}
-
-static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
- struct xgbe_ring *ring,
- struct xgbe_ring_data *rdata)
-{
- bus_dmamap_t mbuf_map;
- bus_dma_segment_t segs[2];
- struct mbuf *m0, *m1;
- int err, nsegs;
-
- m0 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES);
- if (m0 == NULL)
- return (-ENOBUFS);
-
- m1 = m_getjcl(M_NOWAIT, MT_DATA, 0, MCLBYTES);
- if (m1 == NULL) {
- m_freem(m0);
- return (-ENOBUFS);
- }
-
- m0->m_next = m1;
- m0->m_flags |= M_PKTHDR;
- m0->m_len = MHLEN;
- m0->m_pkthdr.len = MHLEN + MCLBYTES;
-
- m1->m_len = MCLBYTES;
- m1->m_next = NULL;
- m1->m_pkthdr.len = MCLBYTES;
-
- err = bus_dmamap_create(ring->mbuf_dmat, 0, &mbuf_map);
- if (err != 0) {
- m_freem(m0);
- return (-err);
- }
-
- err = bus_dmamap_load_mbuf_sg(ring->mbuf_dmat, mbuf_map, m0, segs,
- &nsegs, BUS_DMA_NOWAIT);
- if (err != 0) {
- m_freem(m0);
- bus_dmamap_destroy(ring->mbuf_dmat, mbuf_map);
- return (-err);
- }
-
- KASSERT(nsegs == 2,
- ("xgbe_map_rx_buffer: Unable to handle multiple segments %d",
- nsegs));
-
- rdata->mb = m0;
- rdata->mbuf_free = 0;
- rdata->mbuf_dmat = ring->mbuf_dmat;
- rdata->mbuf_map = mbuf_map;
- rdata->mbuf_hdr_paddr = segs[0].ds_addr;
- rdata->mbuf_data_paddr = segs[1].ds_addr;
-
- return 0;
-}
-
-static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
+static void
+xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
@@ -346,8 +130,10 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+
+ channel = pdata->channel[i];
+
ring = channel->tx_ring;
if (!ring)
break;
@@ -375,7 +161,8 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}
-static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
+static void
+xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
@@ -387,8 +174,10 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
+ for (i = 0; i < pdata->channel_count; i++) {
+
+ channel = pdata->channel[i];
+
ring = channel->rx_ring;
if (!ring)
break;
@@ -402,9 +191,6 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
rdata->rdesc = rdesc;
rdata->rdata_paddr = rdesc_paddr;
- if (xgbe_map_rx_buffer(pdata, ring, rdata))
- break;
-
rdesc++;
rdesc_paddr += sizeof(struct xgbe_ring_desc);
}
@@ -416,124 +202,10 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
}
}
-static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
- struct xgbe_ring_data *rdata)
-{
-
- if (rdata->mbuf_map != NULL)
- bus_dmamap_destroy(rdata->mbuf_dmat, rdata->mbuf_map);
-
- if (rdata->mbuf_free)
- m_freem(rdata->mb);
-
- rdata->mb = NULL;
- rdata->mbuf_free = 0;
- rdata->mbuf_hdr_paddr = 0;
- rdata->mbuf_data_paddr = 0;
- rdata->mbuf_len = 0;
-
- memset(&rdata->tx, 0, sizeof(rdata->tx));
- memset(&rdata->rx, 0, sizeof(rdata->rx));
-}
-
-struct xgbe_map_tx_skb_data {
- struct xgbe_ring *ring;
- struct xgbe_packet_data *packet;
- unsigned int cur_index;
-};
-
-static void xgbe_map_tx_skb_cb(void *callback_arg, bus_dma_segment_t *segs,
- int nseg, bus_size_t mapsize, int error)
-{
- struct xgbe_map_tx_skb_data *data;
- struct xgbe_ring_data *rdata;
- struct xgbe_ring *ring;
- int i;
-
- if (error != 0)
- return;
-
- data = callback_arg;
- ring = data->ring;
-
- for (i = 0; i < nseg; i++) {
- rdata = XGBE_GET_DESC_DATA(ring, data->cur_index);
-
- KASSERT(segs[i].ds_len <= XGBE_TX_MAX_BUF_SIZE,
- ("%s: Segment size is too large %ld > %d", __func__,
- segs[i].ds_len, XGBE_TX_MAX_BUF_SIZE));
-
- if (i == 0) {
- rdata->mbuf_dmat = ring->mbuf_dmat;
- bus_dmamap_create(ring->mbuf_dmat, 0, &ring->mbuf_map);
- }
-
- rdata->mbuf_hdr_paddr = 0;
- rdata->mbuf_data_paddr = segs[i].ds_addr;
- rdata->mbuf_len = segs[i].ds_len;
-
- data->packet->length += rdata->mbuf_len;
-
- data->cur_index++;
- }
-}
-
-static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct mbuf *m)
+void
+xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
- struct xgbe_ring *ring = channel->tx_ring;
- struct xgbe_map_tx_skb_data cbdata;
- struct xgbe_ring_data *rdata;
- struct xgbe_packet_data *packet;
- unsigned int start_index, cur_index;
- int err;
- DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);
-
- start_index = ring->cur;
- cur_index = ring->cur;
-
- packet = &ring->packet_data;
- packet->rdesc_count = 0;
- packet->length = 0;
-
- cbdata.ring = ring;
- cbdata.packet = packet;
- cbdata.cur_index = cur_index;
-
- err = bus_dmamap_load_mbuf(ring->mbuf_dmat, ring->mbuf_map, m,
- xgbe_map_tx_skb_cb, &cbdata, BUS_DMA_NOWAIT);
- if (err != 0) /* TODO: Undo the mapping */
- return (-err);
-
- cur_index = cbdata.cur_index;
-
- /* Save the mbuf address in the last entry. We always have some data
- * that has been mapped so rdata is always advanced past the last
- * piece of mapped data - use the entry pointed to by cur_index - 1.
- */
- rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
- rdata->mb = m;
- rdata->mbuf_free = 1;
-
- /* Save the number of descriptor entries used */
- packet->rdesc_count = cur_index - start_index;
-
- DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);
-
- return packet->rdesc_count;
-}
-
-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
-{
- DBGPR("-->xgbe_init_function_ptrs_desc\n");
-
- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
- desc_if->free_ring_resources = xgbe_free_ring_resources;
- desc_if->map_tx_skb = xgbe_map_tx_skb;
- desc_if->map_rx_buffer = xgbe_map_rx_buffer;
- desc_if->unmap_rdata = xgbe_unmap_rdata;
desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
-
- DBGPR("<--xgbe_init_function_ptrs_desc\n");
}
diff --git a/sys/dev/axgbe/xgbe-dev.c b/sys/dev/axgbe/xgbe-dev.c
index 3a0c65cfa7c9..86e2bd2c9b74 100644
--- a/sys/dev/axgbe/xgbe-dev.c
+++ b/sys/dev/axgbe/xgbe-dev.c
@@ -1,13 +1,13 @@
/*
* AMD 10Gb Ethernet driver
*
+ * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc.
+ *
* This file is available to you under your choice of the following two
* licenses:
*
* License 1: GPLv2
*
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or (at
@@ -56,9 +56,6 @@
*
* License 2: Modified BSD
*
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
@@ -117,23 +114,22 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#include <sys/param.h>
-#include <sys/kernel.h>
-
#include "xgbe.h"
#include "xgbe-common.h"
#include <net/if_dl.h>
-#include <net/if_var.h>
-static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
- unsigned int usec)
+static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
+{
+ return (if_getmtu(pdata->netdev) + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+}
+
+static unsigned int
+xgbe_usec_to_riwt(struct xgbe_prv_data *pdata, unsigned int usec)
{
unsigned long rate;
unsigned int ret;
- DBGPR("-->xgbe_usec_to_riwt\n");
-
rate = pdata->sysclk_rate;
/*
@@ -144,19 +140,15 @@ static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
*/
ret = (usec * (rate / 1000000)) / 256;
- DBGPR("<--xgbe_usec_to_riwt\n");
-
- return ret;
+ return (ret);
}
-static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
- unsigned int riwt)
+static unsigned int
+xgbe_riwt_to_usec(struct xgbe_prv_data *pdata, unsigned int riwt)
{
unsigned long rate;
unsigned int ret;
- DBGPR("-->xgbe_riwt_to_usec\n");
-
rate = pdata->sysclk_rate;
/*
@@ -167,214 +159,315 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
*/
ret = (riwt * 256) / (rate / 1000000);
- DBGPR("<--xgbe_riwt_to_usec\n");
-
- return ret;
+ return (ret);
}
-static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
+static int
+xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
+ unsigned int pblx8, pbl;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
- pdata->pblx8);
-
- return 0;
-}
-
-static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
-{
- return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
-}
+ pblx8 = DMA_PBL_X8_DISABLE;
+ pbl = pdata->pbl;
-static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
-{
- struct xgbe_channel *channel;
- unsigned int i;
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
- break;
-
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
- pdata->tx_pbl);
+ if (pdata->pbl > 32) {
+ pblx8 = DMA_PBL_X8_ENABLE;
+ pbl >>= 3;
}
- return 0;
-}
-
-static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
-{
- return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
-}
-
-static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
-{
- struct xgbe_channel *channel;
- unsigned int i;
+ for (i = 0; i < pdata->channel_count; i++) {
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
+ pblx8);
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
- break;
+ if (pdata->channel[i]->tx_ring)
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
+ PBL, pbl);
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
- pdata->rx_pbl);
+ if (pdata->channel[i]->rx_ring)
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
+ PBL, pbl);
}
- return 0;
+ return (0);
}
-static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
+static int
+xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
- pdata->tx_osp_mode);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
+ pdata->tx_osp_mode);
}
- return 0;
+ return (0);
}
-static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
+static int
+xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
unsigned int i;
for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
- return 0;
+ return (0);
}
-static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
+static int
+xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
unsigned int i;
for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
- return 0;
+ return (0);
}
-static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
- unsigned int val)
+static int
+xgbe_config_rx_threshold(struct xgbe_prv_data *pdata, unsigned int val)
{
unsigned int i;
for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
- return 0;
+ return (0);
}
-static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
- unsigned int val)
+static int
+xgbe_config_tx_threshold(struct xgbe_prv_data *pdata, unsigned int val)
{
unsigned int i;
for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
- return 0;
+ return (0);
}
-static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
+static int
+xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
- pdata->rx_riwt);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
+ pdata->rx_riwt);
}
- return 0;
+ return (0);
}
-static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
+static int
+xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
- return 0;
+ return (0);
}
-static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
+static void
+xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
- pdata->rx_buf_size);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
+ pdata->rx_buf_size);
}
}
-static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
+static void
+xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
+ axgbe_printf(0, "Enabling TSO in channel %d\n", i);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1);
}
}
-static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
+static void
+xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1);
}
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}
-static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
+static int
+xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
+ unsigned int index, unsigned int val)
+{
+ unsigned int wait;
+ int ret = 0;
+
+ mtx_lock(&pdata->rss_mutex);
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+ wait = 1000;
+ while (wait--) {
+ if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+ goto unlock;
+
+ DELAY(1000);
+ }
+
+ ret = -EBUSY;
+
+unlock:
+ mtx_unlock(&pdata->rss_mutex);
+
+ return (ret);
+}
+
+static int
+xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
+{
+ unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(uint32_t);
+ unsigned int *key = (unsigned int *)&pdata->rss_key;
+ int ret;
+
+ while (key_regs--) {
+ ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
+ key_regs, *key++);
+ if (ret)
+ return (ret);
+ }
+
+ return (0);
+}
+
+static int
+xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ ret = xgbe_write_rss_reg(pdata, XGBE_RSS_LOOKUP_TABLE_TYPE, i,
+ pdata->rss_table[i]);
+ if (ret)
+ return (ret);
+ }
+
+ return (0);
+}
+
+static int
+xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const uint8_t *key)
{
+ memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
+
+ return (xgbe_write_rss_hash_key(pdata));
+}
+
+static int
+xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata, const uint32_t *table)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+ XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
+
+ return (xgbe_write_rss_lookup_table(pdata));
+}
+
+static int
+xgbe_enable_rss(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
if (!pdata->hw_feat.rss)
- return -EOPNOTSUPP;
+ return (-EOPNOTSUPP);
+
+ /* Program the hash key */
+ ret = xgbe_write_rss_hash_key(pdata);
+ if (ret)
+ return (ret);
+
+ /* Program the lookup table */
+ ret = xgbe_write_rss_lookup_table(pdata);
+ if (ret)
+ return (ret);
+
+ /* Set the RSS options */
+ XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+ /* Enable RSS */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+ axgbe_printf(0, "RSS Enabled\n");
+
+ return (0);
+}
+
+static int
+xgbe_disable_rss(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->hw_feat.rss)
+ return (-EOPNOTSUPP);
XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
- return 0;
+ axgbe_printf(0, "RSS Disabled\n");
+
+ return (0);
}
-static void xgbe_config_rss(struct xgbe_prv_data *pdata)
+static void
+xgbe_config_rss(struct xgbe_prv_data *pdata)
{
+ int ret;
if (!pdata->hw_feat.rss)
return;
- xgbe_disable_rss(pdata);
+ /* Check if the interface has RSS capability */
+ if (pdata->enable_rss)
+ ret = xgbe_enable_rss(pdata);
+ else
+ ret = xgbe_disable_rss(pdata);
+
+ if (ret)
+ axgbe_error("error configuring RSS, RSS disabled\n");
}
-static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
+static int
+xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
unsigned int max_q_count, q_count;
unsigned int reg, reg_val;
@@ -396,10 +489,11 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
reg += MAC_QTFCR_INC;
}
- return 0;
+ return (0);
}
-static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
+static int
+xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
unsigned int max_q_count, q_count;
unsigned int reg, reg_val;
@@ -407,7 +501,18 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
/* Set MTL flow control */
for (i = 0; i < pdata->rx_q_count; i++) {
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
+ unsigned int ehfc = 0;
+
+ if (pdata->rx_rfd[i]) {
+ /* Flow control thresholds are established */
+ /* TODO - enable pfc/ets support */
+ ehfc = 1;
+ }
+
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
+
+ axgbe_printf(1, "flow control %s for RXq%u\n",
+ ehfc ? "enabled" : "disabled", i);
}
/* Set MAC flow control */
@@ -419,6 +524,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
/* Enable transmit flow control */
XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
+
/* Set pause time */
XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
@@ -427,102 +533,122 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
reg += MAC_QTFCR_INC;
}
- return 0;
+ return (0);
}
-static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
+static int
+xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
- return 0;
+ return (0);
}
-static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
+static int
+xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
- return 0;
+ return (0);
}
-static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
+static int
+xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
-
if (pdata->tx_pause)
xgbe_enable_tx_flow_control(pdata);
else
xgbe_disable_tx_flow_control(pdata);
- return 0;
+ return (0);
}
-static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
+static int
+xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
-
if (pdata->rx_pause)
xgbe_enable_rx_flow_control(pdata);
else
xgbe_disable_rx_flow_control(pdata);
- return 0;
+ return (0);
}
-static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
+static void
+xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
-
xgbe_config_tx_flow_control(pdata);
xgbe_config_rx_flow_control(pdata);
XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
}
-static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
+static void
+xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel;
- unsigned int dma_ch_isr, dma_ch_ier;
- unsigned int i;
+ unsigned int i, ver;
+
+ /* Set the interrupt mode if supported */
+ if (pdata->channel_irq_mode)
+ XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
+ pdata->channel_irq_mode);
+
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
/* Clear all the interrupts which are set */
- dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
- XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR,
+ XGMAC_DMA_IOREAD(channel, DMA_CH_SR));
/* Clear all interrupt enable bits */
- dma_ch_ier = 0;
+ channel->curr_ier = 0;
/* Enable following interrupts
* NIE - Normal Interrupt Summary Enable
* AIE - Abnormal Interrupt Summary Enable
* FBEE - Fatal Bus Error Enable
*/
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+ if (ver < 0x21) {
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1);
+ } else {
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1);
+ }
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
if (channel->tx_ring) {
/* Enable the following Tx interrupts
* TIE - Transmit Interrupt Enable (unless using
- * per channel interrupts)
+ * per channel interrupts in edge triggered
+ * mode)
*/
- if (!pdata->per_channel_irq)
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ if (!pdata->per_channel_irq || pdata->channel_irq_mode)
+ XGMAC_SET_BITS(channel->curr_ier,
+ DMA_CH_IER, TIE, 1);
}
if (channel->rx_ring) {
/* Enable following Rx interrupts
* RBUE - Receive Buffer Unavailable Enable
* RIE - Receive Interrupt Enable (unless using
- * per channel interrupts)
+ * per channel interrupts in edge triggered
+ * mode)
*/
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
- if (!pdata->per_channel_irq)
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
+ if (!pdata->per_channel_irq || pdata->channel_irq_mode)
+ XGMAC_SET_BITS(channel->curr_ier,
+ DMA_CH_IER, RIE, 1);
}
- XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
}
}
-static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
+static void
+xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
unsigned int mtl_q_isr;
unsigned int q_count, i;
@@ -538,7 +664,8 @@ static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
}
}
-static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
+static void
+xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
unsigned int mac_ier = 0;
@@ -550,39 +677,38 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
/* Enable all counter interrupts */
XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
-}
-
-static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
-{
- if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
- return 0;
- XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
-
- return 0;
+ /* Enable MDIO single command completion interrupt */
+ XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1);
}
-static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
+static int
+xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
{
- if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
- return 0;
-
- XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
-
- return 0;
-}
+ unsigned int ss;
-static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
-{
- if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
- return 0;
+ switch (speed) {
+ case SPEED_1000:
+ ss = 0x03;
+ break;
+ case SPEED_2500:
+ ss = 0x02;
+ break;
+ case SPEED_10000:
+ ss = 0x00;
+ break;
+ default:
+ return (-EINVAL);
+ }
- XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);
- return 0;
+ return (0);
}
-static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+static int
+xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
/* Put the VLAN tag in the Rx descriptor */
XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
@@ -599,17 +725,23 @@ static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
/* Enable VLAN tag stripping */
XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
- return 0;
+ axgbe_printf(0, "VLAN Stripping Enabled\n");
+
+ return (0);
}
-static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+static int
+xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
- return 0;
+ axgbe_printf(0, "VLAN Stripping Disabled\n");
+
+ return (0);
}
-static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+static int
+xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
/* Enable VLAN filtering */
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
@@ -631,75 +763,141 @@ static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
*/
XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
- return 0;
+ axgbe_printf(0, "VLAN filtering Enabled\n");
+
+ return (0);
}
-static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+static int
+xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
/* Disable VLAN filtering */
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
- return 0;
+ axgbe_printf(0, "VLAN filtering Disabled\n");
+
+ return (0);
+}
+
+static uint32_t
+xgbe_vid_crc32_le(__le16 vid_le)
+{
+ uint32_t crc = ~0;
+ uint32_t temp = 0;
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= CRC32_POLY_LE;
+ }
+
+ return (crc);
}
-static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
+static int
+xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
- u16 vlan_hash_table = 0;
+ uint32_t crc;
+ uint16_t vid;
+ uint16_t vlan_hash_table = 0;
+ __le16 vid_le = 0;
+
+ axgbe_printf(1, "%s: Before updating VLANHTR 0x%x\n", __func__,
+ XGMAC_IOREAD(pdata, MAC_VLANHTR));
+
+ /* Generate the VLAN Hash Table value */
+ for_each_set_bit(vid, pdata->active_vlans, VLAN_NVID) {
+
+ /* Get the CRC32 value of the VLAN ID */
+ vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
+
+ vlan_hash_table |= (1 << crc);
+ axgbe_printf(1, "%s: vid 0x%x vid_le 0x%x crc 0x%x "
+ "vlan_hash_table 0x%x\n", __func__, vid, vid_le, crc,
+ vlan_hash_table);
+ }
/* Set the VLAN Hash Table filtering register */
XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
- return 0;
+ axgbe_printf(1, "%s: After updating VLANHTR 0x%x\n", __func__,
+ XGMAC_IOREAD(pdata, MAC_VLANHTR));
+
+ return (0);
}
-static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
- unsigned int enable)
+static int
+xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, unsigned int enable)
{
unsigned int val = enable ? 1 : 0;
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
- return 0;
+ return (0);
+
+ axgbe_printf(1, "%s promiscous mode\n", enable? "entering" : "leaving");
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
/* Hardware will still perform VLAN filtering in promiscuous mode */
- xgbe_disable_rx_vlan_filtering(pdata);
+ if (enable) {
+ axgbe_printf(1, "Disabling rx vlan filtering\n");
+ xgbe_disable_rx_vlan_filtering(pdata);
+ } else {
+ if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWFILTER)) {
+ axgbe_printf(1, "Enabling rx vlan filtering\n");
+ xgbe_enable_rx_vlan_filtering(pdata);
+ }
+ }
- return 0;
+ return (0);
}
-static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
- unsigned int enable)
+static int
+xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata, unsigned int enable)
{
unsigned int val = enable ? 1 : 0;
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
- return 0;
+ return (0);
+ axgbe_printf(1,"%s allmulti mode\n", enable ? "entering" : "leaving");
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
- return 0;
+ return (0);
}
-static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
- char *addr, unsigned int *mac_reg)
+static void
+xgbe_set_mac_reg(struct xgbe_prv_data *pdata, char *addr, unsigned int *mac_reg)
{
unsigned int mac_addr_hi, mac_addr_lo;
- u8 *mac_addr;
+ uint8_t *mac_addr;
mac_addr_lo = 0;
mac_addr_hi = 0;
if (addr) {
- mac_addr = (u8 *)&mac_addr_lo;
+ mac_addr = (uint8_t *)&mac_addr_lo;
mac_addr[0] = addr[0];
mac_addr[1] = addr[1];
mac_addr[2] = addr[2];
mac_addr[3] = addr[3];
- mac_addr = (u8 *)&mac_addr_hi;
+ mac_addr = (uint8_t *)&mac_addr_hi;
mac_addr[0] = addr[4];
mac_addr[1] = addr[5];
+ axgbe_printf(1, "adding mac address %pM at %#x\n", addr, *mac_reg);
+
XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
}
@@ -709,7 +907,8 @@ static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
*mac_reg += MAC_MACA_INC;
}
-static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
+static void
+xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
unsigned int mac_reg;
unsigned int addn_macs;
@@ -725,14 +924,17 @@ static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}
-static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
+static int
+xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
+ /* TODO - add support to set mac hash table */
xgbe_set_mac_addn_addrs(pdata);
- return 0;
+ return (0);
}
-static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
+static int
+xgbe_set_mac_address(struct xgbe_prv_data *pdata, uint8_t *addr)
{
unsigned int mac_addr_hi, mac_addr_lo;
@@ -743,27 +945,123 @@ static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
- return 0;
+ return (0);
}
-static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
+static int
+xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
unsigned int pr_mode, am_mode;
- /* XXX */
- pr_mode = 0;
- am_mode = 0;
+ pr_mode = ((pdata->netdev->if_drv_flags & IFF_PPROMISC) != 0);
+ am_mode = ((pdata->netdev->if_drv_flags & IFF_ALLMULTI) != 0);
xgbe_set_promiscuous_mode(pdata, pr_mode);
xgbe_set_all_multicast_mode(pdata, am_mode);
xgbe_add_mac_addresses(pdata);
- return 0;
+ return (0);
+}
+
+static int
+xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
+{
+ unsigned int reg;
+
+ if (gpio > 15)
+ return (-EINVAL);
+
+ reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);
+
+ reg &= ~(1 << (gpio + 16));
+ XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);
+
+ return (0);
+}
+
+static int
+xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
+{
+ unsigned int reg;
+
+ if (gpio > 15)
+ return (-EINVAL);
+
+ reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);
+
+ reg |= (1 << (gpio + 16));
+ XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);
+
+ return (0);
+}
+
+static int
+xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, int mmd_reg)
+{
+ unsigned long flags;
+ unsigned int mmd_address, index, offset;
+ int mmd_data;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 16-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 1 bit and reading 16 bits of data.
+ */
+ mmd_address <<= 1;
+ index = mmd_address & ~pdata->xpcs_window_mask;
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ spin_lock_irqsave(&pdata->xpcs_lock, flags);
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
+ mmd_data = XPCS16_IOREAD(pdata, offset);
+ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
+
+ return (mmd_data);
}
-static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
- int mmd_reg)
+static void
+xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, int mmd_reg,
+ int mmd_data)
+{
+ unsigned long flags;
+ unsigned int mmd_address, index, offset;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 16-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 1 bit and writing 16 bits of data.
+ */
+ mmd_address <<= 1;
+ index = mmd_address & ~pdata->xpcs_window_mask;
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ spin_lock_irqsave(&pdata->xpcs_lock, flags);
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
+ XPCS16_IOWRITE(pdata, offset, mmd_data);
+ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
+}
+
+static int
+xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg)
{
unsigned long flags;
unsigned int mmd_address;
@@ -784,15 +1082,16 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
* offset 2 bits and reading 32 bits of data.
*/
spin_lock_irqsave(&pdata->xpcs_lock, flags);
- XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
- mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
+ XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
+ mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
- return mmd_data;
+ return (mmd_data);
}
-static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
- int mmd_reg, int mmd_data)
+static void
+xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg,
+ int mmd_data)
{
unsigned int mmd_address;
unsigned long flags;
@@ -809,34 +1108,160 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
*
* The mmio interface is based on 32-bit offsets and values. All
* register offsets must therefore be adjusted by left shifting the
- * offset 2 bits and reading 32 bits of data.
+ * offset 2 bits and writing 32 bits of data.
*/
spin_lock_irqsave(&pdata->xpcs_lock, flags);
- XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
- XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
+ XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
+ XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
-static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
+static int
+xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg)
{
- return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
+ switch (pdata->vdata->xpcs_access) {
+ case XGBE_XPCS_ACCESS_V1:
+ return (xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg));
+
+ case XGBE_XPCS_ACCESS_V2:
+ default:
+ return (xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg));
+ }
}
-static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
+static void
+xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg,
+ int mmd_data)
+{
+ switch (pdata->vdata->xpcs_access) {
+ case XGBE_XPCS_ACCESS_V1:
+ return (xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data));
+
+ case XGBE_XPCS_ACCESS_V2:
+ default:
+ return (xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data));
+ }
+}
+
+static unsigned int
+xgbe_create_mdio_sca(int port, int reg)
+{
+ unsigned int mdio_sca, da;
+
+ da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
+
+ mdio_sca = 0;
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
+
+ return (mdio_sca);
+}
+
+static int
+xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg,
+ uint16_t val)
+{
+ unsigned int mdio_sca, mdio_sccd;
+
+ mtx_lock_spin(&pdata->mdio_mutex);
+
+ mdio_sca = xgbe_create_mdio_sca(addr, reg);
+ XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
+ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
+ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+ XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+ if (msleep_spin(pdata, &pdata->mdio_mutex, "mdio_xfer", hz / 8) ==
+ EWOULDBLOCK) {
+ axgbe_error("%s: MDIO write error\n", __func__);
+ mtx_unlock_spin(&pdata->mdio_mutex);
+ return (-ETIMEDOUT);
+ }
+
+ mtx_unlock_spin(&pdata->mdio_mutex);
+ return (0);
+}
+
+static int
+xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg)
+{
+ unsigned int mdio_sca, mdio_sccd;
+
+ mtx_lock_spin(&pdata->mdio_mutex);
+
+ mdio_sca = xgbe_create_mdio_sca(addr, reg);
+ XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
+ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+ XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+ if (msleep_spin(pdata, &pdata->mdio_mutex, "mdio_xfer", hz / 8) ==
+ EWOULDBLOCK) {
+ axgbe_error("%s: MDIO read error\n", __func__);
+ mtx_unlock_spin(&pdata->mdio_mutex);
+ return (-ETIMEDOUT);
+ }
+
+ mtx_unlock_spin(&pdata->mdio_mutex);
+
+ return (XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA));
+}
+
+static int
+xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
+ enum xgbe_mdio_mode mode)
+{
+ unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R);
+
+ switch (mode) {
+ case XGBE_MDIO_MODE_CL22:
+ if (port > XGMAC_MAX_C22_PORT)
+ return (-EINVAL);
+ reg_val |= (1 << port);
+ break;
+ case XGBE_MDIO_MODE_CL45:
+ break;
+ default:
+ return (-EINVAL);
+ }
+
+ XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);
+
+ return (0);
+}
+
+static int
+xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
+{
+ return (!XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN));
+}
+
+static int
+xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
- return 0;
+ axgbe_printf(0, "Receive checksum offload Disabled\n");
+ return (0);
}
-static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
+static int
+xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
- return 0;
+ axgbe_printf(0, "Receive checksum offload Enabled\n");
+ return (0);
}
-static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
+static void
+xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
struct xgbe_ring_desc *rdesc = rdata->rdesc;
@@ -851,18 +1276,17 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
rdesc->desc2 = 0;
rdesc->desc3 = 0;
- dsb(sy);
+ wmb();
}
-static void xgbe_tx_desc_init(struct xgbe_channel *channel)
+static void
+xgbe_tx_desc_init(struct xgbe_channel *channel)
{
struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_ring_data *rdata;
int i;
int start_index = ring->cur;
- DBGPR("-->tx_desc_init\n");
-
/* Initialze all descriptors */
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
@@ -877,62 +1301,22 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
/* Update the starting address of descriptor ring */
rdata = XGBE_GET_DESC_DATA(ring, start_index);
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
- upper_32_bits(rdata->rdata_paddr));
+ upper_32_bits(rdata->rdata_paddr));
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
- lower_32_bits(rdata->rdata_paddr));
-
- DBGPR("<--tx_desc_init\n");
-}
-
-static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
- struct xgbe_ring_data *rdata, unsigned int index)
-{
- struct xgbe_ring_desc *rdesc = rdata->rdesc;
- unsigned int inte;
-
- inte = 1;
-
- /* Reset the Rx descriptor
- * Set buffer 1 (lo) address to header dma address (lo)
- * Set buffer 1 (hi) address to header dma address (hi)
- * Set buffer 2 (lo) address to buffer dma address (lo)
- * Set buffer 2 (hi) address to buffer dma address (hi) and
- * set control bits OWN and INTE
- */
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->mbuf_hdr_paddr));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->mbuf_hdr_paddr));
- rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->mbuf_data_paddr));
- rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->mbuf_data_paddr));
-
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
-
- dsb(sy);
-
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
-
- dsb(sy);
+ lower_32_bits(rdata->rdata_paddr));
}
-static void xgbe_rx_desc_init(struct xgbe_channel *channel)
+static void
+xgbe_rx_desc_init(struct xgbe_channel *channel)
{
- struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
unsigned int start_index = ring->cur;
- unsigned int i;
- DBGPR("-->rx_desc_init\n");
-
- /* Initialize all descriptors */
- for (i = 0; i < ring->rdesc_count; i++) {
- rdata = XGBE_GET_DESC_DATA(ring, i);
-
- /* Initialize Rx descriptor */
- xgbe_rx_desc_reset(pdata, rdata, i);
- }
-
- bus_dmamap_sync(ring->rdesc_dmat, ring->rdesc_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ /*
+ * Just set desc_count and the starting address of the desc list
+ * here. Rest will be done as part of the txrx path.
+ */
/* Update the total number of Rx descriptors */
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
@@ -940,184 +1324,41 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
/* Update the starting address of descriptor ring */
rdata = XGBE_GET_DESC_DATA(ring, start_index);
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
- upper_32_bits(rdata->rdata_paddr));
+ upper_32_bits(rdata->rdata_paddr));
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
- lower_32_bits(rdata->rdata_paddr));
-
- /* Update the Rx Descriptor Tail Pointer */
- rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
- XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
- lower_32_bits(rdata->rdata_paddr));
-
- DBGPR("<--rx_desc_init\n");
+ lower_32_bits(rdata->rdata_paddr));
}
-static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
- struct xgbe_ring *ring)
-{
- struct xgbe_ring_data *rdata;
-
- /* Issue a poll command to Tx DMA by writing address
- * of next immediate free descriptor */
- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
- XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
- lower_32_bits(rdata->rdata_paddr));
-
- ring->tx.xmit_more = 0;
-}
-
-static void xgbe_dev_xmit(struct xgbe_channel *channel)
+static int
+xgbe_dev_read(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
- struct xgbe_ring *ring = channel->tx_ring;
- struct xgbe_ring_data *rdata;
- struct xgbe_ring_desc *rdesc;
- struct xgbe_packet_data *packet = &ring->packet_data;
- unsigned int tx_set_ic;
- int start_index = ring->cur;
- int cur_index = ring->cur;
- int i;
-
- DBGPR("-->xgbe_dev_xmit\n");
-
- /* Determine if an interrupt should be generated for this Tx:
- * Interrupt:
- * - Tx frame count exceeds the frame count setting
- * - Addition of Tx frame count to the frame count since the
- * last interrupt was set exceeds the frame count setting
- * No interrupt:
- * - No frame count setting specified (ethtool -C ethX tx-frames 0)
- * - Addition of Tx frame count to the frame count since the
- * last interrupt was set does not exceed the frame count setting
- */
- ring->coalesce_count += packet->tx_packets;
- if (!pdata->tx_frames)
- tx_set_ic = 0;
- else if (packet->tx_packets > pdata->tx_frames)
- tx_set_ic = 1;
- else if ((ring->coalesce_count % pdata->tx_frames) <
- packet->tx_packets)
- tx_set_ic = 1;
- else
- tx_set_ic = 0;
- tx_set_ic = 1;
-
- rdata = XGBE_GET_DESC_DATA(ring, cur_index);
- rdesc = rdata->rdesc;
-
- /* Update buffer address (for TSO this is the header) */
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->mbuf_data_paddr));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->mbuf_data_paddr));
-
- /* Update the buffer length */
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
- rdata->mbuf_len);
-
- /* Timestamp enablement check */
- if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);
-
- /* Mark it as First Descriptor */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
-
- /* Mark it as a NORMAL descriptor */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
-
- /* Set OWN bit if not the first descriptor */
- if (cur_index != start_index)
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
-
- /* Enable CRC and Pad Insertion */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
-
- /* Set the total length to be transmitted */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
- packet->length);
-
- for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
- cur_index++;
- rdata = XGBE_GET_DESC_DATA(ring, cur_index);
- rdesc = rdata->rdesc;
-
- /* Update buffer address */
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->mbuf_data_paddr));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->mbuf_data_paddr));
-
- /* Update the buffer length */
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
- rdata->mbuf_len);
-
- /* Set OWN bit */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
-
- /* Mark it as NORMAL descriptor */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
- }
-
- /* Set LAST bit for the last descriptor */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
-
- /* Set IC bit based on Tx coalescing settings */
- if (tx_set_ic)
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
-
- /* Save the Tx info to report back during cleanup */
- rdata->tx.packets = packet->tx_packets;
- rdata->tx.bytes = packet->tx_bytes;
-
- /* Sync the DMA buffers */
- bus_dmamap_sync(ring->rdesc_dmat, ring->rdesc_map,
- BUS_DMASYNC_PREWRITE);
- bus_dmamap_sync(ring->mbuf_dmat, ring->mbuf_map,
- BUS_DMASYNC_PREWRITE);
-
- /* In case the Tx DMA engine is running, make sure everything
- * is written to the descriptor(s) before setting the OWN bit
- * for the first descriptor
- */
-
- /* Set OWN bit for the first descriptor */
- rdata = XGBE_GET_DESC_DATA(ring, start_index);
- rdesc = rdata->rdesc;
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
-
- /* Sync to ensure the OWN bit was seen */
- bus_dmamap_sync(ring->rdesc_dmat, ring->rdesc_map,
- BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
-
- ring->cur = cur_index + 1;
- xgbe_tx_start_xmit(channel, ring);
-
- DBGPR(" %s: descriptors %u to %u written\n",
- channel->name, start_index & (ring->rdesc_count - 1),
- (ring->cur - 1) & (ring->rdesc_count - 1));
-
- DBGPR("<--xgbe_dev_xmit\n");
-}
-
-static int xgbe_dev_read(struct xgbe_channel *channel)
-{
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
struct xgbe_packet_data *packet = &ring->packet_data;
- unsigned int err, etlt;
+ unsigned int err, etlt, l34t;
- DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
+ axgbe_printf(1, "-->xgbe_dev_read: cur = %d\n", ring->cur);
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdesc = rdata->rdesc;
- bus_dmamap_sync(ring->rdesc_dmat, ring->rdesc_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
- dsb(sy);
-
/* Check for data availability */
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
- return 1;
+ return (1);
+
+ rmb();
- dsb(sy);
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
+ /* TODO - Timestamp Context Descriptor */
+
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CONTEXT, 1);
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CONTEXT_NEXT, 0);
+ return (0);
+ }
/* Normal Descriptor, be sure Context Descriptor bit is off */
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);
@@ -1125,159 +1366,242 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
/* Indicate if a Context Descriptor is next */
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- CONTEXT_NEXT, 1);
+ CONTEXT_NEXT, 1);
/* Get the header length */
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ FIRST, 1);
rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
- RX_NORMAL_DESC2, HL);
- }
+ RX_NORMAL_DESC2, HL);
+ if (rdata->rx.hdr_len)
+ pdata->ext_stats.rx_split_header_packets++;
+ } else
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ FIRST, 0);
- /* Get the packet length */
- rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
+ /* Get the RSS hash */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RSS_HASH, 1);
+
+ packet->rss_hash = le32_to_cpu(rdesc->desc1);
+ l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
+ switch (l34t) {
+ case RX_DESC3_L34T_IPV4_TCP:
+ packet->rss_hash_type = M_HASHTYPE_RSS_TCP_IPV4;
+ break;
+ case RX_DESC3_L34T_IPV4_UDP:
+ packet->rss_hash_type = M_HASHTYPE_RSS_UDP_IPV4;
+ break;
+ case RX_DESC3_L34T_IPV6_TCP:
+ packet->rss_hash_type = M_HASHTYPE_RSS_TCP_IPV6;
+ break;
+ case RX_DESC3_L34T_IPV6_UDP:
+ packet->rss_hash_type = M_HASHTYPE_RSS_UDP_IPV6;
+ break;
+ default:
+ packet->rss_hash_type = M_HASHTYPE_OPAQUE;
+ break;
+ }
+ }
+
+ /* Not all the data has been transferred for this packet */
if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
- /* Not all the data has been transferred for this packet */
+ /* This is not the last of the data for this packet */
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- INCOMPLETE, 1);
- return 0;
+ LAST, 0);
+ return (0);
}
/* This is the last of the data for this packet */
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- INCOMPLETE, 0);
+ LAST, 1);
+
+ /* Get the packet length */
+ rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
+
+ /* Set checksum done indicator as appropriate */
+ /* TODO - add tunneling support */
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 1);
/* Check for errors (only valid in last descriptor) */
err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
+ axgbe_printf(1, "%s: err=%u, etlt=%#x\n", __func__, err, etlt);
+
+ if (!err || !etlt) {
+ /* No error if err is 0 or etlt is 0 */
+ if (etlt == 0x09) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ VLAN_CTAG, 1);
+ packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
+ RX_NORMAL_DESC0, OVT);
+ axgbe_printf(1, "vlan-ctag=%#06x\n", packet->vlan_ctag);
+ }
+ } else {
+ unsigned int tnp = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, TNP);
- if (err && etlt) {
- if ((etlt == 0x05) || (etlt == 0x06))
+ if ((etlt == 0x05) || (etlt == 0x06)) {
+ axgbe_printf(1, "%s: err1 l34t %d err 0x%x etlt 0x%x\n",
+ __func__, l34t, err, etlt);
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 0);
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ TNPCSUM_DONE, 0);
+ pdata->ext_stats.rx_csum_errors++;
+ } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) {
+ axgbe_printf(1, "%s: err2 l34t %d err 0x%x etlt 0x%x\n",
+ __func__, l34t, err, etlt);
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 0);
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- CSUM_DONE, 0);
- else
+ TNPCSUM_DONE, 0);
+ pdata->ext_stats.rx_vxlan_csum_errors++;
+ } else {
+ axgbe_printf(1, "%s: tnp %d l34t %d err 0x%x etlt 0x%x\n",
+ __func__, tnp, l34t, err, etlt);
+ axgbe_printf(1, "%s: Channel: %d SR 0x%x DSR 0x%x \n",
+ __func__, channel->queue_index,
+ XGMAC_DMA_IOREAD(channel, DMA_CH_SR),
+ XGMAC_DMA_IOREAD(channel, DMA_CH_DSR));
+ axgbe_printf(1, "%s: ring cur %d dirty %d\n",
+ __func__, ring->cur, ring->dirty);
+ axgbe_printf(1, "%s: Desc 0x%08x-0x%08x-0x%08x-0x%08x\n",
+ __func__, rdesc->desc0, rdesc->desc1, rdesc->desc2,
+ rdesc->desc3);
XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
- FRAME, 1);
+ FRAME, 1);
+ }
}
- bus_dmamap_sync(ring->mbuf_dmat, rdata->mbuf_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
- DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
- ring->cur & (ring->rdesc_count - 1), ring->cur);
+ axgbe_printf(1, "<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n",
+ channel->name, ring->cur & (ring->rdesc_count - 1), ring->cur);
- return 0;
+ return (0);
}
-static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
+static int
+xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
{
/* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
- return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
+ return (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT));
}
-static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
+static int
+xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
{
/* Rx and Tx share LD bit, so check TDES3.LD bit */
- return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
+ return (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD));
}
-static int xgbe_enable_int(struct xgbe_channel *channel,
- enum xgbe_int int_id)
+static int
+xgbe_enable_int(struct xgbe_channel *channel, enum xgbe_int int_id)
{
- unsigned int dma_ch_ier;
+ struct xgbe_prv_data *pdata = channel->pdata;
- dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+ axgbe_printf(1, "enable_int: DMA_CH_IER read - 0x%x\n",
+ channel->curr_ier);
switch (int_id) {
case XGMAC_INT_DMA_CH_SR_TI:
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
break;
case XGMAC_INT_DMA_CH_SR_TPS:
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1);
break;
case XGMAC_INT_DMA_CH_SR_TBU:
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1);
break;
case XGMAC_INT_DMA_CH_SR_RI:
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
break;
case XGMAC_INT_DMA_CH_SR_RBU:
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
break;
case XGMAC_INT_DMA_CH_SR_RPS:
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1);
break;
case XGMAC_INT_DMA_CH_SR_TI_RI:
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
break;
case XGMAC_INT_DMA_CH_SR_FBE:
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
break;
case XGMAC_INT_DMA_ALL:
- dma_ch_ier |= channel->saved_ier;
+ channel->curr_ier |= channel->saved_ier;
break;
default:
- return -1;
+ return (-1);
}
- XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
+
+ axgbe_printf(1, "enable_int: DMA_CH_IER write - 0x%x\n",
+ channel->curr_ier);
- return 0;
+ return (0);
}
-static int xgbe_disable_int(struct xgbe_channel *channel,
- enum xgbe_int int_id)
+static int
+xgbe_disable_int(struct xgbe_channel *channel, enum xgbe_int int_id)
{
- unsigned int dma_ch_ier;
+ struct xgbe_prv_data *pdata = channel->pdata;
- dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+ axgbe_printf(1, "disable_int: DMA_CH_IER read - 0x%x\n",
+ channel->curr_ier);
switch (int_id) {
case XGMAC_INT_DMA_CH_SR_TI:
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
break;
case XGMAC_INT_DMA_CH_SR_TPS:
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0);
break;
case XGMAC_INT_DMA_CH_SR_TBU:
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0);
break;
case XGMAC_INT_DMA_CH_SR_RI:
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
break;
case XGMAC_INT_DMA_CH_SR_RBU:
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0);
break;
case XGMAC_INT_DMA_CH_SR_RPS:
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0);
break;
case XGMAC_INT_DMA_CH_SR_TI_RI:
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
break;
case XGMAC_INT_DMA_CH_SR_FBE:
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0);
break;
case XGMAC_INT_DMA_ALL:
- channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
- dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
+ channel->saved_ier = channel->curr_ier;
+ channel->curr_ier = 0;
break;
default:
- return -1;
+ return (-1);
}
- XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
+
+ axgbe_printf(1, "disable_int: DMA_CH_IER write - 0x%x\n",
+ channel->curr_ier);
- return 0;
+ return (0);
}
-static int xgbe_exit(struct xgbe_prv_data *pdata)
+static int
+__xgbe_exit(struct xgbe_prv_data *pdata)
{
unsigned int count = 2000;
- DBGPR("-->xgbe_exit\n");
-
/* Issue a software reset */
XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
DELAY(10);
@@ -1287,19 +1611,35 @@ static int xgbe_exit(struct xgbe_prv_data *pdata)
DELAY(500);
if (!count)
- return -EBUSY;
+ return (-EBUSY);
- DBGPR("<--xgbe_exit\n");
+ return (0);
+}
- return 0;
/*
 * Perform the software reset.  The reset is issued twice to guard
 * against possible incorrectly generated interrupts; a failure of the
 * first attempt is logged and returned without retrying.
 */
static int
xgbe_exit(struct xgbe_prv_data *pdata)
{
	int ret;

	ret = __xgbe_exit(pdata);
	if (ret == 0)
		ret = __xgbe_exit(pdata);
	else
		axgbe_error("%s: exit error %d\n", __func__, ret);

	return (ret);
}
-static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
+static int
+xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
unsigned int i, count;
if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
- return 0;
+ return (0);
for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
@@ -1312,48 +1652,52 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
DELAY(500);
if (!count)
- return -EBUSY;
+ return (-EBUSY);
}
- return 0;
+ return (0);
}
-static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
+static void
+xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
{
+ unsigned int sbmr;
+
+ sbmr = XGMAC_IOREAD(pdata, DMA_SBMR);
+
/* Set enhanced addressing mode */
- XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1);
/* Set the System Bus mode */
- XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
- XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
-}
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1);
+ XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1);
-static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
-{
- unsigned int arcache, awcache;
+ XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr);
- arcache = 0;
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
- XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
- XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
+ /* Set descriptor fetching threshold */
+ if (pdata->vdata->tx_desc_prefetch)
+ XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS,
+ pdata->vdata->tx_desc_prefetch);
+
+ if (pdata->vdata->rx_desc_prefetch)
+ XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS,
+ pdata->vdata->rx_desc_prefetch);
+}
- awcache = 0;
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
- XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
+static void
+xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
+ XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
+ if (pdata->awarcr)
+ XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr);
}
-static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
+static void
+xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
{
unsigned int i;
@@ -1363,7 +1707,7 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
/* Set Tx traffic classes to use WRR algorithm with equal weights */
for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
- MTL_TSA_ETS);
+ MTL_TSA_ETS);
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
}
@@ -1371,56 +1715,222 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}
-static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
- unsigned int queue_count)
+static void
+xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
+ unsigned int queue, unsigned int q_fifo_size)
+{
+ unsigned int frame_fifo_size;
+ unsigned int rfa, rfd;
+
+ frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));
+ axgbe_printf(1, "%s: queue %d q_fifo_size %d frame_fifo_size 0x%x\n",
+ __func__, queue, q_fifo_size, frame_fifo_size);
+
+ /* TODO - add pfc/ets related support */
+
+ /* This path deals with just maximum frame sizes which are
+ * limited to a jumbo frame of 9,000 (plus headers, etc.)
+ * so we can never exceed the maximum allowable RFA/RFD
+ * values.
+ */
+ if (q_fifo_size <= 2048) {
+ /* rx_rfd to zero to signal no flow control */
+ pdata->rx_rfa[queue] = 0;
+ pdata->rx_rfd[queue] = 0;
+ return;
+ }
+
+ if (q_fifo_size <= 4096) {
+ /* Between 2048 and 4096 */
+ pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
+ pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */
+ return;
+ }
+
+ if (q_fifo_size <= frame_fifo_size) {
+ /* Between 4096 and max-frame */
+ pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
+ pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */
+ return;
+ }
+
+ if (q_fifo_size <= (frame_fifo_size * 3)) {
+ /* Between max-frame and 3 max-frames,
+ * trigger if we get just over a frame of data and
+ * resume when we have just under half a frame left.
+ */
+ rfa = q_fifo_size - frame_fifo_size;
+ rfd = rfa + (frame_fifo_size / 2);
+ } else {
+ /* Above 3 max-frames - trigger when just over
+ * 2 frames of space available
+ */
+ rfa = frame_fifo_size * 2;
+ rfa += XGMAC_FLOW_CONTROL_UNIT;
+ rfd = rfa + frame_fifo_size;
+ }
+
+ pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
+ pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
+ axgbe_printf(1, "%s: forced queue %d rfa 0x%x rfd 0x%x\n", __func__,
+ queue, pdata->rx_rfa[queue], pdata->rx_rfd[queue]);
+}
+
+static void
+xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
+ unsigned int *fifo)
{
unsigned int q_fifo_size;
- unsigned int p_fifo;
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT;
+
+ axgbe_printf(1, "%s: fifo[%d] - 0x%x q_fifo_size 0x%x\n",
+ __func__, i, fifo[i], q_fifo_size);
+ xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
+ }
+}
- /* Calculate the configured fifo size */
- q_fifo_size = 1 << (fifo_size + 7);
+static void
+xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ axgbe_printf(1, "%s: queue %d rfa %d rfd %d\n", __func__, i,
+ pdata->rx_rfa[i], pdata->rx_rfd[i]);
+
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
+ pdata->rx_rfa[i]);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
+ pdata->rx_rfd[i]);
+
+ axgbe_printf(1, "%s: MTL_Q_RQFCR 0x%x\n", __func__,
+ XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQFCR));
+ }
+}
+static unsigned int
+xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
+{
+ /* The configured value may not be the actual amount of fifo RAM */
+ return (min_t(unsigned int, pdata->tx_max_fifo_size,
+ pdata->hw_feat.tx_fifo_size));
+}
+
+static unsigned int
+xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
+{
/* The configured value may not be the actual amount of fifo RAM */
- q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
+ return (min_t(unsigned int, pdata->rx_max_fifo_size,
+ pdata->hw_feat.rx_fifo_size));
+}
+
+static void
+xgbe_calculate_equal_fifo(unsigned int fifo_size, unsigned int queue_count,
+ unsigned int *fifo)
+{
+ unsigned int q_fifo_size;
+ unsigned int p_fifo;
+ unsigned int i;
- q_fifo_size = q_fifo_size / queue_count;
+ q_fifo_size = fifo_size / queue_count;
- /* Each increment in the queue fifo size represents 256 bytes of
- * fifo, with 0 representing 256 bytes. Distribute the fifo equally
- * between the queues.
+ /* Calculate the fifo setting by dividing the queue's fifo size
+ * by the fifo allocation increment (with 0 representing the
+ * base allocation increment so decrement the result by 1).
*/
- p_fifo = q_fifo_size / 256;
+ p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;
if (p_fifo)
p_fifo--;
- return p_fifo;
+ /* Distribute the fifo equally amongst the queues */
+ for (i = 0; i < queue_count; i++)
+ fifo[i] = p_fifo;
}
-static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
+static unsigned int
+xgbe_set_nonprio_fifos(unsigned int fifo_size, unsigned int queue_count,
+ unsigned int *fifo)
+{
+ unsigned int i;
+
+ MPASS(powerof2(XGMAC_FIFO_MIN_ALLOC));
+
+ if (queue_count <= IEEE_8021QAZ_MAX_TCS)
+ return (fifo_size);
+
+ /* Rx queues 9 and up are for specialized packets,
+ * such as PTP or DCB control packets, etc. and
+ * don't require a large fifo
+ */
+ for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
+ fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
+ fifo_size -= XGMAC_FIFO_MIN_ALLOC;
+ }
+
+ return (fifo_size);
+}
+
+static void
+xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
unsigned int fifo_size;
+ unsigned int fifo[XGBE_MAX_QUEUES];
unsigned int i;
- fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
- pdata->tx_q_count);
+ fifo_size = xgbe_get_tx_fifo_size(pdata);
+ axgbe_printf(1, "%s: fifo_size 0x%x\n", __func__, fifo_size);
- for (i = 0; i < pdata->tx_q_count; i++)
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
+ xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);
+
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);
+ axgbe_printf(1, "Tx q %d FIFO Size 0x%x\n", i,
+ XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQOMR));
+ }
+
+ axgbe_printf(1, "%d Tx hardware queues, %d byte fifo per queue\n",
+ pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
}
-static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
+static void
+xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
unsigned int fifo_size;
+ unsigned int fifo[XGBE_MAX_QUEUES];
+ unsigned int prio_queues;
unsigned int i;
- fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
- pdata->rx_q_count);
+ /* TODO - add pfc/ets related support */
- for (i = 0; i < pdata->rx_q_count; i++)
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
+ /* Clear any DCB related fifo/queue information */
+ fifo_size = xgbe_get_rx_fifo_size(pdata);
+ prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
+ axgbe_printf(1, "%s: fifo_size 0x%x rx_q_cnt %d prio %d\n", __func__,
+ fifo_size, pdata->rx_q_count, prio_queues);
+
+ /* Assign a minimum fifo to the non-VLAN priority queues */
+ fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);
+
+ xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);
+ axgbe_printf(1, "Rx q %d FIFO Size 0x%x\n", i,
+ XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQOMR));
+ }
+
+ xgbe_calculate_flow_control_threshold(pdata, fifo);
+ xgbe_config_flow_control_threshold(pdata);
+
+ axgbe_printf(1, "%u Rx hardware queues, %u byte fifo/queue\n",
+ pdata->rx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
}
-static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
+static void
+xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
{
unsigned int qptc, qptc_extra, queue;
unsigned int prio_queues;
@@ -1436,21 +1946,22 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
for (j = 0; j < qptc; j++) {
+ axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i);
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
- Q2TCMAP, i);
+ Q2TCMAP, i);
pdata->q2tc_map[queue++] = i;
}
if (i < qptc_extra) {
+ axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i);
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
- Q2TCMAP, i);
+ Q2TCMAP, i);
pdata->q2tc_map[queue++] = i;
}
}
/* Map the 8 VLAN priority values to available MTL Rx queues */
- prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
- pdata->rx_q_count);
+ prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
@@ -1459,11 +1970,13 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
for (i = 0, prio = 0; i < prio_queues;) {
mask = 0;
for (j = 0; j < ppq; j++) {
+ axgbe_printf(1, "PRIO%u mapped to RXq%u\n", prio, i);
mask |= (1 << prio);
pdata->prio2q_map[prio++] = i;
}
if (i < ppq_extra) {
+ axgbe_printf(1, "PRIO%u mapped to RXq%u\n", prio, i);
mask |= (1 << prio);
pdata->prio2q_map[prio++] = i;
}
@@ -1494,26 +2007,21 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
}
}
-static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
+static void
+xgbe_config_mac_address(struct xgbe_prv_data *pdata)
{
- unsigned int i;
-
- for (i = 0; i < pdata->rx_q_count; i++) {
- /* Activate flow control when less than 4k left in fifo */
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
+ xgbe_set_mac_address(pdata, IF_LLADDR(pdata->netdev));
- /* De-activate flow control when more than 6k left in fifo */
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
+ /* Filtering is done using perfect filtering and hash filtering */
+ if (pdata->hw_feat.hash_table_size) {
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
}
}
-static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
-{
-
- xgbe_set_mac_address(pdata, IF_LLADDR(pdata->netdev));
-}
-
-static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
+static void
+xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
unsigned int val;
@@ -1522,36 +2030,23 @@ static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}
-static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
+static void
+xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
{
- switch (pdata->phy_speed) {
- case SPEED_10000:
- xgbe_set_xgmii_speed(pdata);
- break;
-
- case SPEED_2500:
- xgbe_set_gmii_2500_speed(pdata);
- break;
-
- case SPEED_1000:
- xgbe_set_gmii_speed(pdata);
- break;
- case SPEED_UNKNOWN:
- break;
- default:
- panic("TODO %s:%d\n", __FILE__, __LINE__);
- }
+ xgbe_set_speed(pdata, pdata->phy_speed);
}
-static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
+static void
+xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
- if ((if_getcapenable(pdata->netdev) & IFCAP_RXCSUM) != 0)
+ if ((if_getcapenable(pdata->netdev) & IFCAP_RXCSUM))
xgbe_enable_rx_csum(pdata);
else
xgbe_disable_rx_csum(pdata);
}
-static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
+static void
+xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
{
/* Indicate that VLAN Tx CTAGs come from context descriptors */
XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
@@ -1560,213 +2055,246 @@ static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
/* Set the current VLAN Hash Table register value */
xgbe_update_vlan_hash_table(pdata);
- xgbe_disable_rx_vlan_filtering(pdata);
- xgbe_disable_rx_vlan_stripping(pdata);
+ if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWFILTER)) {
+ axgbe_printf(1, "Enabling rx vlan filtering\n");
+ xgbe_enable_rx_vlan_filtering(pdata);
+ } else {
+ axgbe_printf(1, "Disabling rx vlan filtering\n");
+ xgbe_disable_rx_vlan_filtering(pdata);
+ }
+
+ if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWTAGGING)) {
+ axgbe_printf(1, "Enabling rx vlan stripping\n");
+ xgbe_enable_rx_vlan_stripping(pdata);
+ } else {
+ axgbe_printf(1, "Disabling rx vlan stripping\n");
+ xgbe_disable_rx_vlan_stripping(pdata);
+ }
}
-static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
+static uint64_t
+xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
{
bool read_hi;
- u64 val;
-
- switch (reg_lo) {
- /* These registers are always 64 bit */
- case MMC_TXOCTETCOUNT_GB_LO:
- case MMC_TXOCTETCOUNT_G_LO:
- case MMC_RXOCTETCOUNT_GB_LO:
- case MMC_RXOCTETCOUNT_G_LO:
- read_hi = true;
- break;
+ uint64_t val;
+
+ if (pdata->vdata->mmc_64bit) {
+ switch (reg_lo) {
+ /* These registers are always 32 bit */
+ case MMC_RXRUNTERROR:
+ case MMC_RXJABBERERROR:
+ case MMC_RXUNDERSIZE_G:
+ case MMC_RXOVERSIZE_G:
+ case MMC_RXWATCHDOGERROR:
+ read_hi = false;
+ break;
- default:
- read_hi = false;
+ default:
+ read_hi = true;
+ }
+ } else {
+ switch (reg_lo) {
+ /* These registers are always 64 bit */
+ case MMC_TXOCTETCOUNT_GB_LO:
+ case MMC_TXOCTETCOUNT_G_LO:
+ case MMC_RXOCTETCOUNT_GB_LO:
+ case MMC_RXOCTETCOUNT_G_LO:
+ read_hi = true;
+ break;
+
+ default:
+ read_hi = false;
+ }
}
val = XGMAC_IOREAD(pdata, reg_lo);
if (read_hi)
- val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
+ val |= ((uint64_t)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
- return val;
+ return (val);
}
-static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
+static void
+xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
stats->txoctetcount_gb +=
- xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
stats->txframecount_gb +=
- xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
stats->txbroadcastframes_g +=
- xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
stats->txmulticastframes_g +=
- xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
stats->tx64octets_gb +=
- xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
stats->tx65to127octets_gb +=
- xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
stats->tx128to255octets_gb +=
- xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
stats->tx256to511octets_gb +=
- xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
stats->tx512to1023octets_gb +=
- xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
stats->tx1024tomaxoctets_gb +=
- xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
stats->txunicastframes_gb +=
- xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
stats->txmulticastframes_gb +=
- xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
stats->txbroadcastframes_g +=
- xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
stats->txunderflowerror +=
- xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+ xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
stats->txoctetcount_g +=
- xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
stats->txframecount_g +=
- xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
stats->txpauseframes +=
- xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
+ xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
stats->txvlanframes_g +=
- xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}
-static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
+static void
+xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
{
struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
stats->rxframecount_gb +=
- xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
stats->rxoctetcount_gb +=
- xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
stats->rxoctetcount_g +=
- xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
stats->rxbroadcastframes_g +=
- xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
stats->rxmulticastframes_g +=
- xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
stats->rxcrcerror +=
- xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
+ xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
stats->rxrunterror +=
- xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
+ xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
stats->rxjabbererror +=
- xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
+ xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
stats->rxundersize_g +=
- xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
+ xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
stats->rxoversize_g +=
- xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
+ xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
stats->rx64octets_gb +=
- xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
stats->rx65to127octets_gb +=
- xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
stats->rx128to255octets_gb +=
- xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
stats->rx256to511octets_gb +=
- xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
stats->rx512to1023octets_gb +=
- xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
stats->rx1024tomaxoctets_gb +=
- xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
stats->rxunicastframes_g +=
- xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
stats->rxlengtherror +=
- xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
+ xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
stats->rxoutofrangetype +=
- xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
+ xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
stats->rxpauseframes +=
- xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
+ xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
stats->rxfifooverflow +=
- xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
+ xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
stats->rxvlanframes_gb +=
- xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
stats->rxwatchdogerror +=
- xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+ xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}
-static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
+static void
+xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
{
struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
@@ -1774,133 +2302,134 @@ static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
stats->txoctetcount_gb +=
- xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
stats->txframecount_gb +=
- xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
stats->txbroadcastframes_g +=
- xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
stats->txmulticastframes_g +=
- xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
stats->tx64octets_gb +=
- xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
stats->tx65to127octets_gb +=
- xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
stats->tx128to255octets_gb +=
- xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
stats->tx256to511octets_gb +=
- xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
stats->tx512to1023octets_gb +=
- xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
stats->tx1024tomaxoctets_gb +=
- xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
stats->txunicastframes_gb +=
- xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
stats->txmulticastframes_gb +=
- xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
- stats->txbroadcastframes_g +=
- xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+ stats->txbroadcastframes_gb +=
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
stats->txunderflowerror +=
- xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+ xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
stats->txoctetcount_g +=
- xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
stats->txframecount_g +=
- xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
stats->txpauseframes +=
- xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
+ xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
stats->txvlanframes_g +=
- xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
stats->rxframecount_gb +=
- xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
stats->rxoctetcount_gb +=
- xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
stats->rxoctetcount_g +=
- xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
stats->rxbroadcastframes_g +=
- xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
stats->rxmulticastframes_g +=
- xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
stats->rxcrcerror +=
- xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
+ xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
stats->rxrunterror +=
- xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
+ xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
stats->rxjabbererror +=
- xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
+ xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
stats->rxundersize_g +=
- xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
+ xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
stats->rxoversize_g +=
- xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
+ xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
stats->rx64octets_gb +=
- xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
stats->rx65to127octets_gb +=
- xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
stats->rx128to255octets_gb +=
- xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
stats->rx256to511octets_gb +=
- xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
stats->rx512to1023octets_gb +=
- xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
stats->rx1024tomaxoctets_gb +=
- xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
stats->rxunicastframes_g +=
- xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
stats->rxlengtherror +=
- xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
+ xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
stats->rxoutofrangetype +=
- xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
+ xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
stats->rxpauseframes +=
- xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
+ xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
stats->rxfifooverflow +=
- xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
+ xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
stats->rxvlanframes_gb +=
- xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
stats->rxwatchdogerror +=
- xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+ xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
/* Un-freeze counters */
XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}
-static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
+static void
+xgbe_config_mmc(struct xgbe_prv_data *pdata)
{
/* Set counters to reset on read */
XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
@@ -1909,20 +2438,47 @@ static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}
-static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
- struct xgbe_channel *channel)
+static void
+xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
+{
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * packets. Wait for the Tx queue to empty the Tx fifo. Don't
+ * wait forever though...
+ */
+ tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
+ while (ticks < tx_timeout) {
+ tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
+ if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
+ (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
+ break;
+
+ DELAY(500);
+ }
+
+ if (ticks >= tx_timeout)
+ axgbe_printf(1, "timed out waiting for Tx queue %u to empty\n",
+ queue);
+}
+
+static void
+xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
{
unsigned int tx_dsr, tx_pos, tx_qidx;
unsigned int tx_status;
unsigned long tx_timeout;
+ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
+ return (xgbe_txq_prepare_tx_stop(pdata, queue));
+
/* Calculate the status register to read and the position within */
- if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
+ if (queue < DMA_DSRX_FIRST_QUEUE) {
tx_dsr = DMA_DSR0;
- tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) +
- DMA_DSR0_TPS_START;
+ tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
} else {
- tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
+ tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;
tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
@@ -1943,44 +2499,42 @@ static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
DELAY(500);
}
+
+ if (ticks >= tx_timeout)
+ axgbe_printf(1, "timed out waiting for Tx DMA channel %u to stop\n",
+ queue);
}
-static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
+static void
+xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Enable each Tx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
}
/* Enable each Tx queue */
for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
- MTL_Q_ENABLED);
+ MTL_Q_ENABLED);
/* Enable MAC Tx */
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}
-static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
+static void
+xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Prepare for Tx DMA channel stop */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
- break;
-
- xgbe_prepare_tx_stop(pdata, channel);
- }
+ for (i = 0; i < pdata->tx_q_count; i++)
+ xgbe_prepare_tx_stop(pdata, i);
/* Disable MAC Tx */
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
@@ -1990,17 +2544,16 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
/* Disable each Tx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
}
}
-static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
- unsigned int queue)
+static void
+xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
{
unsigned int rx_status;
unsigned long rx_timeout;
@@ -2018,20 +2571,23 @@ static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
DELAY(500);
}
+
+ if (ticks >= rx_timeout)
+ axgbe_printf(1, "timed out waiting for Rx queue %d to empty\n",
+ queue);
}
-static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
+static void
+xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int reg_val, i;
/* Enable each Rx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
}
/* Enable each Rx queue */
@@ -2047,9 +2603,9 @@ static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}
-static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
+static void
+xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Disable MAC Rx */
@@ -2066,101 +2622,92 @@ static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
/* Disable each Rx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
}
}
-static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
+static void
+xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Enable each Tx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
}
/* Enable MAC Tx */
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}
-static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
+static void
+xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Prepare for Tx DMA channel stop */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
- break;
-
- xgbe_prepare_tx_stop(pdata, channel);
- }
+ for (i = 0; i < pdata->tx_q_count; i++)
+ xgbe_prepare_tx_stop(pdata, i);
/* Disable MAC Tx */
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
/* Disable each Tx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->tx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
}
}
-static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
+static void
+xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Enable each Rx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
}
}
-static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
+static void
+xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
unsigned int i;
/* Disable each Rx DMA channel */
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->rx_ring)
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
break;
- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
}
}
-static int xgbe_init(struct xgbe_prv_data *pdata)
+static int
+xgbe_init(struct xgbe_prv_data *pdata)
{
struct xgbe_desc_if *desc_if = &pdata->desc_if;
int ret;
- DBGPR("-->xgbe_init\n");
-
/* Flush Tx queues */
ret = xgbe_flush_tx_queues(pdata);
- if (ret)
- return ret;
+ if (ret) {
+ axgbe_error("error flushing TX queues\n");
+ return (ret);
+ }
/*
* Initialize DMA related features
@@ -2168,9 +2715,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_dma_bus(pdata);
xgbe_config_dma_cache(pdata);
xgbe_config_osp_mode(pdata);
- xgbe_config_pblx8(pdata);
- xgbe_config_tx_pbl_val(pdata);
- xgbe_config_rx_pbl_val(pdata);
+ xgbe_config_pbl_val(pdata);
xgbe_config_rx_coalesce(pdata);
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
@@ -2192,7 +2737,6 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
xgbe_config_tx_fifo_size(pdata);
xgbe_config_rx_fifo_size(pdata);
- xgbe_config_flow_control_threshold(pdata);
/*TODO: Error Packet and undersized good Packet forwarding enable
(FEP and FUP)
*/
@@ -2211,14 +2755,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_mmc(pdata);
xgbe_enable_mac_interrupts(pdata);
- DBGPR("<--xgbe_init\n");
-
- return 0;
+ return (0);
}
-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
+void
+xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
{
- DBGPR("-->xgbe_init_function_ptrs\n");
hw_if->tx_complete = xgbe_tx_complete;
@@ -2237,9 +2779,14 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->read_mmd_regs = xgbe_read_mmd_regs;
hw_if->write_mmd_regs = xgbe_write_mmd_regs;
- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
+ hw_if->set_speed = xgbe_set_speed;
+
+ hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
+ hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
+ hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;
+
+ hw_if->set_gpio = xgbe_set_gpio;
+ hw_if->clr_gpio = xgbe_clr_gpio;
hw_if->enable_tx = xgbe_enable_tx;
hw_if->disable_tx = xgbe_disable_tx;
@@ -2251,7 +2798,6 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->powerup_rx = xgbe_powerup_rx;
hw_if->powerdown_rx = xgbe_powerdown_rx;
- hw_if->dev_xmit = xgbe_dev_xmit;
hw_if->dev_read = xgbe_dev_read;
hw_if->enable_int = xgbe_enable_int;
hw_if->disable_int = xgbe_disable_int;
@@ -2262,10 +2808,8 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->tx_desc_init = xgbe_tx_desc_init;
hw_if->rx_desc_init = xgbe_rx_desc_init;
hw_if->tx_desc_reset = xgbe_tx_desc_reset;
- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
hw_if->is_last_desc = xgbe_is_last_desc;
hw_if->is_context_desc = xgbe_is_context_desc;
- hw_if->tx_start_xmit = xgbe_tx_start_xmit;
/* For FLOW ctrl */
hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
@@ -2288,20 +2832,14 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
/* For TX DMA Operating on Second Frame config */
hw_if->config_osp_mode = xgbe_config_osp_mode;
- /* For RX and TX PBL config */
- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
- hw_if->config_pblx8 = xgbe_config_pblx8;
-
/* For MMC statistics support */
hw_if->tx_mmc_int = xgbe_tx_mmc_int;
hw_if->rx_mmc_int = xgbe_rx_mmc_int;
hw_if->read_mmc_stats = xgbe_read_mmc_stats;
/* For Receive Side Scaling */
+ hw_if->enable_rss = xgbe_enable_rss;
hw_if->disable_rss = xgbe_disable_rss;
-
- DBGPR("<--xgbe_init_function_ptrs\n");
+ hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
+ hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
}
diff --git a/sys/dev/axgbe/xgbe-drv.c b/sys/dev/axgbe/xgbe-drv.c
index 81f8f30b927a..017c3c9bc6ac 100644
--- a/sys/dev/axgbe/xgbe-drv.c
+++ b/sys/dev/axgbe/xgbe-drv.c
@@ -1,13 +1,13 @@
/*
* AMD 10Gb Ethernet driver
*
+ * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc.
+ *
* This file is available to you under your choice of the following two
* licenses:
*
* License 1: GPLv2
*
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or (at
@@ -56,9 +56,6 @@
*
* License 2: Modified BSD
*
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
@@ -117,253 +114,27 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#include <sys/param.h>
-#include <sys/kernel.h>
-
#include "xgbe.h"
#include "xgbe-common.h"
-static int xgbe_one_poll(struct xgbe_channel *channel, int budget);
-static int xgbe_all_poll(struct xgbe_prv_data *pdata, int budget);
-
-static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
-{
- struct xgbe_channel *channel_mem, *channel;
- struct xgbe_ring *tx_ring, *rx_ring;
- unsigned int count, i;
- int ret = -ENOMEM;
-
- count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
-
- channel_mem = malloc(count * sizeof(struct xgbe_channel), M_AXGBE,
- M_WAITOK | M_ZERO);
- tx_ring = malloc(pdata->tx_ring_count * sizeof(struct xgbe_ring),
- M_AXGBE, M_WAITOK | M_ZERO);
- rx_ring = malloc(pdata->rx_ring_count * sizeof(struct xgbe_ring),
- M_AXGBE, M_WAITOK | M_ZERO);
-
- for (i = 0, channel = channel_mem; i < count; i++, channel++) {
- snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
- channel->pdata = pdata;
- channel->queue_index = i;
- channel->dma_tag = rman_get_bustag(pdata->xgmac_res);
- bus_space_subregion(channel->dma_tag,
- rman_get_bushandle(pdata->xgmac_res),
- DMA_CH_BASE + (DMA_CH_INC * i), DMA_CH_INC,
- &channel->dma_handle);
-
- if (pdata->per_channel_irq) {
- if (pdata->chan_irq_res[i] == NULL)
- goto err_irq;
-
- channel->dma_irq_res = pdata->chan_irq_res[i];
- }
-
- if (i < pdata->tx_ring_count) {
- spin_lock_init(&tx_ring->lock);
- channel->tx_ring = tx_ring++;
- }
-
- if (i < pdata->rx_ring_count) {
- spin_lock_init(&rx_ring->lock);
- channel->rx_ring = rx_ring++;
- }
- }
-
- pdata->channel = channel_mem;
- pdata->channel_count = count;
-
- return 0;
-
-err_irq:
- free(rx_ring, M_AXGBE);
- free(tx_ring, M_AXGBE);
- free(channel_mem, M_AXGBE);
-
- return ret;
-}
-
-static void xgbe_free_channels(struct xgbe_prv_data *pdata)
-{
- if (!pdata->channel)
- return;
-
- free(pdata->channel->rx_ring, M_AXGBE);
- free(pdata->channel->tx_ring, M_AXGBE);
- free(pdata->channel, M_AXGBE);
-
- pdata->channel = NULL;
- pdata->channel_count = 0;
-}
-
-static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
-{
- return (ring->rdesc_count - (ring->cur - ring->dirty));
-}
-
-static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
-{
- return (ring->cur - ring->dirty);
-}
-
-static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
- struct xgbe_ring *ring, unsigned int count)
-{
- struct xgbe_prv_data *pdata = channel->pdata;
-
- if (count > xgbe_tx_avail_desc(ring)) {
- /* If we haven't notified the hardware because of xmit_more
- * support, tell it now
- */
- if (ring->tx.xmit_more)
- pdata->hw_if.tx_start_xmit(channel, ring);
-
- return EFBIG;
- }
-
- return 0;
-}
-
-static int xgbe_calc_rx_buf_size(struct ifnet *netdev, unsigned int mtu)
+int
+xgbe_calc_rx_buf_size(struct ifnet *netdev, unsigned int mtu)
{
unsigned int rx_buf_size;
- if (mtu > XGMAC_JUMBO_PACKET_MTU) {
- return -EINVAL;
- }
+ if (mtu > XGMAC_JUMBO_PACKET_MTU)
+ return (-EINVAL);
rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- rx_buf_size = MIN(XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
-
- rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
- ~(XGBE_RX_BUF_ALIGN - 1);
-
- return rx_buf_size;
-}
-
-static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
-{
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_channel *channel;
- enum xgbe_int int_id;
- unsigned int i;
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (channel->tx_ring && channel->rx_ring)
- int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
- else if (channel->tx_ring)
- int_id = XGMAC_INT_DMA_CH_SR_TI;
- else if (channel->rx_ring)
- int_id = XGMAC_INT_DMA_CH_SR_RI;
- else
- continue;
+ rx_buf_size = min(max(rx_buf_size, XGBE_RX_MIN_BUF_SIZE), PAGE_SIZE);
+ rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
+ ~(XGBE_RX_BUF_ALIGN - 1);
- hw_if->enable_int(channel, int_id);
- }
+ return (rx_buf_size);
}
-static void xgbe_isr(void *data)
-{
- struct xgbe_prv_data *pdata = data;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_channel *channel;
- unsigned int dma_isr, dma_ch_isr;
- unsigned int mac_isr;
- unsigned int i;
-
- /* The DMA interrupt status register also reports MAC and MTL
- * interrupts. So for polling mode, we just need to check for
- * this register to be non-zero
- */
- dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
- if (!dma_isr)
- return;
-
- for (i = 0; i < pdata->channel_count; i++) {
- if (!(dma_isr & (1 << i)))
- continue;
-
- channel = pdata->channel + i;
-
- dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
-
- /* The TI or RI interrupt bits may still be set even if using
- * per channel DMA interrupts. Check to be sure those are not
- * enabled before using the private data napi structure.
- */
- if (!pdata->per_channel_irq &&
- (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
- XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
- xgbe_all_poll(pdata, 16);
- }
-
- if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
- pdata->ext_stats.rx_buffer_unavailable++;
-
- /* Restart the device on a Fatal Bus Error */
- if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
- taskqueue_enqueue(taskqueue_thread,
- &pdata->restart_work);
-
- /* Clear all interrupt signals */
- XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
- }
-
- if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
- mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
-
- if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
- hw_if->tx_mmc_int(pdata);
-
- if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
- hw_if->rx_mmc_int(pdata);
- }
-}
-
-static void xgbe_dma_isr(void *data)
-{
- struct xgbe_channel *channel = data;
-
- xgbe_one_poll(channel, 16);
-}
-
-static void xgbe_service(void *ctx, int pending)
-{
- struct xgbe_prv_data *pdata = ctx;
-
- pdata->phy_if.phy_status(pdata);
-}
-
-static void xgbe_service_timer(void *data)
-{
- struct xgbe_prv_data *pdata = data;
-
- DBGPR("--> xgbe_service_timer\n");
- taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work);
-
- callout_reset(&pdata->service_timer, hz, xgbe_service_timer, pdata);
- DBGPR("<-- xgbe_service_timer\n");
-}
-
-static void xgbe_init_timers(struct xgbe_prv_data *pdata)
-{
-
- callout_init(&pdata->service_timer, 1);
-}
-
-static void xgbe_start_timers(struct xgbe_prv_data *pdata)
-{
- callout_reset(&pdata->service_timer, hz, xgbe_service_timer, pdata);
-}
-
-static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
-{
-
- callout_drain(&pdata->service_timer);
-}
-
-void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
+void
+xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
@@ -379,35 +150,36 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);
/* Hardware feature register 0 */
- hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
- hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
- hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
- hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
- hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
- hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
- hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
- hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
- hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
- hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
- hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
- hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
+ hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
+ hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
+ hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
+ hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
+ hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
+ hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
+ hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
+ hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
+ hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
+ hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
+ hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
+ hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
ADDMACADRSEL);
- hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
+ hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
+ hw_feat->vxn = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN);
/* Hardware feature register 1 */
- hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
RXFIFOSIZE);
- hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
TXFIFOSIZE);
- hw_feat->adv_ts_hi = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
- hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
- hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
- hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
- hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
- hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
- hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
- hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
+ hw_feat->adv_ts_hi = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
+ hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
+ hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
+ hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
+ hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
+ hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+ hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
+ hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
HASHTBLSZ);
hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
@@ -460,65 +232,91 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
hw_feat->tx_ch_cnt++;
hw_feat->tc_cnt++;
- DBGPR("<--xgbe_get_all_hw_features\n");
-}
-
-static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
-{
- struct xgbe_channel *channel;
- unsigned int i;
- int ret;
+ /* Translate the fifo sizes into actual numbers */
+ hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
+ hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
+ DBGPR("%s: Tx fifo 0x%x Rx fifo 0x%x\n", __func__,
+ hw_feat->tx_fifo_size, hw_feat->rx_fifo_size);
- ret = bus_setup_intr(pdata->dev, pdata->dev_irq_res,
- INTR_MPSAFE | INTR_TYPE_NET, NULL, xgbe_isr, pdata,
- &pdata->dev_irq_tag);
- if (ret) {
- return ret;
- }
+ DBGPR("Hardware features:\n");
- if (!pdata->per_channel_irq)
- return 0;
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- ret = bus_setup_intr(pdata->dev, channel->dma_irq_res,
- INTR_MPSAFE | INTR_TYPE_NET, NULL, xgbe_dma_isr, channel,
- &channel->dma_irq_tag);
- if (ret != 0) {
- goto err_irq;
- }
- }
-
- return 0;
-
-err_irq:
- /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
- for (i--, channel--; i < pdata->channel_count; i--, channel--)
- bus_teardown_intr(pdata->dev, channel->dma_irq_res,
- channel->dma_irq_tag);
-
- bus_teardown_intr(pdata->dev, pdata->dev_irq_res, pdata->dev_irq_tag);
-
- return -ret;
-}
-
-static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
-{
- struct xgbe_channel *channel;
- unsigned int i;
-
- bus_teardown_intr(pdata->dev, pdata->dev_irq_res, pdata->dev_irq_tag);
+ /* Hardware feature register 0 */
+ DBGPR(" 1GbE support : %s\n",
+ hw_feat->gmii ? "yes" : "no");
+ DBGPR(" VLAN hash filter : %s\n",
+ hw_feat->vlhash ? "yes" : "no");
+ DBGPR(" MDIO interface : %s\n",
+ hw_feat->sma ? "yes" : "no");
+ DBGPR(" Wake-up packet support : %s\n",
+ hw_feat->rwk ? "yes" : "no");
+ DBGPR(" Magic packet support : %s\n",
+ hw_feat->mgk ? "yes" : "no");
+ DBGPR(" Management counters : %s\n",
+ hw_feat->mmc ? "yes" : "no");
+ DBGPR(" ARP offload : %s\n",
+ hw_feat->aoe ? "yes" : "no");
+ DBGPR(" IEEE 1588-2008 Timestamp : %s\n",
+ hw_feat->ts ? "yes" : "no");
+ DBGPR(" Energy Efficient Ethernet : %s\n",
+ hw_feat->eee ? "yes" : "no");
+ DBGPR(" TX checksum offload : %s\n",
+ hw_feat->tx_coe ? "yes" : "no");
+ DBGPR(" RX checksum offload : %s\n",
+ hw_feat->rx_coe ? "yes" : "no");
+ DBGPR(" Additional MAC addresses : %u\n",
+ hw_feat->addn_mac);
+ DBGPR(" Timestamp source : %s\n",
+ (hw_feat->ts_src == 1) ? "internal" :
+ (hw_feat->ts_src == 2) ? "external" :
+ (hw_feat->ts_src == 3) ? "internal/external" : "n/a");
+ DBGPR(" SA/VLAN insertion : %s\n",
+ hw_feat->sa_vlan_ins ? "yes" : "no");
- if (!pdata->per_channel_irq)
- return;
+ /* Hardware feature register 1 */
+ DBGPR(" RX fifo size : %u\n",
+ hw_feat->rx_fifo_size);
+ DBGPR(" TX fifo size : %u\n",
+ hw_feat->tx_fifo_size);
+ DBGPR(" IEEE 1588 high word : %s\n",
+ hw_feat->adv_ts_hi ? "yes" : "no");
+ DBGPR(" DMA width : %u\n",
+ hw_feat->dma_width);
+ DBGPR(" Data Center Bridging : %s\n",
+ hw_feat->dcb ? "yes" : "no");
+ DBGPR(" Split header : %s\n",
+ hw_feat->sph ? "yes" : "no");
+ DBGPR(" TCP Segmentation Offload : %s\n",
+ hw_feat->tso ? "yes" : "no");
+ DBGPR(" Debug memory interface : %s\n",
+ hw_feat->dma_debug ? "yes" : "no");
+ DBGPR(" Receive Side Scaling : %s\n",
+ hw_feat->rss ? "yes" : "no");
+ DBGPR(" Traffic Class count : %u\n",
+ hw_feat->tc_cnt);
+ DBGPR(" Hash table size : %u\n",
+ hw_feat->hash_table_size);
+ DBGPR(" L3/L4 Filters : %u\n",
+ hw_feat->l3l4_filter_num);
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- bus_teardown_intr(pdata->dev, channel->dma_irq_res,
- channel->dma_irq_tag);
+ /* Hardware feature register 2 */
+ DBGPR(" RX queue count : %u\n",
+ hw_feat->rx_q_cnt);
+ DBGPR(" TX queue count : %u\n",
+ hw_feat->tx_q_cnt);
+ DBGPR(" RX DMA channel count : %u\n",
+ hw_feat->rx_ch_cnt);
+	DBGPR("  TX DMA channel count      : %u\n",
+	    hw_feat->tx_ch_cnt);
+ DBGPR(" PPS outputs : %u\n",
+ hw_feat->pps_out_num);
+ DBGPR(" Auxiliary snapshot inputs : %u\n",
+ hw_feat->aux_snap_num);
+
+ DBGPR("<--xgbe_get_all_hw_features\n");
}
-void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
+void
+xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -532,7 +330,8 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_init_tx_coalesce\n");
}
-void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
+void
+xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -546,531 +345,3 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_init_rx_coalesce\n");
}
-
-static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
-{
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel;
- struct xgbe_ring *ring;
- struct xgbe_ring_data *rdata;
- unsigned int i, j;
-
- DBGPR("-->xgbe_free_tx_data\n");
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- ring = channel->tx_ring;
- if (!ring)
- break;
-
- for (j = 0; j < ring->rdesc_count; j++) {
- rdata = XGBE_GET_DESC_DATA(ring, j);
- desc_if->unmap_rdata(pdata, rdata);
- }
- }
-
- DBGPR("<--xgbe_free_tx_data\n");
-}
-
-static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
-{
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel;
- struct xgbe_ring *ring;
- struct xgbe_ring_data *rdata;
- unsigned int i, j;
-
- DBGPR("-->xgbe_free_rx_data\n");
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- ring = channel->rx_ring;
- if (!ring)
- break;
-
- for (j = 0; j < ring->rdesc_count; j++) {
- rdata = XGBE_GET_DESC_DATA(ring, j);
- desc_if->unmap_rdata(pdata, rdata);
- }
- }
-
- DBGPR("<--xgbe_free_rx_data\n");
-}
-
-static int xgbe_phy_init(struct xgbe_prv_data *pdata)
-{
- pdata->phy_link = -1;
- pdata->phy_speed = SPEED_UNKNOWN;
-
- return pdata->phy_if.phy_reset(pdata);
-}
-
-static int xgbe_start(struct xgbe_prv_data *pdata)
-{
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_phy_if *phy_if = &pdata->phy_if;
- int ret;
-
- DBGPR("-->xgbe_start\n");
-
- hw_if->init(pdata);
-
- ret = phy_if->phy_start(pdata);
- if (ret)
- goto err_phy;
-
- ret = xgbe_request_irqs(pdata);
- if (ret)
- goto err_napi;
-
- hw_if->enable_tx(pdata);
- hw_if->enable_rx(pdata);
-
- xgbe_enable_rx_tx_ints(pdata);
-
- xgbe_start_timers(pdata);
- taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work);
-
- DBGPR("<--xgbe_start\n");
-
- return 0;
-
-err_napi:
- phy_if->phy_stop(pdata);
-
-err_phy:
- hw_if->exit(pdata);
-
- return ret;
-}
-
-static void xgbe_stop(struct xgbe_prv_data *pdata)
-{
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_phy_if *phy_if = &pdata->phy_if;
-
- DBGPR("-->xgbe_stop\n");
-
- xgbe_stop_timers(pdata);
- taskqueue_drain_all(pdata->dev_workqueue);
-
- hw_if->disable_tx(pdata);
- hw_if->disable_rx(pdata);
-
- xgbe_free_irqs(pdata);
-
- phy_if->phy_stop(pdata);
-
- hw_if->exit(pdata);
-
- DBGPR("<--xgbe_stop\n");
-}
-
-static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
-{
- DBGPR("-->xgbe_restart_dev\n");
-
- /* If not running, "restart" will happen on open */
- if ((pdata->netdev->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
-
- xgbe_stop(pdata);
-
- xgbe_free_tx_data(pdata);
- xgbe_free_rx_data(pdata);
-
- xgbe_start(pdata);
-
- DBGPR("<--xgbe_restart_dev\n");
-}
-
-static void xgbe_restart(void *ctx, int pending)
-{
- struct xgbe_prv_data *pdata = ctx;
-
- xgbe_restart_dev(pdata);
-}
-
-static void xgbe_packet_info(struct xgbe_prv_data *pdata,
- struct xgbe_ring *ring, struct mbuf *m0,
- struct xgbe_packet_data *packet)
-{
- struct mbuf *m;
- unsigned int len;
-
- packet->m = m0;
-
- packet->rdesc_count = 0;
-
- packet->tx_packets = 1;
- packet->tx_bytes = m_length(m0, NULL);
-
- for (m = m0; m != NULL; m = m->m_next) {
- for (len = m->m_len; len != 0;) {
- packet->rdesc_count++;
- len -= MIN(len, XGBE_TX_MAX_BUF_SIZE);
- }
- }
-}
-
-int xgbe_open(struct ifnet *netdev)
-{
- struct xgbe_prv_data *pdata = netdev->if_softc;
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
- int ret;
-
- DBGPR("-->xgbe_open\n");
-
- /* Initialize the phy */
- ret = xgbe_phy_init(pdata);
- if (ret)
- return ret;
-
- /* Calculate the Rx buffer size before allocating rings */
- ret = xgbe_calc_rx_buf_size(netdev, if_getmtu(netdev));
- if (ret < 0) {
- goto err_ptpclk;
- }
- pdata->rx_buf_size = ret;
-
- /* Allocate the channel and ring structures */
- ret = xgbe_alloc_channels(pdata);
- if (ret) {
- printf("xgbe_alloc_channels failed\n");
- goto err_ptpclk;
- }
-
- /* Allocate the ring descriptors and buffers */
- ret = desc_if->alloc_ring_resources(pdata);
- if (ret) {
- printf("desc_if->alloc_ring_resources failed\n");
- goto err_channels;
- }
-
- TASK_INIT(&pdata->service_work, 0, xgbe_service, pdata);
- TASK_INIT(&pdata->restart_work, 0, xgbe_restart, pdata);
- xgbe_init_timers(pdata);
-
- ret = xgbe_start(pdata);
- if (ret)
- goto err_rings;
-
- clear_bit(XGBE_DOWN, &pdata->dev_state);
-
- DBGPR("<--xgbe_open\n");
-
- return 0;
-
-err_rings:
- desc_if->free_ring_resources(pdata);
-
-err_channels:
- xgbe_free_channels(pdata);
-
-err_ptpclk:
-
- return ret;
-}
-
-int xgbe_close(struct ifnet *netdev)
-{
- struct xgbe_prv_data *pdata = netdev->if_softc;
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
-
- DBGPR("-->xgbe_close\n");
-
- /* Stop the device */
- xgbe_stop(pdata);
-
- /* Free the ring descriptors and buffers */
- desc_if->free_ring_resources(pdata);
-
- /* Free the channel and ring structures */
- xgbe_free_channels(pdata);
-
- set_bit(XGBE_DOWN, &pdata->dev_state);
-
- DBGPR("<--xgbe_close\n");
-
- return 0;
-}
-
-int xgbe_xmit(struct ifnet *ifp, struct mbuf *m)
-{
- struct xgbe_prv_data *pdata = ifp->if_softc;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel;
- struct xgbe_ring *ring;
- struct xgbe_packet_data *packet;
- int ret;
-
- M_ASSERTPKTHDR(m);
- MPASS(m->m_nextpkt == NULL);
-
- if (__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state) ||
- !pdata->phy.link)) {
- m_freem(m);
- return (ENETDOWN);
- }
-
- channel = pdata->channel;
- ring = channel->tx_ring;
- packet = &ring->packet_data;
-
- /* Calculate preliminary packet info */
- memset(packet, 0, sizeof(*packet));
- xgbe_packet_info(pdata, ring, m, packet);
-
- /* Check that there are enough descriptors available */
- ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
- if (ret)
- goto tx_netdev_return;
-
- if (!desc_if->map_tx_skb(channel, m)) {
- goto tx_netdev_return;
- }
-
- /* Configure required descriptor fields for transmission */
- hw_if->dev_xmit(channel);
-
- return 0;
-
-tx_netdev_return:
- m_free(m);
-
- return 0;
-}
-
-int xgbe_change_mtu(struct ifnet *netdev, int mtu)
-{
- struct xgbe_prv_data *pdata = netdev->if_softc;
- int ret;
-
- DBGPR("-->xgbe_change_mtu\n");
-
- ret = xgbe_calc_rx_buf_size(netdev, mtu);
- if (ret < 0)
- return -ret;
-
- pdata->rx_buf_size = ret;
- netdev->if_mtu = mtu;
-
- xgbe_restart_dev(pdata);
-
- DBGPR("<--xgbe_change_mtu\n");
-
- return 0;
-}
-
-static void xgbe_rx_refresh(struct xgbe_channel *channel)
-{
- struct xgbe_prv_data *pdata = channel->pdata;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_ring *ring = channel->rx_ring;
- struct xgbe_ring_data *rdata;
-
- while (ring->dirty != ring->cur) {
- rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
-
- /* Reset rdata values */
- desc_if->unmap_rdata(pdata, rdata);
-
- if (desc_if->map_rx_buffer(pdata, ring, rdata))
- break;
-
- hw_if->rx_desc_reset(pdata, rdata, ring->dirty);
-
- ring->dirty++;
- }
-
- /* Make sure everything is written before the register write */
- dsb(sy);
-
- /* Update the Rx Tail Pointer Register with address of
- * the last cleaned entry */
- rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
- XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
- lower_32_bits(rdata->rdata_paddr));
-}
-
-static int xgbe_tx_poll(struct xgbe_channel *channel)
-{
- struct xgbe_prv_data *pdata = channel->pdata;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_ring *ring = channel->tx_ring;
- struct xgbe_ring_data *rdata;
- struct xgbe_ring_desc *rdesc;
- int processed = 0;
- unsigned int cur;
-
- DBGPR("-->xgbe_tx_poll\n");
-
- /* Nothing to do if there isn't a Tx ring for this channel */
- if (!ring)
- return 0;
-
- cur = ring->cur;
-
- /* Be sure we get ring->cur before accessing descriptor data */
- dsb(sy);
-
- while ((processed < XGBE_TX_DESC_MAX_PROC) &&
- (ring->dirty != cur)) {
- rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
- rdesc = rdata->rdesc;
-
- if (!hw_if->tx_complete(rdesc))
- break;
-
- /* Make sure descriptor fields are read after reading the OWN
- * bit */
- dsb(sy);
-
- /* Free the SKB and reset the descriptor for re-use */
- desc_if->unmap_rdata(pdata, rdata);
- hw_if->tx_desc_reset(rdata);
-
- processed++;
- ring->dirty++;
- }
-
- if (!processed)
- return 0;
-
- DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
-
- return processed;
-}
-
-static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
-{
- struct xgbe_prv_data *pdata = channel->pdata;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_ring *ring = channel->rx_ring;
- struct xgbe_ring_data *rdata;
- struct xgbe_packet_data *packet;
- struct ifnet *ifp = pdata->netdev;
- struct mbuf *m;
- unsigned int incomplete, context_next;
- unsigned int received = 0;
- int packet_count = 0;
-
- DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
-
- /* Nothing to do if there isn't a Rx ring for this channel */
- if (!ring)
- return 0;
-
- incomplete = 0;
- context_next = 0;
-
- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
- packet = &ring->packet_data;
- while (packet_count < budget) {
- DBGPR(" cur = %d\n", ring->cur);
-
-read_again:
- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
-
- if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
- xgbe_rx_refresh(channel);
-
- if (hw_if->dev_read(channel))
- break;
-
- m = rdata->mb;
-
- received++;
- ring->cur++;
-
- incomplete = XGMAC_GET_BITS(packet->attributes,
- RX_PACKET_ATTRIBUTES,
- INCOMPLETE);
- context_next = XGMAC_GET_BITS(packet->attributes,
- RX_PACKET_ATTRIBUTES,
- CONTEXT_NEXT);
-
- /* Earlier error, just drain the remaining data */
- if (incomplete || context_next) {
- goto read_again;
- }
-
- if (packet->errors) {
- rdata->mbuf_free = 1;
- goto next_packet;
- }
- rdata->mb = NULL;
-
- m->m_pkthdr.len = rdata->rx.hdr_len + rdata->rx.len;
- if (rdata->rx.hdr_len != 0) {
- m->m_len = rdata->rx.hdr_len;
- m->m_next->m_len = rdata->rx.len;
- } else {
- m->m_len = rdata->rx.len;
- m_freem(m->m_next);
- m->m_next = NULL;
- }
- if_setrcvif(m, ifp);
- if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
-
- ifp->if_input(ifp, m);
-
-next_packet:
- packet_count++;
- }
-
- DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
-
- return packet_count;
-}
-
-static int xgbe_one_poll(struct xgbe_channel *channel, int budget)
-{
- int processed = 0;
-
- DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
-
- /* Cleanup Tx ring first */
- xgbe_tx_poll(channel);
-
- /* Process Rx ring next */
- processed = xgbe_rx_poll(channel, budget);
-
- DBGPR("<--xgbe_one_poll: received = %d\n", processed);
-
- return processed;
-}
-
-static int xgbe_all_poll(struct xgbe_prv_data *pdata, int budget)
-{
- struct xgbe_channel *channel;
- int ring_budget;
- int processed, last_processed;
- unsigned int i;
-
- DBGPR("-->xgbe_all_poll: budget=%d\n", budget);
-
- processed = 0;
- ring_budget = budget / pdata->rx_ring_count;
- do {
- last_processed = processed;
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- /* Cleanup Tx ring first */
- xgbe_tx_poll(channel);
-
- /* Process Rx ring next */
- if (ring_budget > (budget - processed))
- ring_budget = budget - processed;
- processed += xgbe_rx_poll(channel, ring_budget);
- }
- } while ((processed < budget) && (processed != last_processed));
-
- DBGPR("<--xgbe_all_poll: received = %d\n", processed);
-
- return processed;
-}
diff --git a/sys/dev/axgbe/xgbe-i2c.c b/sys/dev/axgbe/xgbe-i2c.c
new file mode 100644
index 000000000000..b24d19f19e0a
--- /dev/null
+++ b/sys/dev/axgbe/xgbe-i2c.c
@@ -0,0 +1,532 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * Copyright (c) 2020 Advanced Micro Devices, Inc.
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+#define XGBE_ABORT_COUNT 500
+#define XGBE_DISABLE_COUNT 1000
+
+#define XGBE_STD_SPEED 1
+
+#define XGBE_INTR_RX_FULL BIT(IC_RAW_INTR_STAT_RX_FULL_INDEX)
+#define XGBE_INTR_TX_EMPTY BIT(IC_RAW_INTR_STAT_TX_EMPTY_INDEX)
+#define XGBE_INTR_TX_ABRT BIT(IC_RAW_INTR_STAT_TX_ABRT_INDEX)
+#define XGBE_INTR_STOP_DET BIT(IC_RAW_INTR_STAT_STOP_DET_INDEX)
+#define XGBE_DEFAULT_INT_MASK (XGBE_INTR_RX_FULL | \
+ XGBE_INTR_TX_EMPTY | \
+ XGBE_INTR_TX_ABRT | \
+ XGBE_INTR_STOP_DET)
+
+#define XGBE_I2C_READ BIT(8)
+#define XGBE_I2C_STOP BIT(9)
+
+static int
+xgbe_i2c_abort(struct xgbe_prv_data *pdata)
+{
+ unsigned int wait = XGBE_ABORT_COUNT;
+
+ /* Must be enabled to recognize the abort request */
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, 1);
+
+ /* Issue the abort */
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, ABORT, 1);
+
+ while (wait--) {
+ if (!XI2C_IOREAD_BITS(pdata, IC_ENABLE, ABORT))
+ return (0);
+
+ DELAY(500);
+ }
+
+ return (-EBUSY);
+}
+
+static int
+xgbe_i2c_set_enable(struct xgbe_prv_data *pdata, bool enable)
+{
+ unsigned int wait = XGBE_DISABLE_COUNT;
+ unsigned int mode = enable ? 1 : 0;
+
+ while (wait--) {
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, mode);
+ if (XI2C_IOREAD_BITS(pdata, IC_ENABLE_STATUS, EN) == mode)
+ return (0);
+
+ DELAY(100);
+ }
+
+ return (-EBUSY);
+}
+
+static int
+xgbe_i2c_disable(struct xgbe_prv_data *pdata)
+{
+ unsigned int ret;
+
+ ret = xgbe_i2c_set_enable(pdata, false);
+ if (ret) {
+ /* Disable failed, try an abort */
+ ret = xgbe_i2c_abort(pdata);
+ if (ret) {
+ axgbe_error("%s: i2c_abort %d\n", __func__, ret);
+ return (ret);
+ }
+
+ /* Abort succeeded, try to disable again */
+ ret = xgbe_i2c_set_enable(pdata, false);
+ }
+
+ axgbe_printf(3, "%s: final i2c_disable %d\n", __func__, ret);
+ return (ret);
+}
+
+static int
+xgbe_i2c_enable(struct xgbe_prv_data *pdata)
+{
+ return (xgbe_i2c_set_enable(pdata, true));
+}
+
+static void
+xgbe_i2c_clear_all_interrupts(struct xgbe_prv_data *pdata)
+{
+ XI2C_IOREAD(pdata, IC_CLR_INTR);
+}
+
+static void
+xgbe_i2c_disable_interrupts(struct xgbe_prv_data *pdata)
+{
+ XI2C_IOWRITE(pdata, IC_INTR_MASK, 0);
+}
+
+static void
+xgbe_i2c_enable_interrupts(struct xgbe_prv_data *pdata)
+{
+ XI2C_IOWRITE(pdata, IC_INTR_MASK, XGBE_DEFAULT_INT_MASK);
+}
+
+static void
+xgbe_i2c_write(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int tx_slots, cmd;
+
+ /* Configured to never receive Rx overflows, so fill up Tx fifo */
+ tx_slots = pdata->i2c.tx_fifo_size - XI2C_IOREAD(pdata, IC_TXFLR);
+ axgbe_printf(3, "%s: tx_slots %d tx_len %d\n", __func__, tx_slots,
+ state->tx_len);
+
+ while (tx_slots && state->tx_len) {
+ if (state->op->cmd == XGBE_I2C_CMD_READ)
+ cmd = XGBE_I2C_READ;
+ else
+ cmd = *state->tx_buf++;
+
+ axgbe_printf(3, "%s: cmd %d tx_len %d\n", __func__, cmd,
+ state->tx_len);
+
+ if (state->tx_len == 1)
+ XI2C_SET_BITS(cmd, IC_DATA_CMD, STOP, 1);
+
+ XI2C_IOWRITE(pdata, IC_DATA_CMD, cmd);
+
+ tx_slots--;
+ state->tx_len--;
+ }
+
+ /* No more Tx operations, so ignore TX_EMPTY and return */
+ if (!state->tx_len)
+ XI2C_IOWRITE_BITS(pdata, IC_INTR_MASK, TX_EMPTY, 0);
+}
+
+static void
+xgbe_i2c_read(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int rx_slots;
+
+ /* Anything to be read? */
+ axgbe_printf(3, "%s: op cmd %d\n", __func__, state->op->cmd);
+ if (state->op->cmd != XGBE_I2C_CMD_READ)
+ return;
+
+ rx_slots = XI2C_IOREAD(pdata, IC_RXFLR);
+ axgbe_printf(3, "%s: rx_slots %d rx_len %d\n", __func__, rx_slots,
+ state->rx_len);
+
+ while (rx_slots && state->rx_len) {
+ *state->rx_buf++ = XI2C_IOREAD(pdata, IC_DATA_CMD);
+ state->rx_len--;
+ rx_slots--;
+ }
+}
+
+static void
+xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata, unsigned int isr)
+{
+ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
+
+ if (isr & XGBE_INTR_TX_ABRT) {
+ state->tx_abort_source = XI2C_IOREAD(pdata, IC_TX_ABRT_SOURCE);
+ XI2C_IOREAD(pdata, IC_CLR_TX_ABRT);
+ }
+
+ if (isr & XGBE_INTR_STOP_DET)
+ XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
+}
+
+static void
+xgbe_i2c_isr(void *data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int isr;
+
+ isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT);
+ axgbe_printf(3, "%s: isr 0x%x\n", __func__, isr);
+ if (!isr)
+ goto reissue_check;
+
+ axgbe_printf(3, "%s: I2C interrupt status=%#010x\n", __func__, isr);
+
+ xgbe_i2c_clear_isr_interrupts(pdata, isr);
+
+ if (isr & XGBE_INTR_TX_ABRT) {
+ axgbe_printf(1, "%s: I2C TX_ABRT received (%#010x) for target "
+ "%#04x\n", __func__, state->tx_abort_source,
+ state->op->target);
+
+ xgbe_i2c_disable_interrupts(pdata);
+
+ state->ret = -EIO;
+ goto out;
+ }
+
+ /* Check for data in the Rx fifo */
+ xgbe_i2c_read(pdata);
+
+ /* Fill up the Tx fifo next */
+ xgbe_i2c_write(pdata);
+
+out:
+ /* Complete on an error or STOP condition */
+ axgbe_printf(3, "%s: ret %d stop %d\n", __func__, state->ret,
+ XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET));
+
+ if (state->ret || XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET))
+ pdata->i2c_complete = true;
+
+ return;
+
+reissue_check:
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 2);
+}
+
+static void
+xgbe_i2c_set_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ reg = XI2C_IOREAD(pdata, IC_CON);
+ XI2C_SET_BITS(reg, IC_CON, MASTER_MODE, 1);
+ XI2C_SET_BITS(reg, IC_CON, SLAVE_DISABLE, 1);
+ XI2C_SET_BITS(reg, IC_CON, RESTART_EN, 1);
+ XI2C_SET_BITS(reg, IC_CON, SPEED, XGBE_STD_SPEED);
+ XI2C_SET_BITS(reg, IC_CON, RX_FIFO_FULL_HOLD, 1);
+ XI2C_IOWRITE(pdata, IC_CON, reg);
+}
+
+static void
+xgbe_i2c_get_features(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_i2c *i2c = &pdata->i2c;
+ unsigned int reg;
+
+ reg = XI2C_IOREAD(pdata, IC_COMP_PARAM_1);
+ i2c->max_speed_mode = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ MAX_SPEED_MODE);
+ i2c->rx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ RX_BUFFER_DEPTH);
+ i2c->tx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ TX_BUFFER_DEPTH);
+
+ axgbe_printf(3, "%s: I2C features: %s=%u, %s=%u, %s=%u\n", __func__,
+ "MAX_SPEED_MODE", i2c->max_speed_mode,
+ "RX_BUFFER_DEPTH", i2c->rx_fifo_size,
+ "TX_BUFFER_DEPTH", i2c->tx_fifo_size);
+}
+
+static void
+xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr)
+{
+ XI2C_IOWRITE(pdata, IC_TAR, addr);
+}
+
+static void
+xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata)
+{
+ xgbe_i2c_isr(pdata);
+}
+
+static int
+xgbe_i2c_xfer(struct xgbe_prv_data *pdata, struct xgbe_i2c_op *op)
+{
+ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned long timeout;
+ int ret;
+
+ mtx_lock(&pdata->i2c_mutex);
+
+ axgbe_printf(3, "i2c xfer started ---->>>\n");
+
+ ret = xgbe_i2c_disable(pdata);
+ if (ret) {
+ axgbe_error("failed to disable i2c master\n");
+ goto out;
+ }
+
+ xgbe_i2c_set_target(pdata, op->target);
+
+ memset(state, 0, sizeof(*state));
+ state->op = op;
+ state->tx_len = op->len;
+ state->tx_buf = op->buf;
+ state->rx_len = op->len;
+ state->rx_buf = op->buf;
+
+ xgbe_i2c_clear_all_interrupts(pdata);
+ ret = xgbe_i2c_enable(pdata);
+ if (ret) {
+ axgbe_error("failed to enable i2c master\n");
+ goto out;
+ }
+
+ /* Enabling the interrupts will cause the TX FIFO empty interrupt to
+ * fire and begin to process the command via the ISR.
+ */
+ xgbe_i2c_enable_interrupts(pdata);
+
+ timeout = ticks + (20 * hz);
+ while (ticks < timeout) {
+
+ if (!pdata->i2c_complete) {
+ DELAY(200);
+ continue;
+ }
+
+ axgbe_printf(1, "%s: I2C OP complete\n", __func__);
+ break;
+ }
+
+ if ((ticks >= timeout) && !pdata->i2c_complete) {
+ axgbe_error("%s: operation timed out\n", __func__);
+ ret = -ETIMEDOUT;
+ goto disable;
+ }
+
+ ret = state->ret;
+ axgbe_printf(3, "%s: i2c xfer ret %d abrt_source 0x%x \n", __func__,
+ ret, state->tx_abort_source);
+ if (ret) {
+
+ axgbe_error("%s: i2c xfer ret %d abrt_source 0x%x \n", __func__,
+ ret, state->tx_abort_source);
+ if (state->tx_abort_source & IC_TX_ABRT_7B_ADDR_NOACK)
+ ret = -ENOTCONN;
+ else if (state->tx_abort_source & IC_TX_ABRT_ARB_LOST)
+ ret = -EAGAIN;
+ }
+
+ axgbe_printf(3, "i2c xfer finished ---->>>\n");
+
+disable:
+ pdata->i2c_complete = false;
+ xgbe_i2c_disable_interrupts(pdata);
+ xgbe_i2c_disable(pdata);
+
+out:
+ mtx_unlock(&pdata->i2c_mutex);
+ return (ret);
+}
+
+static void
+xgbe_i2c_stop(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->i2c.started)
+ return;
+
+ axgbe_printf(3, "stopping I2C\n");
+
+ pdata->i2c.started = 0;
+
+ xgbe_i2c_disable_interrupts(pdata);
+ xgbe_i2c_disable(pdata);
+ xgbe_i2c_clear_all_interrupts(pdata);
+}
+
+static int
+xgbe_i2c_start(struct xgbe_prv_data *pdata)
+{
+ if (pdata->i2c.started)
+ return (0);
+
+ pdata->i2c.started = 1;
+
+ return (0);
+}
+
+static int
+xgbe_i2c_init(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ /* initialize lock for i2c */
+ mtx_init(&pdata->i2c_mutex, "xgbe i2c mutex lock", NULL, MTX_DEF);
+ pdata->i2c_complete = false;
+
+ xgbe_i2c_disable_interrupts(pdata);
+
+ ret = xgbe_i2c_disable(pdata);
+ if (ret) {
+ axgbe_error("failed to disable i2c master\n");
+ return (ret);
+ }
+
+ xgbe_i2c_get_features(pdata);
+
+ xgbe_i2c_set_mode(pdata);
+
+ xgbe_i2c_clear_all_interrupts(pdata);
+
+ xgbe_dump_i2c_registers(pdata);
+
+ return (0);
+}
+
+void
+xgbe_init_function_ptrs_i2c(struct xgbe_i2c_if *i2c_if)
+{
+ i2c_if->i2c_init = xgbe_i2c_init;
+
+ i2c_if->i2c_start = xgbe_i2c_start;
+ i2c_if->i2c_stop = xgbe_i2c_stop;
+
+ i2c_if->i2c_xfer = xgbe_i2c_xfer;
+
+ i2c_if->i2c_isr = xgbe_i2c_combined_isr;
+}
diff --git a/sys/dev/axgbe/xgbe-mdio.c b/sys/dev/axgbe/xgbe-mdio.c
index 850a58e04107..a716c1a7b797 100644
--- a/sys/dev/axgbe/xgbe-mdio.c
+++ b/sys/dev/axgbe/xgbe-mdio.c
@@ -1,13 +1,13 @@
/*
* AMD 10Gb Ethernet driver
*
+ * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc.
+ *
* This file is available to you under your choice of the following two
* licenses:
*
* License 1: GPLv2
*
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or (at
@@ -56,9 +56,6 @@
*
* License 2: Modified BSD
*
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
@@ -117,305 +114,274 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#include <sys/param.h>
-#include <sys/kernel.h>
-
#include "xgbe.h"
#include "xgbe-common.h"
static void xgbe_an_state_machine(struct xgbe_prv_data *pdata);
-static void xgbe_an_enable_kr_training(struct xgbe_prv_data *pdata)
+static void
+xgbe_an37_clear_interrupts(struct xgbe_prv_data *pdata)
{
- unsigned int reg;
-
- reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ int reg;
- reg |= XGBE_KR_TRAINING_ENABLE;
- XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT);
+ reg &= ~XGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg);
}
-static void xgbe_an_disable_kr_training(struct xgbe_prv_data *pdata)
+static void
+xgbe_an37_disable_interrupts(struct xgbe_prv_data *pdata)
{
- unsigned int reg;
+ int reg;
- reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
+ reg &= ~XGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
- reg &= ~XGBE_KR_TRAINING_ENABLE;
- XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL);
+ reg &= ~XGBE_PCS_CL37_BP;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg);
}
-static void xgbe_pcs_power_cycle(struct xgbe_prv_data *pdata)
+static void
+xgbe_an37_enable_interrupts(struct xgbe_prv_data *pdata)
{
- unsigned int reg;
-
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ int reg;
- reg |= MDIO_CTRL1_LPOWER;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL);
+ reg |= XGBE_PCS_CL37_BP;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg);
- DELAY(75);
-
- reg &= ~MDIO_CTRL1_LPOWER;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
+ reg |= XGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
}
-static void xgbe_serdes_start_ratechange(struct xgbe_prv_data *pdata)
+static void
+xgbe_an73_clear_interrupts(struct xgbe_prv_data *pdata)
{
- /* Assert Rx and Tx ratechange */
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
}
-static void xgbe_serdes_complete_ratechange(struct xgbe_prv_data *pdata)
+static void
+xgbe_an73_disable_interrupts(struct xgbe_prv_data *pdata)
{
- unsigned int wait;
- u16 status;
-
- /* Release Rx and Tx ratechange */
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+}
- /* Wait for Rx and Tx ready */
- wait = XGBE_RATECHANGE_COUNT;
- while (wait--) {
- DELAY(50);
+static void
+xgbe_an73_enable_interrupts(struct xgbe_prv_data *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_CL73_INT_MASK);
+}
- status = XSIR0_IOREAD(pdata, SIR0_STATUS);
- if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
- XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
- goto rx_reset;
+static void
+xgbe_an_enable_interrupts(struct xgbe_prv_data *pdata)
+{
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_enable_interrupts(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_enable_interrupts(pdata);
+ break;
+ default:
+ break;
}
-
-rx_reset:
- /* Perform Rx reset for the DFE changes */
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1);
}
-static void xgbe_xgmii_mode(struct xgbe_prv_data *pdata)
+static void
+xgbe_an_clear_interrupts_all(struct xgbe_prv_data *pdata)
{
- unsigned int reg;
-
- /* Enable KR training */
- xgbe_an_enable_kr_training(pdata);
+ xgbe_an73_clear_interrupts(pdata);
+ xgbe_an37_clear_interrupts(pdata);
+}
+static void
+xgbe_kr_mode(struct xgbe_prv_data *pdata)
+{
/* Set MAC to 10G speed */
- pdata->hw_if.set_xgmii_speed(pdata);
-
- /* Set PCS to KR/10G speed */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
- reg &= ~MDIO_PCS_CTRL2_TYPE;
- reg |= MDIO_PCS_CTRL2_10GBR;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+ pdata->hw_if.set_speed(pdata, SPEED_10000);
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
- reg &= ~MDIO_CTRL1_SPEEDSEL;
- reg |= MDIO_CTRL1_SPEED10G;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
-
- xgbe_pcs_power_cycle(pdata);
-
- /* Set SerDes to 10G speed */
- xgbe_serdes_start_ratechange(pdata);
-
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
-
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
- pdata->serdes_cdr_rate[XGBE_SPEED_10000]);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
- pdata->serdes_tx_amp[XGBE_SPEED_10000]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
- pdata->serdes_blwc[XGBE_SPEED_10000]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
- pdata->serdes_pq_skew[XGBE_SPEED_10000]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
- pdata->serdes_dfe_tap_cfg[XGBE_SPEED_10000]);
- XRXTX_IOWRITE(pdata, RXTX_REG22,
- pdata->serdes_dfe_tap_ena[XGBE_SPEED_10000]);
-
- xgbe_serdes_complete_ratechange(pdata);
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KR);
}
-static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata)
+static void
+xgbe_kx_2500_mode(struct xgbe_prv_data *pdata)
{
- unsigned int reg;
-
- /* Disable KR training */
- xgbe_an_disable_kr_training(pdata);
-
/* Set MAC to 2.5G speed */
- pdata->hw_if.set_gmii_2500_speed(pdata);
-
- /* Set PCS to KX/1G speed */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
- reg &= ~MDIO_PCS_CTRL2_TYPE;
- reg |= MDIO_PCS_CTRL2_10GBX;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+ pdata->hw_if.set_speed(pdata, SPEED_2500);
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
- reg &= ~MDIO_CTRL1_SPEEDSEL;
- reg |= MDIO_CTRL1_SPEED1G;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KX_2500);
+}
- xgbe_pcs_power_cycle(pdata);
+static void
+xgbe_kx_1000_mode(struct xgbe_prv_data *pdata)
+{
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
- /* Set SerDes to 2.5G speed */
- xgbe_serdes_start_ratechange(pdata);
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KX_1000);
+}
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL);
+static void
+xgbe_sfi_mode(struct xgbe_prv_data *pdata)
+{
+ /* If a KR re-driver is present, change to KR mode instead */
+ if (pdata->kr_redrv)
+ return (xgbe_kr_mode(pdata));
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
- pdata->serdes_cdr_rate[XGBE_SPEED_2500]);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
- pdata->serdes_tx_amp[XGBE_SPEED_2500]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
- pdata->serdes_blwc[XGBE_SPEED_2500]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
- pdata->serdes_pq_skew[XGBE_SPEED_2500]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
- pdata->serdes_dfe_tap_cfg[XGBE_SPEED_2500]);
- XRXTX_IOWRITE(pdata, RXTX_REG22,
- pdata->serdes_dfe_tap_ena[XGBE_SPEED_2500]);
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10000);
- xgbe_serdes_complete_ratechange(pdata);
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SFI);
}
-static void xgbe_gmii_mode(struct xgbe_prv_data *pdata)
+static void
+xgbe_x_mode(struct xgbe_prv_data *pdata)
{
- unsigned int reg;
-
- /* Disable KR training */
- xgbe_an_disable_kr_training(pdata);
-
/* Set MAC to 1G speed */
- pdata->hw_if.set_gmii_speed(pdata);
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
- /* Set PCS to KX/1G speed */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
- reg &= ~MDIO_PCS_CTRL2_TYPE;
- reg |= MDIO_PCS_CTRL2_10GBX;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_X);
+}
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
- reg &= ~MDIO_CTRL1_SPEEDSEL;
- reg |= MDIO_CTRL1_SPEED1G;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+static void
+xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata)
+{
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
- xgbe_pcs_power_cycle(pdata);
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_1000);
+}
- /* Set SerDes to 1G speed */
- xgbe_serdes_start_ratechange(pdata);
+static void
+xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata)
+{
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL);
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_100);
+}
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
- pdata->serdes_cdr_rate[XGBE_SPEED_1000]);
- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
- pdata->serdes_tx_amp[XGBE_SPEED_1000]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
- pdata->serdes_blwc[XGBE_SPEED_1000]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
- pdata->serdes_pq_skew[XGBE_SPEED_1000]);
- XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
- pdata->serdes_dfe_tap_cfg[XGBE_SPEED_1000]);
- XRXTX_IOWRITE(pdata, RXTX_REG22,
- pdata->serdes_dfe_tap_ena[XGBE_SPEED_1000]);
+static enum xgbe_mode
+xgbe_cur_mode(struct xgbe_prv_data *pdata)
+{
+ return (pdata->phy_if.phy_impl.cur_mode(pdata));
+}
- xgbe_serdes_complete_ratechange(pdata);
+static bool
+xgbe_in_kr_mode(struct xgbe_prv_data *pdata)
+{
+ return (xgbe_cur_mode(pdata) == XGBE_MODE_KR);
}
-static void xgbe_cur_mode(struct xgbe_prv_data *pdata,
- enum xgbe_mode *mode)
+static void
+xgbe_change_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
{
- unsigned int reg;
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ xgbe_kx_1000_mode(pdata);
+ break;
+ case XGBE_MODE_KX_2500:
+ xgbe_kx_2500_mode(pdata);
+ break;
+ case XGBE_MODE_KR:
+ xgbe_kr_mode(pdata);
+ break;
+ case XGBE_MODE_SGMII_100:
+ xgbe_sgmii_100_mode(pdata);
+ break;
+ case XGBE_MODE_SGMII_1000:
+ xgbe_sgmii_1000_mode(pdata);
+ break;
+ case XGBE_MODE_X:
+ xgbe_x_mode(pdata);
+ break;
+ case XGBE_MODE_SFI:
+ xgbe_sfi_mode(pdata);
+ break;
+ case XGBE_MODE_UNKNOWN:
+ break;
+ default:
+ axgbe_error("invalid operation mode requested (%u)\n", mode);
+ }
+}
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
- if ((reg & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
- *mode = XGBE_MODE_KR;
- else
- *mode = XGBE_MODE_KX;
+static void
+xgbe_switch_mode(struct xgbe_prv_data *pdata)
+{
+ xgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata));
}
-static bool xgbe_in_kr_mode(struct xgbe_prv_data *pdata)
+static bool
+xgbe_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
{
- enum xgbe_mode mode;
+ if (mode == xgbe_cur_mode(pdata))
+ return (false);
- xgbe_cur_mode(pdata, &mode);
+ xgbe_change_mode(pdata, mode);
- return (mode == XGBE_MODE_KR);
+ return (true);
}
-static void xgbe_switch_mode(struct xgbe_prv_data *pdata)
+static bool
+xgbe_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
{
- /* If we are in KR switch to KX, and vice-versa */
- if (xgbe_in_kr_mode(pdata)) {
- if (pdata->speed_set == XGBE_SPEEDSET_1000_10000)
- xgbe_gmii_mode(pdata);
- else
- xgbe_gmii_2500_mode(pdata);
- } else {
- xgbe_xgmii_mode(pdata);
- }
+ return (pdata->phy_if.phy_impl.use_mode(pdata, mode));
}
-static void xgbe_set_mode(struct xgbe_prv_data *pdata,
- enum xgbe_mode mode)
+static void
+xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable, bool restart)
{
- enum xgbe_mode cur_mode;
+ unsigned int reg;
- xgbe_cur_mode(pdata, &cur_mode);
- if (mode != cur_mode)
- xgbe_switch_mode(pdata);
-}
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_CTRL1);
+ reg &= ~MDIO_VEND2_CTRL1_AN_ENABLE;
-static bool xgbe_use_xgmii_mode(struct xgbe_prv_data *pdata)
-{
- if (pdata->phy.autoneg == AUTONEG_ENABLE) {
- if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
- return true;
- } else {
- if (pdata->phy.speed == SPEED_10000)
- return true;
- }
+ if (enable)
+ reg |= MDIO_VEND2_CTRL1_AN_ENABLE;
- return false;
+ if (restart)
+ reg |= MDIO_VEND2_CTRL1_AN_RESTART;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
}
-static bool xgbe_use_gmii_2500_mode(struct xgbe_prv_data *pdata)
+static void
+xgbe_an37_restart(struct xgbe_prv_data *pdata)
{
- if (pdata->phy.autoneg == AUTONEG_ENABLE) {
- if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
- return true;
- } else {
- if (pdata->phy.speed == SPEED_2500)
- return true;
- }
-
- return false;
+ xgbe_an37_enable_interrupts(pdata);
+ xgbe_an37_set(pdata, true, true);
}
-static bool xgbe_use_gmii_mode(struct xgbe_prv_data *pdata)
+static void
+xgbe_an37_disable(struct xgbe_prv_data *pdata)
{
- if (pdata->phy.autoneg == AUTONEG_ENABLE) {
- if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
- return true;
- } else {
- if (pdata->phy.speed == SPEED_1000)
- return true;
- }
-
- return false;
+ xgbe_an37_set(pdata, false, false);
+ xgbe_an37_disable_interrupts(pdata);
}
-static void xgbe_set_an(struct xgbe_prv_data *pdata, bool enable, bool restart)
+static void
+xgbe_an73_set(struct xgbe_prv_data *pdata, bool enable, bool restart)
{
unsigned int reg;
+ /* Disable KR training for now */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ reg &= ~XGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+
+ /* Update AN settings */
reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
reg &= ~MDIO_AN_CTRL1_ENABLE;
@@ -428,18 +394,71 @@ static void xgbe_set_an(struct xgbe_prv_data *pdata, bool enable, bool restart)
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
}
-static void xgbe_restart_an(struct xgbe_prv_data *pdata)
+static void
+xgbe_an73_restart(struct xgbe_prv_data *pdata)
{
- xgbe_set_an(pdata, true, true);
+ xgbe_an73_enable_interrupts(pdata);
+ xgbe_an73_set(pdata, true, true);
}
-static void xgbe_disable_an(struct xgbe_prv_data *pdata)
+static void
+xgbe_an73_disable(struct xgbe_prv_data *pdata)
{
- xgbe_set_an(pdata, false, false);
+ xgbe_an73_set(pdata, false, false);
+ xgbe_an73_disable_interrupts(pdata);
+
+ pdata->an_start = 0;
}
-static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
- enum xgbe_rx *state)
+static void
+xgbe_an_restart(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy_if.phy_impl.an_pre)
+ pdata->phy_if.phy_impl.an_pre(pdata);
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_restart(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_restart(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+xgbe_an_disable(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_disable(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_disable(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+xgbe_an_disable_all(struct xgbe_prv_data *pdata)
+{
+ xgbe_an73_disable(pdata);
+ xgbe_an37_disable(pdata);
+}
+
+static enum xgbe_an
+xgbe_an73_tx_training(struct xgbe_prv_data *pdata, enum xgbe_rx *state)
{
unsigned int ad_reg, lp_reg, reg;
@@ -447,7 +466,7 @@ static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
/* If we're not in KR mode then we're done */
if (!xgbe_in_kr_mode(pdata))
- return XGBE_AN_PAGE_RECEIVED;
+ return (XGBE_AN_PAGE_RECEIVED);
/* Enable/Disable FEC */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
@@ -461,24 +480,25 @@ static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg);
/* Start KR training */
- reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
- if (reg & XGBE_KR_TRAINING_ENABLE) {
- XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1);
+ if (pdata->phy_if.phy_impl.kr_training_pre)
+ pdata->phy_if.phy_impl.kr_training_pre(pdata);
- reg |= XGBE_KR_TRAINING_START;
- XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
- reg);
+ /* Start KR training */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ reg |= XGBE_KR_TRAINING_ENABLE;
+ reg |= XGBE_KR_TRAINING_START;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
- XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
- }
+ if (pdata->phy_if.phy_impl.kr_training_post)
+ pdata->phy_if.phy_impl.kr_training_post(pdata);
- return XGBE_AN_PAGE_RECEIVED;
+ return (XGBE_AN_PAGE_RECEIVED);
}
-static enum xgbe_an xgbe_an_tx_xnp(struct xgbe_prv_data *pdata,
- enum xgbe_rx *state)
+static enum xgbe_an
+xgbe_an73_tx_xnp(struct xgbe_prv_data *pdata, enum xgbe_rx *state)
{
- u16 msg;
+ uint16_t msg;
*state = XGBE_RX_XNP;
@@ -489,11 +509,11 @@ static enum xgbe_an xgbe_an_tx_xnp(struct xgbe_prv_data *pdata,
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg);
- return XGBE_AN_PAGE_RECEIVED;
+ return (XGBE_AN_PAGE_RECEIVED);
}
-static enum xgbe_an xgbe_an_rx_bpa(struct xgbe_prv_data *pdata,
- enum xgbe_rx *state)
+static enum xgbe_an
+xgbe_an73_rx_bpa(struct xgbe_prv_data *pdata, enum xgbe_rx *state)
{
unsigned int link_support;
unsigned int reg, ad_reg, lp_reg;
@@ -504,20 +524,20 @@ static enum xgbe_an xgbe_an_rx_bpa(struct xgbe_prv_data *pdata,
/* Check for a supported mode, otherwise restart in a different one */
link_support = xgbe_in_kr_mode(pdata) ? 0x80 : 0x20;
if (!(reg & link_support))
- return XGBE_AN_INCOMPAT_LINK;
+ return (XGBE_AN_INCOMPAT_LINK);
/* Check Extended Next Page support */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
- return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
+ return (((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
(lp_reg & XGBE_XNP_NP_EXCHANGE))
- ? xgbe_an_tx_xnp(pdata, state)
- : xgbe_an_tx_training(pdata, state);
+ ? xgbe_an73_tx_xnp(pdata, state)
+ : xgbe_an73_tx_training(pdata, state));
}
-static enum xgbe_an xgbe_an_rx_xnp(struct xgbe_prv_data *pdata,
- enum xgbe_rx *state)
+static enum xgbe_an
+xgbe_an73_rx_xnp(struct xgbe_prv_data *pdata, enum xgbe_rx *state)
{
unsigned int ad_reg, lp_reg;
@@ -525,13 +545,14 @@ static enum xgbe_an xgbe_an_rx_xnp(struct xgbe_prv_data *pdata,
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX);
- return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
+ return (((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
(lp_reg & XGBE_XNP_NP_EXCHANGE))
- ? xgbe_an_tx_xnp(pdata, state)
- : xgbe_an_tx_training(pdata, state);
+ ? xgbe_an73_tx_xnp(pdata, state)
+ : xgbe_an73_tx_training(pdata, state));
}
-static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
+static enum xgbe_an
+xgbe_an73_page_received(struct xgbe_prv_data *pdata)
{
enum xgbe_rx *state;
unsigned long an_timeout;
@@ -548,65 +569,95 @@ static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
pdata->kx_state = XGBE_RX_BPA;
pdata->an_start = ticks;
+
+ axgbe_printf(2, "CL73 AN timed out, resetting state\n");
}
}
- state = xgbe_in_kr_mode(pdata) ? &pdata->kr_state
- : &pdata->kx_state;
+ state = xgbe_in_kr_mode(pdata) ? &pdata->kr_state : &pdata->kx_state;
switch (*state) {
case XGBE_RX_BPA:
- ret = xgbe_an_rx_bpa(pdata, state);
+ ret = xgbe_an73_rx_bpa(pdata, state);
break;
case XGBE_RX_XNP:
- ret = xgbe_an_rx_xnp(pdata, state);
+ ret = xgbe_an73_rx_xnp(pdata, state);
break;
default:
ret = XGBE_AN_ERROR;
}
- return ret;
+ return (ret);
}
-static enum xgbe_an xgbe_an_incompat_link(struct xgbe_prv_data *pdata)
+static enum xgbe_an
+xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
{
/* Be sure we aren't looping trying to negotiate */
if (xgbe_in_kr_mode(pdata)) {
pdata->kr_state = XGBE_RX_ERROR;
- if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) &&
- !(pdata->phy.advertising & ADVERTISED_2500baseX_Full))
- return XGBE_AN_NO_LINK;
+ if (!(XGBE_ADV(&pdata->phy, 1000baseKX_Full)) &&
+ !(XGBE_ADV(&pdata->phy, 2500baseX_Full)))
+ return (XGBE_AN_NO_LINK);
if (pdata->kx_state != XGBE_RX_BPA)
- return XGBE_AN_NO_LINK;
+ return (XGBE_AN_NO_LINK);
} else {
pdata->kx_state = XGBE_RX_ERROR;
- if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full))
- return XGBE_AN_NO_LINK;
+ if (!(XGBE_ADV(&pdata->phy, 10000baseKR_Full)))
+ return (XGBE_AN_NO_LINK);
if (pdata->kr_state != XGBE_RX_BPA)
- return XGBE_AN_NO_LINK;
+ return (XGBE_AN_NO_LINK);
}
- xgbe_disable_an(pdata);
+ xgbe_an_disable(pdata);
xgbe_switch_mode(pdata);
- xgbe_restart_an(pdata);
+ xgbe_an_restart(pdata);
- return XGBE_AN_INCOMPAT_LINK;
+ return (XGBE_AN_INCOMPAT_LINK);
}
-static void xgbe_an_isr(void *data)
+static void
+xgbe_an37_isr(struct xgbe_prv_data *pdata)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ unsigned int reg;
/* Disable AN interrupts */
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+ xgbe_an37_disable_interrupts(pdata);
+
+ /* Save the interrupt(s) that fired */
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT);
+ pdata->an_int = reg & XGBE_AN_CL37_INT_MASK;
+ pdata->an_status = reg & ~XGBE_AN_CL37_INT_MASK;
+
+ if (pdata->an_int) {
+ /* Clear the interrupt(s) that fired and process them */
+ reg &= ~XGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg);
+
+ xgbe_an_state_machine(pdata);
+ } else {
+ /* Enable AN interrupts */
+ xgbe_an37_enable_interrupts(pdata);
+
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3);
+ }
+}
+
+static void
+xgbe_an73_isr(struct xgbe_prv_data *pdata)
+{
+ /* Disable AN interrupts */
+ xgbe_an73_disable_interrupts(pdata);
/* Save the interrupt(s) that fired */
pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
@@ -618,37 +669,152 @@ static void xgbe_an_isr(void *data)
xgbe_an_state_machine(pdata);
} else {
/* Enable AN interrupts */
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
- XGBE_AN_INT_MASK);
+ xgbe_an73_enable_interrupts(pdata);
+
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3);
+ }
+}
+
+static void
+xgbe_an_isr_task(unsigned long data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+ axgbe_printf(2, "AN interrupt received\n");
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_isr(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_isr(pdata);
+ break;
+ default:
+ break;
}
}
-static void xgbe_an_state_machine(struct xgbe_prv_data *pdata)
+static void
+xgbe_an_combined_isr(struct xgbe_prv_data *pdata)
+{
+ xgbe_an_isr_task((unsigned long)pdata);
+}
+
+static const char *
+xgbe_state_as_string(enum xgbe_an state)
+{
+ switch (state) {
+ case XGBE_AN_READY:
+ return ("Ready");
+ case XGBE_AN_PAGE_RECEIVED:
+ return ("Page-Received");
+ case XGBE_AN_INCOMPAT_LINK:
+ return ("Incompatible-Link");
+ case XGBE_AN_COMPLETE:
+ return ("Complete");
+ case XGBE_AN_NO_LINK:
+ return ("No-Link");
+ case XGBE_AN_ERROR:
+ return ("Error");
+ default:
+ return ("Undefined");
+ }
+}
+
+static void
+xgbe_an37_state_machine(struct xgbe_prv_data *pdata)
{
enum xgbe_an cur_state = pdata->an_state;
- sx_xlock(&pdata->an_mutex);
+ if (!pdata->an_int)
+ return;
+
+ if (pdata->an_int & XGBE_AN_CL37_INT_CMPLT) {
+ pdata->an_state = XGBE_AN_COMPLETE;
+ pdata->an_int &= ~XGBE_AN_CL37_INT_CMPLT;
+
+ /* If SGMII is enabled, check the link status */
+ if ((pdata->an_mode == XGBE_AN_MODE_CL37_SGMII) &&
+ !(pdata->an_status & XGBE_SGMII_AN_LINK_STATUS))
+ pdata->an_state = XGBE_AN_NO_LINK;
+ }
+
+ axgbe_printf(2, "%s: CL37 AN %s\n", __func__,
+ xgbe_state_as_string(pdata->an_state));
+
+ cur_state = pdata->an_state;
+
+ switch (pdata->an_state) {
+ case XGBE_AN_READY:
+ break;
+
+ case XGBE_AN_COMPLETE:
+ axgbe_printf(2, "Auto negotiation successful\n");
+ break;
+
+ case XGBE_AN_NO_LINK:
+ break;
+
+ default:
+ pdata->an_state = XGBE_AN_ERROR;
+ }
+
+ if (pdata->an_state == XGBE_AN_ERROR) {
+ axgbe_printf(2, "error during auto-negotiation, state=%u\n",
+ cur_state);
+
+ pdata->an_int = 0;
+ xgbe_an37_clear_interrupts(pdata);
+ }
+
+ if (pdata->an_state >= XGBE_AN_COMPLETE) {
+ pdata->an_result = pdata->an_state;
+ pdata->an_state = XGBE_AN_READY;
+
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+
+ axgbe_printf(2, "CL37 AN result: %s\n",
+ xgbe_state_as_string(pdata->an_result));
+ }
+
+ axgbe_printf(2, "%s: an_state %d an_int %d an_mode %d an_status %d\n",
+ __func__, pdata->an_state, pdata->an_int, pdata->an_mode,
+ pdata->an_status);
+
+ xgbe_an37_enable_interrupts(pdata);
+}
+
+static void
+xgbe_an73_state_machine(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_an cur_state = pdata->an_state;
if (!pdata->an_int)
goto out;
next_int:
- if (pdata->an_int & XGBE_AN_PG_RCV) {
+ if (pdata->an_int & XGBE_AN_CL73_PG_RCV) {
pdata->an_state = XGBE_AN_PAGE_RECEIVED;
- pdata->an_int &= ~XGBE_AN_PG_RCV;
- } else if (pdata->an_int & XGBE_AN_INC_LINK) {
+ pdata->an_int &= ~XGBE_AN_CL73_PG_RCV;
+ } else if (pdata->an_int & XGBE_AN_CL73_INC_LINK) {
pdata->an_state = XGBE_AN_INCOMPAT_LINK;
- pdata->an_int &= ~XGBE_AN_INC_LINK;
- } else if (pdata->an_int & XGBE_AN_INT_CMPLT) {
+ pdata->an_int &= ~XGBE_AN_CL73_INC_LINK;
+ } else if (pdata->an_int & XGBE_AN_CL73_INT_CMPLT) {
pdata->an_state = XGBE_AN_COMPLETE;
- pdata->an_int &= ~XGBE_AN_INT_CMPLT;
+ pdata->an_int &= ~XGBE_AN_CL73_INT_CMPLT;
} else {
pdata->an_state = XGBE_AN_ERROR;
}
- pdata->an_result = pdata->an_state;
-
again:
+ axgbe_printf(2, "CL73 AN %s\n",
+ xgbe_state_as_string(pdata->an_state));
+
cur_state = pdata->an_state;
switch (pdata->an_state) {
@@ -657,18 +823,21 @@ again:
break;
case XGBE_AN_PAGE_RECEIVED:
- pdata->an_state = xgbe_an_page_received(pdata);
+ pdata->an_state = xgbe_an73_page_received(pdata);
pdata->an_supported++;
break;
case XGBE_AN_INCOMPAT_LINK:
pdata->an_supported = 0;
pdata->parallel_detect = 0;
- pdata->an_state = xgbe_an_incompat_link(pdata);
+ pdata->an_state = xgbe_an73_incompat_link(pdata);
break;
case XGBE_AN_COMPLETE:
pdata->parallel_detect = pdata->an_supported ? 0 : 1;
+ axgbe_printf(2, "%s successful\n",
+ pdata->an_supported ? "Auto negotiation"
+ : "Parallel detection");
break;
case XGBE_AN_NO_LINK:
@@ -680,10 +849,14 @@ again:
if (pdata->an_state == XGBE_AN_NO_LINK) {
pdata->an_int = 0;
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ xgbe_an73_clear_interrupts(pdata);
} else if (pdata->an_state == XGBE_AN_ERROR) {
+ axgbe_printf(2,
+ "error during auto-negotiation, state=%u\n",
+ cur_state);
+
pdata->an_int = 0;
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ xgbe_an73_clear_interrupts(pdata);
}
if (pdata->an_state >= XGBE_AN_COMPLETE) {
@@ -692,6 +865,12 @@ again:
pdata->kr_state = XGBE_RX_BPA;
pdata->kx_state = XGBE_RX_BPA;
pdata->an_start = 0;
+
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+
+ axgbe_printf(2, "CL73 AN result: %s\n",
+ xgbe_state_as_string(pdata->an_result));
}
if (cur_state != pdata->an_state)
@@ -702,30 +881,119 @@ again:
out:
/* Enable AN interrupts on the way out */
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_INT_MASK);
+ xgbe_an73_enable_interrupts(pdata);
+}
+
+static void
+xgbe_an_state_machine(struct xgbe_prv_data *pdata)
+{
+ sx_xlock(&pdata->an_mutex);
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_state_machine(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_state_machine(pdata);
+ break;
+ default:
+ break;
+ }
+
+ /* Reissue interrupt if status is not clear */
+ if (pdata->vdata->irq_reissue_support)
+ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3);
sx_xunlock(&pdata->an_mutex);
}
-static void xgbe_an_init(struct xgbe_prv_data *pdata)
+static void
+xgbe_an37_init(struct xgbe_prv_data *pdata)
{
+ struct xgbe_phy local_phy;
unsigned int reg;
+ pdata->phy_if.phy_impl.an_advertising(pdata, &local_phy);
+
+ axgbe_printf(2, "%s: advertising 0x%x\n", __func__, local_phy.advertising);
+
+ /* Set up Advertisement register */
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE);
+ if (XGBE_ADV(&local_phy, Pause))
+ reg |= 0x100;
+ else
+ reg &= ~0x100;
+
+ if (XGBE_ADV(&local_phy, Asym_Pause))
+ reg |= 0x80;
+ else
+ reg &= ~0x80;
+
+ /* Full duplex, but not half */
+ reg |= XGBE_AN_CL37_FD_MASK;
+ reg &= ~XGBE_AN_CL37_HD_MASK;
+
+ axgbe_printf(2, "%s: Writing reg: 0x%x\n", __func__, reg);
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE, reg);
+
+ /* Set up the Control register */
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
+ axgbe_printf(2, "%s: AN_ADVERTISE reg 0x%x an_mode %d\n", __func__,
+ reg, pdata->an_mode);
+ reg &= ~XGBE_AN_CL37_TX_CONFIG_MASK;
+ reg &= ~XGBE_AN_CL37_PCS_MODE_MASK;
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL37:
+ reg |= XGBE_AN_CL37_PCS_MODE_BASEX;
+ break;
+ case XGBE_AN_MODE_CL37_SGMII:
+ reg |= XGBE_AN_CL37_PCS_MODE_SGMII;
+ break;
+ default:
+ break;
+ }
+
+ reg |= XGBE_AN_CL37_MII_CTRL_8BIT;
+ axgbe_printf(2, "%s: Writing reg: 0x%x\n", __func__, reg);
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
+
+ axgbe_printf(2, "CL37 AN (%s) initialized\n",
+ (pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII");
+}
+
+static void
+xgbe_an73_init(struct xgbe_prv_data *pdata)
+{
+ /*
+ * This local_phy is needed because phy-v2 alters the
+ * advertising flag variable. so phy-v1 an_advertising is just copying
+ */
+ struct xgbe_phy local_phy;
+ unsigned int reg;
+
+ pdata->phy_if.phy_impl.an_advertising(pdata, &local_phy);
+
/* Set up Advertisement register 3 first */
reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
- reg &= ~0xc000;
+ if (XGBE_ADV(&local_phy, 10000baseR_FEC))
+ reg |= 0xc000;
+ else
+ reg &= ~0xc000;
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg);
/* Set up Advertisement register 2 next */
reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
- if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+ if (XGBE_ADV(&local_phy, 10000baseKR_Full))
reg |= 0x80;
else
reg &= ~0x80;
- if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
- (pdata->phy.advertising & ADVERTISED_2500baseX_Full))
+ if (XGBE_ADV(&local_phy, 1000baseKX_Full) ||
+ XGBE_ADV(&local_phy, 2500baseX_Full))
reg |= 0x20;
else
reg &= ~0x20;
@@ -734,12 +1002,12 @@ static void xgbe_an_init(struct xgbe_prv_data *pdata)
/* Set up Advertisement register 1 last */
reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
- if (pdata->phy.advertising & ADVERTISED_Pause)
+ if (XGBE_ADV(&local_phy, Pause))
reg |= 0x400;
else
reg &= ~0x400;
- if (pdata->phy.advertising & ADVERTISED_Asym_Pause)
+ if (XGBE_ADV(&local_phy, Asym_Pause))
reg |= 0x800;
else
reg &= ~0x800;
@@ -748,97 +1016,239 @@ static void xgbe_an_init(struct xgbe_prv_data *pdata)
reg &= ~XGBE_XNP_NP_EXCHANGE;
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+
+ axgbe_printf(2, "CL73 AN initialized\n");
+}
+
+static void
+xgbe_an_init(struct xgbe_prv_data *pdata)
+{
+ /* Set up advertisement registers based on current settings */
+ pdata->an_mode = pdata->phy_if.phy_impl.an_mode(pdata);
+ axgbe_printf(2, "%s: setting up an_mode %d\n", __func__, pdata->an_mode);
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ xgbe_an73_init(pdata);
+ break;
+ case XGBE_AN_MODE_CL37:
+ case XGBE_AN_MODE_CL37_SGMII:
+ xgbe_an37_init(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static const char *
+xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
+{
+ if (pdata->tx_pause && pdata->rx_pause)
+ return ("rx/tx");
+ else if (pdata->rx_pause)
+ return ("rx");
+ else if (pdata->tx_pause)
+ return ("tx");
+ else
+ return ("off");
+}
+
+static const char *
+xgbe_phy_speed_string(int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return ("100Mbps");
+ case SPEED_1000:
+ return ("1Gbps");
+ case SPEED_2500:
+ return ("2.5Gbps");
+ case SPEED_10000:
+ return ("10Gbps");
+ case SPEED_UNKNOWN:
+ return ("Unknown");
+ default:
+ return ("Unsupported");
+ }
+}
+
+static void
+xgbe_phy_print_status(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy.link)
+ axgbe_printf(0,
+ "Link is UP - %s/%s - flow control %s\n",
+ xgbe_phy_speed_string(pdata->phy.speed),
+ pdata->phy.duplex == DUPLEX_FULL ? "Full" : "Half",
+ xgbe_phy_fc_string(pdata));
+ else
+ axgbe_printf(0, "Link is DOWN\n");
}
-static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
+static void
+xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
{
+ int new_state = 0;
+
+ axgbe_printf(1, "link %d/%d tx %d/%d rx %d/%d speed %d/%d autoneg %d/%d\n",
+ pdata->phy_link, pdata->phy.link,
+ pdata->tx_pause, pdata->phy.tx_pause,
+ pdata->rx_pause, pdata->phy.rx_pause,
+ pdata->phy_speed, pdata->phy.speed,
+ pdata->pause_autoneg, pdata->phy.pause_autoneg);
if (pdata->phy.link) {
/* Flow control support */
pdata->pause_autoneg = pdata->phy.pause_autoneg;
if (pdata->tx_pause != pdata->phy.tx_pause) {
- pdata->hw_if.config_tx_flow_control(pdata);
+ new_state = 1;
+ axgbe_printf(2, "tx pause %d/%d\n", pdata->tx_pause,
+ pdata->phy.tx_pause);
pdata->tx_pause = pdata->phy.tx_pause;
+ pdata->hw_if.config_tx_flow_control(pdata);
}
if (pdata->rx_pause != pdata->phy.rx_pause) {
- pdata->hw_if.config_rx_flow_control(pdata);
+ new_state = 1;
+ axgbe_printf(2, "rx pause %d/%d\n", pdata->rx_pause,
+ pdata->phy.rx_pause);
pdata->rx_pause = pdata->phy.rx_pause;
+ pdata->hw_if.config_rx_flow_control(pdata);
}
/* Speed support */
if (pdata->phy_speed != pdata->phy.speed) {
+ new_state = 1;
pdata->phy_speed = pdata->phy.speed;
}
if (pdata->phy_link != pdata->phy.link) {
+ new_state = 1;
pdata->phy_link = pdata->phy.link;
}
} else if (pdata->phy_link) {
+ new_state = 1;
pdata->phy_link = 0;
pdata->phy_speed = SPEED_UNKNOWN;
}
+
+ axgbe_printf(2, "phy_link %d Link %d new_state %d\n", pdata->phy_link,
+ pdata->phy.link, new_state);
+
+ if (new_state)
+ xgbe_phy_print_status(pdata);
}
-static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
+static bool
+xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
{
+ return (pdata->phy_if.phy_impl.valid_speed(pdata, speed));
+}
- /* Disable auto-negotiation */
- xgbe_disable_an(pdata);
+static int
+xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mode mode;
- /* Validate/Set specified speed */
- switch (pdata->phy.speed) {
- case SPEED_10000:
- xgbe_set_mode(pdata, XGBE_MODE_KR);
- break;
+ axgbe_printf(2, "fixed PHY configuration\n");
- case SPEED_2500:
- case SPEED_1000:
- xgbe_set_mode(pdata, XGBE_MODE_KX);
+ /* Disable auto-negotiation */
+ xgbe_an_disable(pdata);
+
+ /* Set specified mode for specified speed */
+ mode = pdata->phy_if.phy_impl.get_mode(pdata, pdata->phy.speed);
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ case XGBE_MODE_KX_2500:
+ case XGBE_MODE_KR:
+ case XGBE_MODE_SGMII_100:
+ case XGBE_MODE_SGMII_1000:
+ case XGBE_MODE_X:
+ case XGBE_MODE_SFI:
break;
-
+ case XGBE_MODE_UNKNOWN:
default:
- return -EINVAL;
+ return (-EINVAL);
}
/* Validate duplex mode */
if (pdata->phy.duplex != DUPLEX_FULL)
- return -EINVAL;
+ return (-EINVAL);
- return 0;
+ xgbe_set_mode(pdata, mode);
+
+ return (0);
}
-static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+static int
+__xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode)
{
+ int ret;
+ unsigned int reg;
+
+ sx_xlock(&pdata->an_mutex);
+
set_bit(XGBE_LINK_INIT, &pdata->dev_state);
pdata->link_check = ticks;
- if (pdata->phy.autoneg != AUTONEG_ENABLE)
- return xgbe_phy_config_fixed(pdata);
+ ret = pdata->phy_if.phy_impl.an_config(pdata);
+ if (ret) {
+ axgbe_error("%s: an_config fail %d\n", __func__, ret);
+ goto out;
+ }
+
+ if (pdata->phy.autoneg != AUTONEG_ENABLE) {
+ ret = xgbe_phy_config_fixed(pdata);
+ if (ret || !pdata->kr_redrv) {
+ if (ret)
+ axgbe_error("%s: fix conf fail %d\n", __func__, ret);
+ goto out;
+ }
+
+ axgbe_printf(2, "AN redriver support\n");
+ } else
+ axgbe_printf(2, "AN PHY configuration\n");
/* Disable auto-negotiation interrupt */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK);
+ axgbe_printf(2, "%s: set_mode %d AN int reg value 0x%x\n", __func__,
+ set_mode, reg);
/* Clear any auto-negotitation interrupts */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
/* Start auto-negotiation in a supported mode */
- if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) {
- xgbe_set_mode(pdata, XGBE_MODE_KR);
- } else if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
- (pdata->phy.advertising & ADVERTISED_2500baseX_Full)) {
- xgbe_set_mode(pdata, XGBE_MODE_KX);
- } else {
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
- return -EINVAL;
+ if (set_mode) {
+ /* Start auto-negotiation in a supported mode */
+ if (xgbe_use_mode(pdata, XGBE_MODE_KR)) {
+ xgbe_set_mode(pdata, XGBE_MODE_KR);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) {
+ xgbe_set_mode(pdata, XGBE_MODE_KX_2500);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) {
+ xgbe_set_mode(pdata, XGBE_MODE_KX_1000);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) {
+ xgbe_set_mode(pdata, XGBE_MODE_SFI);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) {
+ xgbe_set_mode(pdata, XGBE_MODE_X);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) {
+ xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
+ xgbe_set_mode(pdata, XGBE_MODE_SGMII_100);
+ } else {
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
+ ret = -EINVAL;
+ goto out;
+ }
}
/* Disable and stop any in progress auto-negotiation */
- xgbe_disable_an(pdata);
+ xgbe_an_disable_all(pdata);
/* Clear any auto-negotitation interrupts */
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ xgbe_an_clear_interrupts_all(pdata);
pdata->an_result = XGBE_AN_READY;
pdata->an_state = XGBE_AN_READY;
@@ -847,300 +1257,338 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
/* Re-enable auto-negotiation interrupt */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK);
/* Set up advertisement registers based on current settings */
xgbe_an_init(pdata);
/* Enable and start auto-negotiation */
- xgbe_restart_an(pdata);
-
- return 0;
-}
+ xgbe_an_restart(pdata);
-static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
-{
- int ret;
-
- sx_xlock(&pdata->an_mutex);
-
- ret = __xgbe_phy_config_aneg(pdata);
- if (ret)
+out:
+ if (ret) {
+ axgbe_printf(0, "%s: set_mode %d AN int reg value 0x%x ret value %d\n",
+ __func__, set_mode, reg, ret);
set_bit(XGBE_LINK_ERR, &pdata->dev_state);
- else
+ } else
clear_bit(XGBE_LINK_ERR, &pdata->dev_state);
sx_unlock(&pdata->an_mutex);
- return ret;
+ return (ret);
+}
+
+static int
+xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+{
+ return (__xgbe_phy_config_aneg(pdata, true));
+}
+
+static int
+xgbe_phy_reconfig_aneg(struct xgbe_prv_data *pdata)
+{
+ return (__xgbe_phy_config_aneg(pdata, false));
}
-static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
+static bool
+xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
{
return (pdata->an_result == XGBE_AN_COMPLETE);
}
-static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
+static void
+xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
{
unsigned long link_timeout;
link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * hz);
- if ((int)(ticks - link_timeout) >= 0) {
+ if ((int)(ticks - link_timeout) > 0) {
+ axgbe_printf(2, "AN link timeout\n");
xgbe_phy_config_aneg(pdata);
}
}
-static void xgbe_phy_status_force(struct xgbe_prv_data *pdata)
+static enum xgbe_mode
+xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
{
- if (xgbe_in_kr_mode(pdata)) {
- pdata->phy.speed = SPEED_10000;
- } else {
- switch (pdata->speed_set) {
- case XGBE_SPEEDSET_1000_10000:
- pdata->phy.speed = SPEED_1000;
- break;
-
- case XGBE_SPEEDSET_2500_10000:
- pdata->phy.speed = SPEED_2500;
- break;
- }
- }
- pdata->phy.duplex = DUPLEX_FULL;
+ return (pdata->phy_if.phy_impl.an_outcome(pdata));
}
-static void xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
+static void
+xgbe_phy_status_result(struct xgbe_prv_data *pdata)
{
- unsigned int ad_reg, lp_reg;
+ enum xgbe_mode mode;
- pdata->phy.lp_advertising = 0;
+ XGBE_ZERO_LP_ADV(&pdata->phy);
if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
- return xgbe_phy_status_force(pdata);
-
- pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
- pdata->phy.lp_advertising |= ADVERTISED_Backplane;
-
- /* Compare Advertisement and Link Partner register 1 */
- ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
- lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
- if (lp_reg & 0x400)
- pdata->phy.lp_advertising |= ADVERTISED_Pause;
- if (lp_reg & 0x800)
- pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
-
- if (pdata->phy.pause_autoneg) {
- /* Set flow control based on auto-negotiation result */
- pdata->phy.tx_pause = 0;
- pdata->phy.rx_pause = 0;
-
- if (ad_reg & lp_reg & 0x400) {
- pdata->phy.tx_pause = 1;
- pdata->phy.rx_pause = 1;
- } else if (ad_reg & lp_reg & 0x800) {
- if (ad_reg & 0x400)
- pdata->phy.rx_pause = 1;
- else if (lp_reg & 0x400)
- pdata->phy.tx_pause = 1;
- }
- }
-
- /* Compare Advertisement and Link Partner register 2 */
- ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
- lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
- if (lp_reg & 0x80)
- pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
- if (lp_reg & 0x20) {
- switch (pdata->speed_set) {
- case XGBE_SPEEDSET_1000_10000:
- pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
- break;
- case XGBE_SPEEDSET_2500_10000:
- pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full;
- break;
- }
- }
+ mode = xgbe_cur_mode(pdata);
+ else
+ mode = xgbe_phy_status_aneg(pdata);
- ad_reg &= lp_reg;
- if (ad_reg & 0x80) {
+ axgbe_printf(3, "%s: xgbe mode %d\n", __func__, mode);
+ switch (mode) {
+ case XGBE_MODE_SGMII_100:
+ pdata->phy.speed = SPEED_100;
+ break;
+ case XGBE_MODE_X:
+ case XGBE_MODE_KX_1000:
+ case XGBE_MODE_SGMII_1000:
+ pdata->phy.speed = SPEED_1000;
+ break;
+ case XGBE_MODE_KX_2500:
+ pdata->phy.speed = SPEED_2500;
+ break;
+ case XGBE_MODE_KR:
+ case XGBE_MODE_SFI:
pdata->phy.speed = SPEED_10000;
- xgbe_set_mode(pdata, XGBE_MODE_KR);
- } else if (ad_reg & 0x20) {
- switch (pdata->speed_set) {
- case XGBE_SPEEDSET_1000_10000:
- pdata->phy.speed = SPEED_1000;
- break;
-
- case XGBE_SPEEDSET_2500_10000:
- pdata->phy.speed = SPEED_2500;
- break;
- }
-
- xgbe_set_mode(pdata, XGBE_MODE_KX);
- } else {
+ break;
+ case XGBE_MODE_UNKNOWN:
+ default:
+ axgbe_printf(1, "%s: unknown mode\n", __func__);
pdata->phy.speed = SPEED_UNKNOWN;
}
- /* Compare Advertisement and Link Partner register 3 */
- ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
- lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ pdata->phy.duplex = DUPLEX_FULL;
+ axgbe_printf(2, "%s: speed %d duplex %d\n", __func__, pdata->phy.speed,
+ pdata->phy.duplex);
+
+ if (xgbe_set_mode(pdata, mode) && pdata->an_again)
+ xgbe_phy_reconfig_aneg(pdata);
}
-static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+static void
+xgbe_phy_status(struct xgbe_prv_data *pdata)
{
- unsigned int reg, link_aneg;
+ bool link_aneg;
+ int an_restart;
if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
+ axgbe_error("%s: LINK_ERR\n", __func__);
pdata->phy.link = 0;
goto adjust_link;
}
link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
+ axgbe_printf(3, "link_aneg - %d\n", link_aneg);
/* Get the link status. Link status is latched low, so read
* once to clear and then read again to get current state
*/
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
- pdata->phy.link = (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
+ pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
+ &an_restart);
+
+ axgbe_printf(1, "link_status returned Link:%d an_restart:%d aneg:%d\n",
+ pdata->phy.link, an_restart, link_aneg);
+
+ if (an_restart) {
+ xgbe_phy_config_aneg(pdata);
+ return;
+ }
if (pdata->phy.link) {
+ axgbe_printf(2, "Link Active\n");
if (link_aneg && !xgbe_phy_aneg_done(pdata)) {
+ axgbe_printf(1, "phy_link set check timeout\n");
xgbe_check_link_timeout(pdata);
return;
}
- xgbe_phy_status_aneg(pdata);
+ axgbe_printf(2, "%s: Link write phy_status result\n", __func__);
+ xgbe_phy_status_result(pdata);
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
+
} else {
+ axgbe_printf(2, "Link Deactive\n");
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
+ axgbe_printf(1, "phy_link not set check timeout\n");
xgbe_check_link_timeout(pdata);
- if (link_aneg)
+ if (link_aneg) {
+ axgbe_printf(2, "link_aneg case\n");
return;
+ }
}
- xgbe_phy_status_aneg(pdata);
+ xgbe_phy_status_result(pdata);
+
}
adjust_link:
+ axgbe_printf(2, "%s: Link %d\n", __func__, pdata->phy.link);
xgbe_phy_adjust_link(pdata);
}
-static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+static void
+xgbe_phy_stop(struct xgbe_prv_data *pdata)
{
+ axgbe_printf(2, "stopping PHY\n");
- /* Disable auto-negotiation */
- xgbe_disable_an(pdata);
+ if (!pdata->phy_started)
+ return;
- /* Disable auto-negotiation interrupts */
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+ /* Indicate the PHY is down */
+ pdata->phy_started = 0;
+
+ /* Disable auto-negotiation */
+ xgbe_an_disable_all(pdata);
- bus_teardown_intr(pdata->dev, pdata->an_irq_res, pdata->an_irq_tag);
+ pdata->phy_if.phy_impl.stop(pdata);
pdata->phy.link = 0;
xgbe_phy_adjust_link(pdata);
}
-static int xgbe_phy_start(struct xgbe_prv_data *pdata)
+static int
+xgbe_phy_start(struct xgbe_prv_data *pdata)
{
int ret;
- ret = bus_setup_intr(pdata->dev, pdata->an_irq_res,
- INTR_MPSAFE | INTR_TYPE_NET, NULL, xgbe_an_isr, pdata,
- &pdata->an_irq_tag);
+ DBGPR("-->xgbe_phy_start\n");
+
+ ret = pdata->phy_if.phy_impl.start(pdata);
if (ret) {
- return -ret;
+ axgbe_error("%s: impl start ret %d\n", __func__, ret);
+ return (ret);
}
/* Set initial mode - call the mode setting routines
* directly to insure we are properly configured
*/
- if (xgbe_use_xgmii_mode(pdata)) {
- xgbe_xgmii_mode(pdata);
- } else if (xgbe_use_gmii_mode(pdata)) {
- xgbe_gmii_mode(pdata);
- } else if (xgbe_use_gmii_2500_mode(pdata)) {
- xgbe_gmii_2500_mode(pdata);
+ if (xgbe_use_mode(pdata, XGBE_MODE_KR)) {
+ axgbe_printf(2, "%s: KR\n", __func__);
+ xgbe_kr_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) {
+ axgbe_printf(2, "%s: KX 2500\n", __func__);
+ xgbe_kx_2500_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) {
+ axgbe_printf(2, "%s: KX 1000\n", __func__);
+ xgbe_kx_1000_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) {
+ axgbe_printf(2, "%s: SFI\n", __func__);
+ xgbe_sfi_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) {
+ axgbe_printf(2, "%s: X\n", __func__);
+ xgbe_x_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) {
+ axgbe_printf(2, "%s: SGMII 1000\n", __func__);
+ xgbe_sgmii_1000_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
+ axgbe_printf(2, "%s: SGMII 100\n", __func__);
+ xgbe_sgmii_100_mode(pdata);
} else {
+ axgbe_error("%s: invalid mode\n", __func__);
ret = -EINVAL;
- goto err_irq;
+ goto err_stop;
}
+ /* Indicate the PHY is up and running */
+ pdata->phy_started = 1;
+
/* Set up advertisement registers based on current settings */
xgbe_an_init(pdata);
/* Enable auto-negotiation interrupts */
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
+ xgbe_an_enable_interrupts(pdata);
- return xgbe_phy_config_aneg(pdata);
+ ret = xgbe_phy_config_aneg(pdata);
+ if (ret)
+ axgbe_error("%s: phy_config_aneg %d\n", __func__, ret);
+
+ return (ret);
-err_irq:
- bus_teardown_intr(pdata->dev, pdata->an_irq_res, pdata->an_irq_tag);
+err_stop:
+ pdata->phy_if.phy_impl.stop(pdata);
- return ret;
+ return (ret);
}
-static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
+static int
+xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
- unsigned int count, reg;
-
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
- reg |= MDIO_CTRL1_RESET;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
-
- count = 50;
- do {
- DELAY(20);
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
- } while ((reg & MDIO_CTRL1_RESET) && --count);
+ int ret;
- if (reg & MDIO_CTRL1_RESET)
- return -ETIMEDOUT;
+ ret = pdata->phy_if.phy_impl.reset(pdata);
+ if (ret) {
+ axgbe_error("%s: impl phy reset %d\n", __func__, ret);
+ return (ret);
+ }
/* Disable auto-negotiation for now */
- xgbe_disable_an(pdata);
+ xgbe_an_disable_all(pdata);
/* Clear auto-negotiation interrupts */
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ xgbe_an_clear_interrupts_all(pdata);
- return 0;
+ return (0);
}
-static void xgbe_phy_init(struct xgbe_prv_data *pdata)
+/*
+ * Return the highest link speed currently set in the PHY's advertising
+ * mask, checking from fastest (10G) down to 100Mb full duplex.
+ * Returns SPEED_UNKNOWN if no known full-duplex mode is advertised.
+ * Used to pick a fixed speed when autoneg is not advertised.
+ */
+static int
+xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata)
+{
+
+ if (XGBE_ADV(&pdata->phy, 10000baseKR_Full))
+ return (SPEED_10000);
+ else if (XGBE_ADV(&pdata->phy, 10000baseT_Full))
+ return (SPEED_10000);
+ else if (XGBE_ADV(&pdata->phy, 2500baseX_Full))
+ return (SPEED_2500);
+ else if (XGBE_ADV(&pdata->phy, 2500baseT_Full))
+ return (SPEED_2500);
+ else if (XGBE_ADV(&pdata->phy, 1000baseKX_Full))
+ return (SPEED_1000);
+ else if (XGBE_ADV(&pdata->phy, 1000baseT_Full))
+ return (SPEED_1000);
+ else if (XGBE_ADV(&pdata->phy, 100baseT_Full))
+ return (SPEED_100);
+
+ return (SPEED_UNKNOWN);
+}
+
+static void
+xgbe_phy_exit(struct xgbe_prv_data *pdata)
+{
+ pdata->phy_if.phy_impl.exit(pdata);
+}
+
+static int
+xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+ int ret = 0;
+
+ DBGPR("-->xgbe_phy_init\n");
+
sx_init(&pdata->an_mutex, "axgbe AN lock");
pdata->mdio_mmd = MDIO_MMD_PCS;
/* Initialize supported features */
- pdata->phy.supported = SUPPORTED_Autoneg;
- pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- pdata->phy.supported |= SUPPORTED_Backplane;
- pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
- switch (pdata->speed_set) {
- case XGBE_SPEEDSET_1000_10000:
- pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
- break;
- case XGBE_SPEEDSET_2500_10000:
- pdata->phy.supported |= SUPPORTED_2500baseX_Full;
- break;
- }
-
pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
MDIO_PMA_10GBR_FECABLE);
pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
MDIO_PMA_10GBR_FECABLE_ERRABLE);
- if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
- pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
- pdata->phy.advertising = pdata->phy.supported;
+ /* Setup the phy (including supported features) */
+ ret = pdata->phy_if.phy_impl.init(pdata);
+ if (ret)
+ return (ret);
+
+ /* Copy supported link modes to advertising link modes */
+ XGBE_LM_COPY(&pdata->phy, advertising, &pdata->phy, supported);
pdata->phy.address = 0;
- pdata->phy.autoneg = AUTONEG_ENABLE;
- pdata->phy.speed = SPEED_UNKNOWN;
- pdata->phy.duplex = DUPLEX_UNKNOWN;
+ if (XGBE_ADV(&pdata->phy, Autoneg)) {
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ } else {
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ pdata->phy.speed = xgbe_phy_best_advertised_speed(pdata);
+ pdata->phy.duplex = DUPLEX_FULL;
+ }
pdata->phy.link = 0;
@@ -1149,26 +1597,38 @@ static void xgbe_phy_init(struct xgbe_prv_data *pdata)
pdata->phy.rx_pause = pdata->rx_pause;
/* Fix up Flow Control advertising */
- pdata->phy.advertising &= ~ADVERTISED_Pause;
- pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
+ XGBE_CLR_ADV(&pdata->phy, Pause);
+ XGBE_CLR_ADV(&pdata->phy, Asym_Pause);
if (pdata->rx_pause) {
- pdata->phy.advertising |= ADVERTISED_Pause;
- pdata->phy.advertising |= ADVERTISED_Asym_Pause;
+ XGBE_SET_ADV(&pdata->phy, Pause);
+ XGBE_SET_ADV(&pdata->phy, Asym_Pause);
+ }
+
+ if (pdata->tx_pause) {
+ if (XGBE_ADV(&pdata->phy, Asym_Pause))
+ XGBE_CLR_ADV(&pdata->phy, Asym_Pause);
+ else
+ XGBE_SET_ADV(&pdata->phy, Asym_Pause);
}
- if (pdata->tx_pause)
- pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
+ return (0);
}
-void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
+void
+xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
{
- phy_if->phy_init = xgbe_phy_init;
+ phy_if->phy_init = xgbe_phy_init;
+ phy_if->phy_exit = xgbe_phy_exit;
phy_if->phy_reset = xgbe_phy_reset;
phy_if->phy_start = xgbe_phy_start;
- phy_if->phy_stop = xgbe_phy_stop;
+ phy_if->phy_stop = xgbe_phy_stop;
phy_if->phy_status = xgbe_phy_status;
phy_if->phy_config_aneg = xgbe_phy_config_aneg;
+
+ phy_if->phy_valid_speed = xgbe_phy_valid_speed;
+
+ phy_if->an_isr = xgbe_an_combined_isr;
}
diff --git a/sys/dev/axgbe/xgbe-phy-v1.c b/sys/dev/axgbe/xgbe-phy-v1.c
new file mode 100644
index 000000000000..7bfb20de23aa
--- /dev/null
+++ b/sys/dev/axgbe/xgbe-phy-v1.c
@@ -0,0 +1,707 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * Copyright (c) 2020 Advanced Micro Devices, Inc.
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+/*
+ * Per-device private state for the v1 PHY implementation, hung off
+ * pdata->phy_data by xgbe_phy_init() below.
+ */
+struct xgbe_phy_data {
+ /* 1000/10000 vs 2500/10000 indicator */
+ unsigned int speed_set;
+
+ /* SerDes UEFI configurable settings.
+ * Switching between modes/speeds requires new values for some
+ * SerDes settings. The values can be supplied as device
+ * properties in array format. The first array entry is for
+ * 1GbE, second for 2.5GbE and third for 10GbE
+ */
+ uint32_t blwc[XGBE_SPEEDS];
+ uint32_t cdr_rate[XGBE_SPEEDS];
+ uint32_t pq_skew[XGBE_SPEEDS];
+ uint32_t tx_amp[XGBE_SPEEDS];
+ uint32_t dfe_tap_cfg[XGBE_SPEEDS];
+ uint32_t dfe_tap_ena[XGBE_SPEEDS];
+};
+
+/* Assert the SIR0 KR training reset bit before KR link training runs. */
+static void
+xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
+{
+ XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1);
+}
+
+/* De-assert the SIR0 KR training reset bit once KR training is done. */
+static void
+xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
+{
+ XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
+}
+
+/*
+ * Decode the Clause 73 auto-negotiation result: record the link
+ * partner's advertised modes, resolve pause/asym-pause flow control
+ * when pause_autoneg is on, and return the xgbe_mode (and set
+ * pdata->phy.speed) matching the highest speed both sides advertised.
+ */
+static enum xgbe_mode
+xgbe_phy_an_outcome(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ XGBE_SET_LP_ADV(&pdata->phy, Autoneg);
+ XGBE_SET_LP_ADV(&pdata->phy, Backplane);
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg & 0x400)
+ XGBE_SET_LP_ADV(&pdata->phy, Pause);
+ if (lp_reg & 0x800)
+ XGBE_SET_LP_ADV(&pdata->phy, Asym_Pause);
+
+ axgbe_printf(1, "%s: pause_autoneg %d ad_reg 0x%x lp_reg 0x%x\n",
+ __func__, pdata->phy.pause_autoneg, ad_reg, lp_reg);
+
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
+ if (ad_reg & lp_reg & 0x400) {
+ pdata->phy.tx_pause = 1;
+ pdata->phy.rx_pause = 1;
+ } else if (ad_reg & lp_reg & 0x800) {
+ if (ad_reg & 0x400)
+ pdata->phy.rx_pause = 1;
+ else if (lp_reg & 0x400)
+ pdata->phy.tx_pause = 1;
+ }
+ }
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+ XGBE_SET_LP_ADV(&pdata->phy, 10000baseKR_Full);
+ if (lp_reg & 0x20) {
+ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
+ XGBE_SET_LP_ADV(&pdata->phy, 2500baseX_Full);
+ else
+ XGBE_SET_LP_ADV(&pdata->phy, 1000baseKX_Full);
+ }
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80) {
+ pdata->phy.speed = SPEED_10000;
+ mode = XGBE_MODE_KR;
+ } else if (ad_reg & 0x20) {
+ /*
+ * NOTE(review): the check a few lines up keys off
+ * phy_data->speed_set, but this switch uses
+ * pdata->speed_set; and with no default case 'mode' is
+ * left uninitialized when neither value matches --
+ * confirm against the upstream driver.
+ */
+ switch (pdata->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ pdata->phy.speed = SPEED_1000;
+ mode = XGBE_MODE_KX_1000;
+ break;
+
+ case XGBE_SPEEDSET_2500_10000:
+ pdata->phy.speed = SPEED_2500;
+ mode = XGBE_MODE_KX_2500;
+ break;
+ }
+ } else {
+ mode = XGBE_MODE_UNKNOWN;
+ pdata->phy.speed = SPEED_UNKNOWN;
+ }
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ XGBE_SET_LP_ADV(&pdata->phy, 10000baseR_FEC);
+
+ return (mode);
+}
+
+/* Copy the driver's advertising link-mode mask into the destination phy. */
+static void
+xgbe_phy_an_advertising(struct xgbe_prv_data *pdata, struct xgbe_phy *dphy)
+{
+ XGBE_LM_COPY(dphy, advertising, &pdata->phy, advertising);
+}
+
+/* No v1-specific AN configuration is required; always succeeds. */
+static int
+xgbe_phy_an_config(struct xgbe_prv_data *pdata)
+{
+ /* Nothing uniquely required for an configuration */
+ return (0);
+}
+
+/* v1 hardware always auto-negotiates via IEEE 802.3 Clause 73. */
+static enum xgbe_an_mode
+xgbe_phy_an_mode(struct xgbe_prv_data *pdata)
+{
+ return (XGBE_AN_MODE_CL73);
+}
+
+/*
+ * Power-cycle the PCS by toggling the MDIO low-power control bit,
+ * with a 75us dwell, so a new speed selection takes effect.
+ */
+static void
+xgbe_phy_pcs_power_cycle(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+
+ reg |= MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ DELAY(75);
+
+ reg &= ~MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+}
+
+/* Begin a SerDes speed change: assert the Rx/Tx ratechange signal. */
+static void
+xgbe_phy_start_ratechange(struct xgbe_prv_data *pdata)
+{
+ /* Assert Rx and Tx ratechange */
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1);
+}
+
+/*
+ * Finish a SerDes speed change: release ratechange, poll up to
+ * XGBE_RATECHANGE_COUNT times (50us apart) for Rx/Tx ready, then
+ * pulse the Rx reset so the new DFE settings are picked up.  A
+ * timeout is only logged; the Rx reset is performed regardless.
+ */
+static void
+xgbe_phy_complete_ratechange(struct xgbe_prv_data *pdata)
+{
+ unsigned int wait;
+ uint16_t status;
+
+ /* Release Rx and Tx ratechange */
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0);
+
+ /* Wait for Rx and Tx ready */
+ wait = XGBE_RATECHANGE_COUNT;
+ while (wait--) {
+ DELAY(50);
+
+ status = XSIR0_IOREAD(pdata, SIR0_STATUS);
+ if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
+ XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
+ goto rx_reset;
+ }
+
+ axgbe_printf(2, "SerDes rx/tx not ready (%#hx)\n", status);
+
+rx_reset:
+ /* Perform Rx reset for the DFE changes */
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1);
+}
+
+/*
+ * Switch the link to 10G KR: program the PCS for 10GBASE-R at 10G,
+ * power-cycle the PCS, then retune the SerDes (rate/word/PLL plus the
+ * per-speed CDR, Tx amplitude, BLWC, PQ skew and DFE values from
+ * phy_data) under a ratechange sequence.
+ */
+static void
+xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ /* Set PCS to KR/10G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBR;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED10G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_phy_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 10G speed */
+ xgbe_phy_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ phy_data->cdr_rate[XGBE_SPEED_10000]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ phy_data->tx_amp[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ phy_data->blwc[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ phy_data->pq_skew[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ phy_data->dfe_tap_cfg[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ phy_data->dfe_tap_ena[XGBE_SPEED_10000]);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ axgbe_printf(2, "10GbE KR mode set\n");
+}
+
+/*
+ * Switch the link to 2.5G KX: PCS is set to 10GBASE-X type at 1G
+ * speed select (the PCS has no distinct 2.5G select; the SerDes
+ * rate/word/PLL values below provide the 2.5G line rate), then the
+ * SerDes is retuned with the 2.5G per-speed values from phy_data.
+ */
+static void
+xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ /* Set PCS to KX/1G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBX;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED1G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_phy_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 2.5G speed */
+ xgbe_phy_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ phy_data->cdr_rate[XGBE_SPEED_2500]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ phy_data->tx_amp[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ phy_data->blwc[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ phy_data->pq_skew[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ phy_data->dfe_tap_cfg[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ phy_data->dfe_tap_ena[XGBE_SPEED_2500]);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ axgbe_printf(2, "2.5GbE KX mode set\n");
+}
+
+/*
+ * Switch the link to 1G KX: program the PCS for 10GBASE-X at 1G,
+ * power-cycle the PCS, then retune the SerDes with the 1G per-speed
+ * values from phy_data under a ratechange sequence.
+ */
+static void
+xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ /* Set PCS to KX/1G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBX;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED1G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_phy_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 1G speed */
+ xgbe_phy_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ phy_data->cdr_rate[XGBE_SPEED_1000]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ phy_data->tx_amp[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ phy_data->blwc[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ phy_data->pq_skew[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ phy_data->dfe_tap_cfg[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ phy_data->dfe_tap_ena[XGBE_SPEED_1000]);
+
+ xgbe_phy_complete_ratechange(pdata);
+
+ axgbe_printf(2, "1GbE KX mode set\n");
+}
+
+/*
+ * Report the mode currently programmed into the PCS: KR if the PCS
+ * type is 10GBASE-R, otherwise the KX variant implied by speed_set.
+ */
+static enum xgbe_mode
+xgbe_phy_cur_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_mode mode;
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= MDIO_PCS_CTRL2_TYPE;
+
+ if (reg == MDIO_PCS_CTRL2_10GBR) {
+ mode = XGBE_MODE_KR;
+ } else {
+ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
+ mode = XGBE_MODE_KX_2500;
+ else
+ mode = XGBE_MODE_KX_1000;
+ }
+
+ return (mode);
+}
+
+/*
+ * Return the mode to try next when toggling between the two supported
+ * rates: KR flips to the speed-set's KX mode and vice-versa.
+ */
+static enum xgbe_mode
+xgbe_phy_switch_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_mode mode;
+
+ /* If we are in KR switch to KX, and vice-versa */
+ if (xgbe_phy_cur_mode(pdata) == XGBE_MODE_KR) {
+ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
+ mode = XGBE_MODE_KX_2500;
+ else
+ mode = XGBE_MODE_KX_1000;
+ } else {
+ mode = XGBE_MODE_KR;
+ }
+
+ return (mode);
+}
+
+/*
+ * Map a link speed to its xgbe_mode for this hardware; KX modes are
+ * only valid when the matching speed-set is configured.  Returns
+ * XGBE_MODE_UNKNOWN for unsupported speed/speed-set combinations.
+ */
+static enum xgbe_mode
+xgbe_phy_get_mode(struct xgbe_prv_data *pdata, int speed)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (speed) {
+ case SPEED_1000:
+ return ((phy_data->speed_set == XGBE_SPEEDSET_1000_10000)
+ ? XGBE_MODE_KX_1000 : XGBE_MODE_UNKNOWN);
+ case SPEED_2500:
+ return ((phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
+ ? XGBE_MODE_KX_2500 : XGBE_MODE_UNKNOWN);
+ case SPEED_10000:
+ return (XGBE_MODE_KR);
+ default:
+ return (XGBE_MODE_UNKNOWN);
+ }
+}
+
+/* Dispatch to the mode-programming routine; unknown modes are ignored. */
+static void
+xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ xgbe_phy_kx_1000_mode(pdata);
+ break;
+ case XGBE_MODE_KX_2500:
+ xgbe_phy_kx_2500_mode(pdata);
+ break;
+ case XGBE_MODE_KR:
+ xgbe_phy_kr_mode(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Translate the current PHY speed into an ifmedia active subtype for
+ * an ifmedia status request; IFM_OTHER covers unknown speeds.
+ */
+static void
+xgbe_phy_get_type(struct xgbe_prv_data *pdata, struct ifmediareq * ifmr)
+{
+
+ switch (pdata->phy.speed) {
+ case SPEED_10000:
+ ifmr->ifm_active |= IFM_10G_KR;
+ break;
+ case SPEED_2500:
+ ifmr->ifm_active |= IFM_2500_KX;
+ break;
+ case SPEED_1000:
+ ifmr->ifm_active |= IFM_1000_KX;
+ break;
+ default:
+ ifmr->ifm_active |= IFM_OTHER;
+ break;
+ }
+}
+
+/*
+ * Decide whether a mode is usable: under autoneg the caller's
+ * advertisement flag governs; with autoneg off, only the mode that
+ * matches the currently configured fixed speed is allowed.
+ */
+static bool
+xgbe_phy_check_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode, bool advert)
+{
+
+ if (pdata->phy.autoneg == AUTONEG_ENABLE)
+ return (advert);
+ else {
+ enum xgbe_mode cur_mode;
+
+ cur_mode = xgbe_phy_get_mode(pdata, pdata->phy.speed);
+ if (cur_mode == mode)
+ return (true);
+ }
+
+ return (false);
+}
+
+/*
+ * Check whether a mode should be used, pairing each mode with the
+ * corresponding advertised link-mode bit.
+ */
+static bool
+xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ return (xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(&pdata->phy, 1000baseKX_Full)));
+ case XGBE_MODE_KX_2500:
+ return (xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(&pdata->phy, 2500baseX_Full)));
+ case XGBE_MODE_KR:
+ return (xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(&pdata->phy, 10000baseKR_Full)));
+ default:
+ return (false);
+ }
+}
+
+/*
+ * Validate a requested speed: 10G is always supported, while 1G and
+ * 2.5G require the matching speed-set configuration.
+ */
+static bool
+xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (speed) {
+ case SPEED_1000:
+ if (phy_data->speed_set != XGBE_SPEEDSET_1000_10000)
+ return (false);
+ return (true);
+ case SPEED_2500:
+ if (phy_data->speed_set != XGBE_SPEEDSET_2500_10000)
+ return (false);
+ return (true);
+ case SPEED_10000:
+ return (true);
+ default:
+ return (false);
+ }
+}
+
+/*
+ * Return 1 if the PCS reports link up, 0 otherwise.  v1 hardware
+ * never forces an autoneg restart here, so *an_restart is always 0.
+ */
+static int
+xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
+{
+ unsigned int reg;
+
+ *an_restart = 0;
+
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+
+ return ((reg & MDIO_STAT1_LSTATUS) ? 1 : 0);
+}
+
+/* No v1-specific teardown; common-layer stop handles everything. */
+static void
+xgbe_phy_stop(struct xgbe_prv_data *pdata)
+{
+ /* Nothing uniquely required for stop */
+}
+
+/* No v1-specific startup; common-layer start handles everything. */
+static int
+xgbe_phy_start(struct xgbe_prv_data *pdata)
+{
+ /* Nothing uniquely required for start */
+ return (0);
+}
+
+/*
+ * Software-reset the PCS and poll (up to 50 x 20us) for the reset bit
+ * to self-clear.  Returns 0 on success or -ETIMEDOUT if the bit is
+ * still set after the polling window.
+ */
+static int
+xgbe_phy_reset(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg, count;
+
+ /* Perform a software reset of the PCS */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg |= MDIO_CTRL1_RESET;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ count = 50;
+ do {
+ DELAY(20);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ } while ((reg & MDIO_CTRL1_RESET) && --count);
+
+ if (reg & MDIO_CTRL1_RESET)
+ return (-ETIMEDOUT);
+
+ return (0);
+}
+
+/*
+ * No v1-specific cleanup.  NOTE(review): pdata->phy_data allocated by
+ * xgbe_phy_init() below is not freed here -- confirm ownership.
+ */
+static void
+xgbe_phy_exit(struct xgbe_prv_data *pdata)
+{
+ /* Nothing uniquely required for exit */
+}
+
+/*
+ * Allocate the per-device phy_data and build the supported link-mode
+ * mask from the configured speed-set and FEC ability.  Returns 0
+ * (malloc with M_WAITOK cannot fail).
+ */
+static int
+xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data;
+
+ phy_data = malloc(sizeof(*phy_data), M_AXGBE, M_WAITOK | M_ZERO);
+
+ /* Initialize supported features */
+ XGBE_ZERO_SUP(&pdata->phy);
+ XGBE_SET_SUP(&pdata->phy, Autoneg);
+ XGBE_SET_SUP(&pdata->phy, Pause);
+ XGBE_SET_SUP(&pdata->phy, Asym_Pause);
+ XGBE_SET_SUP(&pdata->phy, Backplane);
+ XGBE_SET_SUP(&pdata->phy, 10000baseKR_Full);
+ /*
+ * NOTE(review): phy_data was just zeroed by M_ZERO and
+ * phy_data->speed_set is never assigned before this switch, so
+ * it always evaluates as 0 here; upstream copies the speed-set
+ * from the device configuration first -- confirm and fix.
+ */
+ switch (phy_data->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ XGBE_SET_SUP(&pdata->phy, 1000baseKX_Full);
+ break;
+ case XGBE_SPEEDSET_2500_10000:
+ XGBE_SET_SUP(&pdata->phy, 2500baseX_Full);
+ break;
+ }
+
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ XGBE_SET_SUP(&pdata->phy, 10000baseR_FEC);
+
+ pdata->phy_data = phy_data;
+
+ return (0);
+}
+
+/*
+ * Populate the phy_impl dispatch table with the v1 (integrated
+ * KR/KX backplane) implementations defined above.
+ */
+void
+xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *phy_if)
+{
+ struct xgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
+
+ phy_impl->init = xgbe_phy_init;
+ phy_impl->exit = xgbe_phy_exit;
+
+ phy_impl->reset = xgbe_phy_reset;
+ phy_impl->start = xgbe_phy_start;
+ phy_impl->stop = xgbe_phy_stop;
+
+ phy_impl->link_status = xgbe_phy_link_status;
+
+ phy_impl->valid_speed = xgbe_phy_valid_speed;
+
+ phy_impl->use_mode = xgbe_phy_use_mode;
+ phy_impl->set_mode = xgbe_phy_set_mode;
+ phy_impl->get_mode = xgbe_phy_get_mode;
+ phy_impl->switch_mode = xgbe_phy_switch_mode;
+ phy_impl->cur_mode = xgbe_phy_cur_mode;
+ phy_impl->get_type = xgbe_phy_get_type;
+
+ phy_impl->an_mode = xgbe_phy_an_mode;
+
+ phy_impl->an_config = xgbe_phy_an_config;
+
+ phy_impl->an_advertising = xgbe_phy_an_advertising;
+
+ phy_impl->an_outcome = xgbe_phy_an_outcome;
+
+ phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
+ phy_impl->kr_training_post = xgbe_phy_kr_training_post;
+}
diff --git a/sys/dev/axgbe/xgbe-phy-v2.c b/sys/dev/axgbe/xgbe-phy-v2.c
new file mode 100644
index 000000000000..8039909df057
--- /dev/null
+++ b/sys/dev/axgbe/xgbe-phy-v2.c
@@ -0,0 +1,3771 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * Copyright (c) 2020 Advanced Micro Devices, Inc.
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
/*
 * Serializes access to the I2C and MDIO/GPIO busses that are shared by
 * all ports (see xgbe_phy_get_comm_ownership()).
 */
struct mtx xgbe_phy_comm_lock;

/* Port speed capability bit-flags (phy_data->port_speeds). */
#define XGBE_PHY_PORT_SPEED_100		BIT(0)
#define XGBE_PHY_PORT_SPEED_1000	BIT(1)
#define XGBE_PHY_PORT_SPEED_2500	BIT(2)
#define XGBE_PHY_PORT_SPEED_10000	BIT(3)

/* Value written to the XP_I2C/XP_MDIO mutex registers to release them. */
#define XGBE_MUTEX_RELEASE		0x80000000

#define XGBE_SFP_DIRECT			7
#define GPIO_MASK_WIDTH			4

/* I2C target addresses */
#define XGBE_SFP_SERIAL_ID_ADDRESS	0x50
#define XGBE_SFP_DIAG_INFO_ADDRESS	0x51
#define XGBE_SFP_PHY_ADDRESS		0x56
#define XGBE_GPIO_ADDRESS_PCA9555	0x20

/* SFP sideband signal indicators ("NO_" = signal not routed via GPIO) */
#define XGBE_GPIO_NO_TX_FAULT		BIT(0)
#define XGBE_GPIO_NO_RATE_SELECT	BIT(1)
#define XGBE_GPIO_NO_MOD_ABSENT		BIT(2)
#define XGBE_GPIO_NO_RX_LOS		BIT(3)

/* Rate-change complete wait/retry count */
#define XGBE_RATECHANGE_COUNT		500

/* CDR delay values for KR support (in usec) */
#define XGBE_CDR_DELAY_INIT		10000
#define XGBE_CDR_DELAY_INC		10000
#define XGBE_CDR_DELAY_MAX		100000

/* RRC frequency during link status check */
#define XGBE_RRC_FREQUENCY		10
+
/* Physical port wiring, as reported by the port property registers. */
enum xgbe_port_mode {
	XGBE_PORT_MODE_RSVD = 0,
	XGBE_PORT_MODE_BACKPLANE,
	XGBE_PORT_MODE_BACKPLANE_2500,
	XGBE_PORT_MODE_1000BASE_T,
	XGBE_PORT_MODE_1000BASE_X,
	XGBE_PORT_MODE_NBASE_T,
	XGBE_PORT_MODE_10GBASE_T,
	XGBE_PORT_MODE_10GBASE_R,
	XGBE_PORT_MODE_SFP,
	XGBE_PORT_MODE_MAX,
};

/* How the external PHY (if any) is connected to the MAC. */
enum xgbe_conn_type {
	XGBE_CONN_TYPE_NONE = 0,
	XGBE_CONN_TYPE_SFP,
	XGBE_CONN_TYPE_MDIO,
	XGBE_CONN_TYPE_RSVD1,
	XGBE_CONN_TYPE_BACKPLANE,
	XGBE_CONN_TYPE_MAX,
};

/* SFP/SFP+ related definitions */

/* Whether the SFP is reached directly or through a PCA9545 I2C mux. */
enum xgbe_sfp_comm {
	XGBE_SFP_COMM_DIRECT = 0,
	XGBE_SFP_COMM_PCA9545,
};

enum xgbe_sfp_cable {
	XGBE_SFP_CABLE_UNKNOWN = 0,
	XGBE_SFP_CABLE_ACTIVE,
	XGBE_SFP_CABLE_PASSIVE,
};

/* Module media type decoded from the SFP serial-ID EEPROM. */
enum xgbe_sfp_base {
	XGBE_SFP_BASE_UNKNOWN = 0,
	XGBE_SFP_BASE_1000_T,
	XGBE_SFP_BASE_1000_SX,
	XGBE_SFP_BASE_1000_LX,
	XGBE_SFP_BASE_1000_CX,
	XGBE_SFP_BASE_10000_SR,
	XGBE_SFP_BASE_10000_LR,
	XGBE_SFP_BASE_10000_LRM,
	XGBE_SFP_BASE_10000_ER,
	XGBE_SFP_BASE_10000_CR,
};

enum xgbe_sfp_speed {
	XGBE_SFP_SPEED_UNKNOWN = 0,
	XGBE_SFP_SPEED_100_1000,
	XGBE_SFP_SPEED_1000,
	XGBE_SFP_SPEED_10000,
};
+
/*
 * SFP Serial ID Base ID values relative to an offset of 0.
 * Offsets and bit meanings follow the SFF-8472 serial-ID layout --
 * TODO(review): confirm individual fields against the SFF-8472 spec.
 */
#define XGBE_SFP_BASE_ID			0
#define XGBE_SFP_ID_SFP				0x03

#define XGBE_SFP_BASE_EXT_ID			1
#define XGBE_SFP_EXT_ID_SFP			0x04

/* 10GbE compliance codes (byte 3) */
#define XGBE_SFP_BASE_10GBE_CC			3
#define XGBE_SFP_BASE_10GBE_CC_SR		BIT(4)
#define XGBE_SFP_BASE_10GBE_CC_LR		BIT(5)
#define XGBE_SFP_BASE_10GBE_CC_LRM		BIT(6)
#define XGBE_SFP_BASE_10GBE_CC_ER		BIT(7)

/* 1GbE compliance codes (byte 6) */
#define XGBE_SFP_BASE_1GBE_CC			6
#define XGBE_SFP_BASE_1GBE_CC_SX		BIT(0)
#define XGBE_SFP_BASE_1GBE_CC_LX		BIT(1)
#define XGBE_SFP_BASE_1GBE_CC_CX		BIT(2)
#define XGBE_SFP_BASE_1GBE_CC_T			BIT(3)

#define XGBE_SFP_BASE_CABLE			8
#define XGBE_SFP_BASE_CABLE_PASSIVE		BIT(2)
#define XGBE_SFP_BASE_CABLE_ACTIVE		BIT(3)

/* Nominal bit rate (byte 12) and accepted ranges per speed class */
#define XGBE_SFP_BASE_BR			12
#define XGBE_SFP_BASE_BR_1GBE_MIN		0x0a
#define XGBE_SFP_BASE_BR_1GBE_MAX		0x0d
#define XGBE_SFP_BASE_BR_10GBE_MIN		0x64
#define XGBE_SFP_BASE_BR_10GBE_MAX		0x68

#define XGBE_SFP_BASE_CU_CABLE_LEN		18

#define XGBE_SFP_BASE_VENDOR_NAME		20
#define XGBE_SFP_BASE_VENDOR_NAME_LEN		16
#define XGBE_SFP_BASE_VENDOR_PN			40
#define XGBE_SFP_BASE_VENDOR_PN_LEN		16
#define XGBE_SFP_BASE_VENDOR_REV		56
#define XGBE_SFP_BASE_VENDOR_REV_LEN		4

/* Base-section checksum byte */
#define XGBE_SFP_BASE_CC			63

/* SFP Serial ID Extended ID values relative to an offset of 64 */
#define XGBE_SFP_BASE_VENDOR_SN			4
#define XGBE_SFP_BASE_VENDOR_SN_LEN		16

#define XGBE_SFP_EXTD_OPT1			1
#define XGBE_SFP_EXTD_OPT1_RX_LOS		BIT(1)
#define XGBE_SFP_EXTD_OPT1_TX_FAULT		BIT(3)

#define XGBE_SFP_EXTD_DIAG			28
#define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE		BIT(2)

#define XGBE_SFP_EXTD_SFF_8472			30

/* Extended-section checksum byte */
#define XGBE_SFP_EXTD_CC			31

/* In-memory copy of the serial-ID EEPROM (base + extended + vendor). */
struct xgbe_sfp_eeprom {
	uint8_t base[64];
	uint8_t extd[32];
	uint8_t vendor[32];
};

/*
 * Diagnostics usable only when SFF-8472 compliance is reported and the
 * module does not require an address change to reach the diag page.
 */
#define XGBE_SFP_DIAGS_SUPPORTED(_x)			\
	((_x)->extd[XGBE_SFP_EXTD_SFF_8472] &&		\
	 !((_x)->extd[XGBE_SFP_EXTD_DIAG] & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE))

#define XGBE_SFP_EEPROM_BASE_LEN	256
#define XGBE_SFP_EEPROM_DIAG_LEN	256
#define XGBE_SFP_EEPROM_MAX		(XGBE_SFP_EEPROM_BASE_LEN +	\
					 XGBE_SFP_EEPROM_DIAG_LEN)

/* Vendor/part strings are fixed-width, space padded (hence the padding). */
#define XGBE_BEL_FUSE_VENDOR	"BEL-FUSE        "
#define XGBE_BEL_FUSE_PARTNO	"1GBT-SFP06      "

/* Scratch union for NUL-terminating the fixed-width EEPROM strings. */
struct xgbe_sfp_ascii {
	union {
		char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
		char partno[XGBE_SFP_BASE_VENDOR_PN_LEN + 1];
		char rev[XGBE_SFP_BASE_VENDOR_REV_LEN + 1];
		char serno[XGBE_SFP_BASE_VENDOR_SN_LEN + 1];
	} u;
};
+
/* MDIO PHY reset types: how the external PHY's reset pin is driven. */
enum xgbe_mdio_reset {
	XGBE_MDIO_RESET_NONE = 0,
	XGBE_MDIO_RESET_I2C_GPIO,	/* via I2C GPIO expander */
	XGBE_MDIO_RESET_INT_GPIO,	/* via internal GPIO */
	XGBE_MDIO_RESET_MAX,
};

/* Re-driver related definitions */

/* Bus used to configure the signal re-driver. */
enum xgbe_phy_redrv_if {
	XGBE_PHY_REDRV_IF_MDIO = 0,
	XGBE_PHY_REDRV_IF_I2C,
	XGBE_PHY_REDRV_IF_MAX,
};

enum xgbe_phy_redrv_model {
	XGBE_PHY_REDRV_MODEL_4223 = 0,
	XGBE_PHY_REDRV_MODEL_4227,
	XGBE_PHY_REDRV_MODEL_MAX,
};

/* Values programmed into the re-driver mode register. */
enum xgbe_phy_redrv_mode {
	XGBE_PHY_REDRV_MODE_CX = 5,
	XGBE_PHY_REDRV_MODE_SR = 9,
};

#define XGBE_PHY_REDRV_MODE_REG	0x12b0
+
/* PHY related configuration information */
struct xgbe_phy_data {
	enum xgbe_port_mode port_mode;	/* physical wiring of this port */

	unsigned int port_id;		/* used as the HW mutex owner ID */

	unsigned int port_speeds;	/* XGBE_PHY_PORT_SPEED_* flags */

	enum xgbe_conn_type conn_type;

	enum xgbe_mode cur_mode;
	enum xgbe_mode start_mode;

	unsigned int rrc_count;		/* receiver-reset cycle counter */

	unsigned int mdio_addr;

	/* SFP Support */
	enum xgbe_sfp_comm sfp_comm;
	unsigned int sfp_mux_address;
	unsigned int sfp_mux_channel;

	/* GPIO expander wiring for the SFP sideband signals */
	unsigned int sfp_gpio_address;
	unsigned int sfp_gpio_mask;
	unsigned int sfp_gpio_inputs;	/* last sampled input levels */
	unsigned int sfp_gpio_rx_los;
	unsigned int sfp_gpio_tx_fault;
	unsigned int sfp_gpio_mod_absent;
	unsigned int sfp_gpio_rate_select;

	/* Current module state decoded from sideband signals and EEPROM */
	unsigned int sfp_rx_los;
	unsigned int sfp_tx_fault;
	unsigned int sfp_mod_absent;
	unsigned int sfp_changed;	/* module inserted/removed since last check */
	unsigned int sfp_phy_avail;	/* embedded copper PHY reachable over I2C */
	unsigned int sfp_cable_len;
	enum xgbe_sfp_base sfp_base;
	enum xgbe_sfp_cable sfp_cable;
	enum xgbe_sfp_speed sfp_speed;
	struct xgbe_sfp_eeprom sfp_eeprom;

	/* External PHY support */
	enum xgbe_mdio_mode phydev_mode;
	uint32_t phy_id;		/* OUI/model/rev from MII registers 2/3 */
	int phydev;			/* non-zero when a PHY has been found */
	enum xgbe_mdio_reset mdio_reset;
	unsigned int mdio_reset_addr;
	unsigned int mdio_reset_gpio;

	/* Re-driver support */
	unsigned int redrv;		/* non-zero when a re-driver is present */
	unsigned int redrv_if;
	unsigned int redrv_addr;
	unsigned int redrv_lane;
	unsigned int redrv_model;

	/* KR AN support */
	unsigned int phy_cdr_notrack;
	unsigned int phy_cdr_delay;

	uint8_t port_sfp_inputs;
};
+
+static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
+
+static int
+xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata, struct xgbe_i2c_op *i2c_op)
+{
+ return (pdata->i2c_if.i2c_xfer(pdata, i2c_op));
+}
+
/*
 * Write a 16-bit value to a re-driver register over I2C and verify the
 * device acknowledged the write.  Returns 0 on success or a negative errno.
 */
static int
xgbe_phy_redrv_write(struct xgbe_prv_data *pdata, unsigned int reg,
    unsigned int val)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	struct xgbe_i2c_op i2c_op;
	__be16 *redrv_val;
	uint8_t redrv_data[5], csum;
	unsigned int i, retry;
	int ret;

	/* High byte of register contains read/write indicator */
	redrv_data[0] = ((reg >> 8) & 0xff) << 1;
	redrv_data[1] = reg & 0xff;
	redrv_val = (__be16 *)&redrv_data[2];
	*redrv_val = cpu_to_be16(val);

	/*
	 * Calculate 1 byte checksum: 8-bit sum with end-around carry
	 * (when the addition wraps, the new sum is smaller than the byte
	 * just added, so the carry is folded back in), then inverted.
	 */
	csum = 0;
	for (i = 0; i < 4; i++) {
		csum += redrv_data[i];
		if (redrv_data[i] > csum)
			csum++;
	}
	redrv_data[4] = ~csum;

	/* Each I2C transfer is retried once on -EAGAIN. */
	retry = 1;
again1:
	i2c_op.cmd = XGBE_I2C_CMD_WRITE;
	i2c_op.target = phy_data->redrv_addr;
	i2c_op.len = sizeof(redrv_data);
	i2c_op.buf = redrv_data;
	ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
	if (ret) {
		if ((ret == -EAGAIN) && retry--)
			goto again1;

		return (ret);
	}

	retry = 1;
again2:
	/* Read back the single acknowledge byte. */
	i2c_op.cmd = XGBE_I2C_CMD_READ;
	i2c_op.target = phy_data->redrv_addr;
	i2c_op.len = 1;
	i2c_op.buf = redrv_data;
	ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
	if (ret) {
		if ((ret == -EAGAIN) && retry--)
			goto again2;

		return (ret);
	}

	/* 0xff indicates the device accepted the write (checksum OK). */
	if (redrv_data[0] != 0xff) {
		axgbe_error("Redriver write checksum error\n");
		ret = -EIO;
	}

	return (ret);
}
+
+static int
+xgbe_phy_i2c_write(struct xgbe_prv_data *pdata, unsigned int target, void *val,
+ unsigned int val_len)
+{
+ struct xgbe_i2c_op i2c_op;
+ int retry, ret;
+
+ retry = 1;
+again:
+ /* Write the specfied register */
+ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
+ i2c_op.target = target;
+ i2c_op.len = val_len;
+ i2c_op.buf = val;
+ ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if ((ret == -EAGAIN) && retry--)
+ goto again;
+
+ return (ret);
+}
+
/*
 * Read val_len bytes from an I2C target: first write the register/offset
 * (reg_len bytes), then read the data back.  Each phase is retried once
 * on -EAGAIN.  Returns 0 on success or a negative errno.
 */
static int
xgbe_phy_i2c_read(struct xgbe_prv_data *pdata, unsigned int target, void *reg,
    unsigned int reg_len, void *val, unsigned int val_len)
{
	struct xgbe_i2c_op i2c_op;
	int retry, ret;

	axgbe_printf(3, "%s: target 0x%x reg_len %d val_len %d\n", __func__,
	    target, reg_len, val_len);
	retry = 1;
again1:
	/* Set the specified register to read */
	i2c_op.cmd = XGBE_I2C_CMD_WRITE;
	i2c_op.target = target;
	i2c_op.len = reg_len;
	i2c_op.buf = reg;
	ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
	axgbe_printf(3, "%s: ret1 %d retry %d\n", __func__, ret, retry);
	if (ret) {
		if ((ret == -EAGAIN) && retry--)
			goto again1;

		return (ret);
	}

	retry = 1;
again2:
	/* Read the specified register */
	i2c_op.cmd = XGBE_I2C_CMD_READ;
	i2c_op.target = target;
	i2c_op.len = val_len;
	i2c_op.buf = val;
	ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
	axgbe_printf(3, "%s: ret2 %d retry %d\n", __func__, ret, retry);
	if ((ret == -EAGAIN) && retry--)
		goto again2;

	return (ret);
}
+
+static int
+xgbe_phy_sfp_put_mux(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct xgbe_i2c_op i2c_op;
+ uint8_t mux_channel;
+
+ if (phy_data->sfp_comm == XGBE_SFP_COMM_DIRECT)
+ return (0);
+
+ /* Select no mux channels */
+ mux_channel = 0;
+ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->sfp_mux_address;
+ i2c_op.len = sizeof(mux_channel);
+ i2c_op.buf = &mux_channel;
+
+ return (xgbe_phy_i2c_xfer(pdata, &i2c_op));
+}
+
+static int
+xgbe_phy_sfp_get_mux(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct xgbe_i2c_op i2c_op;
+ uint8_t mux_channel;
+
+ if (phy_data->sfp_comm == XGBE_SFP_COMM_DIRECT)
+ return (0);
+
+ /* Select desired mux channel */
+ mux_channel = 1 << phy_data->sfp_mux_channel;
+ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->sfp_mux_address;
+ i2c_op.len = sizeof(mux_channel);
+ i2c_op.buf = &mux_channel;
+
+ return (xgbe_phy_i2c_xfer(pdata, &i2c_op));
+}
+
/*
 * Release the software mutex guarding the shared I2C/MDIO busses.
 * The hardware mutex registers are cleared by the next caller of
 * xgbe_phy_get_comm_ownership().
 */
static void
xgbe_phy_put_comm_ownership(struct xgbe_prv_data *pdata)
{
	mtx_unlock(&xgbe_phy_comm_lock);
}
+
/*
 * Acquire exclusive use of the shared I2C and MDIO/GPIO busses: take the
 * driver-wide software mutex, then claim both hardware mutex registers.
 * On success the caller holds xgbe_phy_comm_lock and must release it via
 * xgbe_phy_put_comm_ownership().  Returns 0 or -ETIMEDOUT.
 */
static int
xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	unsigned long timeout;
	unsigned int mutex_id;

	/* The I2C and MDIO/GPIO bus is multiplexed between multiple devices,
	 * the driver needs to take the software mutex and then the hardware
	 * mutexes before being able to use the busses.
	 */
	mtx_lock(&xgbe_phy_comm_lock);

	/* Clear the mutexes */
	XP_IOWRITE(pdata, XP_I2C_MUTEX, XGBE_MUTEX_RELEASE);
	XP_IOWRITE(pdata, XP_MDIO_MUTEX, XGBE_MUTEX_RELEASE);

	/* Mutex formats are the same for I2C and MDIO/GPIO */
	mutex_id = 0;
	XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ID, phy_data->port_id);
	XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ACTIVE, 1);

	/*
	 * Poll for up to 5 seconds.  NOTE(review): "ticks < timeout"
	 * misbehaves when the tick counter wraps -- a (timeout - ticks) > 0
	 * style comparison would be wrap-safe; confirm whether that matters
	 * for this driver's uptime expectations.
	 */
	timeout = ticks + (5 * hz);
	while (ticks < timeout) {
		/* Must be all zeroes in order to obtain the mutex */
		if (XP_IOREAD(pdata, XP_I2C_MUTEX) ||
		    XP_IOREAD(pdata, XP_MDIO_MUTEX)) {
			DELAY(200);
			continue;
		}

		/* Obtain the mutex */
		XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id);
		XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id);

		return (0);
	}

	mtx_unlock(&xgbe_phy_comm_lock);

	axgbe_error("unable to obtain hardware mutexes\n");

	return (-ETIMEDOUT);
}
+
+static int
+xgbe_phy_mdio_mii_write(struct xgbe_prv_data *pdata, int addr, int reg,
+ uint16_t val)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (reg & MII_ADDR_C45) {
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
+ return (-ENOTSUP);
+ } else {
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
+ return (-ENOTSUP);
+ }
+
+ return (pdata->hw_if.write_ext_mii_regs(pdata, addr, reg, val));
+}
+
+static int
+xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, uint16_t val)
+{
+ __be16 *mii_val;
+ uint8_t mii_data[3];
+ int ret;
+
+ ret = xgbe_phy_sfp_get_mux(pdata);
+ if (ret)
+ return (ret);
+
+ mii_data[0] = reg & 0xff;
+ mii_val = (__be16 *)&mii_data[1];
+ *mii_val = cpu_to_be16(val);
+
+ ret = xgbe_phy_i2c_write(pdata, XGBE_SFP_PHY_ADDRESS,
+ mii_data, sizeof(mii_data));
+
+ xgbe_phy_sfp_put_mux(pdata);
+
+ return (ret);
+}
+
+int
+xgbe_phy_mii_write(struct xgbe_prv_data *pdata, int addr, int reg, uint16_t val)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ axgbe_printf(3, "%s: addr %d reg %d val %#x\n", __func__, addr, reg, val);
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return (ret);
+
+ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
+ ret = xgbe_phy_i2c_mii_write(pdata, reg, val);
+ else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
+ ret = xgbe_phy_mdio_mii_write(pdata, addr, reg, val);
+ else
+ ret = -ENOTSUP;
+
+ xgbe_phy_put_comm_ownership(pdata);
+
+ return (ret);
+}
+
+static int
+xgbe_phy_mdio_mii_read(struct xgbe_prv_data *pdata, int addr, int reg)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (reg & MII_ADDR_C45) {
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
+ return (-ENOTSUP);
+ } else {
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
+ return (-ENOTSUP);
+ }
+
+ return (pdata->hw_if.read_ext_mii_regs(pdata, addr, reg));
+}
+
+static int
+xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg)
+{
+ __be16 mii_val;
+ uint8_t mii_reg;
+ int ret;
+
+ ret = xgbe_phy_sfp_get_mux(pdata);
+ if (ret)
+ return (ret);
+
+ mii_reg = reg;
+ ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_PHY_ADDRESS,
+ &mii_reg, sizeof(mii_reg),
+ &mii_val, sizeof(mii_val));
+ if (!ret)
+ ret = be16_to_cpu(mii_val);
+
+ xgbe_phy_sfp_put_mux(pdata);
+
+ return (ret);
+}
+
+int
+xgbe_phy_mii_read(struct xgbe_prv_data *pdata, int addr, int reg)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ axgbe_printf(3, "%s: addr %d reg %d\n", __func__, addr, reg);
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return (ret);
+
+ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
+ ret = xgbe_phy_i2c_mii_read(pdata, reg);
+ else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
+ ret = xgbe_phy_mdio_mii_read(pdata, addr, reg);
+ else
+ ret = -ENOTSUP;
+
+ xgbe_phy_put_comm_ownership(pdata);
+
+ return (ret);
+}
+
/*
 * Derive the supported/advertised link-mode sets and default speed/duplex/
 * autoneg settings from the currently inserted (or absent) SFP module.
 * Only runs when the module state changed or the module is absent.
 */
static void
xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;

	if (!phy_data->sfp_mod_absent && !phy_data->sfp_changed)
		return;

	XGBE_ZERO_SUP(&pdata->phy);

	/* No module: advertise everything and wait for autoneg. */
	if (phy_data->sfp_mod_absent) {
		pdata->phy.speed = SPEED_UNKNOWN;
		pdata->phy.duplex = DUPLEX_UNKNOWN;
		pdata->phy.autoneg = AUTONEG_ENABLE;
		pdata->phy.pause_autoneg = AUTONEG_ENABLE;

		XGBE_SET_SUP(&pdata->phy, Autoneg);
		XGBE_SET_SUP(&pdata->phy, Pause);
		XGBE_SET_SUP(&pdata->phy, Asym_Pause);
		XGBE_SET_SUP(&pdata->phy, TP);
		XGBE_SET_SUP(&pdata->phy, FIBRE);

		XGBE_LM_COPY(&pdata->phy, advertising, &pdata->phy, supported);

		return;
	}

	switch (phy_data->sfp_base) {
	/* 1G modules autonegotiate; filter modes by the port's capabilities. */
	case XGBE_SFP_BASE_1000_T:
	case XGBE_SFP_BASE_1000_SX:
	case XGBE_SFP_BASE_1000_LX:
	case XGBE_SFP_BASE_1000_CX:
		pdata->phy.speed = SPEED_UNKNOWN;
		pdata->phy.duplex = DUPLEX_UNKNOWN;
		pdata->phy.autoneg = AUTONEG_ENABLE;
		pdata->phy.pause_autoneg = AUTONEG_ENABLE;
		XGBE_SET_SUP(&pdata->phy, Autoneg);
		XGBE_SET_SUP(&pdata->phy, Pause);
		XGBE_SET_SUP(&pdata->phy, Asym_Pause);
		if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) {
			if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
				XGBE_SET_SUP(&pdata->phy, 100baseT_Full);
			if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
				XGBE_SET_SUP(&pdata->phy, 1000baseT_Full);
		} else {
			if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
				XGBE_SET_SUP(&pdata->phy, 1000baseX_Full);
		}
		break;
	/* 10G modules run fixed 10G full duplex, no autoneg. */
	case XGBE_SFP_BASE_10000_SR:
	case XGBE_SFP_BASE_10000_LR:
	case XGBE_SFP_BASE_10000_LRM:
	case XGBE_SFP_BASE_10000_ER:
	case XGBE_SFP_BASE_10000_CR:
		pdata->phy.speed = SPEED_10000;
		pdata->phy.duplex = DUPLEX_FULL;
		pdata->phy.autoneg = AUTONEG_DISABLE;
		pdata->phy.pause_autoneg = AUTONEG_DISABLE;
		if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
			switch (phy_data->sfp_base) {
			case XGBE_SFP_BASE_10000_SR:
				XGBE_SET_SUP(&pdata->phy, 10000baseSR_Full);
				break;
			case XGBE_SFP_BASE_10000_LR:
				XGBE_SET_SUP(&pdata->phy, 10000baseLR_Full);
				break;
			case XGBE_SFP_BASE_10000_LRM:
				XGBE_SET_SUP(&pdata->phy, 10000baseLRM_Full);
				break;
			case XGBE_SFP_BASE_10000_ER:
				XGBE_SET_SUP(&pdata->phy, 10000baseER_Full);
				break;
			case XGBE_SFP_BASE_10000_CR:
				XGBE_SET_SUP(&pdata->phy, 10000baseCR_Full);
				break;
			default:
				break;
			}
		}
		break;
	/* Unrecognized module: advertise nothing. */
	default:
		pdata->phy.speed = SPEED_UNKNOWN;
		pdata->phy.duplex = DUPLEX_UNKNOWN;
		pdata->phy.autoneg = AUTONEG_DISABLE;
		pdata->phy.pause_autoneg = AUTONEG_DISABLE;
		break;
	}

	/* Copper media reports TP, everything else FIBRE. */
	switch (phy_data->sfp_base) {
	case XGBE_SFP_BASE_1000_T:
	case XGBE_SFP_BASE_1000_CX:
	case XGBE_SFP_BASE_10000_CR:
		XGBE_SET_SUP(&pdata->phy, TP);
		break;
	default:
		XGBE_SET_SUP(&pdata->phy, FIBRE);
		break;
	}

	XGBE_LM_COPY(&pdata->phy, advertising, &pdata->phy, supported);

	axgbe_printf(1, "%s: link speed %d spf_base 0x%x pause_autoneg %d "
	    "advert 0x%x support 0x%x\n", __func__, pdata->phy.speed,
	    phy_data->sfp_base, pdata->phy.pause_autoneg,
	    pdata->phy.advertising, pdata->phy.supported);
}
+
+static bool
+xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
+ enum xgbe_sfp_speed sfp_speed)
+{
+ uint8_t *sfp_base, min, max;
+
+ sfp_base = sfp_eeprom->base;
+
+ switch (sfp_speed) {
+ case XGBE_SFP_SPEED_1000:
+ min = XGBE_SFP_BASE_BR_1GBE_MIN;
+ max = XGBE_SFP_BASE_BR_1GBE_MAX;
+ break;
+ case XGBE_SFP_SPEED_10000:
+ min = XGBE_SFP_BASE_BR_10GBE_MIN;
+ max = XGBE_SFP_BASE_BR_10GBE_MAX;
+ break;
+ default:
+ return (false);
+ }
+
+ return ((sfp_base[XGBE_SFP_BASE_BR] >= min) &&
+ (sfp_base[XGBE_SFP_BASE_BR] <= max));
+}
+
+static void
+xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->phydev)
+ phy_data->phydev = 0;
+}
+
/*
 * Apply a register-setup workaround for Finisar SFP modules whose embedded
 * copper PHY needs explicit configuration.  Matched by PHY OUI/model
 * (phy_id with the revision nibble masked off).  Returns true when the
 * quirk was applied.
 *
 * NOTE(review): the register numbers/values below are vendor-specific
 * magic taken as-is; their individual meanings are not documented here.
 */
static bool
xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	unsigned int phy_id = phy_data->phy_id;

	if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
		return (false);

	/* Mask off the revision nibble before comparing the PHY ID. */
	if ((phy_id & 0xfffffff0) != 0x01ff0cc0)
		return (false);

	/* Enable Base-T AN */
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x16, 0x0001);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, 0x9140);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x16, 0x0000);

	/* Enable SGMII at 100Base-T/1000Base-T Full Duplex */
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1b, 0x9084);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x09, 0x0e00);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, 0x8140);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x04, 0x0d01);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, 0x9140);

	axgbe_printf(3, "Finisar PHY quirk in place\n");

	return (true);
}
+
/*
 * Apply a register-setup workaround for Bel-Fuse 1GBT-SFP06 modules,
 * reprogramming the embedded PHY from RGMII into SGMII-to-Copper mode.
 * Returns true when the quirk was applied.
 *
 * NOTE(review): pdata->an_again is set for ANY Bel-Fuse vendor module
 * (before the part-number check), not only for the 1GBT-SFP06 -- the
 * placement looks deliberate but is worth confirming.
 */
static bool
xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
	unsigned int phy_id = phy_data->phy_id;
	int reg;

	if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
		return (false);

	if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
	    XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN))
		return (false);

	/* For Bel-Fuse, use the extra AN flag */
	pdata->an_again = 1;

	if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN],
	    XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN))
		return (false);

	/* Mask off the revision nibble before comparing the PHY ID. */
	if ((phy_id & 0xfffffff0) != 0x03625d10)
		return (false);

	/* Disable RGMII mode */
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x18, 0x7007);
	reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x18);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x18, reg & ~0x0080);

	/* Enable fiber register bank */
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x7c00);
	reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x1c);
	reg &= 0x03ff;
	reg &= ~0x0001;
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x8000 | 0x7c00 |
	    reg | 0x0001);

	/* Power down SerDes */
	reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x00);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, reg | 0x00800);

	/* Configure SGMII-to-Copper mode */
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x7c00);
	reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x1c);
	reg &= 0x03ff;
	reg &= ~0x0006;
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x8000 | 0x7c00 |
	    reg | 0x0004);

	/* Power up SerDes */
	reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x00);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, reg & ~0x00800);

	/* Enable copper register bank */
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x7c00);
	reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x1c);
	reg &= 0x03ff;
	reg &= ~0x0001;
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x8000 | 0x7c00 |
	    reg);

	/* Power up SerDes */
	reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x00);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, reg & ~0x00800);

	axgbe_printf(3, "BelFuse PHY quirk in place\n");

	return (true);
}
+
/*
 * Run the known vendor quirks against the attached module; the first
 * quirk that recognizes the module wins.
 */
static void
xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata)
{
	if (!xgbe_phy_belfuse_phy_quirks(pdata))
		xgbe_phy_finisar_phy_quirks(pdata);
}
+
/*
 * Read MII registers 2 and 3 (PHY identifier) and combine them into
 * phy_data->phy_id (reg 2 in the high 16 bits, reg 3 in the low 16).
 * Returns 0 on success, -EIO if either read fails.
 */
static int
xgbe_get_phy_id(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	uint32_t oui, model, phy_id1, phy_id2;
	int phy_reg;

	phy_reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x02);
	if (phy_reg < 0)
		return (-EIO);

	phy_id1 = (phy_reg & 0xffff);
	phy_data->phy_id = (phy_reg & 0xffff) << 16;

	phy_reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x03);
	if (phy_reg < 0)
		return (-EIO);

	phy_id2 = (phy_reg & 0xffff);
	phy_data->phy_id |= (phy_reg & 0xffff);

	/* OUI/model decoded only for the debug message below. */
	oui = MII_OUI(phy_id1, phy_id2);
	model = MII_MODEL(phy_id2);

	axgbe_printf(2, "%s: phy_id1: 0x%x phy_id2: 0x%x oui: %#x model %#x\n",
	    __func__, phy_id1, phy_id2, oui, model);

	return (0);
}
+
+static int
+xgbe_phy_start_aneg(struct xgbe_prv_data *pdata)
+{
+ uint16_t ctl = 0;
+ int changed = 0;
+ int ret;
+
+ if (AUTONEG_ENABLE != pdata->phy.autoneg) {
+ if (SPEED_1000 == pdata->phy.speed)
+ ctl |= BMCR_SPEED1;
+ else if (SPEED_100 == pdata->phy.speed)
+ ctl |= BMCR_SPEED100;
+
+ if (DUPLEX_FULL == pdata->phy.duplex)
+ ctl |= BMCR_FDX;
+
+ ret = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMCR);
+ if (ret)
+ return (ret);
+
+ ret = xgbe_phy_mii_write(pdata, pdata->mdio_addr, MII_BMCR,
+ (ret & ~(~(BMCR_LOOP | BMCR_ISO | BMCR_PDOWN))) | ctl);
+ }
+
+ ctl = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMCR);
+ if (ctl < 0)
+ return (ctl);
+
+ if (!(ctl & BMCR_AUTOEN) || (ctl & BMCR_ISO))
+ changed = 1;
+
+ if (changed > 0) {
+ ret = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMCR);
+ if (ret)
+ return (ret);
+
+ ret = xgbe_phy_mii_write(pdata, pdata->mdio_addr, MII_BMCR,
+ (ret & ~(BMCR_ISO)) | (BMCR_AUTOEN | BMCR_STARTNEG));
+ }
+
+ return (0);
+}
+
/*
 * Discover and prepare an external PHY if the port configuration calls
 * for one: set the MDIO clause mode, read the PHY ID, apply vendor
 * quirks and kick off autonegotiation.  Idempotent -- returns 0 early
 * when a PHY was already found or none is expected.
 */
static int
xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	int ret;

	axgbe_printf(2, "%s: phydev %d phydev_mode %d sfp_phy_avail %d phy_id "
	    "0x%08x\n", __func__, phy_data->phydev, phy_data->phydev_mode,
	    phy_data->sfp_phy_avail, phy_data->phy_id);

	/* If we already have a PHY, just return */
	if (phy_data->phydev) {
		axgbe_printf(3, "%s: phy present already\n", __func__);
		return (0);
	}

	/* Clear the extra AN flag */
	pdata->an_again = 0;

	/* Check for the use of an external PHY */
	if (phy_data->phydev_mode == XGBE_MDIO_MODE_NONE) {
		axgbe_printf(3, "%s: phydev_mode %d\n", __func__,
		    phy_data->phydev_mode);
		return (0);
	}

	/* For SFP, only use an external PHY if available */
	if ((phy_data->port_mode == XGBE_PORT_MODE_SFP) &&
	    !phy_data->sfp_phy_avail) {
		axgbe_printf(3, "%s: port_mode %d avail %d\n", __func__,
		    phy_data->port_mode, phy_data->sfp_phy_avail);
		return (0);
	}

	/* Set the proper MDIO mode for the PHY */
	ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr,
	    phy_data->phydev_mode);
	if (ret) {
		axgbe_error("mdio port/clause not compatible (%u/%u) ret %d\n",
		    phy_data->mdio_addr, phy_data->phydev_mode, ret);
		return (ret);
	}

	ret = xgbe_get_phy_id(pdata);
	if (ret)
		return (ret);
	axgbe_printf(2, "Get phy_id 0x%08x\n", phy_data->phy_id);

	/* Mark the PHY found before quirks/aneg, which go through MII I/O. */
	phy_data->phydev = 1;
	xgbe_phy_external_phy_quirks(pdata);
	xgbe_phy_start_aneg(pdata);

	return (0);
}
+
/*
 * Probe for a copper PHY embedded in a newly inserted 1000Base-T SFP by
 * attempting an I2C read of its BMCR register; record availability in
 * phy_data->sfp_phy_avail.  Only runs when the module state changed.
 */
static void
xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	int ret;

	axgbe_printf(3, "%s: sfp_changed: 0x%x\n", __func__,
	    phy_data->sfp_changed);
	if (!phy_data->sfp_changed)
		return;

	phy_data->sfp_phy_avail = 0;

	/* Only 1000Base-T modules carry an accessible copper PHY. */
	if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
		return;

	/* Check access to the PHY by reading CTRL1 */
	ret = xgbe_phy_i2c_mii_read(pdata, MII_BMCR);
	if (ret < 0) {
		axgbe_error("%s: ext phy fail %d\n", __func__, ret);
		return;
	}

	/* Successfully accessed the PHY */
	phy_data->sfp_phy_avail = 1;
	axgbe_printf(3, "Successfully accessed External PHY\n");
}
+
+static bool
+xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data)
+{
+ uint8_t *sfp_extd = phy_data->sfp_eeprom.extd;
+
+ if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS))
+ return (false);
+
+ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS)
+ return (false);
+
+ if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los))
+ return (true);
+
+ return (false);
+}
+
+static bool
+xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data)
+{
+ uint8_t *sfp_extd = phy_data->sfp_eeprom.extd;
+
+ if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT))
+ return (false);
+
+ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT)
+ return (false);
+
+ if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault))
+ return (true);
+
+ return (false);
+}
+
+static bool
+xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data)
+{
+ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT)
+ return (false);
+
+ if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent))
+ return (true);
+
+ return (false);
+}
+
+static void
+xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
+ uint8_t *sfp_base;
+
+ sfp_base = sfp_eeprom->base;
+
+ if (sfp_base[XGBE_SFP_BASE_ID] != XGBE_SFP_ID_SFP) {
+ axgbe_error("base id %d\n", sfp_base[XGBE_SFP_BASE_ID]);
+ return;
+ }
+
+ if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP) {
+ axgbe_error("base id %d\n", sfp_base[XGBE_SFP_BASE_EXT_ID]);
+ return;
+ }
+
+ /* Update transceiver signals (eeprom extd/options) */
+ phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
+ phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
+
+ /* Assume ACTIVE cable unless told it is PASSIVE */
+ if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) {
+ phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE;
+ phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN];
+ } else
+ phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
+
+ /* Determine the type of SFP */
+ if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_SR;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR)
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_LR;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LRM)
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_LRM;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_ER)
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_ER;
+ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_SX)
+ phy_data->sfp_base = XGBE_SFP_BASE_1000_SX;
+ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_LX)
+ phy_data->sfp_base = XGBE_SFP_BASE_1000_LX;
+ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_CX)
+ phy_data->sfp_base = XGBE_SFP_BASE_1000_CX;
+ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T)
+ phy_data->sfp_base = XGBE_SFP_BASE_1000_T;
+ else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
+ xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
+
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_1000_T:
+ phy_data->sfp_speed = XGBE_SFP_SPEED_100_1000;
+ break;
+ case XGBE_SFP_BASE_1000_SX:
+ case XGBE_SFP_BASE_1000_LX:
+ case XGBE_SFP_BASE_1000_CX:
+ phy_data->sfp_speed = XGBE_SFP_SPEED_1000;
+ break;
+ case XGBE_SFP_BASE_10000_SR:
+ case XGBE_SFP_BASE_10000_LR:
+ case XGBE_SFP_BASE_10000_LRM:
+ case XGBE_SFP_BASE_10000_ER:
+ case XGBE_SFP_BASE_10000_CR:
+ phy_data->sfp_speed = XGBE_SFP_SPEED_10000;
+ break;
+ default:
+ break;
+ }
+ axgbe_printf(3, "%s: sfp_base: 0x%x sfp_speed: 0x%x sfp_cable: 0x%x "
+ "rx_los 0x%x tx_fault 0x%x\n", __func__, phy_data->sfp_base,
+ phy_data->sfp_speed, phy_data->sfp_cable, phy_data->sfp_rx_los,
+ phy_data->sfp_tx_fault);
+}
+
+static void
+xgbe_phy_sfp_eeprom_info(struct xgbe_prv_data *pdata,
+ struct xgbe_sfp_eeprom *sfp_eeprom)
+{
+ struct xgbe_sfp_ascii sfp_ascii;
+ char *sfp_data = (char *)&sfp_ascii;
+
+ axgbe_printf(3, "SFP detected:\n");
+ memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
+ XGBE_SFP_BASE_VENDOR_NAME_LEN);
+ sfp_data[XGBE_SFP_BASE_VENDOR_NAME_LEN] = '\0';
+ axgbe_printf(3, " vendor: %s\n",
+ sfp_data);
+
+ memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN],
+ XGBE_SFP_BASE_VENDOR_PN_LEN);
+ sfp_data[XGBE_SFP_BASE_VENDOR_PN_LEN] = '\0';
+ axgbe_printf(3, " part number: %s\n",
+ sfp_data);
+
+ memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_REV],
+ XGBE_SFP_BASE_VENDOR_REV_LEN);
+ sfp_data[XGBE_SFP_BASE_VENDOR_REV_LEN] = '\0';
+ axgbe_printf(3, " revision level: %s\n",
+ sfp_data);
+
+ memcpy(sfp_data, &sfp_eeprom->extd[XGBE_SFP_BASE_VENDOR_SN],
+ XGBE_SFP_BASE_VENDOR_SN_LEN);
+ sfp_data[XGBE_SFP_BASE_VENDOR_SN_LEN] = '\0';
+ axgbe_printf(3, " serial number: %s\n",
+ sfp_data);
+}
+
/*
 * Verify an SFP EEPROM region against its check-code byte.  Per SFF-8472
 * the check code is the low 8 bits of the unsigned sum of all preceding
 * bytes in the region.  Returns true when the computed sum matches cc_in.
 */
static bool
xgbe_phy_sfp_verify_eeprom(uint8_t cc_in, uint8_t *buf, unsigned int len)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		sum += buf[i];

	return (sum == cc_in);
}
+
/*
 * Debug helper: dump the identifying bytes (ID, extended ID, cable
 * characteristics) of an SFP serial-ID EEPROM base area.
 */
static void
dump_sfp_eeprom(struct xgbe_prv_data *pdata, uint8_t *sfp_base)
{
	axgbe_printf(3, "sfp_base[XGBE_SFP_BASE_ID]     : 0x%04x\n",
	    sfp_base[XGBE_SFP_BASE_ID]);
	axgbe_printf(3, "sfp_base[XGBE_SFP_BASE_EXT_ID] : 0x%04x\n",
	    sfp_base[XGBE_SFP_BASE_EXT_ID]);
	axgbe_printf(3, "sfp_base[XGBE_SFP_BASE_CABLE]  : 0x%04x\n",
	    sfp_base[XGBE_SFP_BASE_CABLE]);
}
+
/*
 * Read and validate the SFP serial-ID EEPROM over I2C.  Selects the SFP
 * I2C mux, reads the EEPROM at the serial-ID address, verifies the base
 * and extended check codes, and caches the contents.  When the contents
 * differ from the cached copy the module is flagged as changed and any
 * previously attached external PHY is released.  The mux is released on
 * every exit path.  Returns 0 on success or a negative error code.
 */
static int
xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	struct xgbe_sfp_eeprom sfp_eeprom, *eeprom;
	uint8_t eeprom_addr, *base;
	int ret;

	ret = xgbe_phy_sfp_get_mux(pdata);
	if (ret) {
		axgbe_error("I2C error setting SFP MUX\n");
		return (ret);
	}

	/* Read the SFP serial ID eeprom */
	eeprom_addr = 0;
	ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS,
	    &eeprom_addr, sizeof(eeprom_addr),
	    &sfp_eeprom, sizeof(sfp_eeprom));

	/* Dump the ID bytes even on a failed read, for diagnostics */
	eeprom = &sfp_eeprom;
	base = eeprom->base;
	dump_sfp_eeprom(pdata, base);
	if (ret) {
		axgbe_error("I2C error reading SFP EEPROM\n");
		goto put;
	}

	/* Validate the contents read (last byte of each area is its CC) */
	if (!xgbe_phy_sfp_verify_eeprom(sfp_eeprom.base[XGBE_SFP_BASE_CC],
	    sfp_eeprom.base, sizeof(sfp_eeprom.base) - 1)) {
		axgbe_error("verify eeprom base failed\n");
		ret = -EINVAL;
		goto put;
	}

	if (!xgbe_phy_sfp_verify_eeprom(sfp_eeprom.extd[XGBE_SFP_EXTD_CC],
	    sfp_eeprom.extd, sizeof(sfp_eeprom.extd) - 1)) {
		axgbe_error("verify eeprom extd failed\n");
		ret = -EINVAL;
		goto put;
	}

	/* Check for an added or changed SFP */
	if (memcmp(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom))) {
		phy_data->sfp_changed = 1;

		xgbe_phy_sfp_eeprom_info(pdata, &sfp_eeprom);

		memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom));

		/* A different module invalidates any attached external PHY */
		xgbe_phy_free_phy_device(pdata);
	} else
		phy_data->sfp_changed = 0;

put:
	xgbe_phy_sfp_put_mux(pdata);

	return (ret);
}
+
/*
 * Sample the SFP status signals (RX_LOS, TX_FAULT, MOD_ABSENT) from the
 * I2C GPIO expander and update the cached GPIO input state, then refresh
 * the module-absent indication.
 */
static void
xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	uint8_t gpio_reg, gpio_ports[2];
	int ret, prev_sfp_inputs = phy_data->port_sfp_inputs;
	/*
	 * Each port owns a GPIO_MASK_WIDTH-wide field in the 16-bit GPIO
	 * input word; fields are packed with port 3 in the lowest bits.
	 */
	int shift = GPIO_MASK_WIDTH * (3 - phy_data->port_id);

	/* Read the input port registers */
	axgbe_printf(3, "%s: befor sfp_mod:%d sfp_gpio_address:0x%x\n",
	    __func__, phy_data->sfp_mod_absent, phy_data->sfp_gpio_address);

	gpio_reg = 0;
	ret = xgbe_phy_i2c_read(pdata, phy_data->sfp_gpio_address, &gpio_reg,
	    sizeof(gpio_reg), gpio_ports, sizeof(gpio_ports));
	if (ret) {
		axgbe_error("%s: I2C error reading SFP GPIO addr:0x%x\n",
		    __func__, phy_data->sfp_gpio_address);
		return;
	}

	/* Combine the two 8-bit port registers, little-endian */
	phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0];
	phy_data->port_sfp_inputs = (phy_data->sfp_gpio_inputs >> shift) & 0x0F;

	/* Log only on change to avoid flooding the console */
	if (prev_sfp_inputs != phy_data->port_sfp_inputs)
		axgbe_printf(0, "%s: port_sfp_inputs: 0x%0x\n", __func__,
		    phy_data->port_sfp_inputs);

	phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data);

	axgbe_printf(3, "%s: after sfp_mod:%d sfp_gpio_inputs:0x%x\n",
	    __func__, phy_data->sfp_mod_absent, phy_data->sfp_gpio_inputs);
}
+
/*
 * Handle removal of the SFP module: release any external PHY that was
 * attached through it, mark the cage empty, and discard the cached
 * module EEPROM so a re-inserted module is detected as changed.
 */
static void
xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;

	xgbe_phy_free_phy_device(pdata);

	phy_data->sfp_mod_absent = 1;
	phy_data->sfp_phy_avail = 0;
	memset(&phy_data->sfp_eeprom, 0, sizeof(phy_data->sfp_eeprom));
}
+
/*
 * Reset the cached SFP state to its defaults (no signals asserted,
 * module absent, type/cable/speed unknown) prior to re-detection.
 */
static void
xgbe_phy_sfp_reset(struct xgbe_phy_data *phy_data)
{
	phy_data->sfp_rx_los = 0;
	phy_data->sfp_tx_fault = 0;
	phy_data->sfp_mod_absent = 1;
	phy_data->sfp_base = XGBE_SFP_BASE_UNKNOWN;
	phy_data->sfp_cable = XGBE_SFP_CABLE_UNKNOWN;
	phy_data->sfp_speed = XGBE_SFP_SPEED_UNKNOWN;
}
+
/*
 * Full SFP detection pass: reset cached state, take shared-bus ownership,
 * sample the module signals, read/parse the EEPROM if a module is present
 * and probe for an embedded external PHY.  PHY settings are refreshed and
 * bus ownership released on every path.
 */
static void
xgbe_phy_sfp_detect(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	int ret, prev_sfp_state = phy_data->sfp_mod_absent;

	/* Reset the SFP signals and info */
	xgbe_phy_sfp_reset(phy_data);

	/* The I2C bus is shared; bail out if we cannot own it */
	ret = xgbe_phy_get_comm_ownership(pdata);
	if (ret)
		return;

	/* Read the SFP signals and check for module presence */
	xgbe_phy_sfp_signals(pdata);
	if (phy_data->sfp_mod_absent) {
		/* Log only on a present -> absent transition */
		if (prev_sfp_state != phy_data->sfp_mod_absent)
			axgbe_error("%s: mod absent\n", __func__);
		xgbe_phy_sfp_mod_absent(pdata);
		goto put;
	}

	ret = xgbe_phy_sfp_read_eeprom(pdata);
	if (ret) {
		/* Treat any error as if there isn't an SFP plugged in */
		axgbe_error("%s: eeprom read failed\n", __func__);
		xgbe_phy_sfp_reset(phy_data);
		xgbe_phy_sfp_mod_absent(pdata);
		goto put;
	}

	xgbe_phy_sfp_parse_eeprom(pdata);

	xgbe_phy_sfp_external_phy(pdata);

put:
	/* Re-derive speed/duplex/autoneg from the (possibly new) module */
	xgbe_phy_sfp_phy_settings(pdata);

	axgbe_printf(3, "%s: phy speed: 0x%x duplex: 0x%x autoneg: 0x%x "
	    "pause_autoneg: 0x%x\n", __func__, pdata->phy.speed,
	    pdata->phy.duplex, pdata->phy.autoneg, pdata->phy.pause_autoneg);

	xgbe_phy_put_comm_ownership(pdata);
}
+
/*
 * Read the SFP module EEPROM (serial-ID area, plus the diagnostic area
 * when the module advertises diagnostics support) into a local buffer.
 * Only valid for SFP ports with a module present.  Uses goto-based
 * cleanup so bus ownership and the I2C mux are always released in
 * reverse order of acquisition.  Returns 0 or a negative error code.
 */
static int
xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	uint8_t eeprom_addr, eeprom_data[XGBE_SFP_EEPROM_MAX];
	struct xgbe_sfp_eeprom *sfp_eeprom;
	int ret;

	if (phy_data->port_mode != XGBE_PORT_MODE_SFP) {
		ret = -ENXIO;
		goto done;
	}

	if (phy_data->sfp_mod_absent) {
		ret = -EIO;
		goto done;
	}

	ret = xgbe_phy_get_comm_ownership(pdata);
	if (ret) {
		ret = -EIO;
		goto done;
	}

	ret = xgbe_phy_sfp_get_mux(pdata);
	if (ret) {
		axgbe_error("I2C error setting SFP MUX\n");
		ret = -EIO;
		goto put_own;
	}

	/* Read the SFP serial ID eeprom */
	eeprom_addr = 0;
	ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS,
	    &eeprom_addr, sizeof(eeprom_addr),
	    eeprom_data, XGBE_SFP_EEPROM_BASE_LEN);
	if (ret) {
		axgbe_error("I2C error reading SFP EEPROM\n");
		ret = -EIO;
		goto put_mux;
	}

	sfp_eeprom = (struct xgbe_sfp_eeprom *)eeprom_data;

	if (XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom)) {
		/* Read the SFP diagnostic eeprom (appended after the base) */
		eeprom_addr = 0;
		ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_DIAG_INFO_ADDRESS,
		    &eeprom_addr, sizeof(eeprom_addr),
		    eeprom_data + XGBE_SFP_EEPROM_BASE_LEN,
		    XGBE_SFP_EEPROM_DIAG_LEN);
		if (ret) {
			axgbe_error("I2C error reading SFP DIAGS\n");
			ret = -EIO;
			goto put_mux;
		}
	}

put_mux:
	xgbe_phy_sfp_put_mux(pdata);

put_own:
	xgbe_phy_put_comm_ownership(pdata);

done:
	return (ret);
}
+
+static int
+xgbe_phy_module_info(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
+ return (-ENXIO);
+
+ if (phy_data->sfp_mod_absent)
+ return (-EIO);
+
+ return (0);
+}
+
+static void
+xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
+ if (!phy_data->phydev)
+ return;
+
+ if (pdata->phy.pause)
+ XGBE_SET_LP_ADV(&pdata->phy, Pause);
+
+ if (pdata->phy.asym_pause)
+ XGBE_SET_LP_ADV(&pdata->phy, Asym_Pause);
+
+ axgbe_printf(1, "%s: pause tx/rx %d/%d\n", __func__,
+ pdata->phy.tx_pause, pdata->phy.rx_pause);
+}
+
+static enum xgbe_mode
+xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mode mode;
+
+ XGBE_SET_LP_ADV(&pdata->phy, Autoneg);
+ XGBE_SET_LP_ADV(&pdata->phy, TP);
+
+ axgbe_printf(1, "%s: pause_autoneg %d\n", __func__,
+ pdata->phy.pause_autoneg);
+
+ /* Use external PHY to determine flow control */
+ if (pdata->phy.pause_autoneg)
+ xgbe_phy_phydev_flowctrl(pdata);
+
+ switch (pdata->an_status & XGBE_SGMII_AN_LINK_SPEED) {
+ case XGBE_SGMII_AN_LINK_SPEED_100:
+ if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
+ XGBE_SET_LP_ADV(&pdata->phy, 100baseT_Full);
+ mode = XGBE_MODE_SGMII_100;
+ } else {
+ /* Half-duplex not supported */
+ XGBE_SET_LP_ADV(&pdata->phy, 100baseT_Half);
+ mode = XGBE_MODE_UNKNOWN;
+ }
+ break;
+ case XGBE_SGMII_AN_LINK_SPEED_1000:
+ if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
+ XGBE_SET_LP_ADV(&pdata->phy, 1000baseT_Full);
+ mode = XGBE_MODE_SGMII_1000;
+ } else {
+ /* Half-duplex not supported */
+ XGBE_SET_LP_ADV(&pdata->phy, 1000baseT_Half);
+ mode = XGBE_MODE_UNKNOWN;
+ }
+ break;
+ default:
+ mode = XGBE_MODE_UNKNOWN;
+ }
+
+ return (mode);
+}
+
/*
 * Resolve the result of a Clause 37 (1000Base-X) auto-negotiation.
 * Records the link partner's advertisement, resolves pause per the
 * 802.3 pause resolution rules when pause autoneg is enabled, and
 * returns XGBE_MODE_X when both sides advertised full duplex.
 * Bit meanings in the CL37 ability word: 0x20 full duplex,
 * 0x80 asymmetric pause, 0x100 symmetric pause.
 */
static enum xgbe_mode
xgbe_phy_an37_outcome(struct xgbe_prv_data *pdata)
{
	enum xgbe_mode mode;
	unsigned int ad_reg, lp_reg;

	XGBE_SET_LP_ADV(&pdata->phy, Autoneg);
	XGBE_SET_LP_ADV(&pdata->phy, FIBRE);

	/* Compare Advertisement and Link Partner register */
	ad_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE);
	lp_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_LP_ABILITY);
	if (lp_reg & 0x100)
		XGBE_SET_LP_ADV(&pdata->phy, Pause);
	if (lp_reg & 0x80)
		XGBE_SET_LP_ADV(&pdata->phy, Asym_Pause);

	axgbe_printf(1, "%s: pause_autoneg %d ad_reg 0x%x lp_reg 0x%x\n",
	    __func__, pdata->phy.pause_autoneg, ad_reg, lp_reg);

	if (pdata->phy.pause_autoneg) {
		/* Set flow control based on auto-negotiation result */
		pdata->phy.tx_pause = 0;
		pdata->phy.rx_pause = 0;

		/* Both symmetric: pause in both directions */
		if (ad_reg & lp_reg & 0x100) {
			pdata->phy.tx_pause = 1;
			pdata->phy.rx_pause = 1;
		} else if (ad_reg & lp_reg & 0x80) {
			/* Both asymmetric: direction set by who asked for it */
			if (ad_reg & 0x100)
				pdata->phy.rx_pause = 1;
			else if (lp_reg & 0x100)
				pdata->phy.tx_pause = 1;
		}
	}

	axgbe_printf(1, "%s: pause tx/rx %d/%d\n", __func__, pdata->phy.tx_pause,
	    pdata->phy.rx_pause);

	if (lp_reg & 0x20)
		XGBE_SET_LP_ADV(&pdata->phy, 1000baseX_Full);

	/* Half duplex is not supported */
	ad_reg &= lp_reg;
	mode = (ad_reg & 0x20) ? XGBE_MODE_X : XGBE_MODE_UNKNOWN;

	return (mode);
}
+
/*
 * Resolve the result of a Clause 73 auto-negotiation performed through
 * a KR re-driver.  The negotiated technology (10GBase-KR bit 0x80 or
 * 1000Base-KX bit 0x20 in the second AN register) is translated into the
 * operating mode appropriate for the physical port behind the re-driver.
 */
static enum xgbe_mode
xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	enum xgbe_mode mode;
	unsigned int ad_reg, lp_reg;

	XGBE_SET_LP_ADV(&pdata->phy, Autoneg);
	XGBE_SET_LP_ADV(&pdata->phy, Backplane);

	axgbe_printf(1, "%s: pause_autoneg %d\n", __func__,
	    pdata->phy.pause_autoneg);

	/* Use external PHY to determine flow control */
	if (pdata->phy.pause_autoneg)
		xgbe_phy_phydev_flowctrl(pdata);

	/* Compare Advertisement and Link Partner register 2 */
	ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
	lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
	if (lp_reg & 0x80)
		XGBE_SET_LP_ADV(&pdata->phy, 10000baseKR_Full);
	if (lp_reg & 0x20)
		XGBE_SET_LP_ADV(&pdata->phy, 1000baseKX_Full);

	ad_reg &= lp_reg;
	if (ad_reg & 0x80) {
		/* 10G negotiated: KR on backplane, SFI otherwise */
		switch (phy_data->port_mode) {
		case XGBE_PORT_MODE_BACKPLANE:
			mode = XGBE_MODE_KR;
			break;
		default:
			mode = XGBE_MODE_SFI;
			break;
		}
	} else if (ad_reg & 0x20) {
		/* 1G negotiated: pick the mode matching the port type */
		switch (phy_data->port_mode) {
		case XGBE_PORT_MODE_BACKPLANE:
			mode = XGBE_MODE_KX_1000;
			break;
		case XGBE_PORT_MODE_1000BASE_X:
			mode = XGBE_MODE_X;
			break;
		case XGBE_PORT_MODE_SFP:
			switch (phy_data->sfp_base) {
			case XGBE_SFP_BASE_1000_T:
				/* Copper module: honor the PHY's speed */
				if ((phy_data->phydev) &&
				    (pdata->phy.speed == SPEED_100))
					mode = XGBE_MODE_SGMII_100;
				else
					mode = XGBE_MODE_SGMII_1000;
				break;
			case XGBE_SFP_BASE_1000_SX:
			case XGBE_SFP_BASE_1000_LX:
			case XGBE_SFP_BASE_1000_CX:
			default:
				mode = XGBE_MODE_X;
				break;
			}
			break;
		default:
			if ((phy_data->phydev) &&
			    (pdata->phy.speed == SPEED_100))
				mode = XGBE_MODE_SGMII_100;
			else
				mode = XGBE_MODE_SGMII_1000;
			break;
		}
	} else {
		mode = XGBE_MODE_UNKNOWN;
	}

	/* Compare Advertisement and Link Partner register 3 */
	ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
	lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
	if (lp_reg & 0xc000)
		XGBE_SET_LP_ADV(&pdata->phy, 10000baseR_FEC);

	return (mode);
}
+
/*
 * Resolve the result of a native Clause 73 (backplane) auto-negotiation.
 * Records the link partner's advertisement, resolves pause (CL73 base
 * page: 0x400 pause, 0x800 asymmetric pause), and maps the common
 * technology ability (0x80 10GBase-KR, 0x20 1000Base-KX in the second
 * AN register) to an operating mode.
 */
static enum xgbe_mode
xgbe_phy_an73_outcome(struct xgbe_prv_data *pdata)
{
	enum xgbe_mode mode;
	unsigned int ad_reg, lp_reg;

	XGBE_SET_LP_ADV(&pdata->phy, Autoneg);
	XGBE_SET_LP_ADV(&pdata->phy, Backplane);

	/* Compare Advertisement and Link Partner register 1 */
	ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
	lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
	if (lp_reg & 0x400)
		XGBE_SET_LP_ADV(&pdata->phy, Pause);
	if (lp_reg & 0x800)
		XGBE_SET_LP_ADV(&pdata->phy, Asym_Pause);

	axgbe_printf(1, "%s: pause_autoneg %d ad_reg 0x%x lp_reg 0x%x\n",
	    __func__, pdata->phy.pause_autoneg, ad_reg, lp_reg);

	if (pdata->phy.pause_autoneg) {
		/* Set flow control based on auto-negotiation result */
		pdata->phy.tx_pause = 0;
		pdata->phy.rx_pause = 0;

		/* Both symmetric: pause in both directions */
		if (ad_reg & lp_reg & 0x400) {
			pdata->phy.tx_pause = 1;
			pdata->phy.rx_pause = 1;
		} else if (ad_reg & lp_reg & 0x800) {
			/* Both asymmetric: direction set by who asked for it */
			if (ad_reg & 0x400)
				pdata->phy.rx_pause = 1;
			else if (lp_reg & 0x400)
				pdata->phy.tx_pause = 1;
		}
	}

	axgbe_printf(1, "%s: pause tx/rx %d/%d\n", __func__, pdata->phy.tx_pause,
	    pdata->phy.rx_pause);

	/* Compare Advertisement and Link Partner register 2 */
	ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
	lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
	if (lp_reg & 0x80)
		XGBE_SET_LP_ADV(&pdata->phy, 10000baseKR_Full);
	if (lp_reg & 0x20)
		XGBE_SET_LP_ADV(&pdata->phy, 1000baseKX_Full);

	/* Highest common technology wins: KR over KX */
	ad_reg &= lp_reg;
	if (ad_reg & 0x80)
		mode = XGBE_MODE_KR;
	else if (ad_reg & 0x20)
		mode = XGBE_MODE_KX_1000;
	else
		mode = XGBE_MODE_UNKNOWN;

	/* Compare Advertisement and Link Partner register 3 */
	ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
	lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
	if (lp_reg & 0xc000)
		XGBE_SET_LP_ADV(&pdata->phy, 10000baseR_FEC);

	return (mode);
}
+
+static enum xgbe_mode
+xgbe_phy_an_outcome(struct xgbe_prv_data *pdata)
+{
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ return (xgbe_phy_an73_outcome(pdata));
+ case XGBE_AN_MODE_CL73_REDRV:
+ return (xgbe_phy_an73_redrv_outcome(pdata));
+ case XGBE_AN_MODE_CL37:
+ return (xgbe_phy_an37_outcome(pdata));
+ case XGBE_AN_MODE_CL37_SGMII:
+ return (xgbe_phy_an37_sgmii_outcome(pdata));
+ default:
+ return (XGBE_MODE_UNKNOWN);
+ }
+}
+
/*
 * Build the advertisement word used for auto-negotiation.  Without a KR
 * re-driver the current advertisement is used as-is.  With a re-driver
 * only a single speed may be advertised, so the KX/KR bits are cleared
 * and exactly one is re-set based on the port (and SFP module) type.
 */
static void
xgbe_phy_an_advertising(struct xgbe_prv_data *pdata, struct xgbe_phy *dphy)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;

	XGBE_LM_COPY(dphy, advertising, &pdata->phy, advertising);

	/* Without a re-driver, just return current advertising */
	if (!phy_data->redrv)
		return;

	/* With the KR re-driver we need to advertise a single speed */
	XGBE_CLR_ADV(dphy, 1000baseKX_Full);
	XGBE_CLR_ADV(dphy, 10000baseKR_Full);

	/* Advertise FEC support is present */
	if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
		XGBE_SET_ADV(dphy, 10000baseR_FEC);

	switch (phy_data->port_mode) {
	case XGBE_PORT_MODE_BACKPLANE:
		XGBE_SET_ADV(dphy, 10000baseKR_Full);
		break;
	case XGBE_PORT_MODE_BACKPLANE_2500:
		XGBE_SET_ADV(dphy, 1000baseKX_Full);
		break;
	case XGBE_PORT_MODE_1000BASE_T:
	case XGBE_PORT_MODE_1000BASE_X:
	case XGBE_PORT_MODE_NBASE_T:
		XGBE_SET_ADV(dphy, 1000baseKX_Full);
		break;
	case XGBE_PORT_MODE_10GBASE_T:
		/* Follow the external PHY's current speed when attached */
		if ((phy_data->phydev) &&
		    (pdata->phy.speed == SPEED_10000))
			XGBE_SET_ADV(dphy, 10000baseKR_Full);
		else
			XGBE_SET_ADV(dphy, 1000baseKX_Full);
		break;
	case XGBE_PORT_MODE_10GBASE_R:
		XGBE_SET_ADV(dphy, 10000baseKR_Full);
		break;
	case XGBE_PORT_MODE_SFP:
		/* 1G modules advertise KX, everything else KR */
		switch (phy_data->sfp_base) {
		case XGBE_SFP_BASE_1000_T:
		case XGBE_SFP_BASE_1000_SX:
		case XGBE_SFP_BASE_1000_LX:
		case XGBE_SFP_BASE_1000_CX:
			XGBE_SET_ADV(dphy, 1000baseKX_Full);
			break;
		default:
			XGBE_SET_ADV(dphy, 10000baseKR_Full);
			break;
		}
		break;
	default:
		XGBE_SET_ADV(dphy, 10000baseKR_Full);
		break;
	}
}
+
+static int
+xgbe_phy_an_config(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ ret = xgbe_phy_find_phy_device(pdata);
+ if (ret)
+ return (ret);
+
+ axgbe_printf(2, "%s: find_phy_device return %s.\n", __func__,
+ ret ? "Failure" : "Success");
+
+ if (!phy_data->phydev)
+ return (0);
+
+ ret = xgbe_phy_start_aneg(pdata);
+ return (ret);
+}
+
+static enum xgbe_an_mode
+xgbe_phy_an_sfp_mode(struct xgbe_phy_data *phy_data)
+{
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_1000_T:
+ return (XGBE_AN_MODE_CL37_SGMII);
+ case XGBE_SFP_BASE_1000_SX:
+ case XGBE_SFP_BASE_1000_LX:
+ case XGBE_SFP_BASE_1000_CX:
+ return (XGBE_AN_MODE_CL37);
+ default:
+ return (XGBE_AN_MODE_NONE);
+ }
+}
+
+static enum xgbe_an_mode
+xgbe_phy_an_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* A KR re-driver will always require CL73 AN */
+ if (phy_data->redrv)
+ return (XGBE_AN_MODE_CL73_REDRV);
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ return (XGBE_AN_MODE_CL73);
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ return (XGBE_AN_MODE_NONE);
+ case XGBE_PORT_MODE_1000BASE_T:
+ return (XGBE_AN_MODE_CL37_SGMII);
+ case XGBE_PORT_MODE_1000BASE_X:
+ return (XGBE_AN_MODE_CL37);
+ case XGBE_PORT_MODE_NBASE_T:
+ return (XGBE_AN_MODE_CL37_SGMII);
+ case XGBE_PORT_MODE_10GBASE_T:
+ return (XGBE_AN_MODE_CL73);
+ case XGBE_PORT_MODE_10GBASE_R:
+ return (XGBE_AN_MODE_NONE);
+ case XGBE_PORT_MODE_SFP:
+ return (xgbe_phy_an_sfp_mode(phy_data));
+ default:
+ return (XGBE_AN_MODE_NONE);
+ }
+}
+
+static int
+xgbe_phy_set_redrv_mode_mdio(struct xgbe_prv_data *pdata,
+ enum xgbe_phy_redrv_mode mode)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ uint16_t redrv_reg, redrv_val;
+
+ redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
+ redrv_val = (uint16_t)mode;
+
+ return (pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr,
+ redrv_reg, redrv_val));
+}
+
+static int
+xgbe_phy_set_redrv_mode_i2c(struct xgbe_prv_data *pdata,
+ enum xgbe_phy_redrv_mode mode)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int redrv_reg;
+ int ret;
+
+ /* Calculate the register to write */
+ redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
+
+ ret = xgbe_phy_redrv_write(pdata, redrv_reg, mode);
+
+ return (ret);
+}
+
/*
 * Configure the KR re-driver (when present) for the current media:
 * SR mode for optical/active SFP media, CX mode for copper (1000Base-CX,
 * 10G direct-attach) and all non-SFP ports.  Requires shared-bus
 * ownership around the register write.
 */
static void
xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	enum xgbe_phy_redrv_mode mode;
	int ret;

	if (!phy_data->redrv)
		return;

	mode = XGBE_PHY_REDRV_MODE_CX;
	if ((phy_data->port_mode == XGBE_PORT_MODE_SFP) &&
	    (phy_data->sfp_base != XGBE_SFP_BASE_1000_CX) &&
	    (phy_data->sfp_base != XGBE_SFP_BASE_10000_CR))
		mode = XGBE_PHY_REDRV_MODE_SR;

	ret = xgbe_phy_get_comm_ownership(pdata);
	if (ret)
		return;

	/* The re-driver control interface is board-specific: I2C or MDIO */
	axgbe_printf(2, "%s: redrv_if set: %d\n", __func__, phy_data->redrv_if);
	if (phy_data->redrv_if)
		xgbe_phy_set_redrv_mode_i2c(pdata, mode);
	else
		xgbe_phy_set_redrv_mode_mdio(pdata, mode);

	xgbe_phy_put_comm_ownership(pdata);
}
+
/*
 * Issue a rate-change command to the PHY firmware through the mailbox
 * formed by the XP_DRIVER_SCRATCH registers, then poll (2ms intervals,
 * up to XGBE_RATECHANGE_COUNT tries) for the firmware to clear the
 * status bit.  A timeout is logged but not treated as fatal.
 */
static void
xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata, unsigned int cmd,
    unsigned int sub_cmd)
{
	unsigned int s0 = 0;
	unsigned int wait;

	/* Log if a previous command did not complete */
	if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
		axgbe_error("firmware mailbox not ready for command\n");

	/* Construct the command */
	XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd);
	XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, sub_cmd);

	/* Issue the command */
	XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
	XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
	XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);

	/* Wait for command to complete */
	wait = XGBE_RATECHANGE_COUNT;
	while (wait--) {
		if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) {
			axgbe_printf(3, "%s: Rate change done\n", __func__);
			return;
		}

		DELAY(2000);
	}

	axgbe_printf(3, "firmware mailbox command did not complete\n");
}
+
/*
 * Ask the PHY firmware to run a receiver reset cycle (mailbox command 5).
 */
static void
xgbe_phy_rrc(struct xgbe_prv_data *pdata)
{
	xgbe_phy_perform_ratechange(pdata, 5, 0);
	axgbe_printf(3, "receiver reset complete\n");
}
+
+static void
+xgbe_phy_power_off(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* Power off */
+ xgbe_phy_perform_ratechange(pdata, 0, 0);
+
+ phy_data->cur_mode = XGBE_MODE_UNKNOWN;
+
+ axgbe_printf(3, "phy powered off\n");
+}
+
+static void
+xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 10G/SFI */
+ axgbe_printf(3, "%s: cable %d len %d\n", __func__, phy_data->sfp_cable,
+ phy_data->sfp_cable_len);
+
+ if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE)
+ xgbe_phy_perform_ratechange(pdata, 3, 0);
+ else {
+ if (phy_data->sfp_cable_len <= 1)
+ xgbe_phy_perform_ratechange(pdata, 3, 1);
+ else if (phy_data->sfp_cable_len <= 3)
+ xgbe_phy_perform_ratechange(pdata, 3, 2);
+ else
+ xgbe_phy_perform_ratechange(pdata, 3, 3);
+ }
+
+ phy_data->cur_mode = XGBE_MODE_SFI;
+
+ axgbe_printf(3, "10GbE SFI mode set\n");
+}
+
+static void
+xgbe_phy_x_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 1G/X */
+ xgbe_phy_perform_ratechange(pdata, 1, 3);
+
+ phy_data->cur_mode = XGBE_MODE_X;
+
+ axgbe_printf(3, "1GbE X mode set\n");
+}
+
+static void
+xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 1G/SGMII */
+ xgbe_phy_perform_ratechange(pdata, 1, 2);
+
+ phy_data->cur_mode = XGBE_MODE_SGMII_1000;
+
+ axgbe_printf(2, "1GbE SGMII mode set\n");
+}
+
+static void
+xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 100M/SGMII */
+ xgbe_phy_perform_ratechange(pdata, 1, 1);
+
+ phy_data->cur_mode = XGBE_MODE_SGMII_100;
+
+ axgbe_printf(3, "100MbE SGMII mode set\n");
+}
+
+static void
+xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 10G/KR */
+ xgbe_phy_perform_ratechange(pdata, 4, 0);
+
+ phy_data->cur_mode = XGBE_MODE_KR;
+
+ axgbe_printf(3, "10GbE KR mode set\n");
+}
+
+static void
+xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 2.5G/KX */
+ xgbe_phy_perform_ratechange(pdata, 2, 0);
+
+ phy_data->cur_mode = XGBE_MODE_KX_2500;
+
+ axgbe_printf(3, "2.5GbE KX mode set\n");
+}
+
+static void
+xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 1G/KX */
+ xgbe_phy_perform_ratechange(pdata, 1, 3);
+
+ phy_data->cur_mode = XGBE_MODE_KX_1000;
+
+ axgbe_printf(3, "1GbE KX mode set\n");
+}
+
+static enum xgbe_mode
+xgbe_phy_cur_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ return (phy_data->cur_mode);
+}
+
+static enum xgbe_mode
+xgbe_phy_switch_baset_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* No switching if not 10GBase-T */
+ if (phy_data->port_mode != XGBE_PORT_MODE_10GBASE_T)
+ return (xgbe_phy_cur_mode(pdata));
+
+ switch (xgbe_phy_cur_mode(pdata)) {
+ case XGBE_MODE_SGMII_100:
+ case XGBE_MODE_SGMII_1000:
+ return (XGBE_MODE_KR);
+ case XGBE_MODE_KR:
+ default:
+ return (XGBE_MODE_SGMII_1000);
+ }
+}
+
+static enum xgbe_mode
+xgbe_phy_switch_bp_2500_mode(struct xgbe_prv_data *pdata)
+{
+ return (XGBE_MODE_KX_2500);
+}
+
+static enum xgbe_mode
+xgbe_phy_switch_bp_mode(struct xgbe_prv_data *pdata)
+{
+ /* If we are in KR switch to KX, and vice-versa */
+ switch (xgbe_phy_cur_mode(pdata)) {
+ case XGBE_MODE_KX_1000:
+ return (XGBE_MODE_KR);
+ case XGBE_MODE_KR:
+ default:
+ return (XGBE_MODE_KX_1000);
+ }
+}
+
+static enum xgbe_mode
+xgbe_phy_switch_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ return (xgbe_phy_switch_bp_mode(pdata));
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ return (xgbe_phy_switch_bp_2500_mode(pdata));
+ case XGBE_PORT_MODE_1000BASE_T:
+ case XGBE_PORT_MODE_NBASE_T:
+ case XGBE_PORT_MODE_10GBASE_T:
+ return (xgbe_phy_switch_baset_mode(pdata));
+ case XGBE_PORT_MODE_1000BASE_X:
+ case XGBE_PORT_MODE_10GBASE_R:
+ case XGBE_PORT_MODE_SFP:
+ /* No switching, so just return current mode */
+ return (xgbe_phy_cur_mode(pdata));
+ default:
+ return (XGBE_MODE_UNKNOWN);
+ }
+}
+
+static enum xgbe_mode
+xgbe_phy_get_basex_mode(struct xgbe_phy_data *phy_data, int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return (XGBE_MODE_X);
+ case SPEED_10000:
+ return (XGBE_MODE_KR);
+ default:
+ return (XGBE_MODE_UNKNOWN);
+ }
+}
+
+static enum xgbe_mode
+xgbe_phy_get_baset_mode(struct xgbe_phy_data *phy_data, int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return (XGBE_MODE_SGMII_100);
+ case SPEED_1000:
+ return (XGBE_MODE_SGMII_1000);
+ case SPEED_2500:
+ return (XGBE_MODE_KX_2500);
+ case SPEED_10000:
+ return (XGBE_MODE_KR);
+ default:
+ return (XGBE_MODE_UNKNOWN);
+ }
+}
+
+static enum xgbe_mode
+xgbe_phy_get_sfp_mode(struct xgbe_phy_data *phy_data, int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return (XGBE_MODE_SGMII_100);
+ case SPEED_1000:
+ if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T)
+ return (XGBE_MODE_SGMII_1000);
+ else
+ return (XGBE_MODE_X);
+ case SPEED_10000:
+ case SPEED_UNKNOWN:
+ return (XGBE_MODE_SFI);
+ default:
+ return (XGBE_MODE_UNKNOWN);
+ }
+}
+
+static enum xgbe_mode
+xgbe_phy_get_bp_2500_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_2500:
+ return (XGBE_MODE_KX_2500);
+ default:
+ return (XGBE_MODE_UNKNOWN);
+ }
+}
+
+static enum xgbe_mode
+xgbe_phy_get_bp_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return (XGBE_MODE_KX_1000);
+ case SPEED_10000:
+ return (XGBE_MODE_KR);
+ default:
+ return (XGBE_MODE_UNKNOWN);
+ }
+}
+
+static enum xgbe_mode
+xgbe_phy_get_mode(struct xgbe_prv_data *pdata, int speed)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ return (xgbe_phy_get_bp_mode(speed));
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ return (xgbe_phy_get_bp_2500_mode(speed));
+ case XGBE_PORT_MODE_1000BASE_T:
+ case XGBE_PORT_MODE_NBASE_T:
+ case XGBE_PORT_MODE_10GBASE_T:
+ return (xgbe_phy_get_baset_mode(phy_data, speed));
+ case XGBE_PORT_MODE_1000BASE_X:
+ case XGBE_PORT_MODE_10GBASE_R:
+ return (xgbe_phy_get_basex_mode(phy_data, speed));
+ case XGBE_PORT_MODE_SFP:
+ return (xgbe_phy_get_sfp_mode(phy_data, speed));
+ default:
+ return (XGBE_MODE_UNKNOWN);
+ }
+}
+
+/*
+ * Switch the hardware into the given operating mode by invoking the
+ * mode-specific configuration routine.  Unknown modes are silently
+ * ignored.
+ */
+static void
+xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+	switch (mode) {
+	case XGBE_MODE_KX_1000:
+		xgbe_phy_kx_1000_mode(pdata);
+		break;
+	case XGBE_MODE_KX_2500:
+		xgbe_phy_kx_2500_mode(pdata);
+		break;
+	case XGBE_MODE_KR:
+		xgbe_phy_kr_mode(pdata);
+		break;
+	case XGBE_MODE_SGMII_100:
+		xgbe_phy_sgmii_100_mode(pdata);
+		break;
+	case XGBE_MODE_SGMII_1000:
+		xgbe_phy_sgmii_1000_mode(pdata);
+		break;
+	case XGBE_MODE_X:
+		xgbe_phy_x_mode(pdata);
+		break;
+	case XGBE_MODE_SFI:
+		xgbe_phy_sfi_mode(pdata);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Translate the current PHY speed and port mode into an ifmedia
+ * active-media word for status reporting.  Combinations with no
+ * specific media word fall back to IFM_OTHER.
+ */
+static void
+xgbe_phy_get_type(struct xgbe_prv_data *pdata, struct ifmediareq * ifmr)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+	switch (pdata->phy.speed) {
+	case SPEED_10000:
+		if (phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE)
+			ifmr->ifm_active |= IFM_10G_KR;
+		else if(phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T)
+			ifmr->ifm_active |= IFM_10G_T;
+		else if(phy_data->port_mode == XGBE_PORT_MODE_10GBASE_R)
+			ifmr->ifm_active |= IFM_10G_KR;
+		else if(phy_data->port_mode == XGBE_PORT_MODE_SFP)
+			ifmr->ifm_active |= IFM_10G_SFI;
+		else
+			ifmr->ifm_active |= IFM_OTHER;
+		break;
+	case SPEED_2500:
+		if (phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE_2500)
+			ifmr->ifm_active |= IFM_2500_KX;
+		else
+			ifmr->ifm_active |= IFM_OTHER;
+		break;
+	case SPEED_1000:
+		if (phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE)
+			ifmr->ifm_active |= IFM_1000_KX;
+		else if(phy_data->port_mode == XGBE_PORT_MODE_1000BASE_T)
+			ifmr->ifm_active |= IFM_1000_T;
+#if 0
+		else if(phy_data->port_mode == XGBE_PORT_MODE_1000BASE_X)
+			ifmr->ifm_active |= IFM_1000_SX;
+			ifmr->ifm_active |= IFM_1000_LX;
+			ifmr->ifm_active |= IFM_1000_CX;
+#endif
+		else if(phy_data->port_mode == XGBE_PORT_MODE_SFP)
+			ifmr->ifm_active |= IFM_1000_SGMII;
+		else
+			ifmr->ifm_active |= IFM_OTHER;
+		break;
+	case SPEED_100:
+		if(phy_data->port_mode == XGBE_PORT_MODE_NBASE_T)
+			ifmr->ifm_active |= IFM_100_T;
+		/*
+		 * NOTE(review): SFP at SPEED_100 reports IFM_1000_SGMII;
+		 * a 100Mb media word may be intended here -- confirm.
+		 */
+		else if(phy_data->port_mode == XGBE_PORT_MODE_SFP)
+			ifmr->ifm_active |= IFM_1000_SGMII;
+		else
+			ifmr->ifm_active |= IFM_OTHER;
+		break;
+	default:
+		ifmr->ifm_active |= IFM_OTHER;
+		axgbe_printf(1, "Unknown mode detected\n");
+		break;
+	}
+}
+
+/*
+ * Decide whether a mode may be used: with auto-negotiation the caller's
+ * advertisement flag decides; otherwise the mode implied by the fixed
+ * configured speed must match the requested mode.
+ */
+static bool
+xgbe_phy_check_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode,
+    bool advert)
+{
+	enum xgbe_mode active;
+
+	if (pdata->phy.autoneg == AUTONEG_ENABLE)
+		return (advert);
+
+	active = xgbe_phy_get_mode(pdata, pdata->phy.speed);
+	return (active == mode);
+}
+
+/*
+ * Decide whether the given mode may be used on a Base-X port by
+ * checking the matching advertisement bit (or, without autoneg, the
+ * mode implied by the fixed speed -- see xgbe_phy_check_mode()).
+ */
+static bool
+xgbe_phy_use_basex_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+
+	switch (mode) {
+	case XGBE_MODE_X:
+		return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy,
+		    1000baseX_Full)));
+	case XGBE_MODE_KR:
+		return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy,
+		    10000baseKR_Full)));
+	default:
+		return (false);
+	}
+}
+
+/*
+ * Decide whether the given mode may be used on a Base-T port by
+ * checking the matching advertisement bit (or, without autoneg, the
+ * mode implied by the fixed speed -- see xgbe_phy_check_mode()).
+ */
+static bool
+xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+
+	axgbe_printf(3, "%s: check mode %d\n", __func__, mode);
+	switch (mode) {
+	case XGBE_MODE_SGMII_100:
+		return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy,
+		    100baseT_Full)));
+	case XGBE_MODE_SGMII_1000:
+		return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy,
+		    1000baseT_Full)));
+	case XGBE_MODE_KX_2500:
+		return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy,
+		    2500baseT_Full)));
+	case XGBE_MODE_KR:
+		return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy,
+		    10000baseT_Full)));
+	default:
+		return (false);
+	}
+}
+
+/*
+ * Decide whether the given mode may be used on an SFP port.  The mode
+ * must first be compatible with the installed module type; the
+ * advertisement (or fixed-speed) check then applies.
+ */
+static bool
+xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+	switch (mode) {
+	case XGBE_MODE_X:
+		/* 1000Base-X is not used with copper (1000Base-T) modules */
+		if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T)
+			return (false);
+		return (xgbe_phy_check_mode(pdata, mode,
+		    XGBE_ADV(&pdata->phy, 1000baseX_Full)));
+	case XGBE_MODE_SGMII_100:
+		/* SGMII requires a copper (1000Base-T) module */
+		if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
+			return (false);
+		return (xgbe_phy_check_mode(pdata, mode,
+		    XGBE_ADV(&pdata->phy, 100baseT_Full)));
+	case XGBE_MODE_SGMII_1000:
+		/* SGMII requires a copper (1000Base-T) module */
+		if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
+			return (false);
+		return (xgbe_phy_check_mode(pdata, mode,
+		    XGBE_ADV(&pdata->phy, 1000baseT_Full)));
+	case XGBE_MODE_SFI:
+		/* With no module present, SFI is always acceptable */
+		if (phy_data->sfp_mod_absent)
+			return (true);
+		/* Any 10G advertisement flavor allows SFI */
+		return (xgbe_phy_check_mode(pdata, mode,
+		    XGBE_ADV(&pdata->phy, 10000baseSR_Full) ||
+		    XGBE_ADV(&pdata->phy, 10000baseLR_Full) ||
+		    XGBE_ADV(&pdata->phy, 10000baseLRM_Full) ||
+		    XGBE_ADV(&pdata->phy, 10000baseER_Full) ||
+		    XGBE_ADV(&pdata->phy, 10000baseCR_Full)));
+	default:
+		return (false);
+	}
+}
+
+/*
+ * Decide whether the given mode may be used on a 2.5G backplane port;
+ * only KX 2.5G is ever eligible, gated by its advertisement bit (or
+ * the fixed speed -- see xgbe_phy_check_mode()).
+ */
+static bool
+xgbe_phy_use_bp_2500_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+
+	if (mode != XGBE_MODE_KX_2500)
+		return (false);
+
+	return (xgbe_phy_check_mode(pdata, mode,
+	    XGBE_ADV(&pdata->phy, 2500baseX_Full)));
+}
+
+/*
+ * Decide whether the given mode may be used on a backplane port by
+ * checking the matching advertisement bit (or, without autoneg, the
+ * mode implied by the fixed speed -- see xgbe_phy_check_mode()).
+ */
+static bool
+xgbe_phy_use_bp_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+
+	switch (mode) {
+	case XGBE_MODE_KX_1000:
+		return (xgbe_phy_check_mode(pdata, mode,
+		    XGBE_ADV(&pdata->phy, 1000baseKX_Full)));
+	case XGBE_MODE_KR:
+		return (xgbe_phy_check_mode(pdata, mode,
+		    XGBE_ADV(&pdata->phy, 10000baseKR_Full)));
+	default:
+		return (false);
+	}
+}
+
+/*
+ * Dispatch the "may this mode be used" decision to the helper matching
+ * the configured port mode.
+ */
+static bool
+xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+	switch (phy_data->port_mode) {
+	case XGBE_PORT_MODE_BACKPLANE:
+		return (xgbe_phy_use_bp_mode(pdata, mode));
+	case XGBE_PORT_MODE_BACKPLANE_2500:
+		return (xgbe_phy_use_bp_2500_mode(pdata, mode));
+	case XGBE_PORT_MODE_1000BASE_T:
+		/* Debug trace only; result comes from the shared case below */
+		axgbe_printf(3, "use_mode %s\n",
+		    xgbe_phy_use_baset_mode(pdata, mode) ? "found" : "Not found");
+		/* FALLTHROUGH */
+	case XGBE_PORT_MODE_NBASE_T:
+	case XGBE_PORT_MODE_10GBASE_T:
+		return (xgbe_phy_use_baset_mode(pdata, mode));
+	case XGBE_PORT_MODE_1000BASE_X:
+	case XGBE_PORT_MODE_10GBASE_R:
+		return (xgbe_phy_use_basex_mode(pdata, mode));
+	case XGBE_PORT_MODE_SFP:
+		return (xgbe_phy_use_sfp_mode(pdata, mode));
+	default:
+		return (false);
+	}
+}
+
+/*
+ * Validate a speed for Base-X port modes: 1G only on 1000Base-X ports
+ * and 10G only on 10GBase-R ports.
+ */
+static bool
+xgbe_phy_valid_speed_basex_mode(struct xgbe_phy_data *phy_data, int speed)
+{
+
+	if (speed == SPEED_1000)
+		return (phy_data->port_mode == XGBE_PORT_MODE_1000BASE_X);
+	if (speed == SPEED_10000)
+		return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_R);
+
+	return (false);
+}
+
+/*
+ * Validate a speed for Base-T port modes: 100M and 1G are always
+ * valid, 2.5G only on NBase-T ports and 10G only on 10GBase-T ports.
+ */
+static bool
+xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data, int speed)
+{
+
+	switch (speed) {
+	case SPEED_100:
+	case SPEED_1000:
+		return (true);
+	case SPEED_2500:
+		return (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T);
+	case SPEED_10000:
+		return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T);
+	default:
+		return (false);
+	}
+}
+
+/*
+ * Validate a speed against the installed SFP module's rated speed
+ * class.
+ */
+static bool
+xgbe_phy_valid_speed_sfp_mode(struct xgbe_phy_data *phy_data, int speed)
+{
+
+	if (speed == SPEED_100)
+		return (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000);
+	if (speed == SPEED_1000)
+		return ((phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000) ||
+		    (phy_data->sfp_speed == XGBE_SFP_SPEED_1000));
+	if (speed == SPEED_10000)
+		return (phy_data->sfp_speed == XGBE_SFP_SPEED_10000);
+
+	return (false);
+}
+
+/*
+ * 2.5G backplane ports support exactly one speed: 2.5G.
+ */
+static bool
+xgbe_phy_valid_speed_bp_2500_mode(int speed)
+{
+
+	return (speed == SPEED_2500);
+}
+
+/*
+ * Backplane ports support 1G (KX) and 10G (KR) only.
+ */
+static bool
+xgbe_phy_valid_speed_bp_mode(int speed)
+{
+
+	return (speed == SPEED_1000 || speed == SPEED_10000);
+}
+
+/*
+ * Check whether a link speed is supported in the configured port mode,
+ * dispatching to the per-port-mode validator.
+ */
+static bool
+xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+	switch (phy_data->port_mode) {
+	case XGBE_PORT_MODE_BACKPLANE:
+		return (xgbe_phy_valid_speed_bp_mode(speed));
+	case XGBE_PORT_MODE_BACKPLANE_2500:
+		return (xgbe_phy_valid_speed_bp_2500_mode(speed));
+	case XGBE_PORT_MODE_1000BASE_T:
+	case XGBE_PORT_MODE_NBASE_T:
+	case XGBE_PORT_MODE_10GBASE_T:
+		return (xgbe_phy_valid_speed_baset_mode(phy_data, speed));
+	case XGBE_PORT_MODE_1000BASE_X:
+	case XGBE_PORT_MODE_10GBASE_R:
+		return (xgbe_phy_valid_speed_basex_mode(phy_data, speed));
+	case XGBE_PORT_MODE_SFP:
+		return (xgbe_phy_valid_speed_sfp_mode(phy_data, speed));
+	default:
+		return (false);
+	}
+}
+
+/*
+ * Refresh pdata->phy.link from the external PHY's BMSR link bit.
+ * Returns 0 on success or the negative MII read error.
+ */
+static int
+xgbe_upd_link(struct xgbe_prv_data *pdata)
+{
+	int bmsr;
+
+	axgbe_printf(2, "%s: Link %d\n", __func__, pdata->phy.link);
+	bmsr = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMSR);
+	if (bmsr < 0)
+		return (bmsr);
+
+	pdata->phy.link = (bmsr & BMSR_LINK) ? 1 : 0;
+
+	axgbe_printf(2, "Link: %d updated reg %#x\n", pdata->phy.link, bmsr);
+	return (0);
+}
+
+/*
+ * Refresh pdata->phy (link, speed, duplex, pause) from the external
+ * PHY's MII registers, resolving the highest common ability when
+ * auto-negotiation is enabled or reading the fixed BMCR configuration
+ * otherwise.  Returns 0 on success or a negative error.
+ *
+ * Fix: common_adv_gb was read (debug print and the 1000Base-T
+ * resolution test below) without ever being written when the
+ * 1000Base-T ability branch is not taken -- an uninitialized read.
+ * Initialize it to 0 so the 1G resolution is simply skipped.
+ */
+static int
+xgbe_phy_read_status(struct xgbe_prv_data *pdata)
+{
+	int common_adv_gb = 0;
+	int common_adv;
+	int lpagb = 0;
+	int adv, lpa;
+	int ret;
+
+	/* Update the cached link state first */
+	ret = xgbe_upd_link(pdata);
+	if (ret) {
+		axgbe_printf(2, "Link Update return %d\n", ret);
+		return (ret);
+	}
+
+	if (AUTONEG_ENABLE == pdata->phy.autoneg) {
+		/*
+		 * NOTE(review): 'supported' is compared with '==' rather
+		 * than tested as a bitmask -- confirm the field holds a
+		 * single capability value here.
+		 */
+		if (pdata->phy.supported == SUPPORTED_1000baseT_Half ||
+		    pdata->phy.supported == SUPPORTED_1000baseT_Full) {
+			/* Link partner and local 1000Base-T abilities */
+			lpagb = xgbe_phy_mii_read(pdata, pdata->mdio_addr,
+			    MII_100T2SR);
+			if (lpagb < 0)
+				return (lpagb);
+
+			adv = xgbe_phy_mii_read(pdata, pdata->mdio_addr,
+			    MII_100T2CR);
+			if (adv < 0)
+				return (adv);
+
+			/* A master/slave resolution fault kills the link */
+			if (lpagb & GTSR_MAN_MS_FLT) {
+				if (adv & GTCR_MAN_MS)
+					axgbe_printf(2, "Master/Slave Resolution "
+					    "failed, maybe conflicting manual settings\n");
+				else
+					axgbe_printf(2, "Master/Slave Resolution failed\n");
+				return (-ENOLINK);
+			}
+
+			if (pdata->phy.supported == SUPPORTED_1000baseT_Half)
+				XGBE_ADV(&pdata->phy, 1000baseT_Half);
+			else if (pdata->phy.supported == SUPPORTED_1000baseT_Full)
+				XGBE_ADV(&pdata->phy, 1000baseT_Full);
+
+			/* Align LP ability bits with local advertisement */
+			common_adv_gb = lpagb & adv << 2;
+		}
+
+		lpa = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_ANLPAR);
+		if (lpa < 0)
+			return (lpa);
+
+		if (pdata->phy.supported == SUPPORTED_Autoneg)
+			XGBE_ADV(&pdata->phy, Autoneg);
+
+		adv = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_ANAR);
+		if (adv < 0)
+			return (adv);
+
+		common_adv = lpa & adv;
+
+		/* Defaults before resolving the highest common ability */
+		pdata->phy.speed = SPEED_10;
+		pdata->phy.duplex = DUPLEX_HALF;
+		pdata->phy.pause = 0;
+		pdata->phy.asym_pause = 0;
+
+		axgbe_printf(2, "%s: lpa %#x adv %#x common_adv_gb %#x "
+		    "common_adv %#x\n", __func__, lpa, adv, common_adv_gb,
+		    common_adv);
+		if (common_adv_gb & (GTSR_LP_1000TFDX | GTSR_LP_1000THDX)) {
+			axgbe_printf(2, "%s: SPEED 1000\n", __func__);
+			pdata->phy.speed = SPEED_1000;
+
+			if (common_adv_gb & GTSR_LP_1000TFDX)
+				pdata->phy.duplex = DUPLEX_FULL;
+		} else if (common_adv & (ANLPAR_TX_FD | ANLPAR_TX)) {
+			axgbe_printf(2, "%s: SPEED 100\n", __func__);
+			pdata->phy.speed = SPEED_100;
+
+			if (common_adv & ANLPAR_TX_FD)
+				pdata->phy.duplex = DUPLEX_FULL;
+		} else
+			if (common_adv & ANLPAR_10_FD)
+				pdata->phy.duplex = DUPLEX_FULL;
+
+		/* Pause resolution only applies to full-duplex links */
+		if (pdata->phy.duplex == DUPLEX_FULL) {
+			pdata->phy.pause = lpa & ANLPAR_FC ? 1 : 0;
+			pdata->phy.asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
+		}
+	} else {
+		/* Fixed configuration: read speed/duplex from BMCR */
+		int bmcr = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMCR);
+		if (bmcr < 0)
+			return (bmcr);
+
+		if (bmcr & BMCR_FDX)
+			pdata->phy.duplex = DUPLEX_FULL;
+		else
+			pdata->phy.duplex = DUPLEX_HALF;
+
+		if (bmcr & BMCR_SPEED1)
+			pdata->phy.speed = SPEED_1000;
+		else if (bmcr & BMCR_SPEED100)
+			pdata->phy.speed = SPEED_100;
+		else
+			pdata->phy.speed = SPEED_10;
+
+		pdata->phy.pause = 0;
+		pdata->phy.asym_pause = 0;
+		axgbe_printf(2, "%s: link speed %#x duplex %#x media %#x "
+		    "autoneg %#x\n", __func__, pdata->phy.speed,
+		    pdata->phy.duplex, pdata->phy.link, pdata->phy.autoneg);
+	}
+
+	return (0);
+}
+
+/*
+ * Report the port's link state; returns non-zero when the link is up.
+ * *an_restart is set when an SFP module change requires restarting
+ * auto-negotiation.  SFP ports use the module signals plus the latched
+ * PCS link status; other ports poll the external PHY through miibus.
+ */
+static int
+xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+	struct mii_data *mii = NULL;
+	unsigned int reg;
+	int ret;
+
+	*an_restart = 0;
+
+	if (phy_data->port_mode == XGBE_PORT_MODE_SFP) {
+		/* Check SFP signals */
+		axgbe_printf(3, "%s: calling phy detect\n", __func__);
+		xgbe_phy_sfp_detect(pdata);
+
+		/* A module swap invalidates the negotiated state */
+		if (phy_data->sfp_changed) {
+			axgbe_printf(1, "%s: SFP changed observed\n", __func__);
+			*an_restart = 1;
+			return (0);
+		}
+
+		/* No module, or receiver reports loss of signal: no link */
+		if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los) {
+			axgbe_printf(1, "%s: SFP absent 0x%x & sfp_rx_los 0x%x\n",
+			    __func__, phy_data->sfp_mod_absent,
+			    phy_data->sfp_rx_los);
+			return (0);
+		}
+	} else {
+		/* Non-SFP: poll the external PHY state via miibus/MII */
+		mii = device_get_softc(pdata->axgbe_miibus);
+		mii_tick(mii);
+
+		ret = xgbe_phy_read_status(pdata);
+		if (ret) {
+			axgbe_printf(2, "Link: Read status returned %d\n", ret);
+			return (ret);
+		}
+
+		axgbe_printf(2, "%s: link speed %#x duplex %#x media %#x "
+		    "autoneg %#x\n", __func__, pdata->phy.speed,
+		    pdata->phy.duplex, pdata->phy.link, pdata->phy.autoneg);
+		/* With autoneg, only report link once AN has completed */
+		ret = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMSR);
+		ret = (ret < 0) ? ret : (ret & BMSR_ACOMP);
+		axgbe_printf(2, "Link: BMCR returned %d\n", ret);
+		if ((pdata->phy.autoneg == AUTONEG_ENABLE) && !ret)
+			return (0);
+
+		return (pdata->phy.link);
+	}
+
+	/* Link status is latched low, so read once to clear
+	 * and then read again to get current state
+	 */
+	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+	axgbe_printf(1, "%s: link_status reg: 0x%x\n", __func__, reg);
+	if (reg & MDIO_STAT1_LSTATUS)
+		return (1);
+
+	/* No link, attempt a receiver reset cycle */
+	if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
+		axgbe_printf(1, "ENTERED RRC: rrc_count: %d\n",
+		    phy_data->rrc_count);
+		phy_data->rrc_count = 0;
+		xgbe_phy_rrc(pdata);
+	}
+
+	return (0);
+}
+
+/*
+ * Extract the SFP GPIO expander address and the GPIO pin assignments
+ * (RX LOS, TX fault, MOD_ABS, rate select) from port property
+ * register 3.
+ */
+static void
+xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+	phy_data->sfp_gpio_address = XGBE_GPIO_ADDRESS_PCA9555 +
+	    XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_ADDR);
+	phy_data->sfp_gpio_mask = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+	    GPIO_MASK);
+	phy_data->sfp_gpio_rx_los = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+	    GPIO_RX_LOS);
+	phy_data->sfp_gpio_tx_fault = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+	    GPIO_TX_FAULT);
+	phy_data->sfp_gpio_mod_absent = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+	    GPIO_MOD_ABS);
+	phy_data->sfp_gpio_rate_select = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+	    GPIO_RATE_SELECT);
+
+	DBGPR("SFP: gpio_address=%#x\n", phy_data->sfp_gpio_address);
+	DBGPR("SFP: gpio_mask=%#x\n", phy_data->sfp_gpio_mask);
+	DBGPR("SFP: gpio_rx_los=%u\n", phy_data->sfp_gpio_rx_los);
+	DBGPR("SFP: gpio_tx_fault=%u\n", phy_data->sfp_gpio_tx_fault);
+	DBGPR("SFP: gpio_mod_absent=%u\n",
+	    phy_data->sfp_gpio_mod_absent);
+	DBGPR("SFP: gpio_rate_select=%u\n",
+	    phy_data->sfp_gpio_rate_select);
+}
+
+/*
+ * Determine how the SFP's I2C bus is reached: directly (mux_addr_lo ==
+ * XGBE_SFP_DIRECT) or through a PCA9545 mux whose address and channel
+ * come from port property register 4.
+ */
+static void
+xgbe_phy_sfp_comm_setup(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+	unsigned int mux_addr_hi, mux_addr_lo;
+
+	mux_addr_hi = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_HI);
+	mux_addr_lo = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_LO);
+	if (mux_addr_lo == XGBE_SFP_DIRECT)
+		return;
+
+	phy_data->sfp_comm = XGBE_SFP_COMM_PCA9545;
+	phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo;
+	phy_data->sfp_mux_channel = XP_GET_BITS(pdata->pp4, XP_PROP_4,
+	    MUX_CHAN);
+
+	DBGPR("SFP: mux_address=%#x\n", phy_data->sfp_mux_address);
+	DBGPR("SFP: mux_channel=%u\n", phy_data->sfp_mux_channel);
+}
+
+/*
+ * Gather all SFP access configuration: I2C mux routing first, then the
+ * GPIO expander pin assignments.
+ */
+static void
+xgbe_phy_sfp_setup(struct xgbe_prv_data *pdata)
+{
+	xgbe_phy_sfp_comm_setup(pdata);
+	xgbe_phy_sfp_gpio_setup(pdata);
+}
+
+/*
+ * Pulse the external PHY reset line driven from an internal GPIO:
+ * assert via set_gpio, then deassert via clr_gpio, returning the first
+ * failure.
+ *
+ * Fix: 'ret' was declared unsigned int although it carries error codes
+ * and the function returns int; use int so a negative error propagates
+ * unmodified (and to match xgbe_phy_i2c_mdio_reset()).
+ */
+static int
+xgbe_phy_int_mdio_reset(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+	int ret;
+
+	/* Assert the MDIO reset line */
+	ret = pdata->hw_if.set_gpio(pdata, phy_data->mdio_reset_gpio);
+	if (ret)
+		return (ret);
+
+	/* Deassert the MDIO reset line */
+	ret = pdata->hw_if.clr_gpio(pdata, phy_data->mdio_reset_gpio);
+
+	return (ret);
+}
+
+/*
+ * Pulse the external PHY reset line that sits behind an I2C GPIO
+ * expander (PCA9555-style; register 2 starts the output-port pair):
+ * read the current outputs, set the reset pin and write, then clear
+ * the pin and write again.
+ */
+static int
+xgbe_phy_i2c_mdio_reset(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+	uint8_t gpio_reg, gpio_ports[2], gpio_data[3];
+	int ret;
+
+	/* Read the output port registers */
+	gpio_reg = 2;
+	ret = xgbe_phy_i2c_read(pdata, phy_data->mdio_reset_addr,
+	    &gpio_reg, sizeof(gpio_reg),
+	    gpio_ports, sizeof(gpio_ports));
+	if (ret)
+		return (ret);
+
+	/* Prepare to write the GPIO data */
+	gpio_data[0] = 2;
+	gpio_data[1] = gpio_ports[0];
+	gpio_data[2] = gpio_ports[1];
+
+	/* Set the GPIO pin */
+	if (phy_data->mdio_reset_gpio < 8)
+		gpio_data[1] |= (1 << (phy_data->mdio_reset_gpio % 8));
+	else
+		gpio_data[2] |= (1 << (phy_data->mdio_reset_gpio % 8));
+
+	/* Write the output port registers */
+	ret = xgbe_phy_i2c_write(pdata, phy_data->mdio_reset_addr,
+	    gpio_data, sizeof(gpio_data));
+	if (ret)
+		return (ret);
+
+	/* Clear the GPIO pin */
+	if (phy_data->mdio_reset_gpio < 8)
+		gpio_data[1] &= ~(1 << (phy_data->mdio_reset_gpio % 8));
+	else
+		gpio_data[2] &= ~(1 << (phy_data->mdio_reset_gpio % 8));
+
+	/* Write the output port registers */
+	ret = xgbe_phy_i2c_write(pdata, phy_data->mdio_reset_addr,
+	    gpio_data, sizeof(gpio_data));
+
+	return (ret);
+}
+
+/*
+ * Reset the external PHY using whichever reset mechanism was
+ * provisioned (I2C GPIO expander or internal GPIO).  No-op for
+ * non-MDIO connection types.  Shared communication-channel ownership
+ * is held across the reset.
+ */
+static int
+xgbe_phy_mdio_reset(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+	int ret;
+
+	if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO)
+		return (0);
+
+	ret = xgbe_phy_get_comm_ownership(pdata);
+	if (ret)
+		return (ret);
+
+	if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO)
+		ret = xgbe_phy_i2c_mdio_reset(pdata);
+	else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO)
+		ret = xgbe_phy_int_mdio_reset(pdata);
+
+	xgbe_phy_put_comm_ownership(pdata);
+
+	return (ret);
+}
+
+/*
+ * Validate the re-driver configuration.  Returns true on error: an
+ * out-of-range interface, an unknown model, or a lane index beyond
+ * what the model supports (4223: lanes 0-3, 4227: lanes 0-1).  A port
+ * without a re-driver is always valid.
+ */
+static bool
+xgbe_phy_redrv_error(struct xgbe_phy_data *phy_data)
+{
+	unsigned int max_lane;
+
+	if (!phy_data->redrv)
+		return (false);
+
+	if (phy_data->redrv_if >= XGBE_PHY_REDRV_IF_MAX)
+		return (true);
+
+	switch (phy_data->redrv_model) {
+	case XGBE_PHY_REDRV_MODEL_4223:
+		max_lane = 3;
+		break;
+	case XGBE_PHY_REDRV_MODEL_4227:
+		max_lane = 1;
+		break;
+	default:
+		return (true);
+	}
+
+	return (phy_data->redrv_lane > max_lane);
+}
+
+/*
+ * Parse and validate the MDIO reset method from port property register
+ * 3.  For I2C-GPIO resets, also capture the expander address and pin;
+ * for internal-GPIO resets, the pin number.  Only meaningful for MDIO
+ * connection types; unknown methods fail with -EINVAL.
+ */
+static int
+xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+	if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO)
+		return (0);
+
+	phy_data->mdio_reset = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET);
+	switch (phy_data->mdio_reset) {
+	case XGBE_MDIO_RESET_NONE:
+	case XGBE_MDIO_RESET_I2C_GPIO:
+	case XGBE_MDIO_RESET_INT_GPIO:
+		break;
+	default:
+		axgbe_error("unsupported MDIO reset (%#x)\n",
+		    phy_data->mdio_reset);
+		return (-EINVAL);
+	}
+
+	if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) {
+		phy_data->mdio_reset_addr = XGBE_GPIO_ADDRESS_PCA9555 +
+		    XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET_I2C_ADDR);
+		phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+		    MDIO_RESET_I2C_GPIO);
+	} else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO)
+		phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+		    MDIO_RESET_INT_GPIO);
+
+	return (0);
+}
+
+/*
+ * Check that the provisioned port speeds are compatible with the port
+ * mode.  Returns true (mismatch) when none of the speeds acceptable
+ * for the mode is set, or the mode is unknown.
+ */
+static bool
+xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+	switch (phy_data->port_mode) {
+	case XGBE_PORT_MODE_BACKPLANE:
+		return (!(phy_data->port_speeds &
+		    (XGBE_PHY_PORT_SPEED_1000 | XGBE_PHY_PORT_SPEED_10000)));
+	case XGBE_PORT_MODE_BACKPLANE_2500:
+		return (!(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500));
+	case XGBE_PORT_MODE_1000BASE_T:
+		return (!(phy_data->port_speeds &
+		    (XGBE_PHY_PORT_SPEED_100 | XGBE_PHY_PORT_SPEED_1000)));
+	case XGBE_PORT_MODE_1000BASE_X:
+		return (!(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000));
+	case XGBE_PORT_MODE_NBASE_T:
+		return (!(phy_data->port_speeds &
+		    (XGBE_PHY_PORT_SPEED_100 | XGBE_PHY_PORT_SPEED_1000 |
+		    XGBE_PHY_PORT_SPEED_2500)));
+	case XGBE_PORT_MODE_10GBASE_T:
+		return (!(phy_data->port_speeds &
+		    (XGBE_PHY_PORT_SPEED_100 | XGBE_PHY_PORT_SPEED_1000 |
+		    XGBE_PHY_PORT_SPEED_10000)));
+	case XGBE_PORT_MODE_10GBASE_R:
+		return (!(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000));
+	case XGBE_PORT_MODE_SFP:
+		return (!(phy_data->port_speeds &
+		    (XGBE_PHY_PORT_SPEED_100 | XGBE_PHY_PORT_SPEED_1000 |
+		    XGBE_PHY_PORT_SPEED_10000)));
+	default:
+		return (true);
+	}
+}
+
+/*
+ * Check that the provisioned connection type matches the port mode.
+ * Each port mode is compatible with exactly one connection type;
+ * returns true (mismatch) otherwise or for unknown modes.
+ */
+static bool
+xgbe_phy_conn_type_mismatch(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+	switch (phy_data->port_mode) {
+	case XGBE_PORT_MODE_BACKPLANE:
+	case XGBE_PORT_MODE_BACKPLANE_2500:
+		return (phy_data->conn_type != XGBE_CONN_TYPE_BACKPLANE);
+	case XGBE_PORT_MODE_1000BASE_T:
+	case XGBE_PORT_MODE_1000BASE_X:
+	case XGBE_PORT_MODE_NBASE_T:
+	case XGBE_PORT_MODE_10GBASE_T:
+	case XGBE_PORT_MODE_10GBASE_R:
+		return (phy_data->conn_type != XGBE_CONN_TYPE_MDIO);
+	case XGBE_PORT_MODE_SFP:
+		return (phy_data->conn_type != XGBE_CONN_TYPE_SFP);
+	default:
+		return (true);
+	}
+}
+
+/*
+ * A port is usable only when both a speed set and a connection type
+ * have been provisioned in port property register 0.
+ */
+static bool
+xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
+{
+
+	return (XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS) != 0 &&
+	    XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE) != 0);
+}
+
+/*
+ * Re-enable CDR (clock and data recovery) tracking in the PMA after it
+ * was disabled by xgbe_phy_cdr_notrack().  No-op unless the AN/CDR
+ * workaround sysctl is enabled and tracking is currently off.  Waits
+ * phy_cdr_delay + 500 (DELAY units) before re-enabling.
+ */
+static void
+xgbe_phy_cdr_track(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+	axgbe_printf(2, "%s: an_cdr_workaround %d phy_cdr_notrack %d\n",
+	    __func__, pdata->sysctl_an_cdr_workaround, phy_data->phy_cdr_notrack);
+
+	if (!pdata->sysctl_an_cdr_workaround)
+		return;
+
+	if (!phy_data->phy_cdr_notrack)
+		return;
+
+	DELAY(phy_data->phy_cdr_delay + 500);
+
+	XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+	    XGBE_PMA_CDR_TRACK_EN_MASK, XGBE_PMA_CDR_TRACK_EN_ON);
+
+	phy_data->phy_cdr_notrack = 0;
+
+	axgbe_printf(2, "CDR TRACK DONE\n");
+}
+
+/*
+ * Disable CDR tracking in the PMA and kick a receiver reset cycle, as
+ * part of the KR auto-negotiation workaround.  No-op unless the
+ * workaround sysctl is enabled and tracking is currently on.
+ */
+static void
+xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+	axgbe_printf(2, "%s: an_cdr_workaround %d phy_cdr_notrack %d\n",
+	    __func__, pdata->sysctl_an_cdr_workaround, phy_data->phy_cdr_notrack);
+
+	if (!pdata->sysctl_an_cdr_workaround)
+		return;
+
+	if (phy_data->phy_cdr_notrack)
+		return;
+
+	XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+	    XGBE_PMA_CDR_TRACK_EN_MASK, XGBE_PMA_CDR_TRACK_EN_OFF);
+
+	xgbe_phy_rrc(pdata);
+
+	phy_data->phy_cdr_notrack = 1;
+}
+
+/*
+ * Hook run after KR training: re-enable CDR tracking, unless it was
+ * already re-enabled before training (sysctl_an_cdr_track_early).
+ */
+static void
+xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
+{
+
+	if (pdata->sysctl_an_cdr_track_early)
+		return;
+
+	xgbe_phy_cdr_track(pdata);
+}
+
+/*
+ * Hook run before KR training: re-enable CDR tracking early when the
+ * sysctl_an_cdr_track_early knob selects that ordering.
+ */
+static void
+xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
+{
+
+	if (!pdata->sysctl_an_cdr_track_early)
+		return;
+
+	xgbe_phy_cdr_track(pdata);
+}
+
+/*
+ * Hook run after auto-negotiation.  For CL73 AN in KR mode, re-enable
+ * CDR tracking; if AN did not finish (result is neither READY nor
+ * COMPLETE), bump the CDR re-enable delay for the next attempt,
+ * wrapping back to the initial value once the maximum is reached.
+ */
+static void
+xgbe_phy_an_post(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+	switch (pdata->an_mode) {
+	case XGBE_AN_MODE_CL73:
+	case XGBE_AN_MODE_CL73_REDRV:
+		if (phy_data->cur_mode != XGBE_MODE_KR)
+			break;
+
+		xgbe_phy_cdr_track(pdata);
+
+		switch (pdata->an_result) {
+		case XGBE_AN_READY:
+		case XGBE_AN_COMPLETE:
+			break;
+		default:
+			if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX)
+				phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC;
+			else
+				phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Hook run before auto-negotiation: for CL73 AN in KR mode, disable
+ * CDR tracking (part of the AN/CDR workaround).
+ */
+static void
+xgbe_phy_an_pre(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+	if (pdata->an_mode != XGBE_AN_MODE_CL73 &&
+	    pdata->an_mode != XGBE_AN_MODE_CL73_REDRV)
+		return;
+
+	if (phy_data->cur_mode == XGBE_MODE_KR)
+		xgbe_phy_cdr_notrack(pdata);
+}
+
+/*
+ * Quiesce the PHY layer: release any external PHY, clear cached SFP
+ * state, restore CDR tracking, power the PHY down and stop the I2C
+ * controller.
+ */
+static void
+xgbe_phy_stop(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+	/* If we have an external PHY, free it */
+	xgbe_phy_free_phy_device(pdata);
+
+	/* Reset SFP data */
+	xgbe_phy_sfp_reset(phy_data);
+	xgbe_phy_sfp_mod_absent(pdata);
+
+	/* Reset CDR support */
+	xgbe_phy_cdr_track(pdata);
+
+	/* Power off the PHY */
+	xgbe_phy_power_off(pdata);
+
+	/* Stop the I2C controller */
+	pdata->i2c_if.i2c_stop(pdata);
+}
+
+/*
+ * Bring the PHY layer up: start the I2C controller, configure the
+ * re-driver MDIO mode when one is present, enter the highest supported
+ * operating mode, restore CDR tracking, probe for an SFP module and
+ * attach any external PHY.  On external-PHY failure the I2C controller
+ * is stopped again.  Returns 0 on success or a negative error.
+ */
+static int
+xgbe_phy_start(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+	int ret;
+
+	axgbe_printf(2, "%s: redrv %d redrv_if %d start_mode %d\n", __func__,
+	    phy_data->redrv, phy_data->redrv_if, phy_data->start_mode);
+
+	/* Start the I2C controller */
+	ret = pdata->i2c_if.i2c_start(pdata);
+	if (ret) {
+		axgbe_error("%s: impl i2c start ret %d\n", __func__, ret);
+		return (ret);
+	}
+
+	/* Set the proper MDIO mode for the re-driver */
+	if (phy_data->redrv && !phy_data->redrv_if) {
+		ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr,
+		    XGBE_MDIO_MODE_CL22);
+		if (ret) {
+			axgbe_error("redriver mdio port not compatible (%u)\n",
+			    phy_data->redrv_addr);
+			return (ret);
+		}
+	}
+
+	/* Start in highest supported mode */
+	xgbe_phy_set_mode(pdata, phy_data->start_mode);
+
+	/* Reset CDR support */
+	xgbe_phy_cdr_track(pdata);
+
+	/* After starting the I2C controller, we can check for an SFP */
+	switch (phy_data->port_mode) {
+	case XGBE_PORT_MODE_SFP:
+		axgbe_printf(3, "%s: calling phy detect\n", __func__);
+		xgbe_phy_sfp_detect(pdata);
+		break;
+	default:
+		break;
+	}
+
+	/* If we have an external PHY, start it */
+	ret = xgbe_phy_find_phy_device(pdata);
+	if (ret) {
+		axgbe_error("%s: impl find phy dev ret %d\n", __func__, ret);
+		goto err_i2c;
+	}
+
+	axgbe_printf(3, "%s: impl return success\n", __func__);
+	return (0);
+
+err_i2c:
+	pdata->i2c_if.i2c_stop(pdata);
+
+	return (ret);
+}
+
+/*
+ * Reset the PHY by power cycling it back into its current operating
+ * mode.  When an external PHY is attached, additionally reset it
+ * through the provisioned MDIO reset mechanism.
+ */
+static int
+xgbe_phy_reset(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+	enum xgbe_mode cur_mode;
+	int ret;
+
+	/* Reset by power cycling the PHY */
+	cur_mode = phy_data->cur_mode;
+	xgbe_phy_power_off(pdata);
+	xgbe_phy_set_mode(pdata, cur_mode);
+
+	axgbe_printf(3, "%s: mode %d\n", __func__, cur_mode);
+	if (!phy_data->phydev) {
+		axgbe_printf(1, "%s: no phydev\n", __func__);
+		return (0);
+	}
+
+	/* Reset the external PHY */
+	ret = xgbe_phy_mdio_reset(pdata);
+	if (ret) {
+		axgbe_error("%s: mdio reset %d\n", __func__, ret);
+		return (ret);
+	}
+
+	axgbe_printf(3, "%s: return success\n", __func__);
+
+	return (0);
+}
+
+/*
+ * ifmedia status callback: refresh the miibus PHY state and report the
+ * active media and status words.  The MDIO spin mutex serializes
+ * access with other MDIO users.
+ */
+static void
+axgbe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+	struct axgbe_if_softc *sc;
+	struct xgbe_prv_data *pdata;
+	struct mii_data *mii;
+
+	sc = ifp->if_softc;
+	pdata = &sc->pdata;
+
+	axgbe_printf(2, "%s: Invoked\n", __func__);
+	mtx_lock_spin(&pdata->mdio_mutex);
+	mii = device_get_softc(pdata->axgbe_miibus);
+	axgbe_printf(2, "%s: media_active %#x media_status %#x\n", __func__,
+	    mii->mii_media_active, mii->mii_media_status);
+	mii_pollstat(mii);
+	ifmr->ifm_active = mii->mii_media_active;
+	ifmr->ifm_status = mii->mii_media_status;
+	mtx_unlock_spin(&pdata->mdio_mutex);
+}
+
+/*
+ * ifmedia change callback: reset every attached PHY instance, then
+ * program the newly selected media through miibus, serialized by the
+ * MDIO spin mutex.
+ */
+static int
+axgbe_ifmedia_upd(struct ifnet *ifp)
+{
+	struct xgbe_prv_data *pdata;
+	struct axgbe_if_softc *sc;
+	struct mii_data *mii;
+	struct mii_softc *miisc;
+	int ret;
+
+	sc = ifp->if_softc;
+	pdata = &sc->pdata;
+
+	axgbe_printf(2, "%s: Invoked\n", __func__);
+	mtx_lock_spin(&pdata->mdio_mutex);
+	mii = device_get_softc(pdata->axgbe_miibus);
+	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
+		PHY_RESET(miisc);
+	ret = mii_mediachg(mii);
+	mtx_unlock_spin(&pdata->mdio_mutex);
+
+	return (ret);
+}
+
+/*
+ * Tear down the PHY layer: detach the miibus child device (when one
+ * was attached) and release the phy_data allocation.
+ */
+static void
+xgbe_phy_exit(struct xgbe_prv_data *pdata)
+{
+	if (pdata->axgbe_miibus != NULL)
+		device_delete_child(pdata->dev, pdata->axgbe_miibus);
+
+	/* free phy_data structure */
+	free(pdata->phy_data, M_AXGBE);
+}
+
+static int
+xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data;
+ int ret;
+
+ /* Initialize the global lock */
+ if (!mtx_initialized(&xgbe_phy_comm_lock))
+ mtx_init(&xgbe_phy_comm_lock, "xgbe phy common lock", NULL, MTX_DEF);
+
+ /* Check if enabled */
+ if (!xgbe_phy_port_enabled(pdata)) {
+ axgbe_error("device is not enabled\n");
+ return (-ENODEV);
+ }
+
+ /* Initialize the I2C controller */
+ ret = pdata->i2c_if.i2c_init(pdata);
+ if (ret)
+ return (ret);
+
+ phy_data = malloc(sizeof(*phy_data), M_AXGBE, M_WAITOK | M_ZERO);
+ if (!phy_data)
+ return (-ENOMEM);
+ pdata->phy_data = phy_data;
+
+ phy_data->port_mode = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_MODE);
+ phy_data->port_id = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_ID);
+ phy_data->port_speeds = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS);
+ phy_data->conn_type = XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE);
+ phy_data->mdio_addr = XP_GET_BITS(pdata->pp0, XP_PROP_0, MDIO_ADDR);
+
+ pdata->mdio_addr = phy_data->mdio_addr;
+ DBGPR("port mode=%u\n", phy_data->port_mode);
+ DBGPR("port id=%u\n", phy_data->port_id);
+ DBGPR("port speeds=%#x\n", phy_data->port_speeds);
+ DBGPR("conn type=%u\n", phy_data->conn_type);
+ DBGPR("mdio addr=%u\n", phy_data->mdio_addr);
+
+ phy_data->redrv = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_PRESENT);
+ phy_data->redrv_if = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_IF);
+ phy_data->redrv_addr = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_ADDR);
+ phy_data->redrv_lane = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_LANE);
+ phy_data->redrv_model = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_MODEL);
+
+ if (phy_data->redrv) {
+ DBGPR("redrv present\n");
+ DBGPR("redrv i/f=%u\n", phy_data->redrv_if);
+ DBGPR("redrv addr=%#x\n", phy_data->redrv_addr);
+ DBGPR("redrv lane=%u\n", phy_data->redrv_lane);
+ DBGPR("redrv model=%u\n", phy_data->redrv_model);
+ }
+
+ DBGPR("%s: redrv addr=%#x redrv i/f=%u\n", __func__,
+ phy_data->redrv_addr, phy_data->redrv_if);
+ /* Validate the connection requested */
+ if (xgbe_phy_conn_type_mismatch(pdata)) {
+ axgbe_error("phy mode/connection mismatch "
+ "(%#x/%#x)\n", phy_data->port_mode, phy_data->conn_type);
+ return (-EINVAL);
+ }
+
+ /* Validate the mode requested */
+ if (xgbe_phy_port_mode_mismatch(pdata)) {
+ axgbe_error("phy mode/speed mismatch "
+ "(%#x/%#x)\n", phy_data->port_mode, phy_data->port_speeds);
+ return (-EINVAL);
+ }
+
+ /* Check for and validate MDIO reset support */
+ ret = xgbe_phy_mdio_reset_setup(pdata);
+ if (ret) {
+ axgbe_error("%s, mdio_reset_setup ret %d\n", __func__, ret);
+ return (ret);
+ }
+
+ /* Validate the re-driver information */
+ if (xgbe_phy_redrv_error(phy_data)) {
+ axgbe_error("phy re-driver settings error\n");
+ return (-EINVAL);
+ }
+ pdata->kr_redrv = phy_data->redrv;
+
+ /* Indicate current mode is unknown */
+ phy_data->cur_mode = XGBE_MODE_UNKNOWN;
+
+ /* Initialize supported features. Current code does not support ethtool */
+ XGBE_ZERO_SUP(&pdata->phy);
+
+ DBGPR("%s: port mode %d\n", __func__, phy_data->port_mode);
+ switch (phy_data->port_mode) {
+ /* Backplane support */
+ case XGBE_PORT_MODE_BACKPLANE:
+ XGBE_SET_SUP(&pdata->phy, Autoneg);
+ XGBE_SET_SUP(&pdata->phy, Pause);
+ XGBE_SET_SUP(&pdata->phy, Asym_Pause);
+ XGBE_SET_SUP(&pdata->phy, Backplane);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+ XGBE_SET_SUP(&pdata->phy, 1000baseKX_Full);
+ phy_data->start_mode = XGBE_MODE_KX_1000;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
+ XGBE_SET_SUP(&pdata->phy, 10000baseKR_Full);
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ XGBE_SET_SUP(&pdata->phy, 10000baseR_FEC);
+ phy_data->start_mode = XGBE_MODE_KR;
+ }
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
+ break;
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+ XGBE_SET_SUP(&pdata->phy, Pause);
+ XGBE_SET_SUP(&pdata->phy, Asym_Pause);
+ XGBE_SET_SUP(&pdata->phy, Backplane);
+ XGBE_SET_SUP(&pdata->phy, 2500baseX_Full);
+ phy_data->start_mode = XGBE_MODE_KX_2500;
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
+ break;
+
+ /* MDIO 1GBase-T support */
+ case XGBE_PORT_MODE_1000BASE_T:
+ XGBE_SET_SUP(&pdata->phy, Autoneg);
+ XGBE_SET_SUP(&pdata->phy, Pause);
+ XGBE_SET_SUP(&pdata->phy, Asym_Pause);
+ XGBE_SET_SUP(&pdata->phy, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
+ XGBE_SET_SUP(&pdata->phy, 100baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+ XGBE_SET_SUP(&pdata->phy, 1000baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_1000;
+ }
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
+ break;
+
+ /* MDIO Base-X support */
+ case XGBE_PORT_MODE_1000BASE_X:
+ XGBE_SET_SUP(&pdata->phy, Autoneg);
+ XGBE_SET_SUP(&pdata->phy, Pause);
+ XGBE_SET_SUP(&pdata->phy, Asym_Pause);
+ XGBE_SET_SUP(&pdata->phy, FIBRE);
+ XGBE_SET_SUP(&pdata->phy, 1000baseX_Full);
+ phy_data->start_mode = XGBE_MODE_X;
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
+ break;
+
+ /* MDIO NBase-T support */
+ case XGBE_PORT_MODE_NBASE_T:
+ XGBE_SET_SUP(&pdata->phy, Autoneg);
+ XGBE_SET_SUP(&pdata->phy, Pause);
+ XGBE_SET_SUP(&pdata->phy, Asym_Pause);
+ XGBE_SET_SUP(&pdata->phy, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
+ XGBE_SET_SUP(&pdata->phy, 100baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+ XGBE_SET_SUP(&pdata->phy, 1000baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) {
+ XGBE_SET_SUP(&pdata->phy, 2500baseT_Full);
+ phy_data->start_mode = XGBE_MODE_KX_2500;
+ }
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL45;
+ break;
+
+ /* 10GBase-T support */
+ case XGBE_PORT_MODE_10GBASE_T:
+ XGBE_SET_SUP(&pdata->phy, Autoneg);
+ XGBE_SET_SUP(&pdata->phy, Pause);
+ XGBE_SET_SUP(&pdata->phy, Asym_Pause);
+ XGBE_SET_SUP(&pdata->phy, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
+ XGBE_SET_SUP(&pdata->phy, 100baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+ XGBE_SET_SUP(&pdata->phy, 1000baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
+ XGBE_SET_SUP(&pdata->phy, 10000baseT_Full);
+ phy_data->start_mode = XGBE_MODE_KR;
+ }
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL45;
+ break;
+
+ /* 10GBase-R support */
+ case XGBE_PORT_MODE_10GBASE_R:
+ XGBE_SET_SUP(&pdata->phy, Autoneg);
+ XGBE_SET_SUP(&pdata->phy, Pause);
+ XGBE_SET_SUP(&pdata->phy, Asym_Pause);
+ XGBE_SET_SUP(&pdata->phy, FIBRE);
+ XGBE_SET_SUP(&pdata->phy, 10000baseSR_Full);
+ XGBE_SET_SUP(&pdata->phy, 10000baseLR_Full);
+ XGBE_SET_SUP(&pdata->phy, 10000baseLRM_Full);
+ XGBE_SET_SUP(&pdata->phy, 10000baseER_Full);
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ XGBE_SET_SUP(&pdata->phy, 10000baseR_FEC);
+ phy_data->start_mode = XGBE_MODE_SFI;
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
+ break;
+
+ /* SFP support */
+ case XGBE_PORT_MODE_SFP:
+ XGBE_SET_SUP(&pdata->phy, Autoneg);
+ XGBE_SET_SUP(&pdata->phy, Pause);
+ XGBE_SET_SUP(&pdata->phy, Asym_Pause);
+ XGBE_SET_SUP(&pdata->phy, TP);
+ XGBE_SET_SUP(&pdata->phy, FIBRE);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
+ phy_data->start_mode = XGBE_MODE_SGMII_100;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+ phy_data->start_mode = XGBE_MODE_SGMII_1000;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
+ phy_data->start_mode = XGBE_MODE_SFI;
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
+
+ xgbe_phy_sfp_setup(pdata);
+ DBGPR("%s: start %d mode %d adv 0x%x\n", __func__,
+ phy_data->start_mode, phy_data->phydev_mode,
+ pdata->phy.advertising);
+ break;
+ default:
+ return (-EINVAL);
+ }
+
+ axgbe_printf(2, "%s: start %d mode %d adv 0x%x\n", __func__,
+ phy_data->start_mode, phy_data->phydev_mode, pdata->phy.advertising);
+
+ DBGPR("%s: conn type %d mode %d\n", __func__,
+ phy_data->conn_type, phy_data->phydev_mode);
+ if ((phy_data->conn_type & XGBE_CONN_TYPE_MDIO) &&
+ (phy_data->phydev_mode != XGBE_MDIO_MODE_NONE)) {
+ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr,
+ phy_data->phydev_mode);
+ if (ret) {
+ axgbe_error("mdio port/clause not compatible (%d/%u)\n",
+ phy_data->mdio_addr, phy_data->phydev_mode);
+ return (-EINVAL);
+ }
+ }
+
+ if (phy_data->redrv && !phy_data->redrv_if) {
+ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr,
+ XGBE_MDIO_MODE_CL22);
+ if (ret) {
+ axgbe_error("redriver mdio port not compatible (%u)\n",
+ phy_data->redrv_addr);
+ return (-EINVAL);
+ }
+ }
+
+ phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
+
+ if (phy_data->port_mode != XGBE_PORT_MODE_SFP) {
+ ret = mii_attach(pdata->dev, &pdata->axgbe_miibus, pdata->netdev,
+ (ifm_change_cb_t)axgbe_ifmedia_upd,
+ (ifm_stat_cb_t)axgbe_ifmedia_sts, BMSR_DEFCAPMASK,
+ pdata->mdio_addr, MII_OFFSET_ANY, MIIF_FORCEANEG);
+
+ if (ret){
+ axgbe_printf(2, "mii attach failed with err=(%d)\n", ret);
+ return (-EINVAL);
+ }
+ }
+
+ DBGPR("%s: return success\n", __func__);
+
+ return (0);
+}
+
+void
+xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
+{
+ struct xgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
+
+ phy_impl->init = xgbe_phy_init;
+ phy_impl->exit = xgbe_phy_exit;
+
+ phy_impl->reset = xgbe_phy_reset;
+ phy_impl->start = xgbe_phy_start;
+ phy_impl->stop = xgbe_phy_stop;
+
+ phy_impl->link_status = xgbe_phy_link_status;
+
+ phy_impl->valid_speed = xgbe_phy_valid_speed;
+
+ phy_impl->use_mode = xgbe_phy_use_mode;
+ phy_impl->set_mode = xgbe_phy_set_mode;
+ phy_impl->get_mode = xgbe_phy_get_mode;
+ phy_impl->switch_mode = xgbe_phy_switch_mode;
+ phy_impl->cur_mode = xgbe_phy_cur_mode;
+ phy_impl->get_type = xgbe_phy_get_type;
+
+ phy_impl->an_mode = xgbe_phy_an_mode;
+
+ phy_impl->an_config = xgbe_phy_an_config;
+
+ phy_impl->an_advertising = xgbe_phy_an_advertising;
+
+ phy_impl->an_outcome = xgbe_phy_an_outcome;
+
+ phy_impl->an_pre = xgbe_phy_an_pre;
+ phy_impl->an_post = xgbe_phy_an_post;
+
+ phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
+ phy_impl->kr_training_post = xgbe_phy_kr_training_post;
+
+ phy_impl->module_info = xgbe_phy_module_info;
+ phy_impl->module_eeprom = xgbe_phy_module_eeprom;
+}
diff --git a/sys/dev/axgbe/xgbe-ptp.c b/sys/dev/axgbe/xgbe-ptp.c
new file mode 100644
index 000000000000..a2d2a8b0e05e
--- /dev/null
+++ b/sys/dev/axgbe/xgbe-ptp.c
@@ -0,0 +1,276 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "xgbe.h"
+
+static u64 xgbe_cc_read(const struct cyclecounter *cc)
+{
+ struct xgbe_prv_data *pdata = container_of(cc,
+ struct xgbe_prv_data,
+ tstamp_cc);
+ u64 nsec;
+
+ nsec = pdata->hw_if.get_tstamp_time(pdata);
+
+ return (nsec);
+}
+
+static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 adjust;
+ u32 addend, diff;
+ unsigned int neg_adjust = 0;
+
+ if (delta < 0) {
+ neg_adjust = 1;
+ delta = -delta;
+ }
+
+ adjust = pdata->tstamp_addend;
+ adjust *= delta;
+ diff = div_u64(adjust, 1000000000UL);
+
+ addend = (neg_adjust) ? pdata->tstamp_addend - diff :
+ pdata->tstamp_addend + diff;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ pdata->hw_if.update_tstamp_addend(pdata, addend);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return (0);
+}
+
+static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ timecounter_adjtime(&pdata->tstamp_tc, delta);
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return (0);
+}
+
+static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 nsec;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ nsec = timecounter_read(&pdata->tstamp_tc);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return (0);
+}
+
+static int xgbe_settime(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 nsec;
+
+ nsec = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return (0);
+}
+
/*
 * PTP ancillary-feature callback (PPS, external timestamp requests,
 * periodic outputs).  Not implemented by this driver; every request is
 * rejected.
 */
static int xgbe_enable(struct ptp_clock_info *info,
    void *request, int on)
{
	return (-EOPNOTSUPP);
}
+
/*
 * Initialize the PTP clock support: fill in the ptp_clock_info callback
 * table, compute the hardware addend for the 50MHz reference, set up
 * the cyclecounter/timecounter pair, and disable all timestamping.
 * Registration with a PTP clock framework is compiled out (#if 0) in
 * this port, so the callbacks are currently reachable only in-driver.
 */
void xgbe_ptp_register(struct xgbe_prv_data *pdata)
{
	struct ptp_clock_info *info = &pdata->ptp_clock_info;
	//struct ptp_clock *clock;
	struct cyclecounter *cc = &pdata->tstamp_cc;
	u64 dividend;

	snprintf(info->name, sizeof(info->name), "axgbe-ptp");
	//info->owner = THIS_MODULE;
	info->max_adj = pdata->ptpclk_rate;
	info->adjfreq = xgbe_adjfreq;
	info->adjtime = xgbe_adjtime;
	info->gettime64 = xgbe_gettime;
	info->settime64 = xgbe_settime;
	info->enable = xgbe_enable;
#if 0
	clock = ptp_clock_register(info, pdata->dev);
	if (IS_ERR(clock)) {
		dev_err(pdata->dev, "ptp_clock_register failed\n");
		return;
	}

	pdata->ptp_clock = clock;
#endif
	/* Calculate the addend:
	 * addend = 2^32 / (PTP ref clock / 50Mhz)
	 *	= (2^32 * 50Mhz) / PTP ref clock
	 */
	dividend = 50000000;
	dividend <<= 32;
	pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);

	/* Setup the timecounter */
	cc->read = xgbe_cc_read;
	cc->mask = CLOCKSOURCE_MASK(64);
	/* mult/shift of 1/0: hardware counter is already in nanoseconds. */
	cc->mult = 1;
	cc->shift = 0;

	timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
	    ktime_to_ns(ktime_get_real()));

	/* Disable all timestamping to start */
	XGMAC_IOWRITE(pdata, MAC_TSCR, 0);
	pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
	pdata->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
}
+
/*
 * Tear down PTP clock support.  The clock framework registration is
 * compiled out (#if 0) in this port, so this is currently a no-op kept
 * for symmetry with xgbe_ptp_register().
 */
void xgbe_ptp_unregister(struct xgbe_prv_data *pdata)
{
#if 0
	if (pdata->ptp_clock)
		ptp_clock_unregister(pdata->ptp_clock);
#endif
}
diff --git a/sys/dev/axgbe/xgbe-sysctl.c b/sys/dev/axgbe/xgbe-sysctl.c
new file mode 100644
index 000000000000..eee7c61170de
--- /dev/null
+++ b/sys/dev/axgbe/xgbe-sysctl.c
@@ -0,0 +1,1715 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Advanced Micro Devices, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Contact Information :
+ * Rajesh Kumar <rajesh1.kumar@amd.com>
+ * Arpan Palit <Arpan.Palit@amd.com>
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/sysctl.h>
+#include <sys/sbuf.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
/* Maximum length (including NUL) of a user-supplied sysctl command string. */
#define SYSCTL_BUF_LEN 64

/*
 * Identifier for every tunable handled by the sysctl command parser.
 * Values start at 1 and must stay in sync with the switch in
 * fill_data(), which dispatches on these numeric values.
 */
typedef enum{
	/* Coalesce flag */
	rx_coalesce_usecs = 1,
	rx_max_coalesced_frames,
	rx_coalesce_usecs_irq,
	rx_max_coalesced_frames_irq,
	tx_coalesce_usecs,
	tx_max_coalesced_frames,
	tx_coalesce_usecs_irq,
	tx_max_coalesced_frames_irq,
	stats_block_coalesce_usecs,
	use_adaptive_rx_coalesce,
	use_adaptive_tx_coalesce,
	pkt_rate_low,
	rx_coalesce_usecs_low,
	rx_max_coalesced_frames_low,
	tx_coalesce_usecs_low,
	tx_max_coalesced_frames_low,
	pkt_rate_high,
	rx_coalesce_usecs_high,
	rx_max_coalesced_frames_high,
	tx_coalesce_usecs_high,
	tx_max_coalesced_frames_high,
	rate_sample_interval,

	/* Pause flag */
	autoneg,
	tx_pause,
	rx_pause,

	/* link settings */
	speed,
	duplex,

	/* Ring settings */
	rx_pending,
	rx_mini_pending,
	rx_jumbo_pending,
	tx_pending,

	/* Channels settings */
	rx_count,
	tx_count,
	other_count,
	combined_count,
} sysctl_variable_t;

/* Value encodings understood by parse_generic_sysctl(). */
typedef enum {
	SYSL_NONE,
	SYSL_BOOL,
	SYSL_S32,
	SYSL_U8,
	SYSL_U16,
	SYSL_U32,
	SYSL_U64,
	SYSL_BE16,
	SYSL_IP4,
	SYSL_STR,
	SYSL_FLAG,
	SYSL_MAC,
} sysctl_type_t;

/* One row of a sysctl option table (see COALESCE_SYSCTL_INFO below). */
struct sysctl_info {
	uint8_t name[32];	/* option name as typed by the user */
	sysctl_type_t type;	/* how to parse the value string */
	sysctl_variable_t flag;	/* which sys_op field to fill */
	uint8_t support[16];	/* "supported" / "not-supported" */
};
+
/*
 * Aggregate of every value the sysctl command interface can get or set;
 * filled by fill_data() and consumed by the individual handlers.  A
 * single file-scope instance (sys_op) is shared via pdata->sys_op.
 */
struct sysctl_op {
	/* Coalesce options */
	unsigned int rx_coalesce_usecs;
	unsigned int rx_max_coalesced_frames;
	unsigned int rx_coalesce_usecs_irq;
	unsigned int rx_max_coalesced_frames_irq;
	unsigned int tx_coalesce_usecs;
	unsigned int tx_max_coalesced_frames;
	unsigned int tx_coalesce_usecs_irq;
	unsigned int tx_max_coalesced_frames_irq;
	unsigned int stats_block_coalesce_usecs;
	unsigned int use_adaptive_rx_coalesce;
	unsigned int use_adaptive_tx_coalesce;
	unsigned int pkt_rate_low;
	unsigned int rx_coalesce_usecs_low;
	unsigned int rx_max_coalesced_frames_low;
	unsigned int tx_coalesce_usecs_low;
	unsigned int tx_max_coalesced_frames_low;
	unsigned int pkt_rate_high;
	unsigned int rx_coalesce_usecs_high;
	unsigned int rx_max_coalesced_frames_high;
	unsigned int tx_coalesce_usecs_high;
	unsigned int tx_max_coalesced_frames_high;
	unsigned int rate_sample_interval;

	/* Pause options */
	unsigned int autoneg;
	unsigned int tx_pause;
	unsigned int rx_pause;

	/* Link settings options */
	unsigned int speed;
	unsigned int duplex;

	/* Ring param options */
	unsigned int rx_max_pending;
	unsigned int rx_mini_max_pending;
	unsigned int rx_jumbo_max_pending;
	unsigned int tx_max_pending;
	unsigned int rx_pending;
	unsigned int rx_mini_pending;
	unsigned int rx_jumbo_pending;
	unsigned int tx_pending;

	/* Channels options */
	unsigned int max_rx;
	unsigned int max_tx;
	unsigned int max_other;
	unsigned int max_combined;
	unsigned int rx_count;
	unsigned int tx_count;
	unsigned int other_count;
	unsigned int combined_count;
} sys_op;
+
#define GSTRING_LEN 32

/* Name plus size/offset descriptor for one exported statistics counter. */
struct xgbe_stats {
	char stat_string[GSTRING_LEN];
	int stat_size;
	int stat_offset;
};

#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))

/* Describe an MMC (hardware MAC) counter inside pdata->mmc_stats. */
#define XGMAC_MMC_STAT(_string, _var)				\
	{ _string,						\
	  FIELD_SIZEOF(struct xgbe_mmc_stats, _var),		\
	  offsetof(struct xgbe_prv_data, mmc_stats._var),	\
	}

/* Describe a driver-maintained counter inside pdata->ext_stats. */
#define XGMAC_EXT_STAT(_string, _var)				\
	{ _string,						\
	  FIELD_SIZEOF(struct xgbe_ext_stats, _var),		\
	  offsetof(struct xgbe_prv_data, ext_stats._var),	\
	}
/* Table of every statistic exported through the sysctl interface. */
static const struct xgbe_stats xgbe_gstring_stats[] = {
	XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	XGMAC_MMC_STAT("tx_packets", txframecount_gb),
	XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	XGMAC_EXT_STAT("tx_vxlan_packets", tx_vxlan_packets),
	XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets),
	XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	XGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	XGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	XGMAC_EXT_STAT("rx_vxlan_packets", rx_vxlan_packets),
	XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	XGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	XGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	XGMAC_EXT_STAT("rx_csum_errors", rx_csum_errors),
	XGMAC_EXT_STAT("rx_vxlan_csum_errors", rx_vxlan_csum_errors),
	XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
	XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
	XGMAC_EXT_STAT("rx_buffer_unavailable", rx_buffer_unavailable),
};

#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)

/* Shared helpers used by the command-string-style sysctl handlers. */
char** alloc_sysctl_buffer(void);
void get_val(char *buf, char **op, char **val, int *n_op);
void fill_data(struct sysctl_op *sys_op, int flag, unsigned int value);
+
/*
 * Report a malformed sysctl command string and return the common
 * parse-failure error code used by the handlers in this file.
 * (Fixes the "option (s)" spacing typo in the message.)
 */
static int
exit_bad_op(void)
{

	printf("SYSCTL: bad command line option(s)\n");
	return (-EINVAL);
}
+
/*
 * Find-last-set for an unsigned long: dispatch to the 32- or 64-bit
 * primitive depending on the width of 'long' on this platform.
 */
static inline unsigned
fls_long(unsigned long l)
{

	return (sizeof(l) == 4 ? fls(l) : fls64(l));
}
+
/*
 * Round 'n' down to the nearest power of two (undefined for n == 0,
 * as with the kernel helper of the same name).
 */
static inline __attribute__((const))
unsigned long __rounddown_pow_of_two(unsigned long n)
{
	unsigned int top;

	/* The highest set bit alone gives the rounded-down power of two. */
	top = fls_long(n) - 1;
	return (1UL << top);
}
+
+static inline int
+get_ubuf(struct sysctl_req *req, char *ubuf)
+{
+ int rc;
+
+ printf("%s: len:0x%li idx:0x%li\n", __func__, req->newlen,
+ req->newidx);
+ if (req->newlen >= SYSCTL_BUF_LEN)
+ return (-EINVAL);
+
+ rc = SYSCTL_IN(req, ubuf, req->newlen);
+ if (rc)
+ return (rc);
+ ubuf[req->newlen] = '\0';
+
+ return (0);
+}
+
+char**
+alloc_sysctl_buffer(void)
+{
+ char **buffer;
+ int i;
+
+ buffer = malloc(sizeof(char *)*32, M_AXGBE, M_WAITOK | M_ZERO);
+ for(i = 0; i < 32; i++)
+ buffer[i] = malloc(sizeof(char)*32, M_AXGBE, M_WAITOK | M_ZERO);
+
+ return (buffer);
+}
+
/*
 * Tokenize a sysctl command string of the form "opt1 val1 opt2 val2 ..."
 * into the parallel option/value string arrays 'op' and 'val' (as
 * allocated by alloc_sysctl_buffer()).  '*n_op' is set to the number of
 * pairs found (a trailing option with no value counts as one).
 *
 * Fix: NUL-terminate each token at index 'j' (one past the last copied
 * character).  The previous code wrote the terminator at 'j + 1',
 * leaving op[..][j] untouched and relying on the destination buffers
 * being pre-zeroed for the strings to be terminated correctly.
 */
void
get_val(char *buf, char **op, char **val, int *n_op)
{
	int blen = strlen(buf);
	int count = 0;
	int i, j;

	*n_op = 0;
	for (i = 0; i < blen; i++) {
		count++;
		/* Get sysctl command option */
		for (j = 0; buf[i] != ' '; j++) {
			if (i >= blen)
				break;
			op[*n_op][j] = buf[i++];
		}
		op[*n_op][j] = '\0';
		if (i >= blen)
			goto out;

		/* Get sysctl value */
		i++;
		for (j = 0; buf[i] != ' '; j++) {
			if (i >= blen)
				break;
			val[*n_op][j] = buf[i++];
		}
		val[*n_op][j] = '\0';
		if (i >= blen)
			goto out;

		*n_op = count;
	}

out:
	*n_op = count;
}
+
+void
+fill_data(struct sysctl_op *sys_op, int flag, unsigned int value)
+{
+
+ switch(flag) {
+ case 1:
+ sys_op->rx_coalesce_usecs = value;
+ break;
+ case 2:
+ sys_op->rx_max_coalesced_frames = value;
+ break;
+ case 3:
+ sys_op->rx_coalesce_usecs_irq = value;
+ break;
+ case 4:
+ sys_op->rx_max_coalesced_frames_irq = value;
+ break;
+ case 5:
+ sys_op->tx_coalesce_usecs = value;
+ break;
+ case 6:
+ sys_op->tx_max_coalesced_frames = value;
+ break;
+ case 7:
+ sys_op->tx_coalesce_usecs_irq = value;
+ break;
+ case 8:
+ sys_op->tx_max_coalesced_frames_irq = value;
+ break;
+ case 9:
+ sys_op->stats_block_coalesce_usecs = value;
+ break;
+ case 10:
+ sys_op->use_adaptive_rx_coalesce = value;
+ break;
+ case 11:
+ sys_op->use_adaptive_tx_coalesce = value;
+ break;
+ case 12:
+ sys_op->pkt_rate_low = value;
+ break;
+ case 13:
+ sys_op->rx_coalesce_usecs_low = value;
+ break;
+ case 14:
+ sys_op->rx_max_coalesced_frames_low = value;
+ break;
+ case 15:
+ sys_op->tx_coalesce_usecs_low = value;
+ break;
+ case 16:
+ sys_op->tx_max_coalesced_frames_low = value;
+ break;
+ case 17:
+ sys_op->pkt_rate_high = value;
+ break;
+ case 18:
+ sys_op->rx_coalesce_usecs_high = value;
+ break;
+ case 19:
+ sys_op->rx_max_coalesced_frames_high = value;
+ break;
+ case 20:
+ sys_op->tx_coalesce_usecs_high = value;
+ break;
+ case 21:
+ sys_op->tx_max_coalesced_frames_high = value;
+ break;
+ case 22:
+ sys_op->rate_sample_interval = value;
+ break;
+ case 23:
+ sys_op->autoneg = value;
+ break;
+ case 24:
+ sys_op->rx_pause = value;
+ break;
+ case 25:
+ sys_op->tx_pause = value;
+ break;
+ case 26:
+ sys_op->speed = value;
+ break;
+ case 27:
+ sys_op->duplex = value;
+ break;
+ case 28:
+ sys_op->rx_pending = value;
+ break;
+ case 29:
+ sys_op->rx_mini_pending = value;
+ break;
+ case 30:
+ sys_op->rx_jumbo_pending = value;
+ break;
+ case 31:
+ sys_op->tx_pending = value;
+ break;
+ default:
+ printf("Option error\n");
+ }
+}
+
+static int
+parse_generic_sysctl(struct xgbe_prv_data *pdata, char *buf,
+ struct sysctl_info *info, unsigned int n_info)
+{
+ struct sysctl_op *sys_op = pdata->sys_op;
+ unsigned int value;
+ char **op, **val;
+ int n_op = 0;
+ int rc = 0;
+ int i, idx;
+
+ op = alloc_sysctl_buffer();
+ val = alloc_sysctl_buffer();
+ get_val(buf, op, val, &n_op);
+
+ for (i = 0; i < n_op; i++) {
+ for (idx = 0; idx < n_info; idx++) {
+ if (strcmp(info[idx].name, op[i]) == 0) {
+ if (strcmp(info[idx].support,
+ "not-supported") == 0){
+ axgbe_printf(1, "ignoring not-supported "
+ "option \"%s\"\n", info[idx].name);
+ break;
+ }
+ switch(info[idx].type) {
+ case SYSL_BOOL: {
+ if (!strcmp(val[i], "on"))
+ fill_data(sys_op,
+ info[idx].flag, 1);
+ else if (!strcmp(val[i], "off"))
+ fill_data(sys_op,
+ info[idx].flag, 0);
+ else
+ rc = exit_bad_op();
+ break;
+ }
+ case SYSL_S32:
+ sscanf(val[i], "%u", &value);
+ fill_data(sys_op, info[idx].flag, value);
+ break;
+ case SYSL_U8:
+ if (!strcmp(val[i], "half"))
+ fill_data(sys_op,
+ info[idx].flag, DUPLEX_HALF);
+ else if (!strcmp(val[i], "full"))
+ fill_data(sys_op,
+ info[idx].flag, DUPLEX_FULL);
+ else
+ exit_bad_op();
+ default:
+ rc = exit_bad_op();
+ }
+ }
+ }
+ }
+
+ for(i = 0; i < 32; i++)
+ free(op[i], M_AXGBE);
+ free(op, M_AXGBE);
+
+ for(i = 0; i < 32; i++)
+ free(val[i], M_AXGBE);
+ free(val, M_AXGBE);
+ return (rc);
+}
+
+
+static int
+sysctl_xgmac_reg_addr_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+ ssize_t buf_size = 64;
+ char buf[buf_size];
+ struct sbuf *sb;
+ unsigned int reg;
+ int rc = 0;
+
+ if (req->newptr == NULL) {
+ sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+ if (sb == NULL) {
+ rc = sb->s_error;
+ return (rc);
+ }
+
+ axgbe_printf(2, "READ: %s: sysctl_xgmac_reg: 0x%x\n", __func__,
+ pdata->sysctl_xgmac_reg);
+ sbuf_printf(sb, "\nXGMAC reg_addr: 0x%x\n",
+ pdata->sysctl_xgmac_reg);
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+ return (rc);
+ }
+
+ rc = get_ubuf(req, buf);
+ if (rc == 0) {
+ sscanf(buf, "%x", &reg);
+ axgbe_printf(2, "WRITE: %s: reg: 0x%x\n", __func__, reg);
+ pdata->sysctl_xgmac_reg = reg;
+ }
+
+ axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+ return (rc);
+}
+
+static int
+sysctl_get_drv_info_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+ struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
+ ssize_t buf_size = 64;
+ struct sbuf *sb;
+ int rc = 0;
+
+ if (req->newptr == NULL) {
+ sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+ if (sb == NULL) {
+ rc = sb->s_error;
+ return (rc);
+ }
+
+ sbuf_printf(sb, "\ndriver: %s", XGBE_DRV_NAME);
+ sbuf_printf(sb, "\nversion: %s", XGBE_DRV_VERSION);
+ sbuf_printf(sb, "\nfirmware-version: %d.%d.%d",
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
+ sbuf_printf(sb, "\nbus-info: %04d:%02d:%02d",
+ pdata->pcie_bus, pdata->pcie_device, pdata->pcie_func);
+
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+ return (rc);
+ }
+
+ return (-EINVAL);
+}
+
+static int
+sysctl_get_link_info_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+ ssize_t buf_size = 64;
+ struct sbuf *sb;
+ int rc = 0;
+
+ if (req->newptr == NULL) {
+ sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+ if (sb == NULL) {
+ rc = sb->s_error;
+ return (rc);
+ }
+
+ sbuf_printf(sb, "\nLink is %s", pdata->phy.link ? "Up" : "Down");
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+ return (0);
+ }
+
+ return (-EINVAL);
+}
+
/*
 * Option table for the "coalesce" sysctl handler: maps ethtool-style
 * option names to their sysctl_variable_t flag, value type, and a tag
 * saying whether the driver currently supports changing them.
 */
#define COALESCE_SYSCTL_INFO(__coalop)							\
{											\
	{ "adaptive-rx", SYSL_BOOL, use_adaptive_rx_coalesce, "not-supported" },	\
	{ "adaptive-tx", SYSL_BOOL, use_adaptive_tx_coalesce, "not-supported" },	\
	{ "sample-interval", SYSL_S32, rate_sample_interval, "not-supported" },		\
	{ "stats-block-usecs", SYSL_S32, stats_block_coalesce_usecs, "not-supported" },	\
	{ "pkt-rate-low", SYSL_S32, pkt_rate_low, "not-supported" },			\
	{ "pkt-rate-high", SYSL_S32, pkt_rate_high, "not-supported" },			\
	{ "rx-usecs", SYSL_S32, rx_coalesce_usecs, "supported" },			\
	{ "rx-frames", SYSL_S32, rx_max_coalesced_frames, "supported" },		\
	{ "rx-usecs-irq", SYSL_S32, rx_coalesce_usecs_irq, "not-supported" },		\
	{ "rx-frames-irq", SYSL_S32, rx_max_coalesced_frames_irq, "not-supported" },	\
	{ "tx-usecs", SYSL_S32, tx_coalesce_usecs, "not-supported" },			\
	{ "tx-frames", SYSL_S32, tx_max_coalesced_frames, "supported" },		\
	{ "tx-usecs-irq", SYSL_S32, tx_coalesce_usecs_irq, "not-supported" },		\
	{ "tx-frames-irq", SYSL_S32, tx_max_coalesced_frames_irq, "not-supported" },	\
	{ "rx-usecs-low", SYSL_S32, rx_coalesce_usecs_low, "not-supported" },		\
	{ "rx-frames-low", SYSL_S32, rx_max_coalesced_frames_low, "not-supported"},	\
	{ "tx-usecs-low", SYSL_S32, tx_coalesce_usecs_low, "not-supported" },		\
	{ "tx-frames-low", SYSL_S32, tx_max_coalesced_frames_low, "not-supported" },	\
	{ "rx-usecs-high", SYSL_S32, rx_coalesce_usecs_high, "not-supported" },		\
	{ "rx-frames-high", SYSL_S32, rx_max_coalesced_frames_high, "not-supported" },	\
	{ "tx-usecs-high", SYSL_S32, tx_coalesce_usecs_high, "not-supported" },		\
	{ "tx-frames-high", SYSL_S32, tx_max_coalesced_frames_high, "not-supported" },	\
}
+
+/*
+ * Sysctl handler for interrupt coalescing.  A read dumps all coalescing
+ * parameters; a write parses the COALESCE_SYSCTL_INFO tokens, validates
+ * the Rx/Tx bounds and programs the hardware coalescing registers.
+ */
+static int
+sysctl_coalesce_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	struct xgbe_hw_if *hw_if = &pdata->hw_if;
+	struct sysctl_op *sys_op = pdata->sys_op;
+	struct sysctl_info sysctl_coalesce[] = COALESCE_SYSCTL_INFO(coalop);
+	unsigned int rx_frames, rx_riwt, rx_usecs;
+	unsigned int tx_frames;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			/* Fix: sb is NULL here; reading sb->s_error faulted. */
+			return (-ENOMEM);
+		}
+		sys_op->rx_coalesce_usecs = pdata->rx_usecs;
+		sys_op->rx_max_coalesced_frames = pdata->rx_frames;
+		sys_op->tx_max_coalesced_frames = pdata->tx_frames;
+
+		sbuf_printf(sb, "\nAdaptive RX: %s  TX: %s\n",
+		    sys_op->use_adaptive_rx_coalesce ? "on" : "off",
+		    sys_op->use_adaptive_tx_coalesce ? "on" : "off");
+
+		sbuf_printf(sb, "stats-block-usecs: %u\n"
+		    "sample-interval: %u\n"
+		    "pkt-rate-low: %u\n"
+		    "pkt-rate-high: %u\n"
+		    "\n"
+		    "rx-usecs: %u\n"
+		    "rx-frames: %u\n"
+		    "rx-usecs-irq: %u\n"
+		    "rx-frames-irq: %u\n"
+		    "\n"
+		    "tx-usecs: %u\n"
+		    "tx-frames: %u\n"
+		    "tx-usecs-irq: %u\n"
+		    "tx-frames-irq: %u\n"
+		    "\n"
+		    "rx-usecs-low: %u\n"
+		    "rx-frames-low: %u\n"
+		    "tx-usecs-low: %u\n"
+		    "tx-frames-low: %u\n"
+		    "\n"
+		    "rx-usecs-high: %u\n"
+		    "rx-frames-high: %u\n"
+		    "tx-usecs-high: %u\n"
+		    "tx-frames-high: %u\n",
+		    sys_op->stats_block_coalesce_usecs,
+		    sys_op->rate_sample_interval,
+		    sys_op->pkt_rate_low,
+		    sys_op->pkt_rate_high,
+
+		    sys_op->rx_coalesce_usecs,
+		    sys_op->rx_max_coalesced_frames,
+		    sys_op->rx_coalesce_usecs_irq,
+		    sys_op->rx_max_coalesced_frames_irq,
+
+		    sys_op->tx_coalesce_usecs,
+		    sys_op->tx_max_coalesced_frames,
+		    sys_op->tx_coalesce_usecs_irq,
+		    sys_op->tx_max_coalesced_frames_irq,
+
+		    sys_op->rx_coalesce_usecs_low,
+		    sys_op->rx_max_coalesced_frames_low,
+		    sys_op->tx_coalesce_usecs_low,
+		    sys_op->tx_max_coalesced_frames_low,
+
+		    sys_op->rx_coalesce_usecs_high,
+		    sys_op->rx_max_coalesced_frames_high,
+		    sys_op->tx_coalesce_usecs_high,
+		    sys_op->tx_max_coalesced_frames_high);
+
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		/* Fix: propagate sbuf_finish() result instead of returning 0. */
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		parse_generic_sysctl(pdata, buf, sysctl_coalesce,
+		    ARRAY_SIZE(sysctl_coalesce));
+
+		rx_riwt = hw_if->usec_to_riwt(pdata, sys_op->rx_coalesce_usecs);
+		rx_usecs = sys_op->rx_coalesce_usecs;
+		rx_frames = sys_op->rx_max_coalesced_frames;
+
+		/* Use smallest possible value if conversion resulted in zero */
+		if (rx_usecs && !rx_riwt)
+			rx_riwt = 1;
+
+		/* Check the bounds of values for Rx */
+		if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
+			axgbe_printf(2, "rx-usec is limited to %d usecs\n",
+			    hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
+			return (-EINVAL);
+		}
+		if (rx_frames > pdata->rx_desc_count) {
+			axgbe_printf(2, "rx-frames is limited to %d frames\n",
+			    pdata->rx_desc_count);
+			return (-EINVAL);
+		}
+
+		tx_frames = sys_op->tx_max_coalesced_frames;
+
+		/* Check the bounds of values for Tx */
+		if (tx_frames > pdata->tx_desc_count) {
+			axgbe_printf(2, "tx-frames is limited to %d frames\n",
+			    pdata->tx_desc_count);
+			return (-EINVAL);
+		}
+
+		pdata->rx_riwt = rx_riwt;
+		pdata->rx_usecs = rx_usecs;
+		pdata->rx_frames = rx_frames;
+		hw_if->config_rx_coalesce(pdata);
+
+		pdata->tx_frames = tx_frames;
+		hw_if->config_tx_coalesce(pdata);
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+
+	return (rc);
+}
+
+/*
+ * Sysctl handler for flow-control (pause) parameters.  A read reports
+ * the configured autoneg/rx/tx pause state and, when link-partner
+ * advertisement data is available, the negotiated result.  A write
+ * parses "autoneg"/"rx"/"tx" and reprograms the PHY advertisement.
+ */
+static int
+sysctl_pauseparam_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	struct sysctl_op *sys_op = pdata->sys_op;
+	struct sysctl_info sysctl_pauseparam[] = {
+		{ "autoneg", SYSL_BOOL, autoneg, "supported" },
+		{ "rx", SYSL_BOOL, rx_pause, "supported" },
+		{ "tx", SYSL_BOOL, tx_pause, "supported" },
+	};
+	ssize_t buf_size = 512;
+	char buf[buf_size];
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			/* Fix: sb is NULL here; reading sb->s_error faulted. */
+			return (-ENOMEM);
+		}
+		sys_op->autoneg = pdata->phy.pause_autoneg;
+		sys_op->tx_pause = pdata->phy.tx_pause;
+		sys_op->rx_pause = pdata->phy.rx_pause;
+
+		sbuf_printf(sb,
+		    "\nAutonegotiate:	%s\n"
+		    "RX:		%s\n"
+		    "TX:		%s\n",
+		    sys_op->autoneg ? "on" : "off",
+		    sys_op->rx_pause ? "on" : "off",
+		    sys_op->tx_pause ? "on" : "off");
+
+		if (pdata->phy.lp_advertising) {
+			int an_rx = 0, an_tx = 0;
+
+			/* Symmetric pause: both directions negotiated. */
+			if (pdata->phy.advertising & pdata->phy.lp_advertising &
+			    ADVERTISED_Pause) {
+				an_tx = 1;
+				an_rx = 1;
+			} else if (pdata->phy.advertising &
+			    pdata->phy.lp_advertising & ADVERTISED_Asym_Pause) {
+				/* Asymmetric: direction depends on who set Pause. */
+				if (pdata->phy.advertising & ADVERTISED_Pause)
+					an_rx = 1;
+				else if (pdata->phy.lp_advertising &
+				    ADVERTISED_Pause)
+					an_tx = 1;
+			}
+			sbuf_printf(sb,
+			    "\n->\nRX negotiated:	%s\n"
+			    "TX negotiated:	%s\n",
+			    an_rx ? "on" : "off",
+			    an_tx ? "on" : "off");
+		}
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		/* Fix: propagate sbuf_finish() result instead of returning 0. */
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		parse_generic_sysctl(pdata, buf, sysctl_pauseparam,
+		    ARRAY_SIZE(sysctl_pauseparam));
+
+		if (sys_op->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) {
+			axgbe_error("autoneg disabled, pause autoneg not available\n");
+			return (-EINVAL);
+		}
+
+		pdata->phy.pause_autoneg = sys_op->autoneg;
+		pdata->phy.tx_pause = sys_op->tx_pause;
+		pdata->phy.rx_pause = sys_op->rx_pause;
+
+		XGBE_CLR_ADV(&pdata->phy, Pause);
+		XGBE_CLR_ADV(&pdata->phy, Asym_Pause);
+
+		if (sys_op->rx_pause) {
+			XGBE_SET_ADV(&pdata->phy, Pause);
+			XGBE_SET_ADV(&pdata->phy, Asym_Pause);
+		}
+
+		if (sys_op->tx_pause) {
+			/* Equivalent to XOR of Asym_Pause */
+			if (XGBE_ADV(&pdata->phy, Asym_Pause))
+				XGBE_CLR_ADV(&pdata->phy, Asym_Pause);
+			else
+				XGBE_SET_ADV(&pdata->phy, Asym_Pause);
+		}
+
+		if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
+			rc = pdata->phy_if.phy_config_aneg(pdata);
+
+	}
+
+	return (rc);
+}
+
+/*
+ * Sysctl handler for link settings (autoneg/speed/duplex).  A read
+ * reports the current PHY settings; a write parses the three tokens,
+ * validates them and reconfigures the PHY.
+ */
+static int
+sysctl_link_ksettings_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	struct sysctl_op *sys_op = pdata->sys_op;
+	struct sysctl_info sysctl_linksettings[] = {
+		{ "autoneg", SYSL_BOOL, autoneg, "supported" },
+		{ "speed", SYSL_U32, speed, "supported" },
+		{ "duplex", SYSL_U8, duplex, "supported" },
+	};
+	ssize_t buf_size = 512;
+	char buf[buf_size], link_modes[16], speed_modes[16];
+	struct sbuf *sb;
+	uint32_t speed;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			/* Fix: sb is NULL here; reading sb->s_error faulted. */
+			return (-ENOMEM);
+		}
+		sys_op->autoneg = pdata->phy.autoneg;
+		sys_op->speed = pdata->phy.speed;
+		sys_op->duplex = pdata->phy.duplex;
+
+		XGBE_LM_COPY(&pdata->phy, supported, &pdata->phy, supported);
+		XGBE_LM_COPY(&pdata->phy, advertising, &pdata->phy, advertising);
+		XGBE_LM_COPY(&pdata->phy, lp_advertising, &pdata->phy, lp_advertising);
+
+		switch (sys_op->speed) {
+		case 2:
+			strcpy(link_modes, "10Gbps/Full");
+			strcpy(speed_modes, "10000");
+			break;
+		case 3:
+			strcpy(link_modes, "2.5Gbps/Full");
+			strcpy(speed_modes, "2500");
+			break;
+		case 4:
+			strcpy(link_modes, "1Gbps/Full");
+			strcpy(speed_modes, "1000");
+			break;
+		case 5:
+			strcpy(link_modes, "100Mbps/Full");
+			strcpy(speed_modes, "100");
+			break;
+		case 6:
+			strcpy(link_modes, "10Mbps/Full");
+			strcpy(speed_modes, "10");
+			break;
+		default:
+			/*
+			 * Fix: the original switch covered only speeds 1-6 and
+			 * left both buffers uninitialized for any other value,
+			 * which sbuf_printf() below would then read (UB).
+			 */
+			strcpy(link_modes, "Unknown");
+			strcpy(speed_modes, "Unknown");
+			break;
+		}
+
+		sbuf_printf(sb,
+		    "\nlink_modes: %s\n"
+		    "autonegotiation: %s\n"
+		    "speed: %sMbps\n",
+		    link_modes,
+		    (sys_op->autoneg == AUTONEG_DISABLE) ? "off" : "on",
+		    speed_modes);
+
+		switch (sys_op->duplex) {
+		case DUPLEX_HALF:
+			sbuf_printf(sb, "Duplex: Half\n");
+			break;
+		case DUPLEX_FULL:
+			sbuf_printf(sb, "Duplex: Full\n");
+			break;
+		default:
+			sbuf_printf(sb, "Duplex: Unknown\n");
+			break;
+		}
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		/* Fix: propagate sbuf_finish() result instead of returning 0. */
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		parse_generic_sysctl(pdata, buf, sysctl_linksettings,
+		    ARRAY_SIZE(sysctl_linksettings));
+
+		speed = sys_op->speed;
+
+		if ((sys_op->autoneg != AUTONEG_ENABLE) &&
+		    (sys_op->autoneg != AUTONEG_DISABLE)) {
+			axgbe_error("unsupported autoneg %hhu\n",
+			    (unsigned char)sys_op->autoneg);
+			return (-EINVAL);
+		}
+
+		if (sys_op->autoneg == AUTONEG_DISABLE) {
+			if (!pdata->phy_if.phy_valid_speed(pdata, speed)) {
+				axgbe_error("unsupported speed %u\n", speed);
+				return (-EINVAL);
+			}
+
+			if (sys_op->duplex != DUPLEX_FULL) {
+				axgbe_error("unsupported duplex %hhu\n",
+				    (unsigned char)sys_op->duplex);
+				return (-EINVAL);
+			}
+		}
+
+		pdata->phy.autoneg = sys_op->autoneg;
+		pdata->phy.speed = speed;
+		pdata->phy.duplex = sys_op->duplex;
+
+		if (sys_op->autoneg == AUTONEG_ENABLE)
+			XGBE_SET_ADV(&pdata->phy, Autoneg);
+		else
+			XGBE_CLR_ADV(&pdata->phy, Autoneg);
+
+		if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
+			rc = pdata->phy_if.phy_config_aneg(pdata);
+	}
+
+	return (rc);
+}
+
+/*
+ * Sysctl handler for descriptor ring sizes.  A read reports the
+ * supported maximums and current settings; a write validates the
+ * requested Rx/Tx counts, rounds them down to a power of two and
+ * stores them (device restart still TODO).
+ */
+static int
+sysctl_ringparam_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	struct sysctl_op *sys_op = pdata->sys_op;
+	struct sysctl_info sysctl_ringparam[] = {
+		{ "rx", SYSL_S32, rx_pending, "supported" },
+		{ "rx-mini", SYSL_S32, rx_mini_pending, "supported" },
+		{ "rx-jumbo", SYSL_S32, rx_jumbo_pending, "supported" },
+		{ "tx", SYSL_S32, tx_pending, "supported" },
+	};
+	ssize_t buf_size = 512;
+	unsigned int rx, tx;
+	char buf[buf_size];
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			/* Fix: sb is NULL here; reading sb->s_error faulted. */
+			return (-ENOMEM);
+		}
+		sys_op->rx_max_pending = XGBE_RX_DESC_CNT_MAX;
+		sys_op->tx_max_pending = XGBE_TX_DESC_CNT_MAX;
+		sys_op->rx_pending = pdata->rx_desc_count;
+		sys_op->tx_pending = pdata->tx_desc_count;
+
+		sbuf_printf(sb,
+		    "\nPre-set maximums:\n"
+		    "RX:		%u\n"
+		    "RX Mini:	%u\n"
+		    "RX Jumbo:	%u\n"
+		    "TX:		%u\n",
+		    sys_op->rx_max_pending,
+		    sys_op->rx_mini_max_pending,
+		    sys_op->rx_jumbo_max_pending,
+		    sys_op->tx_max_pending);
+
+		sbuf_printf(sb,
+		    "\nCurrent hardware settings:\n"
+		    "RX:		%u\n"
+		    "RX Mini:	%u\n"
+		    "RX Jumbo:	%u\n"
+		    "TX:		%u\n",
+		    sys_op->rx_pending,
+		    sys_op->rx_mini_pending,
+		    sys_op->rx_jumbo_pending,
+		    sys_op->tx_pending);
+
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		/* Fix: propagate sbuf_finish() result instead of returning 0. */
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		parse_generic_sysctl(pdata, buf, sysctl_ringparam,
+		    ARRAY_SIZE(sysctl_ringparam));
+
+		if (sys_op->rx_mini_pending || sys_op->rx_jumbo_pending) {
+			axgbe_error("unsupported ring parameter\n");
+			return (-EINVAL);
+		}
+
+		if ((sys_op->rx_pending < XGBE_RX_DESC_CNT_MIN) ||
+		    (sys_op->rx_pending > XGBE_RX_DESC_CNT_MAX)) {
+			axgbe_error("rx ring param must be between %u and %u\n",
+			    XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MAX);
+			return (-EINVAL);
+		}
+
+		if ((sys_op->tx_pending < XGBE_TX_DESC_CNT_MIN) ||
+		    (sys_op->tx_pending > XGBE_TX_DESC_CNT_MAX)) {
+			axgbe_error("tx ring param must be between %u and %u\n",
+			    XGBE_TX_DESC_CNT_MIN, XGBE_TX_DESC_CNT_MAX);
+			return (-EINVAL);
+		}
+
+		rx = __rounddown_pow_of_two(sys_op->rx_pending);
+		if (rx != sys_op->rx_pending)
+			axgbe_printf(1, "rx ring param rounded to power of 2: %u\n",
+			    rx);
+
+		tx = __rounddown_pow_of_two(sys_op->tx_pending);
+		if (tx != sys_op->tx_pending)
+			axgbe_printf(1, "tx ring param rounded to power of 2: %u\n",
+			    tx);
+
+		if ((rx == pdata->rx_desc_count) &&
+		    (tx == pdata->tx_desc_count))
+			goto out;
+
+		pdata->rx_desc_count = rx;
+		pdata->tx_desc_count = tx;
+
+		/* TODO - restart dev */
+	}
+
+out:
+	/* Fix: propagate get_ubuf() failure instead of always returning 0. */
+	return (rc);
+}
+
+/*
+ * Sysctl handler for channel (queue) counts.  A read computes the
+ * hardware/IRQ-limited maximums and reports them alongside the current
+ * ring configuration; a write currently only parses and logs the
+ * requested counts (reconfiguration not implemented).
+ */
+static int
+sysctl_channels_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	struct sysctl_op *sys_op = pdata->sys_op;
+	struct sysctl_info sysctl_channels[] = {
+		{ "rx", SYSL_S32, rx_count, "supported" },
+		{ "tx", SYSL_S32, tx_count, "supported" },
+		{ "other", SYSL_S32, other_count, "supported" },
+		{ "combined", SYSL_S32, combined_count, "supported" },
+	};
+	unsigned int rx, tx, combined;
+	ssize_t buf_size = 512;
+	char buf[buf_size];
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			/* Fix: sb is NULL here; reading sb->s_error faulted. */
+			return (-ENOMEM);
+		}
+		/* Maximums are bounded by HW channels, IRQs and queues. */
+		rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
+		rx = min(rx, pdata->channel_irq_count);
+		tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
+		tx = min(tx, pdata->channel_irq_count);
+		tx = min(tx, pdata->tx_max_q_count);
+
+		combined = min(rx, tx);
+
+		sys_op->max_combined = combined;
+		sys_op->max_rx = rx ? rx - 1 : 0;
+		sys_op->max_tx = tx ? tx - 1 : 0;
+
+		/* Get current settings based on device state */
+		rx = pdata->rx_ring_count;
+		tx = pdata->tx_ring_count;
+
+		combined = min(rx, tx);
+		rx -= combined;
+		tx -= combined;
+
+		sys_op->combined_count = combined;
+		sys_op->rx_count = rx;
+		sys_op->tx_count = tx;
+
+		sbuf_printf(sb,
+		    "\nPre-set maximums:\n"
+		    "RX:		%u\n"
+		    "TX:		%u\n"
+		    "Other:		%u\n"
+		    "Combined:	%u\n",
+		    sys_op->max_rx, sys_op->max_tx,
+		    sys_op->max_other,
+		    sys_op->max_combined);
+
+		sbuf_printf(sb,
+		    "\nCurrent hardware settings:\n"
+		    "RX:		%u\n"
+		    "TX:		%u\n"
+		    "Other:		%u\n"
+		    "Combined:	%u\n",
+		    sys_op->rx_count, sys_op->tx_count,
+		    sys_op->other_count,
+		    sys_op->combined_count);
+
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		parse_generic_sysctl(pdata, buf, sysctl_channels,
+		    ARRAY_SIZE(sysctl_channels));
+
+		axgbe_error( "channel inputs: combined=%u, rx-only=%u,"
+		    " tx-only=%u\n", sys_op->combined_count,
+		    sys_op->rx_count, sys_op->tx_count);
+	}
+
+	return (rc);
+}
+
+
+/*
+ * Sysctl handler (read-only): refresh the MMC hardware counters and
+ * dump all MAC statistics plus per-queue Tx/Rx packet and byte counts.
+ * Writes are rejected with EINVAL.
+ */
+static int
+sysctl_mac_stats_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	struct sbuf *sb;
+	int rc = 0;
+	int i;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			/* Fix: sb is NULL here; reading sb->s_error faulted. */
+			return (-ENOMEM);
+		}
+
+		pdata->hw_if.read_mmc_stats(pdata);
+		for (i = 0; i < XGBE_STATS_COUNT; i++) {
+			sbuf_printf(sb, "\n %s: %lu",
+			    xgbe_gstring_stats[i].stat_string,
+			    *(uint64_t *)((uint8_t *)pdata + xgbe_gstring_stats[i].stat_offset));
+		}
+		for (i = 0; i < pdata->tx_ring_count; i++) {
+			sbuf_printf(sb,
+			    "\n txq_packets[%d]: %lu"
+			    "\n txq_bytes[%d]: %lu",
+			    i, pdata->ext_stats.txq_packets[i],
+			    i, pdata->ext_stats.txq_bytes[i]);
+		}
+		for (i = 0; i < pdata->rx_ring_count; i++) {
+			sbuf_printf(sb,
+			    "\n rxq_packets[%d]: %lu"
+			    "\n rxq_bytes[%d]: %lu",
+			    i, pdata->ext_stats.rxq_packets[i],
+			    i, pdata->ext_stats.rxq_bytes[i]);
+		}
+
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	return (-EINVAL);
+}
+
+/*
+ * Sysctl handler for the XGMAC register value: a read returns the value
+ * of the register selected via "xgmac_register"; a write parses a hex
+ * value and stores it into that register.
+ */
+static int
+sysctl_xgmac_reg_value_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	unsigned int value;
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			/* Fix: sb is NULL here; reading sb->s_error faulted. */
+			return (-ENOMEM);
+		}
+
+		value = XGMAC_IOREAD(pdata, pdata->sysctl_xgmac_reg);
+		axgbe_printf(2, "READ: %s: value: 0x%x\n", __func__, value);
+		sbuf_printf(sb, "\nXGMAC reg_value:	0x%x\n", value);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		/* Fix: reject unparsable input; 'value' was used uninitialized. */
+		if (sscanf(buf, "%x", &value) != 1)
+			return (-EINVAL);
+		axgbe_printf(2, "WRITE: %s: value: 0x%x\n", __func__, value);
+		XGMAC_IOWRITE(pdata, pdata->sysctl_xgmac_reg, value);
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+/*
+ * Sysctl handler for the XPCS MMD selector: a read reports the MMD used
+ * by the xpcs_register/xpcs_register_value sysctls; a write parses a
+ * hex MMD number and stores it.
+ */
+static int
+sysctl_xpcs_mmd_reg_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	struct sbuf *sb;
+	unsigned int reg;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			/* Fix: sb is NULL here; reading sb->s_error faulted. */
+			return (-ENOMEM);
+		}
+
+		axgbe_printf(2, "READ: %s: xpcs_mmd: 0x%x\n", __func__,
+		    pdata->sysctl_xpcs_mmd);
+		sbuf_printf(sb, "\nXPCS mmd_reg:	0x%x\n",
+		    pdata->sysctl_xpcs_mmd);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		/* Fix: reject unparsable input; 'reg' was used uninitialized. */
+		if (sscanf(buf, "%x", &reg) != 1)
+			return (-EINVAL);
+		axgbe_printf(2, "WRITE: %s: mmd_reg: 0x%x\n", __func__, reg);
+		pdata->sysctl_xpcs_mmd = reg;
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+/*
+ * Sysctl handler for the XPCS register address: a read reports the
+ * address used by xpcs_register_value; a write parses a hex address
+ * and stores it.
+ */
+static int
+sysctl_xpcs_reg_addr_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	struct sbuf *sb;
+	unsigned int reg;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			/* Fix: sb is NULL here; reading sb->s_error faulted. */
+			return (-ENOMEM);
+		}
+
+		axgbe_printf(2, "READ: %s: sysctl_xpcs_reg: 0x%x\n", __func__,
+		    pdata->sysctl_xpcs_reg);
+		sbuf_printf(sb, "\nXPCS reg_addr:	0x%x\n",
+		    pdata->sysctl_xpcs_reg);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		/* Fix: reject unparsable input; 'reg' was used uninitialized. */
+		if (sscanf(buf, "%x", &reg) != 1)
+			return (-EINVAL);
+		axgbe_printf(2, "WRITE: %s: reg: 0x%x\n", __func__, reg);
+		pdata->sysctl_xpcs_reg = reg;
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+/*
+ * Sysctl handler for the XPCS register value: a read returns the value
+ * at the selected MMD/address via MDIO; a write parses a hex value and
+ * writes it to the device.
+ */
+static int
+sysctl_xpcs_reg_value_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	unsigned int value;
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			/* Fix: sb is NULL here; reading sb->s_error faulted. */
+			return (-ENOMEM);
+		}
+
+		value = XMDIO_READ(pdata, pdata->sysctl_xpcs_mmd,
+		    pdata->sysctl_xpcs_reg);
+		axgbe_printf(2, "READ: %s: value: 0x%x\n", __func__, value);
+		sbuf_printf(sb, "\nXPCS reg_value:	0x%x\n", value);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		/* Fix: reject unparsable input; 'value' was used uninitialized. */
+		if (sscanf(buf, "%x", &value) != 1)
+			return (-EINVAL);
+		axgbe_printf(2, "WRITE: %s: value: 0x%x\n", __func__, value);
+		XMDIO_WRITE(pdata, pdata->sysctl_xpcs_mmd,
+		    pdata->sysctl_xpcs_reg, value);
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+/*
+ * Sysctl handler for the XPROP register address: a read reports the
+ * address used by xprop_register_value; a write parses a hex address
+ * and stores it.
+ */
+static int
+sysctl_xprop_reg_addr_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	struct sbuf *sb;
+	unsigned int reg;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			/* Fix: sb is NULL here; reading sb->s_error faulted. */
+			return (-ENOMEM);
+		}
+
+		axgbe_printf(2, "READ: %s: sysctl_xprop_reg: 0x%x\n", __func__,
+		    pdata->sysctl_xprop_reg);
+		sbuf_printf(sb, "\nXPROP reg_addr:	0x%x\n",
+		    pdata->sysctl_xprop_reg);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		/* Fix: reject unparsable input; 'reg' was used uninitialized. */
+		if (sscanf(buf, "%x", &reg) != 1)
+			return (-EINVAL);
+		axgbe_printf(2, "WRITE: %s: reg: 0x%x\n", __func__, reg);
+		pdata->sysctl_xprop_reg = reg;
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+/*
+ * Sysctl handler for the XPROP register value: a read returns the value
+ * of the register selected via "xprop_register"; a write parses a hex
+ * value and stores it into that register.
+ */
+static int
+sysctl_xprop_reg_value_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	unsigned int value;
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			/* Fix: sb is NULL here; reading sb->s_error faulted. */
+			return (-ENOMEM);
+		}
+
+		value = XP_IOREAD(pdata, pdata->sysctl_xprop_reg);
+		axgbe_printf(2, "READ: %s: value: 0x%x\n", __func__, value);
+		sbuf_printf(sb, "\nXPROP reg_value:	0x%x\n", value);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		/* Fix: reject unparsable input; 'value' was used uninitialized. */
+		if (sscanf(buf, "%x", &value) != 1)
+			return (-EINVAL);
+		axgbe_printf(2, "WRITE: %s: value: 0x%x\n", __func__, value);
+		XP_IOWRITE(pdata, pdata->sysctl_xprop_reg, value);
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+/*
+ * Sysctl handler for the XI2C register address: a read reports the
+ * address used by xi2c_register_value; a write parses a hex address
+ * and stores it.
+ */
+static int
+sysctl_xi2c_reg_addr_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	struct sbuf *sb;
+	unsigned int reg;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			/* Fix: sb is NULL here; reading sb->s_error faulted. */
+			return (-ENOMEM);
+		}
+
+		axgbe_printf(2, "READ: %s: sysctl_xi2c_reg: 0x%x\n", __func__,
+		    pdata->sysctl_xi2c_reg);
+		sbuf_printf(sb, "\nXI2C reg_addr:	0x%x\n",
+		    pdata->sysctl_xi2c_reg);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		/* Fix: reject unparsable input; 'reg' was used uninitialized. */
+		if (sscanf(buf, "%x", &reg) != 1)
+			return (-EINVAL);
+		axgbe_printf(2, "WRITE: %s: reg: 0x%x\n", __func__, reg);
+		pdata->sysctl_xi2c_reg = reg;
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+/*
+ * Sysctl handler for the XI2C register value: a read returns the value
+ * of the register selected via "xi2c_register"; a write parses a hex
+ * value and stores it into that register.
+ */
+static int
+sysctl_xi2c_reg_value_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	unsigned int value;
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			/* Fix: sb is NULL here; reading sb->s_error faulted. */
+			return (-ENOMEM);
+		}
+
+		value = XI2C_IOREAD(pdata, pdata->sysctl_xi2c_reg);
+		axgbe_printf(2, "READ: %s: value: 0x%x\n", __func__, value);
+		sbuf_printf(sb, "\nXI2C reg_value:	0x%x\n", value);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		/* Fix: reject unparsable input; 'value' was used uninitialized. */
+		if (sscanf(buf, "%x", &value) != 1)
+			return (-EINVAL);
+		axgbe_printf(2, "WRITE: %s: value: 0x%x\n", __func__, value);
+		XI2C_IOWRITE(pdata, pdata->sysctl_xi2c_reg, value);
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+/*
+ * Sysctl handler for the AN CDR workaround flag: a read reports the
+ * current setting; a write parses a decimal value and enables (non-zero)
+ * or disables (zero) the workaround.
+ */
+static int
+sysctl_an_cdr_wr_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	unsigned int an_cdr_wr = 0;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			/* Fix: sb is NULL here; reading sb->s_error faulted. */
+			return (-ENOMEM);
+		}
+
+		axgbe_printf(2, "READ: %s: an_cdr_wr: %d\n", __func__,
+		    pdata->sysctl_an_cdr_workaround);
+		sbuf_printf(sb, "%d\n", pdata->sysctl_an_cdr_workaround);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		/* Fix: reject unparsable input instead of using stale zero. */
+		if (sscanf(buf, "%u", &an_cdr_wr) != 1)
+			return (-EINVAL);
+		axgbe_printf(2, "WRITE: %s: an_cdr_wr: 0x%d\n", __func__,
+		    an_cdr_wr);
+
+		/* Normalize any non-zero input to 1. */
+		pdata->sysctl_an_cdr_workaround = (an_cdr_wr != 0);
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+/*
+ * Sysctl handler for the AN CDR "track early" flag: a read reports the
+ * current setting; a write parses a decimal value and enables (non-zero)
+ * or disables (zero) early CDR tracking.
+ */
+static int
+sysctl_an_cdr_track_early_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	unsigned int an_cdr_track_early = 0;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			/* Fix: sb is NULL here; reading sb->s_error faulted. */
+			return (-ENOMEM);
+		}
+
+		axgbe_printf(2, "READ: %s: an_cdr_track_early %d\n", __func__,
+		    pdata->sysctl_an_cdr_track_early);
+		sbuf_printf(sb, "%d\n", pdata->sysctl_an_cdr_track_early);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		/* Fix: reject unparsable input instead of using stale zero. */
+		if (sscanf(buf, "%u", &an_cdr_track_early) != 1)
+			return (-EINVAL);
+		axgbe_printf(2, "WRITE: %s: an_cdr_track_early: %d\n", __func__,
+		    an_cdr_track_early);
+
+		/* Normalize any non-zero input to 1. */
+		pdata->sysctl_an_cdr_track_early = (an_cdr_track_early != 0);
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+/*
+ * Release the sysctl operation state allocated by axgbe_sysctl_init().
+ * free(9) accepts NULL, so no guard is needed; the pointer is cleared
+ * to guard against a double free on repeated teardown.
+ */
+void
+axgbe_sysctl_exit(struct xgbe_prv_data *pdata)
+{
+
+	free(pdata->sys_op, M_AXGBE);
+	pdata->sys_op = NULL;
+}
+
+/*
+ * Allocate the sysctl operation state and register all axgbe sysctl
+ * nodes under the device's sysctl tree.  The xprop/xi2c and AN CDR
+ * nodes are conditional on hardware resources / version data.
+ * Paired with axgbe_sysctl_exit().
+ */
+void
+axgbe_sysctl_init(struct xgbe_prv_data *pdata)
+{
+	struct sysctl_ctx_list *clist;
+	struct sysctl_oid_list *top;
+	struct sysctl_oid *parent;
+	struct sysctl_op *sys_op;
+
+	sys_op = malloc(sizeof(*sys_op), M_AXGBE, M_WAITOK | M_ZERO);
+	pdata->sys_op = sys_op;
+
+	clist = device_get_sysctl_ctx(pdata->dev);
+	parent = device_get_sysctl_tree(pdata->dev);
+	top = SYSCTL_CHILDREN(parent);
+
+	/* Set defaults */
+	pdata->sysctl_xgmac_reg = 0;
+	pdata->sysctl_xpcs_mmd = 1;
+	pdata->sysctl_xpcs_reg = 0;
+
+	SYSCTL_ADD_UINT(clist, top, OID_AUTO, "axgbe_debug_level", CTLFLAG_RWTUN,
+	    &pdata->debug_level, 0, "axgbe log level -- higher is verbose");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xgmac_register",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_xgmac_reg_addr_handler, "IU",
+	    "xgmac register addr");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xgmac_register_value",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_xgmac_reg_value_handler, "IU",
+	    "xgmac register value");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xpcs_mmd",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_xpcs_mmd_reg_handler, "IU", "xpcs mmd register");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xpcs_register",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_xpcs_reg_addr_handler, "IU", "xpcs register");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xpcs_register_value",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_xpcs_reg_value_handler, "IU",
+	    "xpcs register value");
+
+	/*
+	 * Merged: the original had two back-to-back identical
+	 * "if (pdata->xpcs_res)" blocks for xprop and xi2c.
+	 */
+	if (pdata->xpcs_res) {
+		SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xprop_register",
+		    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+		    pdata, 0, sysctl_xprop_reg_addr_handler,
+		    "IU", "xprop register");
+
+		SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xprop_register_value",
+		    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+		    pdata, 0, sysctl_xprop_reg_value_handler,
+		    "IU", "xprop register value");
+
+		SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xi2c_register",
+		    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+		    pdata, 0, sysctl_xi2c_reg_addr_handler,
+		    "IU", "xi2c register");
+
+		SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xi2c_register_value",
+		    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+		    pdata, 0, sysctl_xi2c_reg_value_handler,
+		    "IU", "xi2c register value");
+	}
+
+	if (pdata->vdata->an_cdr_workaround) {
+		SYSCTL_ADD_PROC(clist, top, OID_AUTO, "an_cdr_workaround",
+		    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+		    pdata, 0, sysctl_an_cdr_wr_handler, "IU",
+		    "an cdr workaround");
+
+		SYSCTL_ADD_PROC(clist, top, OID_AUTO, "an_cdr_track_early",
+		    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+		    pdata, 0, sysctl_an_cdr_track_early_handler, "IU",
+		    "an cdr track early");
+	}
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "drv_info",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_get_drv_info_handler, "IU",
+	    "xgbe drv info");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "link_info",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_get_link_info_handler, "IU",
+	    "xgbe link info");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "coalesce_info",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_coalesce_handler, "IU",
+	    "xgbe coalesce info");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "pauseparam_info",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_pauseparam_handler, "IU",
+	    "xgbe pauseparam info");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "link_ksettings_info",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_link_ksettings_handler, "IU",
+	    "xgbe link_ksettings info");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "ringparam_info",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_ringparam_handler, "IU",
+	    "xgbe ringparam info");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "channels_info",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_channels_handler, "IU",
+	    "xgbe channels info");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "mac_stats",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_mac_stats_handler, "IU",
+	    "xgbe mac stats");
+}
diff --git a/sys/dev/axgbe/xgbe-txrx.c b/sys/dev/axgbe/xgbe-txrx.c
new file mode 100644
index 000000000000..c6872e584f81
--- /dev/null
+++ b/sys/dev/axgbe/xgbe-txrx.c
@@ -0,0 +1,777 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Advanced Micro Devices, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Contact Information :
+ * Rajesh Kumar <rajesh1.kumar@amd.com>
+ * Shreyank Amartya <Shreyank.Amartya@amd.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+/*
+ * IFLIB interfaces
+ */
+static int axgbe_isc_txd_encap(void *, if_pkt_info_t);
+static void axgbe_isc_txd_flush(void *, uint16_t, qidx_t);
+static int axgbe_isc_txd_credits_update(void *, uint16_t, bool);
+static void axgbe_isc_rxd_refill(void *, if_rxd_update_t);
+static void axgbe_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
+static int axgbe_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
+static int axgbe_isc_rxd_pkt_get(void *, if_rxd_info_t);
+
+struct if_txrx axgbe_txrx = {
+ .ift_txd_encap = axgbe_isc_txd_encap,
+ .ift_txd_flush = axgbe_isc_txd_flush,
+ .ift_txd_credits_update = axgbe_isc_txd_credits_update,
+ .ift_rxd_available = axgbe_isc_rxd_available,
+ .ift_rxd_pkt_get = axgbe_isc_rxd_pkt_get,
+ .ift_rxd_refill = axgbe_isc_rxd_refill,
+ .ift_rxd_flush = axgbe_isc_rxd_flush,
+ .ift_legacy_intr = NULL
+};
+
+static void
+xgbe_print_pkt_info(struct xgbe_prv_data *pdata, if_pkt_info_t pi)
+{
+
+ axgbe_printf(1, "------Packet Info Start------\n");
+ axgbe_printf(1, "pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
+ pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
+ axgbe_printf(1, "pi new_pidx: %d csum_flags: %x mflags: %x vtag: %d\n",
+ pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_mflags, pi->ipi_vtag);
+ axgbe_printf(1, "pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
+ pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
+ axgbe_printf(1, "pi tcp_hlen: %d tcp_hflags: %x tcp_seq: %d tso_segsz %d\n",
+ pi->ipi_tcp_hlen, pi->ipi_tcp_hflags, pi->ipi_tcp_seq, pi->ipi_tso_segsz);
+}
+
+static bool
+axgbe_ctx_desc_setup(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
+ if_pkt_info_t pi)
+{
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_ring_data *rdata;
+ bool inc_cur = false;
+
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ rdesc = rdata->rdesc;
+
+ axgbe_printf(1, "ipi_tso_segsz %d cur_mss %d idx %d\n",
+ pi->ipi_tso_segsz, ring->tx.cur_mss, ring->cur);
+
+ axgbe_printf(1, "ipi_vtag 0x%x cur_vlan_ctag 0x%x\n",
+ pi->ipi_vtag, ring->tx.cur_vlan_ctag);
+
+ if ((pi->ipi_csum_flags & CSUM_TSO) &&
+ (pi->ipi_tso_segsz != ring->tx.cur_mss)) {
+ /*
+ * Set TSO maximum segment size
+ * Mark as context descriptor
+ * Indicate this descriptor contains MSS
+ */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
+ MSS, pi->ipi_tso_segsz);
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, TCMSSV, 1);
+ ring->tx.cur_mss = pi->ipi_tso_segsz;
+ inc_cur = true;
+ }
+
+ if (pi->ipi_vtag && (pi->ipi_vtag != ring->tx.cur_vlan_ctag)) {
+ /*
+ * Mark it as context descriptor
+ * Set the VLAN tag
+ * Indicate this descriptor contains the VLAN tag
+ */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ VT, pi->ipi_vtag);
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, VLTV, 1);
+ ring->tx.cur_vlan_ctag = pi->ipi_vtag;
+ inc_cur = true;
+ }
+
+ return (inc_cur);
+}
+
+static uint16_t
+axgbe_calculate_tx_parms(struct xgbe_prv_data *pdata, if_pkt_info_t pi,
+ struct xgbe_packet_data *packet)
+{
+ uint32_t tcp_payload_len = 0, bytes = 0;
+ uint16_t max_len, hlen, payload_len, pkts = 0;
+
+ packet->tx_packets = packet->tx_bytes = 0;
+
+ hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
+ if (pi->ipi_csum_flags & CSUM_TSO) {
+
+ tcp_payload_len = pi->ipi_len - hlen;
+ axgbe_printf(1, "%s: ipi_len %x elen %d iplen %d tcplen %d\n",
+ __func__, pi->ipi_len, pi->ipi_ehdrlen, pi->ipi_ip_hlen,
+ pi->ipi_tcp_hlen);
+
+ max_len = if_getmtu(pdata->netdev) + ETH_HLEN;
+ if (pi->ipi_vtag)
+ max_len += VLAN_HLEN;
+
+ while (tcp_payload_len) {
+
+ payload_len = max_len - hlen;
+ payload_len = min(payload_len, tcp_payload_len);
+ tcp_payload_len -= payload_len;
+ pkts++;
+ bytes += (hlen + payload_len);
+ axgbe_printf(1, "%s: max_len %d payload_len %d "
+ "tcp_len %d\n", __func__, max_len, payload_len,
+ tcp_payload_len);
+ }
+ } else {
+ pkts = 1;
+ bytes = pi->ipi_len;
+ }
+
+ packet->tx_packets = pkts;
+ packet->tx_bytes = bytes;
+
+ axgbe_printf(1, "%s: packets %d bytes %d hlen %d\n", __func__,
+ packet->tx_packets, packet->tx_bytes, hlen);
+
+ return (hlen);
+}
+
+static int
+axgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
+{
+ struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_packet_data *packet;
+ unsigned int cur, start, tx_set_ic;
+ uint16_t offset, hlen, datalen, tcp_payload_len = 0;
+ int cur_seg = 0;
+
+ xgbe_print_pkt_info(pdata, pi);
+
+ channel = pdata->channel[pi->ipi_qsidx];
+ ring = channel->tx_ring;
+ packet = &ring->packet_data;
+ cur = start = ring->cur;
+
+ axgbe_printf(1, "--> %s: txq %d cur %d dirty %d\n",
+ __func__, pi->ipi_qsidx, ring->cur, ring->dirty);
+
+ MPASS(pi->ipi_len != 0);
+ if (__predict_false(pi->ipi_len == 0)) {
+ axgbe_error("empty packet received from stack\n");
+ return (0);
+ }
+
+ MPASS(ring->cur == pi->ipi_pidx);
+ if (__predict_false(ring->cur != pi->ipi_pidx)) {
+ axgbe_error("--> %s: cur(%d) ne pidx(%d)\n", __func__,
+ ring->cur, pi->ipi_pidx);
+ }
+
+ /* Determine if an interrupt should be generated for this Tx:
+ * Interrupt:
+ * - Tx frame count exceeds the frame count setting
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set exceeds the frame count setting
+ * No interrupt:
+ * - No frame count setting specified (ethtool -C ethX tx-frames 0)
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set does not exceed the frame count setting
+ */
+ memset(packet, 0, sizeof(*packet));
+ hlen = axgbe_calculate_tx_parms(pdata, pi, packet);
+ axgbe_printf(1, "%s: ipi_len %d tx_pkts %d tx_bytes %d hlen %d\n",
+ __func__, pi->ipi_len, packet->tx_packets, packet->tx_bytes, hlen);
+
+ ring->coalesce_count += packet->tx_packets;
+ if (!pdata->tx_frames)
+ tx_set_ic = 0;
+ else if (packet->tx_packets > pdata->tx_frames)
+ tx_set_ic = 1;
+ else if ((ring->coalesce_count % pdata->tx_frames) < (packet->tx_packets))
+ tx_set_ic = 1;
+ else
+ tx_set_ic = 0;
+
+ /* Add Context descriptor if needed (for TSO, VLAN cases) */
+ if (axgbe_ctx_desc_setup(pdata, ring, pi))
+ cur++;
+
+ rdata = XGBE_GET_DESC_DATA(ring, cur);
+ rdesc = rdata->rdesc;
+
+ axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x "
+ "ipi_len 0x%x\n", __func__, cur,
+ lower_32_bits(pi->ipi_segs[cur_seg].ds_addr),
+ upper_32_bits(pi->ipi_segs[cur_seg].ds_addr),
+ (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len);
+
+ /* Update buffer address (for TSO this is the header) */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr));
+
+ /* Update the buffer length */
+ if (hlen == 0)
+ hlen = pi->ipi_segs[cur_seg].ds_len;
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, hlen);
+
+ /* VLAN tag insertion check */
+ if (pi->ipi_vtag) {
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
+ TX_NORMAL_DESC2_VLAN_INSERT);
+ }
+
+ /* Mark it as First Descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
+
+ /* Mark it as a NORMAL descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+
+ /*
+ * Set the OWN bit if this is not the first descriptor. For first
+ * descriptor, OWN bit will be set at last so that hardware will
+ * process the descriptors only after the OWN bit for the first
+ * descriptor is set
+ */
+ if (cur != start)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ if (pi->ipi_csum_flags & CSUM_TSO) {
+ /* Enable TSO */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
+
+ tcp_payload_len = pi->ipi_len - hlen;
+
+		/* Set TCP payload length */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
+ tcp_payload_len);
+
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
+ pi->ipi_tcp_hlen/4);
+
+ axgbe_printf(1, "tcp_payload %d tcp_hlen %d\n", tcp_payload_len,
+ pi->ipi_tcp_hlen/4);
+ } else {
+ /* Enable CRC and Pad Insertion */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
+
+		/* Enable HW CSUM */
+ if (pi->ipi_csum_flags)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
+
+ /* Set total length to be transmitted */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, pi->ipi_len);
+ }
+
+ cur++;
+
+ for (cur_seg = 0 ; cur_seg < pi->ipi_nsegs ; cur_seg++) {
+
+ if (cur_seg == 0) {
+ offset = hlen;
+ datalen = pi->ipi_segs[cur_seg].ds_len - hlen;
+ } else {
+ offset = 0;
+ datalen = pi->ipi_segs[cur_seg].ds_len;
+ }
+
+ if (datalen) {
+ rdata = XGBE_GET_DESC_DATA(ring, cur);
+ rdesc = rdata->rdesc;
+
+
+ /* Update buffer address */
+ rdesc->desc0 =
+ cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset));
+ rdesc->desc1 =
+ cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset));
+
+ /* Update the buffer length */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, datalen);
+
+ /* Set OWN bit */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ /* Mark it as NORMAL descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+
+			/* Enable HW CSUM */
+ if (pi->ipi_csum_flags)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
+
+ axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x "
+ "ipi_len 0x%x\n", __func__, cur,
+ lower_32_bits(pi->ipi_segs[cur_seg].ds_addr),
+ upper_32_bits(pi->ipi_segs[cur_seg].ds_addr),
+ (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len);
+
+ cur++;
+ }
+ }
+
+ /* Set LAST bit for the last descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
+
+ /* Set IC bit based on Tx coalescing settings */
+ if (tx_set_ic)
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
+
+ wmb();
+
+ /* Set OWN bit for the first descriptor */
+ rdata = XGBE_GET_DESC_DATA(ring, start);
+ rdesc = rdata->rdesc;
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ ring->cur = pi->ipi_new_pidx = (cur & (ring->rdesc_count - 1));
+
+ axgbe_printf(1, "<-- %s: end cur %d dirty %d\n", __func__, ring->cur,
+ ring->dirty);
+
+ return (0);
+}
+
+static void
+axgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
+{
+ struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ struct xgbe_channel *channel = pdata->channel[txqid];
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata = XGBE_GET_DESC_DATA(ring, pidx);
+
+ axgbe_printf(1, "--> %s: flush txq %d pidx %d cur %d dirty %d\n",
+ __func__, txqid, pidx, ring->cur, ring->dirty);
+
+ MPASS(ring->cur == pidx);
+ if (__predict_false(ring->cur != pidx)) {
+ axgbe_error("--> %s: cur(%d) ne pidx(%d)\n", __func__,
+ ring->cur, pidx);
+ }
+
+ wmb();
+
+ /* Ring Doorbell */
+ if (XGMAC_DMA_IOREAD(channel, DMA_CH_TDTR_LO) !=
+ lower_32_bits(rdata->rdata_paddr)) {
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
+ lower_32_bits(rdata->rdata_paddr));
+ }
+}
+
+static int
+axgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
+{
+ struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
+ struct xgbe_hw_if *hw_if = &sc->pdata.hw_if;
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ struct xgbe_channel *channel = pdata->channel[txqid];
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ int processed = 0;
+
+ axgbe_printf(1, "%s: txq %d clear %d cur %d dirty %d\n",
+ __func__, txqid, clear, ring->cur, ring->dirty);
+
+ if (__predict_false(ring->cur == ring->dirty)) {
+ axgbe_printf(1, "<-- %s: cur(%d) equals dirty(%d)\n",
+ __func__, ring->cur, ring->dirty);
+ return (0);
+ }
+
+ /* Check whether the first dirty descriptor is Tx complete */
+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
+ if (!hw_if->tx_complete(rdata->rdesc)) {
+ axgbe_printf(1, "<-- %s: (dirty %d)\n", __func__, ring->dirty);
+ return (0);
+ }
+
+ /*
+ * If clear is false just let the caller know that there
+ * are descriptors to reclaim
+ */
+ if (!clear) {
+ axgbe_printf(1, "<-- %s: (!clear)\n", __func__);
+ return (1);
+ }
+
+ do {
+ hw_if->tx_desc_reset(rdata);
+ processed++;
+ ring->dirty = (ring->dirty + 1) & (ring->rdesc_count - 1);
+
+ /*
+ * tx_complete will return true for unused descriptors also.
+ * so, check tx_complete only until used descriptors.
+ */
+ if (ring->cur == ring->dirty)
+ break;
+
+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
+ } while (hw_if->tx_complete(rdata->rdesc));
+
+ axgbe_printf(1, "<-- %s: processed %d cur %d dirty %d\n", __func__,
+ processed, ring->cur, ring->dirty);
+
+ return (processed);
+}
+
+static void
+axgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
+{
+ struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ struct xgbe_channel *channel = pdata->channel[iru->iru_qsidx];
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ unsigned int rx_usecs = pdata->rx_usecs;
+ unsigned int rx_frames = pdata->rx_frames;
+ unsigned int inte;
+ uint8_t count = iru->iru_count;
+ int i, j;
+
+ axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d count %d ring cur %d "
+ "dirty %d\n", __func__, iru->iru_qsidx, iru->iru_flidx,
+ iru->iru_pidx, count, ring->cur, ring->dirty);
+
+ for (i = iru->iru_pidx, j = 0 ; j < count ; i++, j++) {
+
+ if (i == XGBE_RX_DESC_CNT_DEFAULT)
+ i = 0;
+
+ rdata = XGBE_GET_DESC_DATA(ring, i);
+ rdesc = rdata->rdesc;
+
+ if (__predict_false(XGMAC_GET_BITS_LE(rdesc->desc3,
+ RX_NORMAL_DESC3, OWN))) {
+			axgbe_error("%s: refill clash, cur %d dirty %d index %d "
+			    "pidx %d\n", __func__, ring->cur, ring->dirty, j, i);
+ }
+
+ /* Assuming split header is enabled */
+ if (iru->iru_flidx == 0) {
+
+ /* Fill header/buffer1 address */
+ rdesc->desc0 =
+ cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
+ rdesc->desc1 =
+ cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));
+ } else {
+
+ /* Fill data/buffer2 address */
+ rdesc->desc2 =
+ cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
+ rdesc->desc3 =
+ cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));
+
+ if (!rx_usecs && !rx_frames) {
+ /* No coalescing, interrupt for every descriptor */
+ inte = 1;
+ } else {
+ /* Set interrupt based on Rx frame coalescing setting */
+ if (rx_frames &&
+ !(((ring->dirty + 1) &(ring->rdesc_count - 1)) % rx_frames))
+ inte = 1;
+ else
+ inte = 0;
+ }
+
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
+
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
+
+ wmb();
+
+ ring->dirty = ((ring->dirty + 1) & (ring->rdesc_count - 1));
+ }
+ }
+
+ axgbe_printf(1, "<-- %s: rxq: %d cur: %d dirty: %d\n", __func__,
+ channel->queue_index, ring->cur, ring->dirty);
+}
+
+static void
+axgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx, qidx_t pidx)
+{
+ struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ struct xgbe_channel *channel = pdata->channel[qsidx];
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+
+ axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d cur %d dirty %d\n",
+ __func__, qsidx, flidx, pidx, ring->cur, ring->dirty);
+
+ if (flidx == 1) {
+
+ rdata = XGBE_GET_DESC_DATA(ring, pidx);
+
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+ lower_32_bits(rdata->rdata_paddr));
+ }
+
+ wmb();
+}
+
+static int
+axgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t idx, qidx_t budget)
+{
+ struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ struct xgbe_channel *channel = pdata->channel[qsidx];
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ unsigned int cur;
+ int count;
+ uint8_t incomplete = 1, context_next = 0, running = 0;
+
+ axgbe_printf(1, "--> %s: rxq %d idx %d budget %d cur %d dirty %d\n",
+ __func__, qsidx, idx, budget, ring->cur, ring->dirty);
+
+ cur = ring->cur;
+ for (count = 0; count <= budget; ) {
+
+ rdata = XGBE_GET_DESC_DATA(ring, cur);
+ rdesc = rdata->rdesc;
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
+ break;
+
+ running = 1;
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
+ incomplete = 0;
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
+ context_next = 1;
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT))
+ context_next = 0;
+
+ cur = (cur + 1) & (ring->rdesc_count - 1);
+
+ if (incomplete || context_next)
+ continue;
+
+ /* Increment pkt count & reset variables for next full packet */
+ count++;
+ incomplete = 1;
+ context_next = 0;
+ running = 0;
+ }
+
+ axgbe_printf(1, "--> %s: rxq %d cur %d incomp %d con_next %d running %d "
+ "count %d\n", __func__, qsidx, cur, incomplete, context_next,
+ running, count);
+
+ return (count);
+}
+
+static unsigned int
+xgbe_rx_buf1_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata,
+ struct xgbe_packet_data *packet)
+{
+
+ /* Always zero if not the first descriptor */
+ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) {
+ axgbe_printf(1, "%s: Not First\n", __func__);
+ return (0);
+ }
+
+ /* First descriptor with split header, return header length */
+ if (rdata->rx.hdr_len) {
+ axgbe_printf(1, "%s: hdr_len %d\n", __func__, rdata->rx.hdr_len);
+ return (rdata->rx.hdr_len);
+ }
+
+ /* First descriptor but not the last descriptor and no split header,
+ * so the full buffer was used
+ */
+ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) {
+ axgbe_printf(1, "%s: Not last %d\n", __func__,
+ pdata->rx_buf_size);
+ return (256);
+ }
+
+ /* First descriptor and last descriptor and no split header, so
+ * calculate how much of the buffer was used
+ */
+ axgbe_printf(1, "%s: pkt_len %d buf_size %d\n", __func__, rdata->rx.len,
+ pdata->rx_buf_size);
+
+ return (min_t(unsigned int, 256, rdata->rx.len));
+}
+
+static unsigned int
+xgbe_rx_buf2_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata,
+ struct xgbe_packet_data *packet, unsigned int len)
+{
+
+ /* Always the full buffer if not the last descriptor */
+ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) {
+ axgbe_printf(1, "%s: Not last %d\n", __func__, pdata->rx_buf_size);
+ return (pdata->rx_buf_size);
+ }
+
+ /* Last descriptor so calculate how much of the buffer was used
+ * for the last bit of data
+ */
+ return ((rdata->rx.len != 0)? (rdata->rx.len - len) : 0);
+}
+
+static inline void
+axgbe_add_frag(struct xgbe_prv_data *pdata, if_rxd_info_t ri, int idx, int len,
+ int pos, int flid)
+{
+ axgbe_printf(2, "idx %d len %d pos %d flid %d\n", idx, len, pos, flid);
+ ri->iri_frags[pos].irf_flid = flid;
+ ri->iri_frags[pos].irf_idx = idx;
+ ri->iri_frags[pos].irf_len = len;
+}
+
+static int
+axgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
+{
+ struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
+ struct xgbe_prv_data *pdata = &sc->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel = pdata->channel[ri->iri_qsidx];
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_packet_data *packet = &ring->packet_data;
+ struct xgbe_ring_data *rdata;
+ unsigned int last, context_next, context;
+ unsigned int buf1_len, buf2_len, max_len, len = 0, prev_cur;
+ int i = 0;
+
+ axgbe_printf(2, "%s: rxq %d cidx %d cur %d dirty %d\n", __func__,
+ ri->iri_qsidx, ri->iri_cidx, ring->cur, ring->dirty);
+
+ memset(packet, 0, sizeof(struct xgbe_packet_data));
+
+ while (1) {
+
+read_again:
+ if (hw_if->dev_read(channel)) {
+ axgbe_printf(2, "<-- %s: OWN bit seen on %d\n",
+ __func__, ring->cur);
+ break;
+ }
+
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ prev_cur = ring->cur;
+ ring->cur = (ring->cur + 1) & (ring->rdesc_count - 1);
+
+ last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ LAST);
+
+ context_next = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, CONTEXT_NEXT);
+
+ context = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, CONTEXT);
+
+ if (!context) {
+ /* Get the data length in the descriptor buffers */
+ buf1_len = xgbe_rx_buf1_len(pdata, rdata, packet);
+ len += buf1_len;
+ buf2_len = xgbe_rx_buf2_len(pdata, rdata, packet, len);
+ len += buf2_len;
+ } else
+ buf1_len = buf2_len = 0;
+
+ if (packet->errors)
+ axgbe_printf(1, "%s: last %d context %d con_next %d buf1 %d "
+ "buf2 %d len %d frags %d error %d\n", __func__, last, context,
+ context_next, buf1_len, buf2_len, len, i, packet->errors);
+
+ axgbe_add_frag(pdata, ri, prev_cur, buf1_len, i, 0);
+ i++;
+ axgbe_add_frag(pdata, ri, prev_cur, buf2_len, i, 1);
+ i++;
+
+ if (!last || context_next)
+ goto read_again;
+
+ break;
+ }
+
+ if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE)) {
+ ri->iri_csum_flags |= CSUM_IP_CHECKED;
+ ri->iri_csum_flags |= CSUM_IP_VALID;
+ axgbe_printf(2, "%s: csum flags 0x%x\n", __func__, ri->iri_csum_flags);
+ }
+
+ max_len = if_getmtu(pdata->netdev) + ETH_HLEN;
+ if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, VLAN_CTAG)) {
+ ri->iri_flags |= M_VLANTAG;
+ ri->iri_vtag = packet->vlan_ctag;
+ max_len += VLAN_HLEN;
+ axgbe_printf(2, "%s: iri_flags 0x%x vtag 0x%x\n", __func__,
+ ri->iri_flags, ri->iri_vtag);
+ }
+
+
+ if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, RSS_HASH)) {
+ ri->iri_flowid = packet->rss_hash;
+ ri->iri_rsstype = packet->rss_hash_type;
+ axgbe_printf(2, "%s: hash 0x%x/0x%x rsstype 0x%x/0x%x\n",
+ __func__, packet->rss_hash, ri->iri_flowid,
+ packet->rss_hash_type, ri->iri_rsstype);
+ }
+
+ if (__predict_false(len == 0))
+ axgbe_error("%s: Zero len packet\n", __func__);
+
+ if (__predict_false(len > max_len))
+ axgbe_error("%s: Big packet %d/%d\n", __func__, len, max_len);
+
+ if (__predict_false(packet->errors))
+ axgbe_printf(1, "<-- %s: rxq: %d len: %d frags: %d cidx %d cur: %d "
+ "dirty: %d error 0x%x\n", __func__, ri->iri_qsidx, len, i,
+ ri->iri_cidx, ring->cur, ring->dirty, packet->errors);
+
+ axgbe_printf(1, "%s: Packet len %d frags %d\n", __func__, len, i);
+
+ ri->iri_len = len;
+ ri->iri_nfrags = i;
+
+ return (0);
+}
diff --git a/sys/dev/axgbe/xgbe.h b/sys/dev/axgbe/xgbe.h
index ee55ef8f0a16..fac642cc16fa 100644
--- a/sys/dev/axgbe/xgbe.h
+++ b/sys/dev/axgbe/xgbe.h
@@ -1,13 +1,13 @@
/*
* AMD 10Gb Ethernet driver
*
+ * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc.
+ *
* This file is available to you under your choice of the following two
* licenses:
*
* License 1: GPLv2
*
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or (at
@@ -56,9 +56,6 @@
*
* License 2: Modified BSD
*
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
@@ -119,13 +116,27 @@
#ifndef __XGBE_H__
#define __XGBE_H__
+#include <sys/param.h>
+#if __FreeBSD_version < 1300000
+#include <sys/kernel.h>
+#endif
+#include <sys/bus.h>
+#include <sys/socket.h>
+#include <sys/bitstring.h>
+
+#include <net/if.h>
+#include <net/if_media.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
#include "xgbe_osdep.h"
/* From linux/dcbnl.h */
#define IEEE_8021QAZ_MAX_TCS 8
#define XGBE_DRV_NAME "amd-xgbe"
-#define XGBE_DRV_VERSION "1.0.2"
+#define XGBE_DRV_VERSION "1.0.3"
#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
/* Descriptor related defines */
@@ -134,6 +145,13 @@
#define XGBE_TX_DESC_MAX_PROC (XGBE_TX_DESC_CNT >> 1)
#define XGBE_RX_DESC_CNT 512
+#define XGBE_TX_DESC_CNT_MIN 64
+#define XGBE_TX_DESC_CNT_MAX 4096
+#define XGBE_RX_DESC_CNT_MIN 64
+#define XGBE_RX_DESC_CNT_MAX 4096
+#define XGBE_TX_DESC_CNT_DEFAULT 512
+#define XGBE_RX_DESC_CNT_DEFAULT 512
+
#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
/* Descriptors required for maximum contiguous TSO/GSO packet */
@@ -150,29 +168,42 @@
#define XGBE_RX_MIN_BUF_SIZE 1522
#define XGBE_RX_BUF_ALIGN 64
#define XGBE_SKB_ALLOC_SIZE 256
-#define XGBE_SPH_HDSMS_SIZE 2 /* Keep in sync with SKB_ALLOC_SIZE */
+#define XGBE_SPH_HDSMS_SIZE	2	/* Keep in sync with SKB_ALLOC_SIZE */
#define XGBE_MAX_DMA_CHANNELS 16
#define XGBE_MAX_QUEUES 16
+#define XGBE_PRIORITY_QUEUES 8
#define XGBE_DMA_STOP_TIMEOUT 5
/* DMA cache settings - Outer sharable, write-back, write-allocate */
-#define XGBE_DMA_OS_AXDOMAIN 0x2
-#define XGBE_DMA_OS_ARCACHE 0xb
-#define XGBE_DMA_OS_AWCACHE 0xf
+#define XGBE_DMA_OS_ARCR 0x002b2b2b
+#define XGBE_DMA_OS_AWCR 0x2f2f2f2f
/* DMA cache settings - System, no caches used */
-#define XGBE_DMA_SYS_AXDOMAIN 0x3
-#define XGBE_DMA_SYS_ARCACHE 0x0
-#define XGBE_DMA_SYS_AWCACHE 0x0
+#define XGBE_DMA_SYS_ARCR 0x00303030
+#define XGBE_DMA_SYS_AWCR 0x30303030
-#define XGBE_DMA_INTERRUPT_MASK 0x31c7
+/* DMA cache settings - PCI device */
+#define XGBE_DMA_PCI_ARCR 0x00000003
+#define XGBE_DMA_PCI_AWCR 0x13131313
+#define XGBE_DMA_PCI_AWARCR 0x00000313
+
+/* DMA channel interrupt modes */
+#define XGBE_IRQ_MODE_EDGE 0
+#define XGBE_IRQ_MODE_LEVEL 1
#define XGMAC_MIN_PACKET 60
#define XGMAC_STD_PACKET_MTU 1500
#define XGMAC_MAX_STD_PACKET 1518
#define XGMAC_JUMBO_PACKET_MTU 9000
#define XGMAC_MAX_JUMBO_PACKET 9018
+#define XGMAC_ETH_PREAMBLE (12 + 8) /* Inter-frame gap + preamble */
+
+#define XGMAC_PFC_DATA_LEN 46
+#define XGMAC_PFC_DELAYS 14000
+
+#define XGMAC_PRIO_QUEUES(_cnt) \
+ min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, (_cnt))
/* Common property names */
#define XGBE_MAC_ADDR_PROPERTY "mac-address"
@@ -194,6 +225,20 @@
#define XGBE_ACPI_DMA_FREQ "amd,dma-freq"
#define XGBE_ACPI_PTP_FREQ "amd,ptp-freq"
+/* PCI BAR mapping */
+#define XGBE_XGMAC_BAR 0
+#define XGBE_XPCS_BAR 1
+#define XGBE_MAC_PROP_OFFSET 0x1d000
+#define XGBE_I2C_CTRL_OFFSET 0x1e000
+
+/* PCI MSI/MSIx support */
+#define XGBE_MSI_BASE_COUNT 4
+#define XGBE_MSI_MIN_COUNT (XGBE_MSI_BASE_COUNT + 1)
+
+/* PCI clock frequencies */
+#define XGBE_V2_DMA_CLOCK_FREQ 500000000 /* 500 MHz */
+#define XGBE_V2_PTP_CLOCK_FREQ 125000000 /* 125 MHz */
+
/* Timestamp support - values based on 50MHz PTP clock
* 50MHz => 20 nsec
*/
@@ -204,6 +249,12 @@
#define XGMAC_DRIVER_CONTEXT 1
#define XGMAC_IOCTL_CONTEXT 2
+#define XGMAC_FIFO_MIN_ALLOC 2048
+#define XGMAC_FIFO_UNIT 256
+#define XGMAC_FIFO_ALIGN(_x) \
+ (((_x) + XGMAC_FIFO_UNIT - 1) & ~(XGMAC_FIFO_UNIT - 1))
+#define XGMAC_FIFO_FC_OFF 2048
+#define XGMAC_FIFO_FC_MIN 4096
#define XGBE_FIFO_MAX 81920
#define XGBE_TC_MIN_QUANTUM 10
@@ -229,6 +280,14 @@
/* Flow control queue count */
#define XGMAC_MAX_FLOW_CONTROL_QUEUES 8
+/* Flow control threshold units */
+#define XGMAC_FLOW_CONTROL_UNIT 512
+#define XGMAC_FLOW_CONTROL_ALIGN(_x) \
+ (((_x) + XGMAC_FLOW_CONTROL_UNIT - 1) & ~(XGMAC_FLOW_CONTROL_UNIT - 1))
+#define XGMAC_FLOW_CONTROL_VALUE(_x) \
+ (((_x) < 1024) ? 0 : ((_x) / XGMAC_FLOW_CONTROL_UNIT) - 2)
+#define XGMAC_FLOW_CONTROL_MAX 33280
+
/* Maximum MAC address hash table size (256 bits = 8 bytes) */
#define XGBE_MAC_HASH_TABLE_SIZE 8
@@ -242,11 +301,26 @@
#define XGBE_AN_MS_TIMEOUT 500
#define XGBE_LINK_TIMEOUT 10
+#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
+#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
+#define XGBE_SGMII_AN_LINK_SPEED_100 0x04
+#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
+#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
+
+/* ECC correctable error notification window (seconds) */
+#define XGBE_ECC_LIMIT 60
+
#define XGBE_AN_INT_CMPLT 0x01
#define XGBE_AN_INC_LINK 0x02
#define XGBE_AN_PG_RCV 0x04
#define XGBE_AN_INT_MASK 0x07
+#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
+#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
+#define XGBE_SGMII_AN_LINK_SPEED_100 0x04
+#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
+#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
+
/* Rate-change complete wait/retry count */
#define XGBE_RATECHANGE_COUNT 500
@@ -281,6 +355,52 @@
#define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3
#define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0
+/* TSO related macros */
+#define XGBE_TSO_MAX_SIZE UINT16_MAX
+
+/* MDIO port types */
+#define XGMAC_MAX_C22_PORT 3
+
+/* Link mode bit operations */
+#define XGBE_ZERO_SUP(_phy) \
+ ((_phy)->supported = 0)
+
+#define XGBE_SET_SUP(_phy, _mode) \
+ ((_phy)->supported |= SUPPORTED_##_mode)
+
+#define XGBE_CLR_SUP(_phy, _mode) \
+ ((_phy)->supported &= ~SUPPORTED_##_mode)
+
+#define XGBE_IS_SUP(_phy, _mode) \
+ ((_phy)->supported & SUPPORTED_##_mode)
+
+#define XGBE_ZERO_ADV(_phy) \
+ ((_phy)->advertising = 0)
+
+#define XGBE_SET_ADV(_phy, _mode) \
+ ((_phy)->advertising |= ADVERTISED_##_mode)
+
+#define XGBE_CLR_ADV(_phy, _mode) \
+ ((_phy)->advertising &= ~ADVERTISED_##_mode)
+
+#define XGBE_ADV(_phy, _mode) \
+ ((_phy)->advertising & ADVERTISED_##_mode)
+
+#define XGBE_ZERO_LP_ADV(_phy) \
+ ((_phy)->lp_advertising = 0)
+
+#define XGBE_SET_LP_ADV(_phy, _mode) \
+ ((_phy)->lp_advertising |= ADVERTISED_##_mode)
+
+#define XGBE_CLR_LP_ADV(_phy, _mode) \
+ ((_phy)->lp_advertising &= ~ADVERTISED_##_mode)
+
+#define XGBE_LP_ADV(_phy, _mode) \
+ ((_phy)->lp_advertising & ADVERTISED_##_mode)
+
+#define XGBE_LM_COPY(_dphy, _dname, _sphy, _sname) \
+ ((_dphy)->_dname = (_sphy)->_sname)
+
struct xgbe_prv_data;
struct xgbe_packet_data {
@@ -293,10 +413,20 @@ struct xgbe_packet_data {
unsigned int rdesc_count;
unsigned int length;
- u64 rx_tstamp;
+ unsigned int header_len;
+ unsigned int tcp_header_len;
+ unsigned int tcp_payload_len;
+ unsigned short mss;
+
+ unsigned short vlan_ctag;
+
+ uint64_t rx_tstamp;
unsigned int tx_packets;
unsigned int tx_bytes;
+
+ uint32_t rss_hash;
+ uint32_t rss_hash_type;
};
/* Common Rx and Tx descriptor mapping */
@@ -327,17 +457,22 @@ struct xgbe_ring_data {
struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
bus_addr_t rdata_paddr;
- bus_dma_tag_t mbuf_dmat;
- bus_dmamap_t mbuf_map;
- bus_addr_t mbuf_hdr_paddr;
- bus_addr_t mbuf_data_paddr;
- bus_size_t mbuf_len;
-
- int mbuf_free;
- struct mbuf *mb;
-
struct xgbe_tx_ring_data tx; /* Tx-related data */
struct xgbe_rx_ring_data rx; /* Rx-related data */
+
+
+ /* Incomplete receive save location. If the budget is exhausted
+ * or the last descriptor (last normal descriptor or a following
+ * context descriptor) has not been DMA'd yet the current state
+ * of the receive processing needs to be saved.
+ */
+ unsigned int state_saved;
+ struct {
+ struct mbuf *m;
+ unsigned int len;
+ unsigned int error;
+ } state;
+
};
struct xgbe_ring {
@@ -349,14 +484,9 @@ struct xgbe_ring {
/* Virtual/DMA addresses and count of allocated descriptor memory */
struct xgbe_ring_desc *rdesc;
- bus_dmamap_t rdesc_map;
- bus_dma_tag_t rdesc_dmat;
bus_addr_t rdesc_paddr;
unsigned int rdesc_count;
- bus_dma_tag_t mbuf_dmat;
- bus_dmamap_t mbuf_map;
-
/* Array of descriptor data corresponding the descriptor memory
* (always use the XGBE_GET_DESC_DATA macro to access this data)
*/
@@ -364,9 +494,9 @@ struct xgbe_ring {
/* Ring index values
* cur - Tx: index of descriptor to be used for current transfer
- * Rx: index of descriptor to check for packet availability
+ * Rx: index of descriptor to check for packet availability
* dirty - Tx: index of descriptor to check for transfer complete
- * Rx: index of descriptor to check for buffer reallocation
+ * Rx: index of descriptor to check for buffer reallocation
*/
unsigned int cur;
unsigned int dirty;
@@ -382,6 +512,10 @@ struct xgbe_ring {
unsigned short cur_vlan_ctag;
} tx;
};
+
+ uint16_t prev_pidx;
+ uint8_t prev_count;
+
} __aligned(CACHE_LINE_SIZE);
/* Structure used to describe the descriptor rings associated with
@@ -397,11 +531,14 @@ struct xgbe_channel {
unsigned int queue_index;
bus_space_tag_t dma_tag;
bus_space_handle_t dma_handle;
+ int dma_irq_rid;
/* Per channel interrupt irq number */
struct resource *dma_irq_res;
void *dma_irq_tag;
+ /* Per channel interrupt enablement tracker */
+ unsigned int curr_ier;
unsigned int saved_ier;
struct xgbe_ring *tx_ring;
@@ -412,6 +549,7 @@ enum xgbe_state {
XGBE_DOWN,
XGBE_LINK_INIT,
XGBE_LINK_ERR,
+ XGBE_STOPPED,
};
enum xgbe_int {
@@ -431,6 +569,12 @@ enum xgbe_int_state {
XGMAC_INT_STATE_RESTORE,
};
+enum xgbe_ecc_sec {
+ XGBE_ECC_SEC_TX,
+ XGBE_ECC_SEC_RX,
+ XGBE_ECC_SEC_DESC,
+};
+
enum xgbe_speed {
XGBE_SPEED_1000 = 0,
XGBE_SPEED_2500,
@@ -438,6 +582,19 @@ enum xgbe_speed {
XGBE_SPEEDS,
};
+enum xgbe_xpcs_access {
+ XGBE_XPCS_ACCESS_V1 = 0,
+ XGBE_XPCS_ACCESS_V2,
+};
+
+enum xgbe_an_mode {
+ XGBE_AN_MODE_CL73 = 0,
+ XGBE_AN_MODE_CL73_REDRV,
+ XGBE_AN_MODE_CL37,
+ XGBE_AN_MODE_CL37_SGMII,
+ XGBE_AN_MODE_NONE,
+};
+
enum xgbe_an {
XGBE_AN_READY = 0,
XGBE_AN_PAGE_RECEIVED,
@@ -457,6 +614,13 @@ enum xgbe_rx {
enum xgbe_mode {
XGBE_MODE_KR = 0,
XGBE_MODE_KX,
+ XGBE_MODE_KX_1000,
+ XGBE_MODE_KX_2500,
+ XGBE_MODE_X,
+ XGBE_MODE_SGMII_100,
+ XGBE_MODE_SGMII_1000,
+ XGBE_MODE_SFI,
+ XGBE_MODE_UNKNOWN,
};
enum xgbe_speedset {
@@ -464,10 +628,16 @@ enum xgbe_speedset {
XGBE_SPEEDSET_2500_10000,
};
+enum xgbe_mdio_mode {
+ XGBE_MDIO_MODE_NONE = 0,
+ XGBE_MDIO_MODE_CL22,
+ XGBE_MDIO_MODE_CL45,
+};
+
struct xgbe_phy {
- u32 supported;
- u32 advertising;
- u32 lp_advertising;
+ uint32_t supported;
+ uint32_t advertising;
+ uint32_t lp_advertising;
int address;
@@ -480,65 +650,115 @@ struct xgbe_phy {
int pause_autoneg;
int tx_pause;
int rx_pause;
+
+ int pause;
+ int asym_pause;
+};
+
+enum xgbe_i2c_cmd {
+ XGBE_I2C_CMD_READ = 0,
+ XGBE_I2C_CMD_WRITE,
+};
+
+struct xgbe_i2c_op {
+ enum xgbe_i2c_cmd cmd;
+
+ unsigned int target;
+
+ void *buf;
+ unsigned int len;
+};
+
+struct xgbe_i2c_op_state {
+ struct xgbe_i2c_op *op;
+
+ unsigned int tx_len;
+ unsigned char *tx_buf;
+
+ unsigned int rx_len;
+ unsigned char *rx_buf;
+
+ unsigned int tx_abort_source;
+
+ int ret;
+};
+
+struct xgbe_i2c {
+ unsigned int started;
+ unsigned int max_speed_mode;
+ unsigned int rx_fifo_size;
+ unsigned int tx_fifo_size;
+
+ struct xgbe_i2c_op_state op_state;
};
struct xgbe_mmc_stats {
/* Tx Stats */
- u64 txoctetcount_gb;
- u64 txframecount_gb;
- u64 txbroadcastframes_g;
- u64 txmulticastframes_g;
- u64 tx64octets_gb;
- u64 tx65to127octets_gb;
- u64 tx128to255octets_gb;
- u64 tx256to511octets_gb;
- u64 tx512to1023octets_gb;
- u64 tx1024tomaxoctets_gb;
- u64 txunicastframes_gb;
- u64 txmulticastframes_gb;
- u64 txbroadcastframes_gb;
- u64 txunderflowerror;
- u64 txoctetcount_g;
- u64 txframecount_g;
- u64 txpauseframes;
- u64 txvlanframes_g;
+ uint64_t txoctetcount_gb;
+ uint64_t txframecount_gb;
+ uint64_t txbroadcastframes_g;
+ uint64_t txmulticastframes_g;
+ uint64_t tx64octets_gb;
+ uint64_t tx65to127octets_gb;
+ uint64_t tx128to255octets_gb;
+ uint64_t tx256to511octets_gb;
+ uint64_t tx512to1023octets_gb;
+ uint64_t tx1024tomaxoctets_gb;
+ uint64_t txunicastframes_gb;
+ uint64_t txmulticastframes_gb;
+ uint64_t txbroadcastframes_gb;
+ uint64_t txunderflowerror;
+ uint64_t txoctetcount_g;
+ uint64_t txframecount_g;
+ uint64_t txpauseframes;
+ uint64_t txvlanframes_g;
/* Rx Stats */
- u64 rxframecount_gb;
- u64 rxoctetcount_gb;
- u64 rxoctetcount_g;
- u64 rxbroadcastframes_g;
- u64 rxmulticastframes_g;
- u64 rxcrcerror;
- u64 rxrunterror;
- u64 rxjabbererror;
- u64 rxundersize_g;
- u64 rxoversize_g;
- u64 rx64octets_gb;
- u64 rx65to127octets_gb;
- u64 rx128to255octets_gb;
- u64 rx256to511octets_gb;
- u64 rx512to1023octets_gb;
- u64 rx1024tomaxoctets_gb;
- u64 rxunicastframes_g;
- u64 rxlengtherror;
- u64 rxoutofrangetype;
- u64 rxpauseframes;
- u64 rxfifooverflow;
- u64 rxvlanframes_gb;
- u64 rxwatchdogerror;
+ uint64_t rxframecount_gb;
+ uint64_t rxoctetcount_gb;
+ uint64_t rxoctetcount_g;
+ uint64_t rxbroadcastframes_g;
+ uint64_t rxmulticastframes_g;
+ uint64_t rxcrcerror;
+ uint64_t rxrunterror;
+ uint64_t rxjabbererror;
+ uint64_t rxundersize_g;
+ uint64_t rxoversize_g;
+ uint64_t rx64octets_gb;
+ uint64_t rx65to127octets_gb;
+ uint64_t rx128to255octets_gb;
+ uint64_t rx256to511octets_gb;
+ uint64_t rx512to1023octets_gb;
+ uint64_t rx1024tomaxoctets_gb;
+ uint64_t rxunicastframes_g;
+ uint64_t rxlengtherror;
+ uint64_t rxoutofrangetype;
+ uint64_t rxpauseframes;
+ uint64_t rxfifooverflow;
+ uint64_t rxvlanframes_gb;
+ uint64_t rxwatchdogerror;
};
struct xgbe_ext_stats {
- u64 tx_tso_packets;
- u64 rx_split_header_packets;
- u64 rx_buffer_unavailable;
+ uint64_t tx_tso_packets;
+ uint64_t rx_split_header_packets;
+ uint64_t rx_buffer_unavailable;
+
+ uint64_t txq_packets[XGBE_MAX_DMA_CHANNELS];
+ uint64_t txq_bytes[XGBE_MAX_DMA_CHANNELS];
+ uint64_t rxq_packets[XGBE_MAX_DMA_CHANNELS];
+ uint64_t rxq_bytes[XGBE_MAX_DMA_CHANNELS];
+
+ uint64_t tx_vxlan_packets;
+ uint64_t rx_vxlan_packets;
+ uint64_t rx_csum_errors;
+ uint64_t rx_vxlan_csum_errors;
};
struct xgbe_hw_if {
int (*tx_complete)(struct xgbe_ring_desc *);
- int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
+ int (*set_mac_address)(struct xgbe_prv_data *, uint8_t *addr);
int (*config_rx_mode)(struct xgbe_prv_data *);
int (*enable_rx_csum)(struct xgbe_prv_data *);
@@ -552,9 +772,15 @@ struct xgbe_hw_if {
int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
- int (*set_gmii_speed)(struct xgbe_prv_data *);
- int (*set_gmii_2500_speed)(struct xgbe_prv_data *);
- int (*set_xgmii_speed)(struct xgbe_prv_data *);
+ int (*set_speed)(struct xgbe_prv_data *, int);
+
+ int (*set_ext_mii_mode)(struct xgbe_prv_data *, unsigned int,
+ enum xgbe_mdio_mode);
+ int (*read_ext_mii_regs)(struct xgbe_prv_data *, int, int);
+ int (*write_ext_mii_regs)(struct xgbe_prv_data *, int, int, uint16_t);
+
+ int (*set_gpio)(struct xgbe_prv_data *, unsigned int);
+ int (*clr_gpio)(struct xgbe_prv_data *, unsigned int);
void (*enable_tx)(struct xgbe_prv_data *);
void (*disable_tx)(struct xgbe_prv_data *);
@@ -571,16 +797,12 @@ struct xgbe_hw_if {
int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
- void (*dev_xmit)(struct xgbe_channel *);
int (*dev_read)(struct xgbe_channel *);
void (*tx_desc_init)(struct xgbe_channel *);
void (*rx_desc_init)(struct xgbe_channel *);
void (*tx_desc_reset)(struct xgbe_ring_data *);
- void (*rx_desc_reset)(struct xgbe_prv_data *, struct xgbe_ring_data *,
- unsigned int);
int (*is_last_desc)(struct xgbe_ring_desc *);
int (*is_context_desc)(struct xgbe_ring_desc *);
- void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *);
/* For FLOW ctrl */
int (*config_tx_flow_control)(struct xgbe_prv_data *);
@@ -603,25 +825,84 @@ struct xgbe_hw_if {
/* For TX DMA Operate on Second Frame config */
int (*config_osp_mode)(struct xgbe_prv_data *);
- /* For RX and TX PBL config */
- int (*config_rx_pbl_val)(struct xgbe_prv_data *);
- int (*get_rx_pbl_val)(struct xgbe_prv_data *);
- int (*config_tx_pbl_val)(struct xgbe_prv_data *);
- int (*get_tx_pbl_val)(struct xgbe_prv_data *);
- int (*config_pblx8)(struct xgbe_prv_data *);
-
/* For MMC statistics */
void (*rx_mmc_int)(struct xgbe_prv_data *);
void (*tx_mmc_int)(struct xgbe_prv_data *);
void (*read_mmc_stats)(struct xgbe_prv_data *);
/* For Receive Side Scaling */
+ int (*enable_rss)(struct xgbe_prv_data *);
int (*disable_rss)(struct xgbe_prv_data *);
+ int (*set_rss_hash_key)(struct xgbe_prv_data *, const uint8_t *);
+ int (*set_rss_lookup_table)(struct xgbe_prv_data *, const uint32_t *);
+};
+
+/* This structure represents implementation specific routines for an
+ * implementation of a PHY. All routines are required unless noted below.
+ * Optional routines:
+ * an_pre, an_post
+ * kr_training_pre, kr_training_post
+ * module_info, module_eeprom
+ */
+struct xgbe_phy_impl_if {
+ /* Perform Setup/teardown actions */
+ int (*init)(struct xgbe_prv_data *);
+ void (*exit)(struct xgbe_prv_data *);
+
+ /* Perform start/stop specific actions */
+ int (*reset)(struct xgbe_prv_data *);
+ int (*start)(struct xgbe_prv_data *);
+ void (*stop)(struct xgbe_prv_data *);
+
+ /* Return the link status */
+ int (*link_status)(struct xgbe_prv_data *, int *);
+
+ /* Indicate if a particular speed is valid */
+ bool (*valid_speed)(struct xgbe_prv_data *, int);
+
+ /* Check if the specified mode can/should be used */
+ bool (*use_mode)(struct xgbe_prv_data *, enum xgbe_mode);
+ /* Switch the PHY into various modes */
+ void (*set_mode)(struct xgbe_prv_data *, enum xgbe_mode);
+ /* Retrieve mode needed for a specific speed */
+ enum xgbe_mode (*get_mode)(struct xgbe_prv_data *, int);
+ /* Retrieve new/next mode when trying to auto-negotiate */
+ enum xgbe_mode (*switch_mode)(struct xgbe_prv_data *);
+ /* Retrieve current mode */
+ enum xgbe_mode (*cur_mode)(struct xgbe_prv_data *);
+ /* Retrieve interface sub-type */
+ void (*get_type)(struct xgbe_prv_data *, struct ifmediareq *);
+
+ /* Retrieve current auto-negotiation mode */
+ enum xgbe_an_mode (*an_mode)(struct xgbe_prv_data *);
+
+ /* Configure auto-negotiation settings */
+ int (*an_config)(struct xgbe_prv_data *);
+
+ /* Set/override auto-negotiation advertisement settings */
+ void (*an_advertising)(struct xgbe_prv_data *,
+ struct xgbe_phy *);
+
+ /* Process results of auto-negotiation */
+ enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);
+
+ /* Pre/Post auto-negotiation support */
+ void (*an_pre)(struct xgbe_prv_data *);
+ void (*an_post)(struct xgbe_prv_data *);
+
+ /* Pre/Post KR training enablement support */
+ void (*kr_training_pre)(struct xgbe_prv_data *);
+ void (*kr_training_post)(struct xgbe_prv_data *);
+
+ /* SFP module related info */
+ int (*module_info)(struct xgbe_prv_data *pdata);
+ int (*module_eeprom)(struct xgbe_prv_data *pdata);
};
struct xgbe_phy_if {
- /* For initial PHY setup */
- void (*phy_init)(struct xgbe_prv_data *);
+ /* For PHY setup/teardown */
+ int (*phy_init)(struct xgbe_prv_data *);
+ void (*phy_exit)(struct xgbe_prv_data *);
/* For PHY support when setting device up/down */
int (*phy_reset)(struct xgbe_prv_data *);
@@ -631,6 +912,30 @@ struct xgbe_phy_if {
/* For PHY support while device is up */
void (*phy_status)(struct xgbe_prv_data *);
int (*phy_config_aneg)(struct xgbe_prv_data *);
+
+ /* For PHY settings validation */
+ bool (*phy_valid_speed)(struct xgbe_prv_data *, int);
+
+ /* For single interrupt support */
+ void (*an_isr)(struct xgbe_prv_data *);
+
+ /* PHY implementation specific services */
+ struct xgbe_phy_impl_if phy_impl;
+};
+
+struct xgbe_i2c_if {
+ /* For initial I2C setup */
+ int (*i2c_init)(struct xgbe_prv_data *);
+
+ /* For I2C support when setting device up/down */
+ int (*i2c_start)(struct xgbe_prv_data *);
+ void (*i2c_stop)(struct xgbe_prv_data *);
+
+ /* For performing I2C operations */
+ int (*i2c_xfer)(struct xgbe_prv_data *, struct xgbe_i2c_op *);
+
+ /* For single interrupt support */
+ void (*i2c_isr)(struct xgbe_prv_data *);
};
struct xgbe_desc_if {
@@ -666,6 +971,7 @@ struct xgbe_hw_features {
unsigned int addn_mac; /* Additional MAC Addresses */
unsigned int ts_src; /* Timestamp Source */
unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
+ unsigned int vxn; /* VXLAN/NVGRE */
/* HW Feature Register1 */
unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
@@ -690,12 +996,31 @@ struct xgbe_hw_features {
unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
};
+struct xgbe_version_data {
+ void (*init_function_ptrs_phy_impl)(struct xgbe_phy_if *);
+ enum xgbe_xpcs_access xpcs_access;
+ unsigned int mmc_64bit;
+ unsigned int tx_max_fifo_size;
+ unsigned int rx_max_fifo_size;
+ unsigned int tx_tstamp_workaround;
+ unsigned int ecc_support;
+ unsigned int i2c_support;
+ unsigned int irq_reissue_support;
+ unsigned int tx_desc_prefetch;
+ unsigned int rx_desc_prefetch;
+ unsigned int an_cdr_workaround;
+};
+
struct xgbe_prv_data {
struct ifnet *netdev;
+
struct platform_device *pdev;
struct acpi_device *adev;
device_t dev;
+ /* Version related data */
+ struct xgbe_version_data *vdata;
+
/* ACPI or DT flag */
unsigned int use_acpi;
@@ -706,59 +1031,124 @@ struct xgbe_prv_data {
struct resource *sir0_res; /* SerDes integration registers (1/2) */
struct resource *sir1_res; /* SerDes integration registers (2/2) */
+ /* Port property registers */
+ unsigned int pp0;
+ unsigned int pp1;
+ unsigned int pp2;
+ unsigned int pp3;
+ unsigned int pp4;
+
/* DMA tag */
bus_dma_tag_t dmat;
/* XPCS indirect addressing lock */
spinlock_t xpcs_lock;
+ unsigned int xpcs_window_def_reg;
+ unsigned int xpcs_window_sel_reg;
+ unsigned int xpcs_window;
+ unsigned int xpcs_window_size;
+ unsigned int xpcs_window_mask;
+
+ /* RSS addressing mutex */
+ struct mtx rss_mutex;
/* Flags representing xgbe_state */
unsigned long dev_state;
- struct resource *dev_irq_res;
- struct resource *chan_irq_res[4];
+ /* ECC support */
+ unsigned long tx_sec_period;
+ unsigned long tx_ded_period;
+ unsigned long rx_sec_period;
+ unsigned long rx_ded_period;
+ unsigned long desc_sec_period;
+ unsigned long desc_ded_period;
+
+ unsigned int tx_sec_count;
+ unsigned int tx_ded_count;
+ unsigned int rx_sec_count;
+ unsigned int rx_ded_count;
+ unsigned int desc_ded_count;
+ unsigned int desc_sec_count;
+
+ struct if_irq dev_irq;
+
+ struct resource *dev_irq_res;
+ struct resource *ecc_irq_res;
+ struct resource *i2c_irq_res;
+ struct resource *an_irq_res;
+
+ int ecc_rid;
+ int i2c_rid;
+ int an_rid;
+
void *dev_irq_tag;
+ void *ecc_irq_tag;
+ void *i2c_irq_tag;
+ void *an_irq_tag;
+
+ struct resource *chan_irq_res[XGBE_MAX_DMA_CHANNELS];
+
unsigned int per_channel_irq;
+ unsigned int irq_count;
+ unsigned int channel_irq_count;
+ unsigned int channel_irq_mode;
+ char ecc_name[IFNAMSIZ + 32];
+
+ unsigned int isr_as_tasklet;
struct xgbe_hw_if hw_if;
struct xgbe_phy_if phy_if;
struct xgbe_desc_if desc_if;
+ struct xgbe_i2c_if i2c_if;
/* AXI DMA settings */
unsigned int coherent;
- unsigned int axdomain;
- unsigned int arcache;
- unsigned int awcache;
+ unsigned int arcr;
+ unsigned int awcr;
+ unsigned int awarcr;
/* Service routine support */
struct taskqueue *dev_workqueue;
struct task service_work;
struct callout service_timer;
+ struct mtx timer_mutex;
/* Rings for Tx/Rx on