path: root/sys/dev/igc
author		Peter Grehan <grehan@FreeBSD.org>	2021-07-12 04:50:15 +0000
committer	Peter Grehan <grehan@FreeBSD.org>	2021-08-15 10:33:54 +0000
commit		d7388d33b4ddffb8900b511ae767875916dd5126 (patch)
tree		d6c29d5ec228b0938a18a4008829d19285f21066 /sys/dev/igc
parent		64085efb677fbfcf76235a1a9dcb497819e2b72f (diff)
download	src-d7388d33b4ddffb8900b511ae767875916dd5126.tar.gz
		src-d7388d33b4ddffb8900b511ae767875916dd5126.zip
MFC 517904de5cca: igc(4): Introduce new driver for the Intel I225 Ethernet controller.
This controller supports 2.5G/1G/100M/10M speeds, and allows tx/rx checksum
offload, TSO, LRO, and multi-queue operation.

The driver was derived from code contributed by Intel, and modified by
Netgate to fit into the iflib framework.

Thanks to Mike Karels for testing and feedback on the driver.

Reviewed by:	bcr (manpages), kbowling, scottl, erj
Relnotes:	yes
Sponsored by:	Rubicon Communications, LLC ("Netgate")
Differential Revision:	https://reviews.freebsd.org/D30668

(cherry picked from commit 517904de5ccac643589c71ac0d2751797f89e4f9)
Diffstat (limited to 'sys/dev/igc')
-rw-r--r--	sys/dev/igc/if_igc.c		2984
-rw-r--r--	sys/dev/igc/if_igc.h		 430
-rw-r--r--	sys/dev/igc/igc_api.c		 735
-rw-r--r--	sys/dev/igc/igc_api.h		  58
-rw-r--r--	sys/dev/igc/igc_base.c		 188
-rw-r--r--	sys/dev/igc/igc_base.h		 131
-rw-r--r--	sys/dev/igc/igc_defines.h	1347
-rw-r--r--	sys/dev/igc/igc_hw.h		 548
-rw-r--r--	sys/dev/igc/igc_i225.c		1232
-rw-r--r--	sys/dev/igc/igc_i225.h		 112
-rw-r--r--	sys/dev/igc/igc_mac.c		1050
-rw-r--r--	sys/dev/igc/igc_mac.h		  48
-rw-r--r--	sys/dev/igc/igc_nvm.c		 721
-rw-r--r--	sys/dev/igc/igc_nvm.h		  32
-rw-r--r--	sys/dev/igc/igc_osdep.h		 133
-rw-r--r--	sys/dev/igc/igc_phy.c		1109
-rw-r--r--	sys/dev/igc/igc_phy.h		 134
-rw-r--r--	sys/dev/igc/igc_regs.h		 424
-rw-r--r--	sys/dev/igc/igc_txrx.c		 580
19 files changed, 11996 insertions, 0 deletions
diff --git a/sys/dev/igc/if_igc.c b/sys/dev/igc/if_igc.c
new file mode 100644
index 000000000000..6d94a7c223c7
--- /dev/null
+++ b/sys/dev/igc/if_igc.c
@@ -0,0 +1,2984 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
+ * All rights reserved.
+ * Copyright (c) 2021 Rubicon Communications, LLC (Netgate)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "if_igc.h"
+#include <sys/sbuf.h>
+#include <machine/_inttypes.h>
+
+#ifdef RSS
+#include <net/rss_config.h>
+#include <netinet/in_rss.h>
+#endif
+
+/*********************************************************************
+ * PCI Device ID Table
+ *
+ * Used by probe to select devices to load on
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, String }
+ *********************************************************************/
+
+static pci_vendor_info_t igc_vendor_info_array[] =
+{
+ /* Intel(R) PRO/1000 Network Connection - igc */
+ PVID(0x8086, IGC_DEV_ID_I225_LM, "Intel(R) Ethernet Controller I225-LM"),
+ PVID(0x8086, IGC_DEV_ID_I225_V, "Intel(R) Ethernet Controller I225-V"),
+ PVID(0x8086, IGC_DEV_ID_I225_K, "Intel(R) Ethernet Controller I225-K"),
+ PVID(0x8086, IGC_DEV_ID_I225_I, "Intel(R) Ethernet Controller I225-I"),
+ PVID(0x8086, IGC_DEV_ID_I220_V, "Intel(R) Ethernet Controller I220-V"),
+ PVID(0x8086, IGC_DEV_ID_I225_K2, "Intel(R) Ethernet Controller I225-K(2)"),
+ PVID(0x8086, IGC_DEV_ID_I225_LMVP, "Intel(R) Ethernet Controller I225-LMvP(2)"),
+ PVID(0x8086, IGC_DEV_ID_I226_K, "Intel(R) Ethernet Controller I226-K"),
+ PVID(0x8086, IGC_DEV_ID_I225_IT, "Intel(R) Ethernet Controller I225-IT(2)"),
+ PVID(0x8086, IGC_DEV_ID_I226_LM, "Intel(R) Ethernet Controller I226-LM"),
+ PVID(0x8086, IGC_DEV_ID_I226_V, "Intel(R) Ethernet Controller I226-V"),
+ PVID(0x8086, IGC_DEV_ID_I226_IT, "Intel(R) Ethernet Controller I226-IT"),
+ PVID(0x8086, IGC_DEV_ID_I221_V, "Intel(R) Ethernet Controller I221-V"),
+ PVID(0x8086, IGC_DEV_ID_I226_BLANK_NVM, "Intel(R) Ethernet Controller I226(blankNVM)"),
+ PVID(0x8086, IGC_DEV_ID_I225_BLANK_NVM, "Intel(R) Ethernet Controller I225(blankNVM)"),
+ /* required last entry */
+ PVID_END
+};
+
+/*********************************************************************
+ * Function prototypes
+ *********************************************************************/
+static void *igc_register(device_t dev);
+static int igc_if_attach_pre(if_ctx_t ctx);
+static int igc_if_attach_post(if_ctx_t ctx);
+static int igc_if_detach(if_ctx_t ctx);
+static int igc_if_shutdown(if_ctx_t ctx);
+static int igc_if_suspend(if_ctx_t ctx);
+static int igc_if_resume(if_ctx_t ctx);
+
+static int igc_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
+static int igc_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets);
+static void igc_if_queues_free(if_ctx_t ctx);
+
+static uint64_t igc_if_get_counter(if_ctx_t, ift_counter);
+static void igc_if_init(if_ctx_t ctx);
+static void igc_if_stop(if_ctx_t ctx);
+static void igc_if_media_status(if_ctx_t, struct ifmediareq *);
+static int igc_if_media_change(if_ctx_t ctx);
+static int igc_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
+static void igc_if_timer(if_ctx_t ctx, uint16_t qid);
+static void igc_if_vlan_register(if_ctx_t ctx, u16 vtag);
+static void igc_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
+static void igc_if_watchdog_reset(if_ctx_t ctx);
+static bool igc_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
+
+static void igc_identify_hardware(if_ctx_t ctx);
+static int igc_allocate_pci_resources(if_ctx_t ctx);
+static void igc_free_pci_resources(if_ctx_t ctx);
+static void igc_reset(if_ctx_t ctx);
+static int igc_setup_interface(if_ctx_t ctx);
+static int igc_setup_msix(if_ctx_t ctx);
+
+static void igc_initialize_transmit_unit(if_ctx_t ctx);
+static void igc_initialize_receive_unit(if_ctx_t ctx);
+
+static void igc_if_intr_enable(if_ctx_t ctx);
+static void igc_if_intr_disable(if_ctx_t ctx);
+static int igc_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
+static int igc_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
+static void igc_if_multi_set(if_ctx_t ctx);
+static void igc_if_update_admin_status(if_ctx_t ctx);
+static void igc_if_debug(if_ctx_t ctx);
+static void igc_update_stats_counters(struct igc_adapter *);
+static void igc_add_hw_stats(struct igc_adapter *adapter);
+static int igc_if_set_promisc(if_ctx_t ctx, int flags);
+static void igc_setup_vlan_hw_support(struct igc_adapter *);
+static int igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
+static void igc_print_nvm_info(struct igc_adapter *);
+static int igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
+static int igc_get_rs(SYSCTL_HANDLER_ARGS);
+static void igc_print_debug_info(struct igc_adapter *);
+static int igc_is_valid_ether_addr(u8 *);
+static int igc_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
+static void igc_add_int_delay_sysctl(struct igc_adapter *, const char *,
+ const char *, struct igc_int_delay_info *, int, int);
+/* Management and WOL Support */
+static void igc_get_hw_control(struct igc_adapter *);
+static void igc_release_hw_control(struct igc_adapter *);
+static void igc_get_wakeup(if_ctx_t ctx);
+static void igc_enable_wakeup(if_ctx_t ctx);
+
+int igc_intr(void *arg);
+
+/* MSI-X handlers */
+static int igc_if_msix_intr_assign(if_ctx_t, int);
+static int igc_msix_link(void *);
+static void igc_handle_link(void *context);
+
+static int igc_set_flowcntl(SYSCTL_HANDLER_ARGS);
+static int igc_sysctl_eee(SYSCTL_HANDLER_ARGS);
+
+static int igc_get_regs(SYSCTL_HANDLER_ARGS);
+
+static void igc_configure_queues(struct igc_adapter *adapter);
+
+
+/*********************************************************************
+ * FreeBSD Device Interface Entry Points
+ *********************************************************************/
+static device_method_t igc_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_register, igc_register),
+ DEVMETHOD(device_probe, iflib_device_probe),
+ DEVMETHOD(device_attach, iflib_device_attach),
+ DEVMETHOD(device_detach, iflib_device_detach),
+ DEVMETHOD(device_shutdown, iflib_device_shutdown),
+ DEVMETHOD(device_suspend, iflib_device_suspend),
+ DEVMETHOD(device_resume, iflib_device_resume),
+ DEVMETHOD_END
+};
+
+static driver_t igc_driver = {
+ "igc", igc_methods, sizeof(struct igc_adapter),
+};
+
+static devclass_t igc_devclass;
+DRIVER_MODULE(igc, pci, igc_driver, igc_devclass, 0, 0);
+
+MODULE_DEPEND(igc, pci, 1, 1, 1);
+MODULE_DEPEND(igc, ether, 1, 1, 1);
+MODULE_DEPEND(igc, iflib, 1, 1, 1);
+
+IFLIB_PNP_INFO(pci, igc, igc_vendor_info_array);
+
+static device_method_t igc_if_methods[] = {
+ DEVMETHOD(ifdi_attach_pre, igc_if_attach_pre),
+ DEVMETHOD(ifdi_attach_post, igc_if_attach_post),
+ DEVMETHOD(ifdi_detach, igc_if_detach),
+ DEVMETHOD(ifdi_shutdown, igc_if_shutdown),
+ DEVMETHOD(ifdi_suspend, igc_if_suspend),
+ DEVMETHOD(ifdi_resume, igc_if_resume),
+ DEVMETHOD(ifdi_init, igc_if_init),
+ DEVMETHOD(ifdi_stop, igc_if_stop),
+ DEVMETHOD(ifdi_msix_intr_assign, igc_if_msix_intr_assign),
+ DEVMETHOD(ifdi_intr_enable, igc_if_intr_enable),
+ DEVMETHOD(ifdi_intr_disable, igc_if_intr_disable),
+ DEVMETHOD(ifdi_tx_queues_alloc, igc_if_tx_queues_alloc),
+ DEVMETHOD(ifdi_rx_queues_alloc, igc_if_rx_queues_alloc),
+ DEVMETHOD(ifdi_queues_free, igc_if_queues_free),
+ DEVMETHOD(ifdi_update_admin_status, igc_if_update_admin_status),
+ DEVMETHOD(ifdi_multi_set, igc_if_multi_set),
+ DEVMETHOD(ifdi_media_status, igc_if_media_status),
+ DEVMETHOD(ifdi_media_change, igc_if_media_change),
+ DEVMETHOD(ifdi_mtu_set, igc_if_mtu_set),
+ DEVMETHOD(ifdi_promisc_set, igc_if_set_promisc),
+ DEVMETHOD(ifdi_timer, igc_if_timer),
+ DEVMETHOD(ifdi_watchdog_reset, igc_if_watchdog_reset),
+ DEVMETHOD(ifdi_vlan_register, igc_if_vlan_register),
+ DEVMETHOD(ifdi_vlan_unregister, igc_if_vlan_unregister),
+ DEVMETHOD(ifdi_get_counter, igc_if_get_counter),
+ DEVMETHOD(ifdi_rx_queue_intr_enable, igc_if_rx_queue_intr_enable),
+ DEVMETHOD(ifdi_tx_queue_intr_enable, igc_if_tx_queue_intr_enable),
+ DEVMETHOD(ifdi_debug, igc_if_debug),
+ DEVMETHOD(ifdi_needs_restart, igc_if_needs_restart),
+ DEVMETHOD_END
+};
+
+static driver_t igc_if_driver = {
+ "igc_if", igc_if_methods, sizeof(struct igc_adapter)
+};
+
+/*********************************************************************
+ * Tunable default values.
+ *********************************************************************/
+
+#define IGC_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
+#define IGC_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
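+/*
+ * These helpers assume the interrupt delay registers (TIDV/RDTR/TADV/RADV)
+ * count in 1.024 usec ticks and convert between ticks and microseconds with
+ * rounding; e.g. IGC_USECS_TO_TICKS(128) yields (1000 * 128 + 512) / 1024 = 125.
+ */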
+
+#define MAX_INTS_PER_SEC 8000
+#define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256))
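+/*
+ * With MAX_INTS_PER_SEC = 8000 this evaluates to 1000000000 / (8000 * 256)
+ * = 488; assuming the ITR register counts in 256 ns units, that is an
+ * interval of roughly 125 usec, i.e. about 8000 interrupts per second.
+ */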
+
+/* Allow common code without TSO */
+#ifndef CSUM_TSO
+#define CSUM_TSO 0
+#endif
+
+static SYSCTL_NODE(_hw, OID_AUTO, igc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
+ "igc driver parameters");
+
+static int igc_disable_crc_stripping = 0;
+SYSCTL_INT(_hw_igc, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN,
+ &igc_disable_crc_stripping, 0, "Disable CRC Stripping");
+
+static int igc_tx_int_delay_dflt = IGC_TICKS_TO_USECS(IGC_TIDV_VAL);
+static int igc_rx_int_delay_dflt = IGC_TICKS_TO_USECS(IGC_RDTR_VAL);
+SYSCTL_INT(_hw_igc, OID_AUTO, tx_int_delay, CTLFLAG_RDTUN, &igc_tx_int_delay_dflt,
+ 0, "Default transmit interrupt delay in usecs");
+SYSCTL_INT(_hw_igc, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &igc_rx_int_delay_dflt,
+ 0, "Default receive interrupt delay in usecs");
+
+static int igc_tx_abs_int_delay_dflt = IGC_TICKS_TO_USECS(IGC_TADV_VAL);
+static int igc_rx_abs_int_delay_dflt = IGC_TICKS_TO_USECS(IGC_RADV_VAL);
+SYSCTL_INT(_hw_igc, OID_AUTO, tx_abs_int_delay, CTLFLAG_RDTUN,
+ &igc_tx_abs_int_delay_dflt, 0,
+ "Default transmit interrupt delay limit in usecs");
+SYSCTL_INT(_hw_igc, OID_AUTO, rx_abs_int_delay, CTLFLAG_RDTUN,
+ &igc_rx_abs_int_delay_dflt, 0,
+ "Default receive interrupt delay limit in usecs");
+
+static int igc_smart_pwr_down = false;
+SYSCTL_INT(_hw_igc, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &igc_smart_pwr_down,
+ 0, "Set to true to leave smart power down enabled on newer adapters");
+
+/* Controls whether promiscuous also shows bad packets */
+static int igc_debug_sbp = true;
+SYSCTL_INT(_hw_igc, OID_AUTO, sbp, CTLFLAG_RDTUN, &igc_debug_sbp, 0,
+ "Show bad packets in promiscuous mode");
+
+/* How many packets rxeof tries to clean at a time */
+static int igc_rx_process_limit = 100;
+SYSCTL_INT(_hw_igc, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
+ &igc_rx_process_limit, 0,
+ "Maximum number of received packets to process "
+ "at a time, -1 means unlimited");
+
+/* Energy efficient ethernet - default to OFF */
+static int igc_eee_setting = 1;
+SYSCTL_INT(_hw_igc, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &igc_eee_setting, 0,
+ "Enable Energy Efficient Ethernet");
+
+/*
+** Tuneable Interrupt rate
+*/
+static int igc_max_interrupt_rate = 8000;
+SYSCTL_INT(_hw_igc, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
+ &igc_max_interrupt_rate, 0, "Maximum interrupts per second");
+
+extern struct if_txrx igc_txrx;
+
+static struct if_shared_ctx igc_sctx_init = {
+ .isc_magic = IFLIB_MAGIC,
+ .isc_q_align = PAGE_SIZE,
+ .isc_tx_maxsize = IGC_TSO_SIZE + sizeof(struct ether_vlan_header),
+ .isc_tx_maxsegsize = PAGE_SIZE,
+ .isc_tso_maxsize = IGC_TSO_SIZE + sizeof(struct ether_vlan_header),
+ .isc_tso_maxsegsize = IGC_TSO_SEG_SIZE,
+ .isc_rx_maxsize = MAX_JUMBO_FRAME_SIZE,
+ .isc_rx_nsegments = 1,
+ .isc_rx_maxsegsize = MJUM9BYTES,
+ .isc_nfl = 1,
+ .isc_nrxqs = 1,
+ .isc_ntxqs = 1,
+ .isc_admin_intrcnt = 1,
+ .isc_vendor_info = igc_vendor_info_array,
+ .isc_driver_version = "1",
+ .isc_driver = &igc_if_driver,
+ .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,
+
+ .isc_nrxd_min = {IGC_MIN_RXD},
+ .isc_ntxd_min = {IGC_MIN_TXD},
+ .isc_nrxd_max = {IGC_MAX_RXD},
+ .isc_ntxd_max = {IGC_MAX_TXD},
+ .isc_nrxd_default = {IGC_DEFAULT_RXD},
+ .isc_ntxd_default = {IGC_DEFAULT_TXD},
+};
+
+/*****************************************************************
+ *
+ * Dump Registers
+ *
+ ****************************************************************/
+#define IGC_REGS_LEN 739
+
+static int igc_get_regs(SYSCTL_HANDLER_ARGS)
+{
+ struct igc_adapter *adapter = (struct igc_adapter *)arg1;
+ struct igc_hw *hw = &adapter->hw;
+ struct sbuf *sb;
+ u32 *regs_buff;
+ int rc;
+
+ regs_buff = malloc(sizeof(u32) * IGC_REGS_LEN, M_DEVBUF, M_WAITOK);
+ memset(regs_buff, 0, IGC_REGS_LEN * sizeof(u32));
+
+ rc = sysctl_wire_old_buffer(req, 0);
+ MPASS(rc == 0);
+ if (rc != 0) {
+ free(regs_buff, M_DEVBUF);
+ return (rc);
+ }
+
+ sb = sbuf_new_for_sysctl(NULL, NULL, 32*400, req);
+ MPASS(sb != NULL);
+ if (sb == NULL) {
+ free(regs_buff, M_DEVBUF);
+ return (ENOMEM);
+ }
+
+ /* General Registers */
+ regs_buff[0] = IGC_READ_REG(hw, IGC_CTRL);
+ regs_buff[1] = IGC_READ_REG(hw, IGC_STATUS);
+ regs_buff[2] = IGC_READ_REG(hw, IGC_CTRL_EXT);
+ regs_buff[3] = IGC_READ_REG(hw, IGC_ICR);
+ regs_buff[4] = IGC_READ_REG(hw, IGC_RCTL);
+ regs_buff[5] = IGC_READ_REG(hw, IGC_RDLEN(0));
+ regs_buff[6] = IGC_READ_REG(hw, IGC_RDH(0));
+ regs_buff[7] = IGC_READ_REG(hw, IGC_RDT(0));
+ regs_buff[8] = IGC_READ_REG(hw, IGC_RXDCTL(0));
+ regs_buff[9] = IGC_READ_REG(hw, IGC_RDBAL(0));
+ regs_buff[10] = IGC_READ_REG(hw, IGC_RDBAH(0));
+ regs_buff[11] = IGC_READ_REG(hw, IGC_TCTL);
+ regs_buff[12] = IGC_READ_REG(hw, IGC_TDBAL(0));
+ regs_buff[13] = IGC_READ_REG(hw, IGC_TDBAH(0));
+ regs_buff[14] = IGC_READ_REG(hw, IGC_TDLEN(0));
+ regs_buff[15] = IGC_READ_REG(hw, IGC_TDH(0));
+ regs_buff[16] = IGC_READ_REG(hw, IGC_TDT(0));
+ regs_buff[17] = IGC_READ_REG(hw, IGC_TXDCTL(0));
+
+ sbuf_printf(sb, "General Registers\n");
+ sbuf_printf(sb, "\tCTRL\t %08x\n", regs_buff[0]);
+ sbuf_printf(sb, "\tSTATUS\t %08x\n", regs_buff[1]);
+ sbuf_printf(sb, "\tCTRL_EXIT\t %08x\n\n", regs_buff[2]);
+
+ sbuf_printf(sb, "Interrupt Registers\n");
+ sbuf_printf(sb, "\tICR\t %08x\n\n", regs_buff[3]);
+
+ sbuf_printf(sb, "RX Registers\n");
+ sbuf_printf(sb, "\tRCTL\t %08x\n", regs_buff[4]);
+ sbuf_printf(sb, "\tRDLEN\t %08x\n", regs_buff[5]);
+ sbuf_printf(sb, "\tRDH\t %08x\n", regs_buff[6]);
+ sbuf_printf(sb, "\tRDT\t %08x\n", regs_buff[7]);
+ sbuf_printf(sb, "\tRXDCTL\t %08x\n", regs_buff[8]);
+ sbuf_printf(sb, "\tRDBAL\t %08x\n", regs_buff[9]);
+ sbuf_printf(sb, "\tRDBAH\t %08x\n\n", regs_buff[10]);
+
+ sbuf_printf(sb, "TX Registers\n");
+ sbuf_printf(sb, "\tTCTL\t %08x\n", regs_buff[11]);
+ sbuf_printf(sb, "\tTDBAL\t %08x\n", regs_buff[12]);
+ sbuf_printf(sb, "\tTDBAH\t %08x\n", regs_buff[13]);
+ sbuf_printf(sb, "\tTDLEN\t %08x\n", regs_buff[14]);
+ sbuf_printf(sb, "\tTDH\t %08x\n", regs_buff[15]);
+ sbuf_printf(sb, "\tTDT\t %08x\n", regs_buff[16]);
+ sbuf_printf(sb, "\tTXDCTL\t %08x\n", regs_buff[17]);
+ sbuf_printf(sb, "\tTDFH\t %08x\n", regs_buff[18]);
+ sbuf_printf(sb, "\tTDFT\t %08x\n", regs_buff[19]);
+ sbuf_printf(sb, "\tTDFHS\t %08x\n", regs_buff[20]);
+ sbuf_printf(sb, "\tTDFPC\t %08x\n\n", regs_buff[21]);
+
+ free(regs_buff, M_DEVBUF);
+
+#ifdef DUMP_DESCS
+ {
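+ /*
+ * This debug-only block assumes rx_que, tx_que and buf are made
+ * available by the surrounding scope when DUMP_DESCS is defined;
+ * they are not declared in this function.
+ */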
+ if_softc_ctx_t scctx = adapter->shared;
+ struct rx_ring *rxr = &rx_que->rxr;
+ struct tx_ring *txr = &tx_que->txr;
+ int ntxd = scctx->isc_ntxd[0];
+ int nrxd = scctx->isc_nrxd[0];
+ int j;
+
+ for (j = 0; j < nrxd; j++) {
+ u32 staterr = le32toh(rxr->rx_base[j].wb.upper.status_error);
+ u32 length = le32toh(rxr->rx_base[j].wb.upper.length);
+ sbuf_printf(sb, "\tReceive Descriptor Address %d: %08" PRIx64 " Error:%d Length:%d\n", j, rxr->rx_base[j].read.buffer_addr, staterr, length);
+ }
+
+ for (j = 0; j < min(ntxd, 256); j++) {
+ unsigned int *ptr = (unsigned int *)&txr->tx_base[j];
+
+ sbuf_printf(sb, "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x [3]: %08x eop: %d DD=%d\n",
+ j, ptr[0], ptr[1], ptr[2], ptr[3], buf->eop,
+ buf->eop != -1 ? txr->tx_base[buf->eop].upper.fields.status & IGC_TXD_STAT_DD : 0);
+
+ }
+ }
+#endif
+
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+ return(rc);
+}
+
+static void *
+igc_register(device_t dev)
+{
+ return (&igc_sctx_init);
+}
+
+static int
+igc_set_num_queues(if_ctx_t ctx)
+{
+ int maxqueues;
+
+ maxqueues = 4;
+
+ return (maxqueues);
+}
+
+#define IGC_CAPS \
+ IFCAP_HWCSUM | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | \
+ IFCAP_VLAN_HWCSUM | IFCAP_WOL | IFCAP_VLAN_HWFILTER | IFCAP_TSO4 | \
+ IFCAP_LRO | IFCAP_VLAN_HWTSO | IFCAP_JUMBO_MTU | IFCAP_HWCSUM_IPV6 |\
+ IFCAP_TSO6
+
+/*********************************************************************
+ * Device initialization routine
+ *
+ * The attach entry point is called when the driver is being loaded.
+ * This routine identifies the type of hardware, allocates all resources
+ * and initializes the hardware.
+ *
+ * return 0 on success, positive on failure
+ *********************************************************************/
+static int
+igc_if_attach_pre(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter;
+ if_softc_ctx_t scctx;
+ device_t dev;
+ struct igc_hw *hw;
+ int error = 0;
+
+ INIT_DEBUGOUT("igc_if_attach_pre: begin");
+ dev = iflib_get_dev(ctx);
+ adapter = iflib_get_softc(ctx);
+
+ adapter->ctx = adapter->osdep.ctx = ctx;
+ adapter->dev = adapter->osdep.dev = dev;
+ scctx = adapter->shared = iflib_get_softc_ctx(ctx);
+ adapter->media = iflib_get_media(ctx);
+ hw = &adapter->hw;
+
+ adapter->tx_process_limit = scctx->isc_ntxd[0];
+
+ /* SYSCTL stuff */
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "nvm", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ adapter, 0, igc_sysctl_nvm_info, "I", "NVM Information");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ adapter, 0, igc_sysctl_debug_info, "I", "Debug Information");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ adapter, 0, igc_set_flowcntl, "I", "Flow Control");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "reg_dump",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
+ igc_get_regs, "A", "Dump Registers");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "rs_dump",
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
+ igc_get_rs, "I", "Dump RS indexes");
+
+ /* Determine hardware and mac info */
+ igc_identify_hardware(ctx);
+
+ scctx->isc_tx_nsegments = IGC_MAX_SCATTER;
+ scctx->isc_nrxqsets_max = scctx->isc_ntxqsets_max = igc_set_num_queues(ctx);
+ if (bootverbose)
+ device_printf(dev, "attach_pre capping queues at %d\n",
+ scctx->isc_ntxqsets_max);
+
+ scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(union igc_adv_tx_desc), IGC_DBA_ALIGN);
+ scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union igc_adv_rx_desc), IGC_DBA_ALIGN);
+ scctx->isc_txd_size[0] = sizeof(union igc_adv_tx_desc);
+ scctx->isc_rxd_size[0] = sizeof(union igc_adv_rx_desc);
+ scctx->isc_txrx = &igc_txrx;
+ scctx->isc_tx_tso_segments_max = IGC_MAX_SCATTER;
+ scctx->isc_tx_tso_size_max = IGC_TSO_SIZE;
+ scctx->isc_tx_tso_segsize_max = IGC_TSO_SEG_SIZE;
+ scctx->isc_capabilities = scctx->isc_capenable = IGC_CAPS;
+ scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_TSO |
+ CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_SCTP | CSUM_IP6_SCTP;
+
+ /*
+ ** Some new devices, as with ixgbe, now may
+ ** use a different BAR, so we need to keep
+ ** track of which is used.
+ */
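+ /*
+ * If that BAR reads back as zero it is not implemented, so assume the
+ * MSI-X table lives behind the next BAR (4 bytes further into config
+ * space), which is what the fallback below selects.
+ */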
+ scctx->isc_msix_bar = PCIR_BAR(IGC_MSIX_BAR);
+ if (pci_read_config(dev, scctx->isc_msix_bar, 4) == 0)
+ scctx->isc_msix_bar += 4;
+
+ /* Setup PCI resources */
+ if (igc_allocate_pci_resources(ctx)) {
+ device_printf(dev, "Allocation of PCI resources failed\n");
+ error = ENXIO;
+ goto err_pci;
+ }
+
+ /* Do Shared Code initialization */
+ error = igc_setup_init_funcs(hw, true);
+ if (error) {
+ device_printf(dev, "Setup of Shared code failed, error %d\n",
+ error);
+ error = ENXIO;
+ goto err_pci;
+ }
+
+ igc_setup_msix(ctx);
+ igc_get_bus_info(hw);
+
+ /* Set up some sysctls for the tunable interrupt delays */
+ igc_add_int_delay_sysctl(adapter, "rx_int_delay",
+ "receive interrupt delay in usecs", &adapter->rx_int_delay,
+ IGC_REGISTER(hw, IGC_RDTR), igc_rx_int_delay_dflt);
+ igc_add_int_delay_sysctl(adapter, "tx_int_delay",
+ "transmit interrupt delay in usecs", &adapter->tx_int_delay,
+ IGC_REGISTER(hw, IGC_TIDV), igc_tx_int_delay_dflt);
+ igc_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
+ "receive interrupt delay limit in usecs",
+ &adapter->rx_abs_int_delay,
+ IGC_REGISTER(hw, IGC_RADV),
+ igc_rx_abs_int_delay_dflt);
+ igc_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
+ "transmit interrupt delay limit in usecs",
+ &adapter->tx_abs_int_delay,
+ IGC_REGISTER(hw, IGC_TADV),
+ igc_tx_abs_int_delay_dflt);
+ igc_add_int_delay_sysctl(adapter, "itr",
+ "interrupt delay limit in usecs/4",
+ &adapter->tx_itr,
+ IGC_REGISTER(hw, IGC_ITR),
+ DEFAULT_ITR);
+
+ hw->mac.autoneg = DO_AUTO_NEG;
+ hw->phy.autoneg_wait_to_complete = false;
+ hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+
+ /* Copper options */
+ if (hw->phy.media_type == igc_media_type_copper) {
+ hw->phy.mdix = AUTO_ALL_MODES;
+ }
+
+ /*
+ * Set the frame limits assuming
+ * standard ethernet sized frames.
+ */
+ scctx->isc_max_frame_size = adapter->hw.mac.max_frame_size =
+ ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
+
+ /* Allocate multicast array memory. */
+ adapter->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN *
+ MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
+ if (adapter->mta == NULL) {
+ device_printf(dev, "Can not allocate multicast setup array\n");
+ error = ENOMEM;
+ goto err_late;
+ }
+
+ /* Check SOL/IDER usage */
+ if (igc_check_reset_block(hw))
+ device_printf(dev, "PHY reset is blocked"
+ " due to SOL/IDER session.\n");
+
+ /* Sysctl for setting Energy Efficient Ethernet */
+ adapter->hw.dev_spec._i225.eee_disable = igc_eee_setting;
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "eee_control",
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ adapter, 0, igc_sysctl_eee, "I",
+ "Disable Energy Efficient Ethernet");
+
+ /*
+ ** Start from a known state; this is
+ ** important for the NVM and MAC
+ ** address reads that follow.
+ */
+ igc_reset_hw(hw);
+
+ /* Make sure we have a good EEPROM before we read from it */
+ if (igc_validate_nvm_checksum(hw) < 0) {
+ /*
+ ** Some PCI-E parts fail the first check due to
+ ** the link being in sleep state; call it again and,
+ ** if it fails a second time, it's a real issue.
+ */
+ if (igc_validate_nvm_checksum(hw) < 0) {
+ device_printf(dev,
+ "The EEPROM Checksum Is Not Valid\n");
+ error = EIO;
+ goto err_late;
+ }
+ }
+
+ /* Copy the permanent MAC address out of the EEPROM */
+ if (igc_read_mac_addr(hw) < 0) {
+ device_printf(dev, "EEPROM read error while reading MAC"
+ " address\n");
+ error = EIO;
+ goto err_late;
+ }
+
+ if (!igc_is_valid_ether_addr(hw->mac.addr)) {
+ device_printf(dev, "Invalid MAC address\n");
+ error = EIO;
+ goto err_late;
+ }
+
+ /*
+ * Get Wake-on-Lan and Management info for later use
+ */
+ igc_get_wakeup(ctx);
+
+ /* Enable only WOL MAGIC by default */
+ scctx->isc_capenable &= ~IFCAP_WOL;
+ if (adapter->wol != 0)
+ scctx->isc_capenable |= IFCAP_WOL_MAGIC;
+
+ iflib_set_mac(ctx, hw->mac.addr);
+
+ return (0);
+
+err_late:
+ igc_release_hw_control(adapter);
+err_pci:
+ igc_free_pci_resources(ctx);
+ free(adapter->mta, M_DEVBUF);
+
+ return (error);
+}
+
+static int
+igc_if_attach_post(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_hw *hw = &adapter->hw;
+ int error = 0;
+
+ /* Setup OS specific network interface */
+ error = igc_setup_interface(ctx);
+ if (error != 0) {
+ goto err_late;
+ }
+
+ igc_reset(ctx);
+
+ /* Initialize statistics */
+ igc_update_stats_counters(adapter);
+ hw->mac.get_link_status = true;
+ igc_if_update_admin_status(ctx);
+ igc_add_hw_stats(adapter);
+
+ /* the driver can now take control from firmware */
+ igc_get_hw_control(adapter);
+
+ INIT_DEBUGOUT("igc_if_attach_post: end");
+
+ return (error);
+
+err_late:
+ igc_release_hw_control(adapter);
+ igc_free_pci_resources(ctx);
+ igc_if_queues_free(ctx);
+ free(adapter->mta, M_DEVBUF);
+
+ return (error);
+}
+
+/*********************************************************************
+ * Device removal routine
+ *
+ * The detach entry point is called when the driver is being removed.
+ * This routine stops the adapter and deallocates all the resources
+ * that were allocated for driver operation.
+ *
+ * return 0 on success, positive on failure
+ *********************************************************************/
+static int
+igc_if_detach(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+
+ INIT_DEBUGOUT("igc_if_detach: begin");
+
+ igc_phy_hw_reset(&adapter->hw);
+
+ igc_release_hw_control(adapter);
+ igc_free_pci_resources(ctx);
+
+ return (0);
+}
+
+/*********************************************************************
+ *
+ * Shutdown entry point
+ *
+ **********************************************************************/
+
+static int
+igc_if_shutdown(if_ctx_t ctx)
+{
+ return igc_if_suspend(ctx);
+}
+
+/*
+ * Suspend/resume device methods.
+ */
+static int
+igc_if_suspend(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+
+ igc_release_hw_control(adapter);
+ igc_enable_wakeup(ctx);
+ return (0);
+}
+
+static int
+igc_if_resume(if_ctx_t ctx)
+{
+ igc_if_init(ctx);
+
+ return(0);
+}
+
+static int
+igc_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
+{
+ int max_frame_size;
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);
+
+ IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
+
+ /* 9K jumbo frame: 9216-byte payload + 14-byte header + 4-byte CRC = 9234 */
+ max_frame_size = 9234;
+
+ if (mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
+ return (EINVAL);
+ }
+
+ scctx->isc_max_frame_size = adapter->hw.mac.max_frame_size =
+ mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ return (0);
+}
+
+/*********************************************************************
+ * Init entry point
+ *
+ * This routine is used in two ways. It is used by the stack as
+ * init entry point in network interface structure. It is also used
+ * by the driver as a hw/sw initialization routine to get to a
+ * consistent state.
+ *
+ **********************************************************************/
+static void
+igc_if_init(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = adapter->shared;
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ struct igc_tx_queue *tx_que;
+ int i;
+
+ INIT_DEBUGOUT("igc_if_init: begin");
+
+ /* Get the latest mac address, User can use a LAA */
+ bcopy(if_getlladdr(ifp), adapter->hw.mac.addr,
+ ETHER_ADDR_LEN);
+
+ /* Put the address into the Receive Address Array */
+ igc_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+
+ /* Initialize the hardware */
+ igc_reset(ctx);
+ igc_if_update_admin_status(ctx);
+
+ for (i = 0, tx_que = adapter->tx_queues; i < adapter->tx_num_queues; i++, tx_que++) {
+ struct tx_ring *txr = &tx_que->txr;
+
+ txr->tx_rs_cidx = txr->tx_rs_pidx;
+
+ /* Initialize the last processed descriptor to be the end of
+ * the ring, rather than the start, so that we avoid an
+ * off-by-one error when calculating how many descriptors are
+ * done in the credits_update function.
+ */
+ txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
+ }
+
+ /* Setup VLAN support, basic and offload if available */
+ IGC_WRITE_REG(&adapter->hw, IGC_VET, ETHERTYPE_VLAN);
+
+ /* Prepare transmit descriptors and buffers */
+ igc_initialize_transmit_unit(ctx);
+
+ /* Setup Multicast table */
+ igc_if_multi_set(ctx);
+
+ adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
+ igc_initialize_receive_unit(ctx);
+
+ /* Use real VLAN Filter support? */
+ if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
+ if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
+ /* Use real VLAN Filter support */
+ igc_setup_vlan_hw_support(adapter);
+ else {
+ u32 ctrl;
+ ctrl = IGC_READ_REG(&adapter->hw, IGC_CTRL);
+ ctrl |= IGC_CTRL_VME;
+ IGC_WRITE_REG(&adapter->hw, IGC_CTRL, ctrl);
+ }
+ }
+
+ /* Don't lose promiscuous settings */
+ igc_if_set_promisc(ctx, IFF_PROMISC);
+ igc_clear_hw_cntrs_base_generic(&adapter->hw);
+
+ if (adapter->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */
+ igc_configure_queues(adapter);
+
+ /* this clears any pending interrupts */
+ IGC_READ_REG(&adapter->hw, IGC_ICR);
+ IGC_WRITE_REG(&adapter->hw, IGC_ICS, IGC_ICS_LSC);
+
+ /* the driver can now take control from firmware */
+ igc_get_hw_control(adapter);
+
+ /* Set Energy Efficient Ethernet */
+ igc_set_eee_i225(&adapter->hw, true, true, true);
+}
+
+/*********************************************************************
+ *
+ * Fast Legacy/MSI Combined Interrupt Service routine
+ *
+ *********************************************************************/
+int
+igc_intr(void *arg)
+{
+ struct igc_adapter *adapter = arg;
+ if_ctx_t ctx = adapter->ctx;
+ u32 reg_icr;
+
+ reg_icr = IGC_READ_REG(&adapter->hw, IGC_ICR);
+
+ /* Hot eject? */
+ if (reg_icr == 0xffffffff)
+ return FILTER_STRAY;
+
+ /* Definitely not our interrupt. */
+ if (reg_icr == 0x0)
+ return FILTER_STRAY;
+
+ if ((reg_icr & IGC_ICR_INT_ASSERTED) == 0)
+ return FILTER_STRAY;
+
+ /*
+ * Only MSI-X interrupts have one-shot behavior by taking advantage
+ * of the EIAC register. Thus, explicitly disable interrupts. This
+ * also works around the MSI message reordering errata on certain
+ * systems.
+ */
+ IFDI_INTR_DISABLE(ctx);
+
+ /* Link status change */
+ if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC))
+ igc_handle_link(ctx);
+
+ if (reg_icr & IGC_ICR_RXO)
+ adapter->rx_overruns++;
+
+ return (FILTER_SCHEDULE_THREAD);
+}
+
+static int
+igc_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_rx_queue *rxq = &adapter->rx_queues[rxqid];
+
+ IGC_WRITE_REG(&adapter->hw, IGC_EIMS, rxq->eims);
+ return (0);
+}
+
+static int
+igc_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_tx_queue *txq = &adapter->tx_queues[txqid];
+
+ IGC_WRITE_REG(&adapter->hw, IGC_EIMS, txq->eims);
+ return (0);
+}
+
+/*********************************************************************
+ *
+ * MSI-X RX Interrupt Service routine
+ *
+ **********************************************************************/
+static int
+igc_msix_que(void *arg)
+{
+ struct igc_rx_queue *que = arg;
+
+ ++que->irqs;
+
+ return (FILTER_SCHEDULE_THREAD);
+}
+
+/*********************************************************************
+ *
+ * MSI-X Link Fast Interrupt Service routine
+ *
+ **********************************************************************/
+static int
+igc_msix_link(void *arg)
+{
+ struct igc_adapter *adapter = arg;
+ u32 reg_icr;
+
+ ++adapter->link_irq;
+ MPASS(adapter->hw.back != NULL);
+ reg_icr = IGC_READ_REG(&adapter->hw, IGC_ICR);
+
+ if (reg_icr & IGC_ICR_RXO)
+ adapter->rx_overruns++;
+
+ if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
+ igc_handle_link(adapter->ctx);
+ }
+
+ IGC_WRITE_REG(&adapter->hw, IGC_IMS, IGC_IMS_LSC);
+ IGC_WRITE_REG(&adapter->hw, IGC_EIMS, adapter->link_mask);
+
+ return (FILTER_HANDLED);
+}
+
+static void
+igc_handle_link(void *context)
+{
+ if_ctx_t ctx = context;
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+
+ adapter->hw.mac.get_link_status = true;
+ iflib_admin_intr_deferred(ctx);
+}
+
+/*********************************************************************
+ *
+ * Media Ioctl callback
+ *
+ * This routine is called whenever the user queries the status of
+ * the interface using ifconfig.
+ *
+ **********************************************************************/
+static void
+igc_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+
+ INIT_DEBUGOUT("igc_if_media_status: begin");
+
+ iflib_admin_intr_deferred(ctx);
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ if (!adapter->link_active) {
+ return;
+ }
+
+ ifmr->ifm_status |= IFM_ACTIVE;
+
+ switch (adapter->link_speed) {
+ case 10:
+ ifmr->ifm_active |= IFM_10_T;
+ break;
+ case 100:
+ ifmr->ifm_active |= IFM_100_TX;
+ break;
+ case 1000:
+ ifmr->ifm_active |= IFM_1000_T;
+ break;
+ case 2500:
+ ifmr->ifm_active |= IFM_2500_T;
+ break;
+ }
+
+ if (adapter->link_duplex == FULL_DUPLEX)
+ ifmr->ifm_active |= IFM_FDX;
+ else
+ ifmr->ifm_active |= IFM_HDX;
+}
+
+/*********************************************************************
+ *
+ * Media Ioctl callback
+ *
+ * This routine is called when the user changes speed/duplex using
+ * media/mediaopt options with ifconfig.
+ *
+ **********************************************************************/
+static int
+igc_if_media_change(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct ifmedia *ifm = iflib_get_media(ctx);
+
+ INIT_DEBUGOUT("igc_if_media_change: begin");
+
+ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+ return (EINVAL);
+
+ adapter->hw.mac.autoneg = DO_AUTO_NEG;
+
+ switch (IFM_SUBTYPE(ifm->ifm_media)) {
+ case IFM_AUTO:
+ adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+ break;
+ case IFM_2500_T:
+ adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
+ break;
+ case IFM_1000_T:
+ adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ case IFM_100_TX:
+ if ((ifm->ifm_media & IFM_GMASK) == IFM_HDX)
+ adapter->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
+ else
+ adapter->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
+ break;
+ case IFM_10_T:
+ if ((ifm->ifm_media & IFM_GMASK) == IFM_HDX)
+ adapter->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
+ else
+ adapter->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
+ break;
+ default:
+ device_printf(adapter->dev, "Unsupported media type\n");
+ }
+
+ igc_if_init(ctx);
+
+ return (0);
+}
+
+static int
+igc_if_set_promisc(if_ctx_t ctx, int flags)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ u32 reg_rctl;
+ int mcnt = 0;
+
+ reg_rctl = IGC_READ_REG(&adapter->hw, IGC_RCTL);
+ reg_rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_UPE);
+ if (flags & IFF_ALLMULTI)
+ mcnt = MAX_NUM_MULTICAST_ADDRESSES;
+ else
+ mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
+
+ /* Don't disable if in MAX groups */
+ if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
+ reg_rctl &= (~IGC_RCTL_MPE);
+ IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl);
+
+ if (flags & IFF_PROMISC) {
+ reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
+ /* Turn this on if you want to see bad packets */
+ if (igc_debug_sbp)
+ reg_rctl |= IGC_RCTL_SBP;
+ IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl);
+ } else if (flags & IFF_ALLMULTI) {
+ reg_rctl |= IGC_RCTL_MPE;
+ reg_rctl &= ~IGC_RCTL_UPE;
+ IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl);
+ }
+ return (0);
+}
+
+static u_int
+igc_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx)
+{
+ u8 *mta = arg;
+
+ if (idx == MAX_NUM_MULTICAST_ADDRESSES)
+ return (0);
+
+ bcopy(LLADDR(sdl), &mta[idx * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
+
+ return (1);
+}
+
+/*********************************************************************
+ * Multicast Update
+ *
+ * This routine is called whenever multicast address list is updated.
+ *
+ **********************************************************************/
+
+static void
+igc_if_multi_set(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ u8 *mta; /* Multicast array memory */
+ u32 reg_rctl = 0;
+ int mcnt = 0;
+
+ IOCTL_DEBUGOUT("igc_set_multi: begin");
+
+ mta = adapter->mta;
+ bzero(mta, sizeof(u8) * ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
+
+ mcnt = if_foreach_llmaddr(ifp, igc_copy_maddr, mta);
+
+ reg_rctl = IGC_READ_REG(&adapter->hw, IGC_RCTL);
+
+ if (if_getflags(ifp) & IFF_PROMISC) {
+ reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
+ /* Turn this on if you want to see bad packets */
+ if (igc_debug_sbp)
+ reg_rctl |= IGC_RCTL_SBP;
+ } else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
+ if_getflags(ifp) & IFF_ALLMULTI) {
+ reg_rctl |= IGC_RCTL_MPE;
+ reg_rctl &= ~IGC_RCTL_UPE;
+ } else
+ reg_rctl &= ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
+
+ IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl);
+
+ if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
+ igc_update_mc_addr_list(&adapter->hw, mta, mcnt);
+}
+
+/*********************************************************************
+ * Timer routine
+ *
+ * This routine schedules igc_if_update_admin_status() to check for
+ * link status and to gather statistics as well as to perform some
+ * controller-specific hardware patting.
+ *
+ **********************************************************************/
+static void
+igc_if_timer(if_ctx_t ctx, uint16_t qid)
+{
+
+ if (qid != 0)
+ return;
+
+ iflib_admin_intr_deferred(ctx);
+}
+
+static void
+igc_if_update_admin_status(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_hw *hw = &adapter->hw;
+ device_t dev = iflib_get_dev(ctx);
+ u32 link_check, thstat, ctrl;
+
+ link_check = thstat = ctrl = 0;
+ /* Get the cached link value or read phy for real */
+ switch (hw->phy.media_type) {
+ case igc_media_type_copper:
+ if (hw->mac.get_link_status == true) {
+ /* Do the work to read phy */
+ igc_check_for_link(hw);
+ link_check = !hw->mac.get_link_status;
+ } else
+ link_check = true;
+ break;
+ case igc_media_type_unknown:
+ igc_check_for_link(hw);
+ link_check = !hw->mac.get_link_status;
+ /* FALLTHROUGH */
+ default:
+ break;
+ }
+
+ /* Now check for a transition */
+ if (link_check && (adapter->link_active == 0)) {
+ igc_get_speed_and_duplex(hw, &adapter->link_speed,
+ &adapter->link_duplex);
+ if (bootverbose)
+ device_printf(dev, "Link is up %d Mbps %s\n",
+ adapter->link_speed,
+ ((adapter->link_duplex == FULL_DUPLEX) ?
+ "Full Duplex" : "Half Duplex"));
+ adapter->link_active = 1;
+ iflib_link_state_change(ctx, LINK_STATE_UP,
+ IF_Mbps(adapter->link_speed));
+ } else if (!link_check && (adapter->link_active == 1)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ adapter->link_active = 0;
+ iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
+ }
+ igc_update_stats_counters(adapter);
+}
+
+static void
+igc_if_watchdog_reset(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+
+ /*
+ * Just count the event; iflib(4) will already trigger a
+ * sufficient reset of the controller.
+ */
+ adapter->watchdog_events++;
+}
+
+/*********************************************************************
+ *
+ * This routine disables all traffic on the adapter by issuing a
+ * global reset on the MAC.
+ *
+ **********************************************************************/
+static void
+igc_if_stop(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+
+ INIT_DEBUGOUT("igc_if_stop: begin");
+
+ igc_reset_hw(&adapter->hw);
+ IGC_WRITE_REG(&adapter->hw, IGC_WUC, 0);
+}
+
+/*********************************************************************
+ *
+ * Determine hardware revision.
+ *
+ **********************************************************************/
+static void
+igc_identify_hardware(if_ctx_t ctx)
+{
+ device_t dev = iflib_get_dev(ctx);
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+
+ /* Make sure our PCI config space has the necessary stuff set */
+ adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+
+ /* Save off the information about this board */
+ adapter->hw.vendor_id = pci_get_vendor(dev);
+ adapter->hw.device_id = pci_get_device(dev);
+ adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
+ adapter->hw.subsystem_vendor_id =
+ pci_read_config(dev, PCIR_SUBVEND_0, 2);
+ adapter->hw.subsystem_device_id =
+ pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+ /* Do Shared Code Init and Setup */
+ if (igc_set_mac_type(&adapter->hw)) {
+ device_printf(dev, "Setup init failure\n");
+ return;
+ }
+}
+
+static int
+igc_allocate_pci_resources(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
+ int rid;
+
+ rid = PCIR_BAR(0);
+ adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &rid, RF_ACTIVE);
+ if (adapter->memory == NULL) {
+ device_printf(dev, "Unable to allocate bus resource: memory\n");
+ return (ENXIO);
+ }
+ adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->memory);
+ adapter->osdep.mem_bus_space_handle =
+ rman_get_bushandle(adapter->memory);
+ adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
+
+ adapter->hw.back = &adapter->osdep;
+
+ return (0);
+}
+
+/*********************************************************************
+ *
+ * Set up the MSI-X Interrupt handlers
+ *
+ **********************************************************************/
+static int
+igc_if_msix_intr_assign(if_ctx_t ctx, int msix)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_rx_queue *rx_que = adapter->rx_queues;
+ struct igc_tx_queue *tx_que = adapter->tx_queues;
+ int error, rid, i, vector = 0, rx_vectors;
+ char buf[16];
+
+ /* First set up ring resources */
+ for (i = 0; i < adapter->rx_num_queues; i++, rx_que++, vector++) {
+ rid = vector + 1;
+ snprintf(buf, sizeof(buf), "rxq%d", i);
+ error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RXTX, igc_msix_que, rx_que, rx_que->me, buf);
+ if (error) {
+ device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error);
+ adapter->rx_num_queues = i + 1;
+ goto fail;
+ }
+
+ rx_que->msix = vector;
+
+ /*
+ * Record the EIMS bit for this queue: the per-vector bit
+ * that igc_if_rx_queue_intr_enable() writes to IGC_EIMS to
+ * re-enable the queue's interrupt.
+ */
+ rx_que->eims = 1 << vector;
+ }
+ rx_vectors = vector;
+
+ vector = 0;
+ for (i = 0; i < adapter->tx_num_queues; i++, tx_que++, vector++) {
+ snprintf(buf, sizeof(buf), "txq%d", i);
+ tx_que = &adapter->tx_queues[i];
+ iflib_softirq_alloc_generic(ctx,
+ &adapter->rx_queues[i % adapter->rx_num_queues].que_irq,
+ IFLIB_INTR_TX, tx_que, tx_que->me, buf);
+
+ tx_que->msix = (vector % adapter->rx_num_queues);
+
+ /*
+ * Record the EIMS bit for this TX queue; it is written to
+ * IGC_EIMS by igc_if_tx_queue_intr_enable() when the
+ * queue's interrupt is re-enabled.
+ */
+ tx_que->eims = 1 << i;
+ }
+
+ /* Link interrupt */
+ rid = rx_vectors + 1;
+ error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, IFLIB_INTR_ADMIN, igc_msix_link, adapter, 0, "aq");
+
+ if (error) {
+ device_printf(iflib_get_dev(ctx), "Failed to register admin handler");
+ goto fail;
+ }
+ adapter->linkvec = rx_vectors;
+ return (0);
+fail:
+ iflib_irq_free(ctx, &adapter->irq);
+ rx_que = adapter->rx_queues;
+ for (int i = 0; i < adapter->rx_num_queues; i++, rx_que++)
+ iflib_irq_free(ctx, &rx_que->que_irq);
+ return (error);
+}
+
+static void
+igc_configure_queues(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ struct igc_rx_queue *rx_que;
+ struct igc_tx_queue *tx_que;
+ u32 ivar = 0, newitr = 0;
+
+ /* First turn on RSS capability */
+ IGC_WRITE_REG(hw, IGC_GPIE,
+ IGC_GPIE_MSIX_MODE | IGC_GPIE_EIAME | IGC_GPIE_PBA |
+ IGC_GPIE_NSICR);
+
+ /* Turn on MSI-X */
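+ /*
+ * Each 32-bit IVAR register maps one pair of queues: byte 0 carries
+ * the RX vector for the even-numbered queue and byte 2 the RX vector
+ * for the odd one, while bytes 1 and 3 carry the corresponding TX
+ * vectors; IGC_IVAR_VALID marks an entry as active.
+ */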
+ /* RX entries */
+ for (int i = 0; i < adapter->rx_num_queues; i++) {
+ u32 index = i >> 1;
+ ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
+ rx_que = &adapter->rx_queues[i];
+ if (i & 1) {
+ ivar &= 0xFF00FFFF;
+ ivar |= (rx_que->msix | IGC_IVAR_VALID) << 16;
+ } else {
+ ivar &= 0xFFFFFF00;
+ ivar |= rx_que->msix | IGC_IVAR_VALID;
+ }
+ IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
+ }
+ /* TX entries */
+ for (int i = 0; i < adapter->tx_num_queues; i++) {
+ u32 index = i >> 1;
+ ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
+ tx_que = &adapter->tx_queues[i];
+ if (i & 1) {
+ ivar &= 0x00FFFFFF;
+ ivar |= (tx_que->msix | IGC_IVAR_VALID) << 24;
+ } else {
+ ivar &= 0xFFFF00FF;
+ ivar |= (tx_que->msix | IGC_IVAR_VALID) << 8;
+ }
+ IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
+ adapter->que_mask |= tx_que->eims;
+ }
+
+ /* And for the link interrupt */
+ ivar = (adapter->linkvec | IGC_IVAR_VALID) << 8;
+ adapter->link_mask = 1 << adapter->linkvec;
+ IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar);
+
+ /* Set the starting interrupt rate */
+ if (igc_max_interrupt_rate > 0)
+ newitr = (4000000 / igc_max_interrupt_rate) & 0x7FFC;
+
+ newitr |= IGC_EITR_CNT_IGNR;
+
+ for (int i = 0; i < adapter->rx_num_queues; i++) {
+ rx_que = &adapter->rx_queues[i];
+ IGC_WRITE_REG(hw, IGC_EITR(rx_que->msix), newitr);
+ }
+
+ return;
+}
+
+static void
+igc_free_pci_resources(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_rx_queue *que = adapter->rx_queues;
+ device_t dev = iflib_get_dev(ctx);
+
+ /* Release all MSI-X queue resources */
+ if (adapter->intr_type == IFLIB_INTR_MSIX)
+ iflib_irq_free(ctx, &adapter->irq);
+
+ for (int i = 0; i < adapter->rx_num_queues; i++, que++) {
+ iflib_irq_free(ctx, &que->que_irq);
+ }
+
+ if (adapter->memory != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ rman_get_rid(adapter->memory), adapter->memory);
+ adapter->memory = NULL;
+ }
+
+ if (adapter->flash != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ rman_get_rid(adapter->flash), adapter->flash);
+ adapter->flash = NULL;
+ }
+
+ if (adapter->ioport != NULL) {
+ bus_release_resource(dev, SYS_RES_IOPORT,
+ rman_get_rid(adapter->ioport), adapter->ioport);
+ adapter->ioport = NULL;
+ }
+}
+
+/* Set up MSI or MSI-X */
+static int
+igc_setup_msix(if_ctx_t ctx)
+{
+ return (0);
+}
+
+/*********************************************************************
+ *
+ * Initialize the DMA Coalescing feature
+ *
+ **********************************************************************/
+static void
+igc_init_dmac(struct igc_adapter *adapter, u32 pba)
+{
+ device_t dev = adapter->dev;
+ struct igc_hw *hw = &adapter->hw;
+ u32 dmac, reg = ~IGC_DMACR_DMAC_EN;
+ u16 hwm;
+ u16 max_frame_size;
+ int status;
+
+ max_frame_size = adapter->shared->isc_max_frame_size;
+
+ if (adapter->dmac == 0) { /* Disabling it */
+ IGC_WRITE_REG(hw, IGC_DMACR, reg);
+ return;
+ } else
+ device_printf(dev, "DMA Coalescing enabled\n");
+
+ /* Set starting threshold */
+ IGC_WRITE_REG(hw, IGC_DMCTXTH, 0);
+
+ hwm = 64 * pba - max_frame_size / 16;
+ if (hwm < 64 * (pba - 6))
+ hwm = 64 * (pba - 6);
+ reg = IGC_READ_REG(hw, IGC_FCRTC);
+ reg &= ~IGC_FCRTC_RTH_COAL_MASK;
+ reg |= ((hwm << IGC_FCRTC_RTH_COAL_SHIFT)
+ & IGC_FCRTC_RTH_COAL_MASK);
+ IGC_WRITE_REG(hw, IGC_FCRTC, reg);
+
+ dmac = pba - max_frame_size / 512;
+ if (dmac < pba - 10)
+ dmac = pba - 10;
+ reg = IGC_READ_REG(hw, IGC_DMACR);
+ reg &= ~IGC_DMACR_DMACTHR_MASK;
+ reg |= ((dmac << IGC_DMACR_DMACTHR_SHIFT)
+ & IGC_DMACR_DMACTHR_MASK);
+
+ /* Transition to L0s or L1 if available. */
+ reg |= (IGC_DMACR_DMAC_EN | IGC_DMACR_DMAC_LX_MASK);
+
+ /*
+ * Check whether this is a 2.5Gb connection before configuring the
+ * watchdog timer: the requested value is converted into 12.8 usec
+ * intervals on 2.5Gb links and into 32 usec intervals otherwise.
+ */
+ status = IGC_READ_REG(hw, IGC_STATUS);
+ if ((status & IGC_STATUS_2P5_SKU) &&
+ (!(status & IGC_STATUS_2P5_SKU_OVER)))
+ reg |= ((adapter->dmac * 5) >> 6);
+ else
+ reg |= (adapter->dmac >> 5);
+
+ IGC_WRITE_REG(hw, IGC_DMACR, reg);
+
+ IGC_WRITE_REG(hw, IGC_DMCRTRH, 0);
+
+ /* Set the interval before transition */
+ reg = IGC_READ_REG(hw, IGC_DMCTLX);
+ reg |= IGC_DMCTLX_DCFLUSH_DIS;
+
+ /*
+ ** On a 2.5Gb connection the TTLX unit is 0.4 usec, so a 4 usec
+ ** delay needs a value of 0xA (10 units); the delay itself is
+ ** still 4 usec.
+ */
+ status = IGC_READ_REG(hw, IGC_STATUS);
+ if ((status & IGC_STATUS_2P5_SKU) &&
+ (!(status & IGC_STATUS_2P5_SKU_OVER)))
+ reg |= 0xA;
+ else
+ reg |= 0x4;
+
+ IGC_WRITE_REG(hw, IGC_DMCTLX, reg);
+
+ /* free space in tx packet buffer to wake from DMA coal */
+ IGC_WRITE_REG(hw, IGC_DMCTXTH, (IGC_TXPBSIZE -
+ (2 * max_frame_size)) >> 6);
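+ /* The >> 6 divides by 64; the wake threshold is presumably programmed
+ * in 64-byte units of free TX packet-buffer space. */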
+
+ /* make low power state decision controlled by DMA coal */
+ reg = IGC_READ_REG(hw, IGC_PCIEMISC);
+ reg &= ~IGC_PCIEMISC_LX_DECISION;
+ IGC_WRITE_REG(hw, IGC_PCIEMISC, reg);
+}
+
+/*********************************************************************
+ *
+ * Initialize the hardware to a configuration as specified by the
+ * adapter structure.
+ *
+ **********************************************************************/
+static void
+igc_reset(if_ctx_t ctx)
+{
+ device_t dev = iflib_get_dev(ctx);
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_hw *hw = &adapter->hw;
+ u16 rx_buffer_size;
+ u32 pba;
+
+ INIT_DEBUGOUT("igc_reset: begin");
+ /* Let the firmware know the OS is in control */
+ igc_get_hw_control(adapter);
+
+ /*
+ * Packet Buffer Allocation (PBA)
+ * Writing PBA sets the receive portion of the buffer;
+ * the remainder is used for the transmit buffer.
+ */
+ pba = IGC_PBA_34K;
+
+ INIT_DEBUGOUT1("igc_reset: pba=%dK",pba);
+
+ /*
+ * These parameters control the automatic generation (Tx) and
+ * response (Rx) to Ethernet PAUSE frames.
+ * - High water mark should allow for at least two frames to be
+ * received after sending an XOFF.
+ * - Low water mark works best when it is very near the high water mark.
+ * This allows the receiver to restart by sending XON when it has
+ * drained a bit. Here we use an arbitrary value of 1500 which will
+ * restart after one full frame is pulled from the buffer. There
+ * could be several smaller frames in the buffer and if so they will
+ * not trigger the XON until their total number reduces the buffer
+ * by 1500.
+ * - The pause time is fairly large at 1000 x 512ns = 512 usec.
+ */
+ rx_buffer_size = (pba & 0xffff) << 10;
+ hw->fc.high_water = rx_buffer_size -
+ roundup2(adapter->hw.mac.max_frame_size, 1024);
+ /* 16-byte granularity */
+ hw->fc.low_water = hw->fc.high_water - 16;
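+ /*
+ * For example, assuming IGC_PBA_34K is 0x22 (34 KB) and the standard
+ * 1518-byte max frame (rounded up to 2048): rx_buffer_size becomes
+ * 34 * 1024 = 34816, high_water = 34816 - 2048 = 32768 and
+ * low_water = 32752 bytes.
+ */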
+
+ if (adapter->fc) /* locally set flow control value? */
+ hw->fc.requested_mode = adapter->fc;
+ else
+ hw->fc.requested_mode = igc_fc_full;
+
+ hw->fc.pause_time = IGC_FC_PAUSE_TIME;
+
+ hw->fc.send_xon = true;
+
+ /* Issue a global reset */
+ igc_reset_hw(hw);
+ IGC_WRITE_REG(hw, IGC_WUC, 0);
+
+ /* and a re-init */
+ if (igc_init_hw(hw) < 0) {
+ device_printf(dev, "Hardware Initialization Failed\n");
+ return;
+ }
+
+ /* Setup DMA Coalescing */
+ igc_init_dmac(adapter, pba);
+
+ IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN);
+ igc_get_phy_info(hw);
+ igc_check_for_link(hw);
+}
+
+/*
+ * Initialise the RSS mapping for NICs that support multiple transmit/
+ * receive rings.
+ */
+
+#define RSSKEYLEN 10
+static void
+igc_initialize_rss_mapping(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int i;
+ int queue_id;
+ u32 reta;
+ u32 rss_key[RSSKEYLEN], mrqc, shift = 0;
+
+ /*
+ * The redirection table controls which destination
+ * queue each bucket redirects traffic to.
+ * Each DWORD represents four queues, with the LSB
+ * being the first queue in the DWORD.
+ *
+ * This just allocates buckets to queues using round-robin
+ * allocation.
+ *
+ * NOTE: It Just Happens to line up with the default
+ * RSS allocation method.
+ */
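+ /*
+ * For example, with 4 RX queues (and the RSS kernel option not
+ * compiled in) bucket i maps to queue (i % 4), so each RETA register
+ * ends up holding the byte pattern 0x03020100.
+ */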
+
+ /* Warning FM follows */
+ reta = 0;
+ for (i = 0; i < 128; i++) {
+#ifdef RSS
+ queue_id = rss_get_indirection_to_bucket(i);
+ /*
+ * If we have more queues than buckets, we'll
+ * end up mapping buckets to a subset of the
+ * queues.
+ *
+ * If we have more buckets than queues, we'll
+ * end up instead assigning multiple buckets
+ * to queues.
+ *
+ * Both are suboptimal, but we need to handle
+ * the case so we don't go out of bounds
+ * indexing arrays and such.
+ */
+ queue_id = queue_id % adapter->rx_num_queues;
+#else
+ queue_id = (i % adapter->rx_num_queues);
+#endif
+ /* Adjust if required */
+ queue_id = queue_id << shift;
+
+ /*
+ * The low 8 bits are for hash value (n+0);
+ * The next 8 bits are for hash value (n+1), etc.
+ */
+ reta = reta >> 8;
+ reta = reta | ( ((uint32_t) queue_id) << 24);
+ if ((i & 3) == 3) {
+ IGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta);
+ reta = 0;
+ }
+ }
+
+ /* Now fill in hash table */
+
+ /*
+ * MRQC: Multiple Receive Queues Command
+ * Set queuing to RSS control, number depends on the device.
+ */
+ mrqc = IGC_MRQC_ENABLE_RSS_4Q;
+
+#ifdef RSS
+ /* XXX ew typecasting */
+ rss_getkey((uint8_t *) &rss_key);
+#else
+ arc4rand(&rss_key, sizeof(rss_key), 0);
+#endif
+ for (i = 0; i < RSSKEYLEN; i++)
+ IGC_WRITE_REG_ARRAY(hw, IGC_RSSRK(0), i, rss_key[i]);
+
+ /*
+ * Configure the RSS fields to hash upon.
+ */
+ mrqc |= (IGC_MRQC_RSS_FIELD_IPV4 |
+ IGC_MRQC_RSS_FIELD_IPV4_TCP);
+ mrqc |= (IGC_MRQC_RSS_FIELD_IPV6 |
+ IGC_MRQC_RSS_FIELD_IPV6_TCP);
+ mrqc |=( IGC_MRQC_RSS_FIELD_IPV4_UDP |
+ IGC_MRQC_RSS_FIELD_IPV6_UDP);
+ mrqc |=( IGC_MRQC_RSS_FIELD_IPV6_UDP_EX |
+ IGC_MRQC_RSS_FIELD_IPV6_TCP_EX);
+
+ IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
+}
+
+/*********************************************************************
+ *
+ * Setup networking device structure and register interface media.
+ *
+ **********************************************************************/
+static int
+igc_setup_interface(if_ctx_t ctx)
+{
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = adapter->shared;
+
+ INIT_DEBUGOUT("igc_setup_interface: begin");
+
+ /* Single Queue */
+ if (adapter->tx_num_queues == 1) {
+ if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1);
+ if_setsendqready(ifp);
+ }
+
+ /*
+ * Specify the media types supported by this adapter and register
+ * callbacks to update media and link information
+ */
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_T, 0, NULL);
+
+ ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
+ return (0);
+}
+
+static int
+igc_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = adapter->shared;
+ int error = IGC_SUCCESS;
+ struct igc_tx_queue *que;
+ int i, j;
+
+ MPASS(adapter->tx_num_queues > 0);
+ MPASS(adapter->tx_num_queues == ntxqsets);
+
+ /* First allocate the top level queue structs */
+ if (!(adapter->tx_queues =
+ (struct igc_tx_queue *) malloc(sizeof(struct igc_tx_queue) *
+ adapter->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
+ return(ENOMEM);
+ }
+
+ for (i = 0, que = adapter->tx_queues; i < adapter->tx_num_queues; i++, que++) {
+ /* Set up some basics */
+
+ struct tx_ring *txr = &que->txr;
+ txr->adapter = que->adapter = adapter;
+ que->me = txr->me = i;
+
+ /* Allocate report status array */
+ if (!(txr->tx_rsq = (qidx_t *) malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(iflib_get_dev(ctx), "failed to allocate rs_idxs memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ for (j = 0; j < scctx->isc_ntxd[0]; j++)
+ txr->tx_rsq[j] = QIDX_INVALID;
+ /* get the virtual and physical address of the hardware queues */
+ txr->tx_base = (struct igc_tx_desc *)vaddrs[i*ntxqs];
+ txr->tx_paddr = paddrs[i*ntxqs];
+ }
+
+ if (bootverbose)
+ device_printf(iflib_get_dev(ctx),
+ "allocated for %d tx_queues\n", adapter->tx_num_queues);
+ return (0);
+fail:
+ igc_if_queues_free(ctx);
+ return (error);
+}
+
+static int
+igc_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ int error = IGC_SUCCESS;
+ struct igc_rx_queue *que;
+ int i;
+
+ MPASS(adapter->rx_num_queues > 0);
+ MPASS(adapter->rx_num_queues == nrxqsets);
+
+ /* First allocate the top level queue structs */
+ if (!(adapter->rx_queues =
+ (struct igc_rx_queue *) malloc(sizeof(struct igc_rx_queue) *
+ adapter->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
+ /* Set up some basics */
+ struct rx_ring *rxr = &que->rxr;
+ rxr->adapter = que->adapter = adapter;
+ rxr->que = que;
+ que->me = rxr->me = i;
+
+ /* get the virtual and physical address of the hardware queues */
+ rxr->rx_base = (union igc_rx_desc_extended *)vaddrs[i*nrxqs];
+ rxr->rx_paddr = paddrs[i*nrxqs];
+ }
+
+ if (bootverbose)
+ device_printf(iflib_get_dev(ctx),
+ "allocated for %d rx_queues\n", adapter->rx_num_queues);
+
+ return (0);
+fail:
+ igc_if_queues_free(ctx);
+ return (error);
+}
+
+static void
+igc_if_queues_free(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_tx_queue *tx_que = adapter->tx_queues;
+ struct igc_rx_queue *rx_que = adapter->rx_queues;
+
+ if (tx_que != NULL) {
+ for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) {
+ struct tx_ring *txr = &tx_que->txr;
+ if (txr->tx_rsq == NULL)
+ break;
+
+ free(txr->tx_rsq, M_DEVBUF);
+ txr->tx_rsq = NULL;
+ }
+ free(adapter->tx_queues, M_DEVBUF);
+ adapter->tx_queues = NULL;
+ }
+
+ if (rx_que != NULL) {
+ free(adapter->rx_queues, M_DEVBUF);
+ adapter->rx_queues = NULL;
+ }
+
+ igc_release_hw_control(adapter);
+
+ if (adapter->mta != NULL) {
+ free(adapter->mta, M_DEVBUF);
+ }
+}
+
+/*********************************************************************
+ *
+ * Enable transmit unit.
+ *
+ **********************************************************************/
+static void
+igc_initialize_transmit_unit(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = adapter->shared;
+ struct igc_tx_queue *que;
+ struct tx_ring *txr;
+ struct igc_hw *hw = &adapter->hw;
+ u32 tctl, txdctl = 0;
+
+ INIT_DEBUGOUT("igc_initialize_transmit_unit: begin");
+
+ for (int i = 0; i < adapter->tx_num_queues; i++, txr++) {
+ u64 bus_addr;
+ caddr_t offp, endp;
+
+ que = &adapter->tx_queues[i];
+ txr = &que->txr;
+ bus_addr = txr->tx_paddr;
+
+ /* Clear checksum offload context. */
+ offp = (caddr_t)&txr->csum_flags;
+ endp = (caddr_t)(txr + 1);
+ bzero(offp, endp - offp);
+
+ /* Base and Len of TX Ring */
+ IGC_WRITE_REG(hw, IGC_TDLEN(i),
+ scctx->isc_ntxd[0] * sizeof(struct igc_tx_desc));
+ IGC_WRITE_REG(hw, IGC_TDBAH(i),
+ (u32)(bus_addr >> 32));
+ IGC_WRITE_REG(hw, IGC_TDBAL(i),
+ (u32)bus_addr);
+ /* Init the HEAD/TAIL indices */
+ IGC_WRITE_REG(hw, IGC_TDT(i), 0);
+ IGC_WRITE_REG(hw, IGC_TDH(i), 0);
+
+ HW_DEBUGOUT2("Base = %x, Length = %x\n",
+ IGC_READ_REG(&adapter->hw, IGC_TDBAL(i)),
+ IGC_READ_REG(&adapter->hw, IGC_TDLEN(i)));
+
+ txdctl = 0; /* clear txdctl */
+ txdctl |= 0x1f; /* PTHRESH */
+ txdctl |= 1 << 8; /* HTHRESH */
+ txdctl |= 1 << 16;/* WTHRESH */
+ txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */
+ txdctl |= IGC_TXDCTL_GRAN;
+ txdctl |= 1 << 25; /* LWTHRESH */
+
+ IGC_WRITE_REG(hw, IGC_TXDCTL(i), txdctl);
+ }
+
+ /* Program the Transmit Control Register */
+ tctl = IGC_READ_REG(&adapter->hw, IGC_TCTL);
+ tctl &= ~IGC_TCTL_CT;
+ tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN |
+ (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT));
+
+ /* This write will effectively turn on the transmit unit. */
+ IGC_WRITE_REG(&adapter->hw, IGC_TCTL, tctl);
+}
+
+/*********************************************************************
+ *
+ * Enable receive unit.
+ *
+ **********************************************************************/
+
+static void
+igc_initialize_receive_unit(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = adapter->shared;
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ struct igc_hw *hw = &adapter->hw;
+ struct igc_rx_queue *que;
+ int i;
+ u32 psize, rctl, rxcsum, srrctl = 0;
+
+	INIT_DEBUGOUT("igc_initialize_receive_unit: begin");
+
+ /*
+ * Make sure receives are disabled while setting
+ * up the descriptor ring
+ */
+ rctl = IGC_READ_REG(hw, IGC_RCTL);
+ IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
+
+ /* Setup the Receive Control Register */
+ rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
+ rctl |= IGC_RCTL_EN | IGC_RCTL_BAM |
+ IGC_RCTL_LBM_NO | IGC_RCTL_RDMTS_HALF |
+ (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
+
+ /* Do not store bad packets */
+ rctl &= ~IGC_RCTL_SBP;
+
+ /* Enable Long Packet receive */
+ if (if_getmtu(ifp) > ETHERMTU)
+ rctl |= IGC_RCTL_LPE;
+ else
+ rctl &= ~IGC_RCTL_LPE;
+
+ /* Strip the CRC */
+ if (!igc_disable_crc_stripping)
+ rctl |= IGC_RCTL_SECRC;
+
+ /*
+ * Set the interrupt throttling rate. Value is calculated
+ * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
+ */
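+	/*
+	 * Illustrative numbers only: if MAX_INTS_PER_SEC were 8000, the
+	 * formula gives roughly 1e9 / (8000 * 256) ~= 488 units of 256 ns,
+	 * i.e. about 125 us between interrupts; the real constants live in
+	 * the header.
+	 */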
+ IGC_WRITE_REG(hw, IGC_ITR, DEFAULT_ITR);
+
+ rxcsum = IGC_READ_REG(hw, IGC_RXCSUM);
+ if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
+ rxcsum |= IGC_RXCSUM_CRCOFL;
+ if (adapter->tx_num_queues > 1)
+ rxcsum |= IGC_RXCSUM_PCSD;
+ else
+ rxcsum |= IGC_RXCSUM_IPPCSE;
+ } else {
+ if (adapter->tx_num_queues > 1)
+ rxcsum |= IGC_RXCSUM_PCSD;
+ else
+ rxcsum &= ~IGC_RXCSUM_TUOFL;
+ }
+ IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);
+
+ if (adapter->rx_num_queues > 1)
+ igc_initialize_rss_mapping(adapter);
+
+ if (if_getmtu(ifp) > ETHERMTU) {
+ /* Set maximum packet len */
+ if (adapter->rx_mbuf_sz <= 4096) {
+ srrctl |= 4096 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+ rctl |= IGC_RCTL_SZ_4096 | IGC_RCTL_BSEX;
+ } else if (adapter->rx_mbuf_sz > 4096) {
+ srrctl |= 8192 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+ rctl |= IGC_RCTL_SZ_8192 | IGC_RCTL_BSEX;
+ }
+ psize = scctx->isc_max_frame_size;
+ /* are we on a vlan? */
+ if (ifp->if_vlantrunk != NULL)
+ psize += VLAN_TAG_SIZE;
+ IGC_WRITE_REG(&adapter->hw, IGC_RLPML, psize);
+ } else {
+ srrctl |= 2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+ rctl |= IGC_RCTL_SZ_2048;
+ }
+
+ /*
+ * If TX flow control is disabled and there's >1 queue defined,
+ * enable DROP.
+ *
+ * This drops frames rather than hanging the RX MAC for all queues.
+ */
+ if ((adapter->rx_num_queues > 1) &&
+ (adapter->fc == igc_fc_none ||
+ adapter->fc == igc_fc_rx_pause)) {
+ srrctl |= IGC_SRRCTL_DROP_EN;
+ }
+
+ /* Setup the Base and Length of the Rx Descriptor Rings */
+ for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++) {
+ struct rx_ring *rxr = &que->rxr;
+ u64 bus_addr = rxr->rx_paddr;
+ u32 rxdctl;
+
+#ifdef notyet
+ /* Configure for header split? -- ignore for now */
+ rxr->hdr_split = igc_header_split;
+#else
+ srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
+#endif
+
+ IGC_WRITE_REG(hw, IGC_RDLEN(i),
+ scctx->isc_nrxd[0] * sizeof(struct igc_rx_desc));
+ IGC_WRITE_REG(hw, IGC_RDBAH(i),
+ (uint32_t)(bus_addr >> 32));
+ IGC_WRITE_REG(hw, IGC_RDBAL(i),
+ (uint32_t)bus_addr);
+ IGC_WRITE_REG(hw, IGC_SRRCTL(i), srrctl);
+ /* Setup the Head and Tail Descriptor Pointers */
+ IGC_WRITE_REG(hw, IGC_RDH(i), 0);
+ IGC_WRITE_REG(hw, IGC_RDT(i), 0);
+ /* Enable this Queue */
+ rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(i));
+ rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
+ rxdctl &= 0xFFF00000;
+ rxdctl |= IGC_RX_PTHRESH;
+ rxdctl |= IGC_RX_HTHRESH << 8;
+ rxdctl |= IGC_RX_WTHRESH << 16;
+ IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl);
+ }
+
+ /* Make sure VLAN Filters are off */
+ rctl &= ~IGC_RCTL_VFE;
+
+ /* Write out the settings */
+ IGC_WRITE_REG(hw, IGC_RCTL, rctl);
+
+ return;
+}
+
+static void
+igc_if_vlan_register(if_ctx_t ctx, u16 vtag)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ u32 index, bit;
+
+ index = (vtag >> 5) & 0x7F;
+ bit = vtag & 0x1F;
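+	/*
+	 * Each of the IGC_VFTA_SIZE (128) shadow words tracks 32
+	 * consecutive VLAN IDs; e.g. vtag 100 lands in shadow_vfta[3],
+	 * bit 4.
+	 */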
+ adapter->shadow_vfta[index] |= (1 << bit);
+ ++adapter->num_vlans;
+}
+
+static void
+igc_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ u32 index, bit;
+
+ index = (vtag >> 5) & 0x7F;
+ bit = vtag & 0x1F;
+ adapter->shadow_vfta[index] &= ~(1 << bit);
+ --adapter->num_vlans;
+}
+
+static void
+igc_setup_vlan_hw_support(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 reg;
+
+ /*
+	 * We get here through init_locked, meaning a soft
+	 * reset has already cleared the VFTA and other
+	 * state, so if no VLANs have been registered there
+	 * is nothing to do.
+ */
+ if (adapter->num_vlans == 0)
+ return;
+
+ /*
+	 * A soft reset zeroes out the VFTA, so
+ * we need to repopulate it now.
+ */
+ for (int i = 0; i < IGC_VFTA_SIZE; i++)
+ if (adapter->shadow_vfta[i] != 0)
+ IGC_WRITE_REG_ARRAY(hw, IGC_VFTA,
+ i, adapter->shadow_vfta[i]);
+
+ reg = IGC_READ_REG(hw, IGC_CTRL);
+ reg |= IGC_CTRL_VME;
+ IGC_WRITE_REG(hw, IGC_CTRL, reg);
+
+ /* Enable the Filter Table */
+ reg = IGC_READ_REG(hw, IGC_RCTL);
+ reg &= ~IGC_RCTL_CFIEN;
+ reg |= IGC_RCTL_VFE;
+ IGC_WRITE_REG(hw, IGC_RCTL, reg);
+}
+
+static void
+igc_if_intr_enable(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_hw *hw = &adapter->hw;
+ u32 mask;
+
+ if (__predict_true(adapter->intr_type == IFLIB_INTR_MSIX)) {
+ mask = (adapter->que_mask | adapter->link_mask);
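+		/*
+		 * Queue vectors run through the extended registers:
+		 * enable auto-clear (EIAC) and auto-mask (EIAM) for
+		 * them, unmask them via EIMS, and keep link-state
+		 * changes on the legacy IMS path.
+		 */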
+ IGC_WRITE_REG(hw, IGC_EIAC, mask);
+ IGC_WRITE_REG(hw, IGC_EIAM, mask);
+ IGC_WRITE_REG(hw, IGC_EIMS, mask);
+ IGC_WRITE_REG(hw, IGC_IMS, IGC_IMS_LSC);
+ } else
+ IGC_WRITE_REG(hw, IGC_IMS, IMS_ENABLE_MASK);
+ IGC_WRITE_FLUSH(hw);
+}
+
+static void
+igc_if_intr_disable(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_hw *hw = &adapter->hw;
+
+ if (__predict_true(adapter->intr_type == IFLIB_INTR_MSIX)) {
+ IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff);
+ IGC_WRITE_REG(hw, IGC_EIAC, 0);
+ }
+ IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
+ IGC_WRITE_FLUSH(hw);
+}
+
+/*
+ * igc_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means
+ * that the driver is loaded. For AMT version type f/w
+ * this means that the network i/f is open.
+ */
+static void
+igc_get_hw_control(struct igc_adapter *adapter)
+{
+ u32 ctrl_ext;
+
+ if (adapter->vf_ifp)
+ return;
+
+ ctrl_ext = IGC_READ_REG(&adapter->hw, IGC_CTRL_EXT);
+ IGC_WRITE_REG(&adapter->hw, IGC_CTRL_EXT,
+ ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
+}
+
+/*
+ * igc_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is no longer loaded. For AMT versions of the
+ * f/w this means that the network i/f is closed.
+ */
+static void
+igc_release_hw_control(struct igc_adapter *adapter)
+{
+ u32 ctrl_ext;
+
+ ctrl_ext = IGC_READ_REG(&adapter->hw, IGC_CTRL_EXT);
+ IGC_WRITE_REG(&adapter->hw, IGC_CTRL_EXT,
+ ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
+ return;
+}
+
+static int
+igc_is_valid_ether_addr(u8 *addr)
+{
+ char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
+
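+	/* Reject multicast/broadcast (I/G bit set) and the all-zeros address. */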
+ if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
+ return (false);
+ }
+
+ return (true);
+}
+
+/*
+** Parse the interface capabilities with regard
+** to both system management and wake-on-lan for
+** later use.
+*/
+static void
+igc_get_wakeup(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ u16 eeprom_data = 0, apme_mask;
+
+ apme_mask = IGC_WUC_APME;
+ eeprom_data = IGC_READ_REG(&adapter->hw, IGC_WUC);
+
+ if (eeprom_data & apme_mask)
+ adapter->wol = IGC_WUFC_LNKC;
+}
+
+
+/*
+ * Enable PCI Wake On Lan capability
+ */
+static void
+igc_enable_wakeup(if_ctx_t ctx)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
+ int error = 0;
+ u32 pmc, ctrl, rctl;
+ u16 status;
+
+ if (pci_find_cap(dev, PCIY_PMG, &pmc) != 0)
+ return;
+
+ /*
+ * Determine type of Wakeup: note that wol
+ * is set with all bits on by default.
+ */
+ if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0)
+ adapter->wol &= ~IGC_WUFC_MAG;
+
+ if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) == 0)
+ adapter->wol &= ~IGC_WUFC_EX;
+
+ if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0)
+ adapter->wol &= ~IGC_WUFC_MC;
+ else {
+ rctl = IGC_READ_REG(&adapter->hw, IGC_RCTL);
+ rctl |= IGC_RCTL_MPE;
+ IGC_WRITE_REG(&adapter->hw, IGC_RCTL, rctl);
+ }
+
+ if (!(adapter->wol & (IGC_WUFC_EX | IGC_WUFC_MAG | IGC_WUFC_MC)))
+ goto pme;
+
+ /* Advertise the wakeup capability */
+ ctrl = IGC_READ_REG(&adapter->hw, IGC_CTRL);
+ ctrl |= IGC_CTRL_ADVD3WUC;
+ IGC_WRITE_REG(&adapter->hw, IGC_CTRL, ctrl);
+
+ /* Enable wakeup by the MAC */
+ IGC_WRITE_REG(&adapter->hw, IGC_WUC, IGC_WUC_PME_EN);
+ IGC_WRITE_REG(&adapter->hw, IGC_WUFC, adapter->wol);
+
+pme:
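+	/* Mirror the WOL decision into the PCI PM status register PME bits. */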
+ status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
+ status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
+ if (!error && (if_getcapenable(ifp) & IFCAP_WOL))
+ status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
+ pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
+
+ return;
+}
+
+/**********************************************************************
+ *
+ * Update the board statistics counters.
+ *
+ **********************************************************************/
+static void
+igc_update_stats_counters(struct igc_adapter *adapter)
+{
+ u64 prev_xoffrxc = adapter->stats.xoffrxc;
+
+ adapter->stats.crcerrs += IGC_READ_REG(&adapter->hw, IGC_CRCERRS);
+ adapter->stats.mpc += IGC_READ_REG(&adapter->hw, IGC_MPC);
+ adapter->stats.scc += IGC_READ_REG(&adapter->hw, IGC_SCC);
+ adapter->stats.ecol += IGC_READ_REG(&adapter->hw, IGC_ECOL);
+
+ adapter->stats.mcc += IGC_READ_REG(&adapter->hw, IGC_MCC);
+ adapter->stats.latecol += IGC_READ_REG(&adapter->hw, IGC_LATECOL);
+ adapter->stats.colc += IGC_READ_REG(&adapter->hw, IGC_COLC);
+ adapter->stats.colc += IGC_READ_REG(&adapter->hw, IGC_RERC);
+ adapter->stats.dc += IGC_READ_REG(&adapter->hw, IGC_DC);
+ adapter->stats.rlec += IGC_READ_REG(&adapter->hw, IGC_RLEC);
+ adapter->stats.xonrxc += IGC_READ_REG(&adapter->hw, IGC_XONRXC);
+ adapter->stats.xontxc += IGC_READ_REG(&adapter->hw, IGC_XONTXC);
+ adapter->stats.xoffrxc += IGC_READ_REG(&adapter->hw, IGC_XOFFRXC);
+ /*
+ * For watchdog management we need to know if we have been
+ * paused during the last interval, so capture that here.
+ */
+ if (adapter->stats.xoffrxc != prev_xoffrxc)
+ adapter->shared->isc_pause_frames = 1;
+ adapter->stats.xofftxc += IGC_READ_REG(&adapter->hw, IGC_XOFFTXC);
+ adapter->stats.fcruc += IGC_READ_REG(&adapter->hw, IGC_FCRUC);
+ adapter->stats.prc64 += IGC_READ_REG(&adapter->hw, IGC_PRC64);
+ adapter->stats.prc127 += IGC_READ_REG(&adapter->hw, IGC_PRC127);
+ adapter->stats.prc255 += IGC_READ_REG(&adapter->hw, IGC_PRC255);
+ adapter->stats.prc511 += IGC_READ_REG(&adapter->hw, IGC_PRC511);
+ adapter->stats.prc1023 += IGC_READ_REG(&adapter->hw, IGC_PRC1023);
+ adapter->stats.prc1522 += IGC_READ_REG(&adapter->hw, IGC_PRC1522);
+ adapter->stats.tlpic += IGC_READ_REG(&adapter->hw, IGC_TLPIC);
+ adapter->stats.rlpic += IGC_READ_REG(&adapter->hw, IGC_RLPIC);
+ adapter->stats.gprc += IGC_READ_REG(&adapter->hw, IGC_GPRC);
+ adapter->stats.bprc += IGC_READ_REG(&adapter->hw, IGC_BPRC);
+ adapter->stats.mprc += IGC_READ_REG(&adapter->hw, IGC_MPRC);
+ adapter->stats.gptc += IGC_READ_REG(&adapter->hw, IGC_GPTC);
+
+ /* For the 64-bit byte counters the low dword must be read first. */
+ /* Both registers clear on the read of the high dword */
+
+ adapter->stats.gorc += IGC_READ_REG(&adapter->hw, IGC_GORCL) +
+ ((u64)IGC_READ_REG(&adapter->hw, IGC_GORCH) << 32);
+ adapter->stats.gotc += IGC_READ_REG(&adapter->hw, IGC_GOTCL) +
+ ((u64)IGC_READ_REG(&adapter->hw, IGC_GOTCH) << 32);
+
+ adapter->stats.rnbc += IGC_READ_REG(&adapter->hw, IGC_RNBC);
+ adapter->stats.ruc += IGC_READ_REG(&adapter->hw, IGC_RUC);
+ adapter->stats.rfc += IGC_READ_REG(&adapter->hw, IGC_RFC);
+ adapter->stats.roc += IGC_READ_REG(&adapter->hw, IGC_ROC);
+ adapter->stats.rjc += IGC_READ_REG(&adapter->hw, IGC_RJC);
+
+ adapter->stats.tor += IGC_READ_REG(&adapter->hw, IGC_TORH);
+ adapter->stats.tot += IGC_READ_REG(&adapter->hw, IGC_TOTH);
+
+ adapter->stats.tpr += IGC_READ_REG(&adapter->hw, IGC_TPR);
+ adapter->stats.tpt += IGC_READ_REG(&adapter->hw, IGC_TPT);
+ adapter->stats.ptc64 += IGC_READ_REG(&adapter->hw, IGC_PTC64);
+ adapter->stats.ptc127 += IGC_READ_REG(&adapter->hw, IGC_PTC127);
+ adapter->stats.ptc255 += IGC_READ_REG(&adapter->hw, IGC_PTC255);
+ adapter->stats.ptc511 += IGC_READ_REG(&adapter->hw, IGC_PTC511);
+ adapter->stats.ptc1023 += IGC_READ_REG(&adapter->hw, IGC_PTC1023);
+ adapter->stats.ptc1522 += IGC_READ_REG(&adapter->hw, IGC_PTC1522);
+ adapter->stats.mptc += IGC_READ_REG(&adapter->hw, IGC_MPTC);
+ adapter->stats.bptc += IGC_READ_REG(&adapter->hw, IGC_BPTC);
+
+ /* Interrupt Counts */
+ adapter->stats.iac += IGC_READ_REG(&adapter->hw, IGC_IAC);
+ adapter->stats.rxdmtc += IGC_READ_REG(&adapter->hw, IGC_RXDMTC);
+
+ adapter->stats.algnerrc += IGC_READ_REG(&adapter->hw, IGC_ALGNERRC);
+ adapter->stats.tncrs += IGC_READ_REG(&adapter->hw, IGC_TNCRS);
+ adapter->stats.htdpmc += IGC_READ_REG(&adapter->hw, IGC_HTDPMC);
+ adapter->stats.tsctc += IGC_READ_REG(&adapter->hw, IGC_TSCTC);
+}
+
+static uint64_t
+igc_if_get_counter(if_ctx_t ctx, ift_counter cnt)
+{
+ struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+
+ switch (cnt) {
+ case IFCOUNTER_COLLISIONS:
+ return (adapter->stats.colc);
+ case IFCOUNTER_IERRORS:
+ return (adapter->dropped_pkts + adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.mpc + adapter->stats.htdpmc);
+ case IFCOUNTER_OERRORS:
+ return (adapter->stats.ecol + adapter->stats.latecol +
+ adapter->watchdog_events);
+ default:
+ return (if_get_counter_default(ifp, cnt));
+ }
+}
+
+/* igc_if_needs_restart - Tell iflib when the driver needs to be reinitialized
+ * @ctx: iflib context
+ * @event: event code to check
+ *
+ * Defaults to returning true for unknown events.
+ *
+ * @returns true if iflib needs to reinit the interface
+ */
+static bool
+igc_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
+{
+ switch (event) {
+ case IFLIB_RESTART_VLAN_CONFIG:
+ default:
+ return (true);
+ }
+}
+
+/* Export a single 32-bit register via a read-only sysctl. */
+static int
+igc_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct igc_adapter *adapter;
+ u_int val;
+
+ adapter = oidp->oid_arg1;
+ val = IGC_READ_REG(&adapter->hw, oidp->oid_arg2);
+ return (sysctl_handle_int(oidp, &val, 0, req));
+}
+
+/*
+ * Add sysctl variables, one per statistic, to the system.
+ */
+static void
+igc_add_hw_stats(struct igc_adapter *adapter)
+{
+ device_t dev = iflib_get_dev(adapter->ctx);
+ struct igc_tx_queue *tx_que = adapter->tx_queues;
+ struct igc_rx_queue *rx_que = adapter->rx_queues;
+
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+ struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+ struct igc_hw_stats *stats = &adapter->stats;
+
+ struct sysctl_oid *stat_node, *queue_node, *int_node;
+ struct sysctl_oid_list *stat_list, *queue_list, *int_list;
+
+#define QUEUE_NAME_LEN 32
+ char namebuf[QUEUE_NAME_LEN];
+
+ /* Driver Statistics */
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
+ CTLFLAG_RD, &adapter->dropped_pkts,
+ "Driver dropped packets");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
+ CTLFLAG_RD, &adapter->link_irq,
+ "Link MSI-X IRQ Handled");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
+ CTLFLAG_RD, &adapter->rx_overruns,
+ "RX overruns");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
+ CTLFLAG_RD, &adapter->watchdog_events,
+ "Watchdog timeouts");
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
+ CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
+ adapter, IGC_CTRL, igc_sysctl_reg_handler, "IU",
+ "Device Control Register");
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
+ CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
+ adapter, IGC_RCTL, igc_sysctl_reg_handler, "IU",
+ "Receiver Control Register");
+ SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
+ CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
+ "Flow Control High Watermark");
+ SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
+ CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
+ "Flow Control Low Watermark");
+
+ for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) {
+ struct tx_ring *txr = &tx_que->txr;
+ snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i);
+ queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Name");
+ queue_list = SYSCTL_CHILDREN(queue_node);
+
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
+ CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter,
+ IGC_TDH(txr->me), igc_sysctl_reg_handler, "IU",
+ "Transmit Descriptor Head");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
+ CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter,
+ IGC_TDT(txr->me), igc_sysctl_reg_handler, "IU",
+ "Transmit Descriptor Tail");
+ SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq",
+ CTLFLAG_RD, &txr->tx_irq,
+ "Queue MSI-X Transmit Interrupts");
+ }
+
+ for (int j = 0; j < adapter->rx_num_queues; j++, rx_que++) {
+ struct rx_ring *rxr = &rx_que->rxr;
+ snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", j);
+ queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Name");
+ queue_list = SYSCTL_CHILDREN(queue_node);
+
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
+ CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter,
+ IGC_RDH(rxr->me), igc_sysctl_reg_handler, "IU",
+ "Receive Descriptor Head");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
+ CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter,
+ IGC_RDT(rxr->me), igc_sysctl_reg_handler, "IU",
+ "Receive Descriptor Tail");
+ SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq",
+ CTLFLAG_RD, &rxr->rx_irq,
+ "Queue MSI-X Receive Interrupts");
+ }
+
+ /* MAC stats get their own sub node */
+
+ stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics");
+ stat_list = SYSCTL_CHILDREN(stat_node);
+
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
+ CTLFLAG_RD, &stats->ecol,
+ "Excessive collisions");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
+ CTLFLAG_RD, &stats->scc,
+ "Single collisions");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
+ CTLFLAG_RD, &stats->mcc,
+ "Multiple collisions");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
+ CTLFLAG_RD, &stats->latecol,
+ "Late collisions");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
+ CTLFLAG_RD, &stats->colc,
+ "Collision Count");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
+ CTLFLAG_RD, &adapter->stats.symerrs,
+ "Symbol Errors");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
+ CTLFLAG_RD, &adapter->stats.sec,
+ "Sequence Errors");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
+ CTLFLAG_RD, &adapter->stats.dc,
+ "Defer Count");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
+ CTLFLAG_RD, &adapter->stats.mpc,
+ "Missed Packets");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
+ CTLFLAG_RD, &adapter->stats.rnbc,
+ "Receive No Buffers");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
+ CTLFLAG_RD, &adapter->stats.ruc,
+ "Receive Undersize");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
+ CTLFLAG_RD, &adapter->stats.rfc,
+ "Fragmented Packets Received ");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
+ CTLFLAG_RD, &adapter->stats.roc,
+ "Oversized Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
+ CTLFLAG_RD, &adapter->stats.rjc,
+	    "Received Jabber");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
+ CTLFLAG_RD, &adapter->stats.rxerrc,
+ "Receive Errors");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
+ CTLFLAG_RD, &adapter->stats.crcerrs,
+ "CRC errors");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
+ CTLFLAG_RD, &adapter->stats.algnerrc,
+ "Alignment Errors");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
+ CTLFLAG_RD, &adapter->stats.xonrxc,
+ "XON Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
+ CTLFLAG_RD, &adapter->stats.xontxc,
+ "XON Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
+ CTLFLAG_RD, &adapter->stats.xoffrxc,
+ "XOFF Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
+ CTLFLAG_RD, &adapter->stats.xofftxc,
+ "XOFF Transmitted");
+
+ /* Packet Reception Stats */
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
+ CTLFLAG_RD, &adapter->stats.tpr,
+ "Total Packets Received ");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
+ CTLFLAG_RD, &adapter->stats.gprc,
+ "Good Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
+ CTLFLAG_RD, &adapter->stats.bprc,
+ "Broadcast Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
+ CTLFLAG_RD, &adapter->stats.mprc,
+ "Multicast Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
+ CTLFLAG_RD, &adapter->stats.prc64,
+ "64 byte frames received ");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
+ CTLFLAG_RD, &adapter->stats.prc127,
+ "65-127 byte frames received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
+ CTLFLAG_RD, &adapter->stats.prc255,
+ "128-255 byte frames received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
+ CTLFLAG_RD, &adapter->stats.prc511,
+ "256-511 byte frames received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
+ CTLFLAG_RD, &adapter->stats.prc1023,
+ "512-1023 byte frames received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
+ CTLFLAG_RD, &adapter->stats.prc1522,
+	    "1024-1522 byte frames received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
+ CTLFLAG_RD, &adapter->stats.gorc,
+ "Good Octets Received");
+
+ /* Packet Transmission Stats */
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
+ CTLFLAG_RD, &adapter->stats.gotc,
+ "Good Octets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
+ CTLFLAG_RD, &adapter->stats.tpt,
+ "Total Packets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
+ CTLFLAG_RD, &adapter->stats.gptc,
+ "Good Packets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
+ CTLFLAG_RD, &adapter->stats.bptc,
+ "Broadcast Packets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
+ CTLFLAG_RD, &adapter->stats.mptc,
+ "Multicast Packets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
+ CTLFLAG_RD, &adapter->stats.ptc64,
+ "64 byte frames transmitted ");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
+ CTLFLAG_RD, &adapter->stats.ptc127,
+ "65-127 byte frames transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
+ CTLFLAG_RD, &adapter->stats.ptc255,
+ "128-255 byte frames transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
+ CTLFLAG_RD, &adapter->stats.ptc511,
+ "256-511 byte frames transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
+ CTLFLAG_RD, &adapter->stats.ptc1023,
+ "512-1023 byte frames transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
+ CTLFLAG_RD, &adapter->stats.ptc1522,
+ "1024-1522 byte frames transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
+ CTLFLAG_RD, &adapter->stats.tsctc,
+ "TSO Contexts Transmitted");
+
+ /* Interrupt Stats */
+
+ int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Interrupt Statistics");
+ int_list = SYSCTL_CHILDREN(int_node);
+
+ SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts",
+ CTLFLAG_RD, &adapter->stats.iac,
+ "Interrupt Assertion Count");
+
+ SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
+ CTLFLAG_RD, &adapter->stats.rxdmtc,
+ "Rx Desc Min Thresh Count");
+}
+
+/**********************************************************************
+ *
+ * This routine provides a way to dump out the adapter EEPROM,
+ * often a useful debug/service tool. This only dumps the first
+ * 32 words; the data that matters lives in that range.
+ *
+ **********************************************************************/
+static int
+igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
+{
+ struct igc_adapter *adapter = (struct igc_adapter *)arg1;
+ int error;
+ int result;
+
+ result = -1;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+
+ if (error || !req->newptr)
+ return (error);
+
+ /*
+ * This value will cause a hex dump of the
+ * first 32 16-bit words of the EEPROM to
+ * the screen.
+ */
+ if (result == 1)
+ igc_print_nvm_info(adapter);
+
+ return (error);
+}
+
+static void
+igc_print_nvm_info(struct igc_adapter *adapter)
+{
+ u16 eeprom_data;
+ int i, j, row = 0;
+
+	/* It's a bit crude, but it gets the job done */
+ printf("\nInterface EEPROM Dump:\n");
+ printf("Offset\n0x0000 ");
+ for (i = 0, j = 0; i < 32; i++, j++) {
+ if (j == 8) { /* Make the offset block */
+ j = 0; ++row;
+ printf("\n0x00%x0 ",row);
+ }
+ igc_read_nvm(&adapter->hw, i, 1, &eeprom_data);
+ printf("%04x ", eeprom_data);
+ }
+ printf("\n");
+}
+
+static int
+igc_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
+{
+ struct igc_int_delay_info *info;
+ struct igc_adapter *adapter;
+ u32 regval;
+ int error, usecs, ticks;
+
+ info = (struct igc_int_delay_info *) arg1;
+ usecs = info->value;
+ error = sysctl_handle_int(oidp, &usecs, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ if (usecs < 0 || usecs > IGC_TICKS_TO_USECS(65535))
+ return (EINVAL);
+ info->value = usecs;
+ ticks = IGC_USECS_TO_TICKS(usecs);
+ if (info->offset == IGC_ITR) /* units are 256ns here */
+ ticks *= 4;
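+	/*
+	 * Illustration, assuming IGC_USECS_TO_TICKS yields 1.024 us
+	 * register ticks: a request of 128 usecs becomes 125 ticks,
+	 * i.e. 500 ITR units of 256 ns.
+	 */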
+
+ adapter = info->adapter;
+
+ regval = IGC_READ_OFFSET(&adapter->hw, info->offset);
+ regval = (regval & ~0xffff) | (ticks & 0xffff);
+ /* Handle a few special cases. */
+ switch (info->offset) {
+ case IGC_RDTR:
+ break;
+ case IGC_TIDV:
+ if (ticks == 0) {
+ adapter->txd_cmd &= ~IGC_TXD_CMD_IDE;
+ /* Don't write 0 into the TIDV register. */
+ regval++;
+ } else
+ adapter->txd_cmd |= IGC_TXD_CMD_IDE;
+ break;
+ }
+ IGC_WRITE_OFFSET(&adapter->hw, info->offset, regval);
+ return (0);
+}
+
+static void
+igc_add_int_delay_sysctl(struct igc_adapter *adapter, const char *name,
+ const char *description, struct igc_int_delay_info *info,
+ int offset, int value)
+{
+ info->adapter = adapter;
+ info->offset = offset;
+ info->value = value;
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
+ OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ info, 0, igc_sysctl_int_delay, "I", description);
+}
+
+/*
+ * Set flow control using sysctl:
+ * Flow control values:
+ * 0 - off
+ * 1 - rx pause
+ * 2 - tx pause
+ * 3 - full
+ */
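+/*
+ * Example usage, assuming the node is attached as dev.igc.<unit>.fc during
+ * attach: `sysctl dev.igc.0.fc=3` requests full flow control, applied via
+ * igc_force_mac_fc() below.
+ */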
+static int
+igc_set_flowcntl(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ static int input = 3; /* default is full */
+ struct igc_adapter *adapter = (struct igc_adapter *) arg1;
+
+ error = sysctl_handle_int(oidp, &input, 0, req);
+
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ if (input == adapter->fc) /* no change? */
+ return (error);
+
+ switch (input) {
+ case igc_fc_rx_pause:
+ case igc_fc_tx_pause:
+ case igc_fc_full:
+ case igc_fc_none:
+ adapter->hw.fc.requested_mode = input;
+ adapter->fc = input;
+ break;
+ default:
+ /* Do nothing */
+ return (error);
+ }
+
+ adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode;
+ igc_force_mac_fc(&adapter->hw);
+ return (error);
+}
+
+/*
+ * Manage Energy Efficient Ethernet:
+ * Control values:
+ * 0/1 - enabled/disabled
+ */
+static int
+igc_sysctl_eee(SYSCTL_HANDLER_ARGS)
+{
+ struct igc_adapter *adapter = (struct igc_adapter *) arg1;
+ int error, value;
+
+ value = adapter->hw.dev_spec._i225.eee_disable;
+ error = sysctl_handle_int(oidp, &value, 0, req);
+ if (error || req->newptr == NULL)
+ return (error);
+
+ adapter->hw.dev_spec._i225.eee_disable = (value != 0);
+ igc_if_init(adapter->ctx);
+
+ return (0);
+}
+
+static int
+igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
+{
+ struct igc_adapter *adapter;
+ int error;
+ int result;
+
+ result = -1;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+
+ if (error || !req->newptr)
+ return (error);
+
+ if (result == 1) {
+ adapter = (struct igc_adapter *) arg1;
+ igc_print_debug_info(adapter);
+ }
+
+ return (error);
+}
+
+static int
+igc_get_rs(SYSCTL_HANDLER_ARGS)
+{
+ struct igc_adapter *adapter = (struct igc_adapter *) arg1;
+ int error;
+ int result;
+
+ result = 0;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+
+ if (error || !req->newptr || result != 1)
+ return (error);
+ igc_dump_rs(adapter);
+
+ return (error);
+}
+
+static void
+igc_if_debug(if_ctx_t ctx)
+{
+ igc_dump_rs(iflib_get_softc(ctx));
+}
+
+/*
+ * This routine is meant to be fluid; add whatever is
+ * needed for debugging a problem. -jfv
+ */
+static void
+igc_print_debug_info(struct igc_adapter *adapter)
+{
+ device_t dev = iflib_get_dev(adapter->ctx);
+ struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
+ struct tx_ring *txr = &adapter->tx_queues->txr;
+ struct rx_ring *rxr = &adapter->rx_queues->rxr;
+
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
+ printf("Interface is RUNNING ");
+ else
+ printf("Interface is NOT RUNNING\n");
+
+ if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE)
+ printf("and INACTIVE\n");
+ else
+ printf("and ACTIVE\n");
+
+ for (int i = 0; i < adapter->tx_num_queues; i++, txr++) {
+ device_printf(dev, "TX Queue %d ------\n", i);
+ device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
+ IGC_READ_REG(&adapter->hw, IGC_TDH(i)),
+ IGC_READ_REG(&adapter->hw, IGC_TDT(i)));
+
+ }
+ for (int j=0; j < adapter->rx_num_queues; j++, rxr++) {
+ device_printf(dev, "RX Queue %d ------\n", j);
+ device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
+ IGC_READ_REG(&adapter->hw, IGC_RDH(j)),
+ IGC_READ_REG(&adapter->hw, IGC_RDT(j)));
+ }
+}
diff --git a/sys/dev/igc/if_igc.h b/sys/dev/igc/if_igc.h
new file mode 100644
index 000000000000..69b2123cd73f
--- /dev/null
+++ b/sys/dev/igc/if_igc.h
@@ -0,0 +1,430 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
+ * All rights reserved.
+ * Copyright (c) 2021 Rubicon Communications, LLC (Netgate)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "opt_ddb.h"
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_rss.h"
+
+#ifdef HAVE_KERNEL_OPTION_HEADERS
+#include "opt_device_polling.h"
+#endif
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#ifdef DDB
+#include <sys/types.h>
+#include <ddb/ddb.h>
+#endif
+#include <sys/buf_ring.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/smp.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <sys/eventhandler.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/iflib.h>
+
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/udp.h>
+
+#include <machine/in_cksum.h>
+#include <dev/led/led.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#include "igc_api.h"
+#include "igc_i225.h"
+#include "ifdi_if.h"
+
+
+#ifndef _IGC_H_DEFINED_
+#define _IGC_H_DEFINED_
+
+
+/* Tunables */
+
+/*
+ * IGC_MAX_TXD: Maximum number of Transmit Descriptors
+ * Valid Range: 128-4096
+ * Default Value: 1024
+ * This value is the number of transmit descriptors allocated by the driver.
+ * Increasing this value allows the driver to queue more transmits. Each
+ * descriptor is 16 bytes.
+ * Since TDLEN should be a multiple of 128 bytes, the number of transmit
+ * descriptors should meet the following condition:
+ * (num_tx_desc * sizeof(struct igc_tx_desc)) % 128 == 0
+ */
+#define IGC_MIN_TXD 128
+#define IGC_MAX_TXD 4096
+#define IGC_DEFAULT_TXD 1024
+#define IGC_DEFAULT_MULTI_TXD 4096
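+/*
+ * Sanity check of the rule above: 1024 descriptors * 16 bytes = 16384
+ * bytes of TDLEN, which is a multiple of 128.
+ */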
+
+/*
+ * IGC_MAX_RXD - Maximum number of receive Descriptors
+ * Valid Range: 128-4096
+ * Default Value: 1024
+ * This value is the number of receive descriptors allocated by the driver.
+ * Increasing this value allows the driver to buffer more incoming packets.
+ * Each descriptor is 16 bytes. A receive buffer is also allocated for each
+ * descriptor. The maximum MTU size is 16110.
+ * Since RDLEN should be a multiple of 128 bytes, the number of receive
+ * descriptors should meet the following condition:
+ * (num_rx_desc * sizeof(struct igc_rx_desc)) % 128 == 0
+ */
+#define IGC_MIN_RXD 128
+#define IGC_MAX_RXD 4096
+#define IGC_DEFAULT_RXD 1024
+#define IGC_DEFAULT_MULTI_RXD 4096
+
+/*
+ * IGC_TIDV_VAL - Transmit Interrupt Delay Value
+ * Valid Range: 0-65535 (0=off)
+ * Default Value: 64
+ * This value delays the generation of transmit interrupts in units of
+ * 1.024 microseconds. Transmit interrupt reduction can improve CPU
+ * efficiency if properly tuned for specific network traffic. If the
+ * system is reporting dropped transmits, this value may be set too high
+ * causing the driver to run out of available transmit descriptors.
+ */
+#define IGC_TIDV_VAL 64
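+/* At the default of 64, the delay is 64 * 1.024 us, i.e. roughly 65.5 us. */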
+
+/*
+ * IGC_TADV_VAL - Transmit Absolute Interrupt Delay Value
+ * Valid Range: 0-65535 (0=off)
+ * Default Value: 64
+ * This value, in units of 1.024 microseconds, limits the delay in which a
+ * transmit interrupt is generated. Useful only if IGC_TIDV is non-zero,
+ * this value ensures that an interrupt is generated after the initial
+ * packet is sent on the wire within the set amount of time. Proper tuning,
+ * along with IGC_TIDV_VAL, may improve traffic throughput in specific
+ * network conditions.
+ */
+#define IGC_TADV_VAL 64
+
+/*
+ * IGC_RDTR_VAL - Receive Interrupt Delay Timer (Packet Timer)
+ * Valid Range: 0-65535 (0=off)
+ * Default Value: 0
+ * This value delays the generation of receive interrupts in units of 1.024
+ * microseconds. Receive interrupt reduction can improve CPU efficiency if
+ * properly tuned for specific network traffic. Increasing this value adds
+ * extra latency to frame reception and can end up decreasing the throughput
+ * of TCP traffic. If the system is reporting dropped receives, this value
+ * may be set too high, causing the driver to run out of available receive
+ * descriptors.
+ *
+ * CAUTION: When setting IGC_RDTR to a value other than 0, adapters
+ * may hang (stop transmitting) under certain network conditions.
+ * If this occurs a WATCHDOG message is logged in the system
+ * event log. In addition, the controller is automatically reset,
+ * restoring the network connection. To eliminate the potential
+ * for the hang ensure that IGC_RDTR is set to 0.
+ */
+#define IGC_RDTR_VAL 0
+
+/*
+ * Receive Interrupt Absolute Delay Timer
+ * Valid Range: 0-65535 (0=off)
+ * Default Value: 64
+ * This value, in units of 1.024 microseconds, limits the delay in which a
+ * receive interrupt is generated. Useful only if IGC_RDTR is non-zero,
+ * this value ensures that an interrupt is generated after the initial
+ * packet is received within the set amount of time. Proper tuning,
+ * along with IGC_RDTR, may improve traffic throughput in specific network
+ * conditions.
+ */
+#define IGC_RADV_VAL 64
+
+/*
+ * This parameter controls whether or not autonegotiation is enabled.
+ * 0 - Disable autonegotiation
+ * 1 - Enable autonegotiation
+ */
+#define DO_AUTO_NEG true
+
+/* Tunables -- End */
+
+#define AUTONEG_ADV_DEFAULT (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
+ ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
+ ADVERTISE_1000_FULL | ADVERTISE_2500_FULL)
+
+#define AUTO_ALL_MODES 0
+
+/*
+ * Miscellaneous constants
+ */
+#define MAX_NUM_MULTICAST_ADDRESSES 128
+#define IGC_FC_PAUSE_TIME 0x0680
+
+#define IGC_TXPBSIZE 20408
+#define IGC_PKTTYPE_MASK 0x0000FFF0
+#define IGC_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coalesce Flush */
+
+#define IGC_RX_PTHRESH 8
+#define IGC_RX_HTHRESH 8
+#define IGC_RX_WTHRESH 4
+
+#define IGC_TX_PTHRESH 8
+#define IGC_TX_HTHRESH 1
+
+/*
+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
+ * also optimize cache line size effect. H/W supports up to cache line size 128.
+ */
+#define IGC_DBA_ALIGN 128
+
+#define IGC_MSIX_BAR 3
+
+/* Defines for printing debug information */
+#define DEBUG_INIT 0
+#define DEBUG_IOCTL 0
+#define DEBUG_HW 0
+
+#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n")
+#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A)
+#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B)
+#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
+#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
+#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
+#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
+#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
+#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
+
+#define IGC_MAX_SCATTER 40
+#define IGC_VFTA_SIZE 128
+#define IGC_TSO_SIZE 65535
+#define IGC_TSO_SEG_SIZE 4096 /* Max dma segment size */
+#define IGC_CSUM_OFFLOAD (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP | \
+ CSUM_IP_SCTP | CSUM_IP6_UDP | CSUM_IP6_TCP | \
+ CSUM_IP6_SCTP) /* Offload bits in mbuf flag */
+
+struct igc_adapter;
+
+struct igc_int_delay_info {
+ struct igc_adapter *adapter; /* Back-pointer to the adapter struct */
+ int offset; /* Register offset to read/write */
+ int value; /* Current value in usecs */
+};
+
+/*
+ * The transmit ring, one per tx queue
+ */
+struct tx_ring {
+ struct igc_adapter *adapter;
+ struct igc_tx_desc *tx_base;
+ uint64_t tx_paddr;
+ qidx_t *tx_rsq;
+ uint8_t me;
+ qidx_t tx_rs_cidx;
+ qidx_t tx_rs_pidx;
+ qidx_t tx_cidx_processed;
+ /* Interrupt resources */
+ void *tag;
+ struct resource *res;
+ unsigned long tx_irq;
+
+ /* Saved csum offloading context information */
+ int csum_flags;
+ int csum_lhlen;
+ int csum_iphlen;
+
+ int csum_thlen;
+ int csum_mss;
+ int csum_pktlen;
+
+ uint32_t csum_txd_upper;
+ uint32_t csum_txd_lower; /* last field */
+};
+
+/*
+ * The Receive ring, one per rx queue
+ */
+struct rx_ring {
+ struct igc_adapter *adapter;
+ struct igc_rx_queue *que;
+ u32 me;
+ u32 payload;
+ union igc_rx_desc_extended *rx_base;
+ uint64_t rx_paddr;
+
+ /* Interrupt resources */
+ void *tag;
+ struct resource *res;
+
+ /* Soft stats */
+ unsigned long rx_irq;
+ unsigned long rx_discarded;
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+};
+
+struct igc_tx_queue {
+ struct igc_adapter *adapter;
+ u32 msix;
+ u32 eims; /* This queue's EIMS bit */
+ u32 me;
+ struct tx_ring txr;
+};
+
+struct igc_rx_queue {
+ struct igc_adapter *adapter;
+ u32 me;
+ u32 msix;
+ u32 eims;
+ struct rx_ring rxr;
+ u64 irqs;
+ struct if_irq que_irq;
+};
+
+/* Our adapter structure */
+struct igc_adapter {
+ struct ifnet *ifp;
+ struct igc_hw hw;
+
+ if_softc_ctx_t shared;
+ if_ctx_t ctx;
+#define tx_num_queues shared->isc_ntxqsets
+#define rx_num_queues shared->isc_nrxqsets
+#define intr_type shared->isc_intr
+ /* FreeBSD operating-system-specific structures. */
+ struct igc_osdep osdep;
+ device_t dev;
+ struct cdev *led_dev;
+
+ struct igc_tx_queue *tx_queues;
+ struct igc_rx_queue *rx_queues;
+ struct if_irq irq;
+
+ struct resource *memory;
+ struct resource *flash;
+ struct resource *ioport;
+
+ struct resource *res;
+ void *tag;
+ u32 linkvec;
+ u32 ivars;
+
+ struct ifmedia *media;
+ int msix;
+ int if_flags;
+ int igc_insert_vlan_header;
+ u32 ims;
+
+ u32 flags;
+ /* Task for FAST handling */
+ struct grouptask link_task;
+
+ u16 num_vlans;
+ u32 txd_cmd;
+
+ u32 tx_process_limit;
+ u32 rx_process_limit;
+ u32 rx_mbuf_sz;
+
+ /* Management and WOL features */
+ u32 wol;
+
+ /* Multicast array memory */
+ u8 *mta;
+
+ /*
+	** Shadow VFTA table; this is needed because
+ ** the real vlan filter table gets cleared during
+ ** a soft reset and the driver needs to be able
+ ** to repopulate it.
+ */
+ u32 shadow_vfta[IGC_VFTA_SIZE];
+
+ /* Info about the interface */
+ u16 link_active;
+ u16 fc;
+ u16 link_speed;
+ u16 link_duplex;
+ u32 smartspeed;
+ u32 dmac;
+ int link_mask;
+
+ u64 que_mask;
+
+ struct igc_int_delay_info tx_int_delay;
+ struct igc_int_delay_info tx_abs_int_delay;
+ struct igc_int_delay_info rx_int_delay;
+ struct igc_int_delay_info rx_abs_int_delay;
+ struct igc_int_delay_info tx_itr;
+
+ /* Misc stats maintained by the driver */
+ unsigned long dropped_pkts;
+ unsigned long link_irq;
+ unsigned long rx_overruns;
+ unsigned long watchdog_events;
+
+ struct igc_hw_stats stats;
+ u16 vf_ifp;
+};
+
+void igc_dump_rs(struct igc_adapter *);
+
+#define IGC_RSSRK_SIZE 4
+#define IGC_RSSRK_VAL(key, i) (key[(i) * IGC_RSSRK_SIZE] | \
+ key[(i) * IGC_RSSRK_SIZE + 1] << 8 | \
+ key[(i) * IGC_RSSRK_SIZE + 2] << 16 | \
+ key[(i) * IGC_RSSRK_SIZE + 3] << 24)
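+/*
+ * IGC_RSSRK_VAL packs four consecutive key bytes into one little-endian
+ * register word; e.g. key bytes { 0x01, 0x02, 0x03, 0x04 } at i == 0
+ * yield 0x04030201.
+ */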
+#endif /* _IGC_H_DEFINED_ */
diff --git a/sys/dev/igc/igc_api.c b/sys/dev/igc/igc_api.c
new file mode 100644
index 000000000000..cad116c2395d
--- /dev/null
+++ b/sys/dev/igc/igc_api.c
@@ -0,0 +1,735 @@
+/*-
+ * Copyright 2021 Intel Corp
+ * Copyright 2021 Rubicon Communications, LLC (Netgate)
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "igc_api.h"
+
+/**
+ * igc_init_mac_params - Initialize MAC function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the MAC
+ * set of functions. Called by drivers or by igc_setup_init_funcs.
+ **/
+s32 igc_init_mac_params(struct igc_hw *hw)
+{
+ s32 ret_val = IGC_SUCCESS;
+
+ if (hw->mac.ops.init_params) {
+ ret_val = hw->mac.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("MAC Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("mac.init_mac_params was NULL\n");
+ ret_val = -IGC_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_init_nvm_params - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the NVM
+ * set of functions. Called by drivers or by igc_setup_init_funcs.
+ **/
+s32 igc_init_nvm_params(struct igc_hw *hw)
+{
+ s32 ret_val = IGC_SUCCESS;
+
+ if (hw->nvm.ops.init_params) {
+ ret_val = hw->nvm.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("NVM Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("nvm.init_nvm_params was NULL\n");
+ ret_val = -IGC_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_init_phy_params - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the PHY
+ * set of functions. Called by drivers or by igc_setup_init_funcs.
+ **/
+s32 igc_init_phy_params(struct igc_hw *hw)
+{
+ s32 ret_val = IGC_SUCCESS;
+
+ if (hw->phy.ops.init_params) {
+ ret_val = hw->phy.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("PHY Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("phy.init_phy_params was NULL\n");
+ ret_val = -IGC_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * device ID stored in the hw structure.
+ * MUST BE FIRST FUNCTION CALLED (explicitly or through
+ * igc_setup_init_funcs()).
+ **/
+s32 igc_set_mac_type(struct igc_hw *hw)
+{
+ struct igc_mac_info *mac = &hw->mac;
+ s32 ret_val = IGC_SUCCESS;
+
+ DEBUGFUNC("igc_set_mac_type");
+
+ switch (hw->device_id) {
+ case IGC_DEV_ID_I225_LM:
+ case IGC_DEV_ID_I225_V:
+ case IGC_DEV_ID_I225_K:
+ case IGC_DEV_ID_I225_I:
+ case IGC_DEV_ID_I220_V:
+ case IGC_DEV_ID_I225_K2:
+ case IGC_DEV_ID_I225_LMVP:
+ case IGC_DEV_ID_I225_IT:
+ case IGC_DEV_ID_I226_LM:
+ case IGC_DEV_ID_I226_V:
+ case IGC_DEV_ID_I226_IT:
+ case IGC_DEV_ID_I221_V:
+ case IGC_DEV_ID_I226_BLANK_NVM:
+ case IGC_DEV_ID_I225_BLANK_NVM:
+ mac->type = igc_i225;
+ break;
+ default:
+ /* Should never have loaded on this device */
+ ret_val = -IGC_ERR_MAC_INIT;
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * igc_setup_init_funcs - Initializes function pointers
+ * @hw: pointer to the HW structure
+ * @init_device: true will initialize the rest of the function pointers
+ * getting the device ready for use. FALSE will only set
+ * MAC type and the function pointers for the other init
+ * functions. Passing FALSE will not generate any hardware
+ * reads or writes.
+ *
+ * This function must be called by a driver in order to use the rest
+ * of the 'shared' code files. Called by drivers only.
+ **/
+s32 igc_setup_init_funcs(struct igc_hw *hw, bool init_device)
+{
+ s32 ret_val;
+
+ /* Can't do much good without knowing the MAC type. */
+ ret_val = igc_set_mac_type(hw);
+ if (ret_val) {
+ DEBUGOUT("ERROR: MAC type could not be set properly.\n");
+ goto out;
+ }
+
+ if (!hw->hw_addr) {
+ DEBUGOUT("ERROR: Registers not mapped\n");
+ ret_val = -IGC_ERR_CONFIG;
+ goto out;
+ }
+
+ /*
+ * Init function pointers to generic implementations. We do this first
+ * allowing a driver module to override it afterward.
+ */
+ igc_init_mac_ops_generic(hw);
+ igc_init_phy_ops_generic(hw);
+ igc_init_nvm_ops_generic(hw);
+
+ /*
+ * Set up the init function pointers. These are functions within the
+ * adapter family file that sets up function pointers for the rest of
+ * the functions in that family.
+ */
+ switch (hw->mac.type) {
+ case igc_i225:
+ igc_init_function_pointers_i225(hw);
+ break;
+ default:
+ DEBUGOUT("Hardware not supported\n");
+ ret_val = -IGC_ERR_CONFIG;
+ break;
+ }
+
+ /*
+ * Initialize the rest of the function pointers. These require some
+ * register reads/writes in some cases.
+ */
+ if (!(ret_val) && init_device) {
+ ret_val = igc_init_mac_params(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = igc_init_nvm_params(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = igc_init_phy_params(hw);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_get_bus_info - Obtain bus information for adapter
+ * @hw: pointer to the HW structure
+ *
+ * This will obtain information about the HW bus for which the
+ * adapter is attached and stores it in the hw structure. This is a
+ * function pointer entry point called by drivers.
+ **/
+s32 igc_get_bus_info(struct igc_hw *hw)
+{
+ if (hw->mac.ops.get_bus_info)
+ return hw->mac.ops.get_bus_info(hw);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_clear_vfta - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * This clears the VLAN filter table on the adapter. This is a function
+ * pointer entry point called by drivers.
+ **/
+void igc_clear_vfta(struct igc_hw *hw)
+{
+ if (hw->mac.ops.clear_vfta)
+ hw->mac.ops.clear_vfta(hw);
+}
+
+/**
+ * igc_write_vfta - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: the 32-bit offset in which to write the value to.
+ * @value: the 32-bit value to write at location offset.
+ *
+ * This writes a 32-bit value to a 32-bit offset in the VLAN filter
+ * table. This is a function pointer entry point called by drivers.
+ **/
+void igc_write_vfta(struct igc_hw *hw, u32 offset, u32 value)
+{
+ if (hw->mac.ops.write_vfta)
+ hw->mac.ops.write_vfta(hw, offset, value);
+}
+
+/**
+ * igc_update_mc_addr_list - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates the Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void igc_update_mc_addr_list(struct igc_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count)
+{
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, mc_addr_list,
+ mc_addr_count);
+}
+
+/**
+ * igc_force_mac_fc - Force MAC flow control
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings. Currently no func pointer exists
+ * and all implementations are handled in the generic version of this
+ * function.
+ **/
+s32 igc_force_mac_fc(struct igc_hw *hw)
+{
+ return igc_force_mac_fc_generic(hw);
+}
+
+/**
+ * igc_check_for_link - Check/Store link connection
+ * @hw: pointer to the HW structure
+ *
+ * This checks the link condition of the adapter and stores the
+ * results in the hw->mac structure. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 igc_check_for_link(struct igc_hw *hw)
+{
+ if (hw->mac.ops.check_for_link)
+ return hw->mac.ops.check_for_link(hw);
+
+ return -IGC_ERR_CONFIG;
+}
+
+/**
+ * igc_reset_hw - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state. This is a function pointer
+ * entry point called by drivers.
+ **/
+s32 igc_reset_hw(struct igc_hw *hw)
+{
+ if (hw->mac.ops.reset_hw)
+ return hw->mac.ops.reset_hw(hw);
+
+ return -IGC_ERR_CONFIG;
+}
+
+/**
+ * igc_init_hw - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation. This is a function
+ * pointer entry point called by drivers.
+ **/
+s32 igc_init_hw(struct igc_hw *hw)
+{
+ if (hw->mac.ops.init_hw)
+ return hw->mac.ops.init_hw(hw);
+
+ return -IGC_ERR_CONFIG;
+}
+
+/**
+ * igc_setup_link - Configures link and flow control
+ * @hw: pointer to the HW structure
+ *
+ * This configures link and flow control settings for the adapter. This
+ * is a function pointer entry point called by drivers. While modules can
+ * also call this, they probably call their own version of this function.
+ **/
+s32 igc_setup_link(struct igc_hw *hw)
+{
+ if (hw->mac.ops.setup_link)
+ return hw->mac.ops.setup_link(hw);
+
+ return -IGC_ERR_CONFIG;
+}
+
+/**
+ * igc_get_speed_and_duplex - Returns current speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to a 16-bit value to store the speed
+ * @duplex: pointer to a 16-bit value to store the duplex.
+ *
+ * This returns the speed and duplex of the adapter in the two 'out'
+ * variables passed in. This is a function pointer entry point called
+ * by drivers.
+ **/
+s32 igc_get_speed_and_duplex(struct igc_hw *hw, u16 *speed, u16 *duplex)
+{
+ if (hw->mac.ops.get_link_up_info)
+ return hw->mac.ops.get_link_up_info(hw, speed, duplex);
+
+ return -IGC_ERR_CONFIG;
+}
+
+/**
+ * igc_disable_pcie_master - Disable PCI-Express master access
+ * @hw: pointer to the HW structure
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests. Currently no func pointer exists and all implementations are
+ * handled in the generic version of this function.
+ **/
+s32 igc_disable_pcie_master(struct igc_hw *hw)
+{
+ return igc_disable_pcie_master_generic(hw);
+}
+
+/**
+ * igc_config_collision_dist - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
+ **/
+void igc_config_collision_dist(struct igc_hw *hw)
+{
+ if (hw->mac.ops.config_collision_dist)
+ hw->mac.ops.config_collision_dist(hw);
+}
+
+/**
+ * igc_rar_set - Sets a receive address register
+ * @hw: pointer to the HW structure
+ * @addr: address to set the RAR to
+ * @index: the RAR to set
+ *
+ * Sets a Receive Address Register (RAR) to the specified address.
+ **/
+int igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index)
+{
+ if (hw->mac.ops.rar_set)
+ return hw->mac.ops.rar_set(hw, addr, index);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_validate_mdi_setting - Ensures valid MDI/MDIX SW state
+ * @hw: pointer to the HW structure
+ *
+ * Ensures that the MDI/MDIX SW state is valid.
+ **/
+s32 igc_validate_mdi_setting(struct igc_hw *hw)
+{
+ if (hw->mac.ops.validate_mdi_setting)
+ return hw->mac.ops.validate_mdi_setting(hw);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_hash_mc_addr - Determines address location in multicast table
+ * @hw: pointer to the HW structure
+ * @mc_addr: Multicast address to hash.
+ *
+ * This hashes an address to determine its location in the multicast
+ * table. Currently no func pointer exists and all implementations
+ * are handled in the generic version of this function.
+ **/
+u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr)
+{
+ return igc_hash_mc_addr_generic(hw, mc_addr);
+}
+
+/**
+ * igc_check_reset_block - Verifies PHY can be reset
+ * @hw: pointer to the HW structure
+ *
+ * Checks if the PHY is in a state that can be reset or if manageability
+ * has it tied up. This is a function pointer entry point called by drivers.
+ **/
+s32 igc_check_reset_block(struct igc_hw *hw)
+{
+ if (hw->phy.ops.check_reset_block)
+ return hw->phy.ops.check_reset_block(hw);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_read_phy_reg - Reads PHY register
+ * @hw: pointer to the HW structure
+ * @offset: the register to read
+ * @data: the buffer to store the 16-bit read.
+ *
+ * Reads the PHY register and returns the value in data.
+ * This is a function pointer entry point called by drivers.
+ **/
+s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
+{
+ if (hw->phy.ops.read_reg)
+ return hw->phy.ops.read_reg(hw, offset, data);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_write_phy_reg - Writes PHY register
+ * @hw: pointer to the HW structure
+ * @offset: the register to write
+ * @data: the value to write.
+ *
+ * Writes the PHY register at offset with the value in data.
+ * This is a function pointer entry point called by drivers.
+ **/
+s32 igc_write_phy_reg(struct igc_hw *hw, u32 offset, u16 data)
+{
+ if (hw->phy.ops.write_reg)
+ return hw->phy.ops.write_reg(hw, offset, data);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_release_phy - Generic release PHY
+ * @hw: pointer to the HW structure
+ *
+ * Return if silicon family does not require a semaphore when accessing the
+ * PHY.
+ **/
+void igc_release_phy(struct igc_hw *hw)
+{
+ if (hw->phy.ops.release)
+ hw->phy.ops.release(hw);
+}
+
+/**
+ * igc_acquire_phy - Generic acquire PHY
+ * @hw: pointer to the HW structure
+ *
+ * Return success if silicon family does not require a semaphore when
+ * accessing the PHY.
+ **/
+s32 igc_acquire_phy(struct igc_hw *hw)
+{
+ if (hw->phy.ops.acquire)
+ return hw->phy.ops.acquire(hw);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_get_phy_info - Retrieves PHY information from registers
+ * @hw: pointer to the HW structure
+ *
+ * This function gets some information from various PHY registers and
+ * populates hw->phy values with it. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 igc_get_phy_info(struct igc_hw *hw)
+{
+ if (hw->phy.ops.get_info)
+ return hw->phy.ops.get_info(hw);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_phy_hw_reset - Hard PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Performs a hard PHY reset. This is a function pointer entry point called
+ * by drivers.
+ **/
+s32 igc_phy_hw_reset(struct igc_hw *hw)
+{
+ if (hw->phy.ops.reset)
+ return hw->phy.ops.reset(hw);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_phy_commit - Soft PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Performs a soft PHY reset on those that apply. This is a function pointer
+ * entry point called by drivers.
+ **/
+s32 igc_phy_commit(struct igc_hw *hw)
+{
+ if (hw->phy.ops.commit)
+ return hw->phy.ops.commit(hw);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_set_d0_lplu_state - Sets low power link up state for D0
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D0
+ * and SmartSpeed is disabled when active is true, else clear lplu for D0
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained. This is a function pointer entry point called by drivers.
+ **/
+s32 igc_set_d0_lplu_state(struct igc_hw *hw, bool active)
+{
+ if (hw->phy.ops.set_d0_lplu_state)
+ return hw->phy.ops.set_d0_lplu_state(hw, active);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_set_d3_lplu_state - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true, else clear lplu for D3
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained. This is a function pointer entry point called by drivers.
+ **/
+s32 igc_set_d3_lplu_state(struct igc_hw *hw, bool active)
+{
+ if (hw->phy.ops.set_d3_lplu_state)
+ return hw->phy.ops.set_d3_lplu_state(hw, active);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_read_mac_addr - Reads MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the MAC address out of the adapter and stores it in the HW structure.
+ * If a MAC-specific read_mac_addr function pointer is registered it is used;
+ * otherwise the read is handled by the generic version of this function.
+ **/
+s32 igc_read_mac_addr(struct igc_hw *hw)
+{
+ if (hw->mac.ops.read_mac_addr)
+ return hw->mac.ops.read_mac_addr(hw);
+
+ return igc_read_mac_addr_generic(hw);
+}
+
+/**
+ * igc_read_pba_string - Read device part number string
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 igc_read_pba_string(struct igc_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+ return igc_read_pba_string_generic(hw, pba_num, pba_num_size);
+}
+
+/**
+ * igc_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
+ * @hw: pointer to the HW structure
+ *
+ * Validates the NVM checksum is correct. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 igc_validate_nvm_checksum(struct igc_hw *hw)
+{
+ if (hw->nvm.ops.validate)
+ return hw->nvm.ops.validate(hw);
+
+ return -IGC_ERR_CONFIG;
+}
+
+/**
+ * igc_update_nvm_checksum - Updates NVM (EEPROM) checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the NVM checksum. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+s32 igc_update_nvm_checksum(struct igc_hw *hw)
+{
+ if (hw->nvm.ops.update)
+ return hw->nvm.ops.update(hw);
+
+ return -IGC_ERR_CONFIG;
+}
+
+/**
+ * igc_reload_nvm - Reloads EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ * extended control register.
+ **/
+void igc_reload_nvm(struct igc_hw *hw)
+{
+ if (hw->nvm.ops.reload)
+ hw->nvm.ops.reload(hw);
+}
+
+/**
+ * igc_read_nvm - Reads NVM (EEPROM)
+ * @hw: pointer to the HW structure
+ * @offset: the word offset to read
+ * @words: number of 16-bit words to read
+ * @data: pointer to the properly sized buffer for the data.
+ *
+ * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
+ * pointer entry point called by drivers.
+ **/
+s32 igc_read_nvm(struct igc_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ if (hw->nvm.ops.read)
+ return hw->nvm.ops.read(hw, offset, words, data);
+
+ return -IGC_ERR_CONFIG;
+}
+
+/**
+ * igc_write_nvm - Writes to NVM (EEPROM)
+ * @hw: pointer to the HW structure
+ * @offset: the word offset to write
+ * @words: number of 16-bit words to write
+ * @data: pointer to the properly sized buffer for the data.
+ *
+ * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
+ * pointer entry point called by drivers.
+ **/
+s32 igc_write_nvm(struct igc_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ if (hw->nvm.ops.write)
+ return hw->nvm.ops.write(hw, offset, words, data);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_power_up_phy - Restores link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * The PHY may be powered down to save power, to turn off link when the
+ * driver is unloaded, or when wake on LAN is not enabled (among others).
+ **/
+void igc_power_up_phy(struct igc_hw *hw)
+{
+ if (hw->phy.ops.power_up)
+ hw->phy.ops.power_up(hw);
+
+ igc_setup_link(hw);
+}
+
+/**
+ * igc_power_down_phy - Power down PHY
+ * @hw: pointer to the HW structure
+ *
+ * The PHY may be powered down to save power, to turn off link when the
+ * driver is unloaded, or when wake on LAN is not enabled (among others).
+ **/
+void igc_power_down_phy(struct igc_hw *hw)
+{
+ if (hw->phy.ops.power_down)
+ hw->phy.ops.power_down(hw);
+}
+
diff --git a/sys/dev/igc/igc_api.h b/sys/dev/igc/igc_api.h
new file mode 100644
index 000000000000..a0fc9ff21166
--- /dev/null
+++ b/sys/dev/igc/igc_api.h
@@ -0,0 +1,58 @@
+/*-
+ * Copyright 2021 Intel Corp
+ * Copyright 2021 Rubicon Communications, LLC (Netgate)
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IGC_API_H_
+#define _IGC_API_H_
+
+#include "igc_hw.h"
+
+extern void igc_init_function_pointers_i225(struct igc_hw *hw);
+
+s32 igc_set_mac_type(struct igc_hw *hw);
+s32 igc_setup_init_funcs(struct igc_hw *hw, bool init_device);
+s32 igc_init_mac_params(struct igc_hw *hw);
+s32 igc_init_nvm_params(struct igc_hw *hw);
+s32 igc_init_phy_params(struct igc_hw *hw);
+s32 igc_get_bus_info(struct igc_hw *hw);
+void igc_clear_vfta(struct igc_hw *hw);
+void igc_write_vfta(struct igc_hw *hw, u32 offset, u32 value);
+s32 igc_force_mac_fc(struct igc_hw *hw);
+s32 igc_check_for_link(struct igc_hw *hw);
+s32 igc_reset_hw(struct igc_hw *hw);
+s32 igc_init_hw(struct igc_hw *hw);
+s32 igc_setup_link(struct igc_hw *hw);
+s32 igc_get_speed_and_duplex(struct igc_hw *hw, u16 *speed, u16 *duplex);
+s32 igc_disable_pcie_master(struct igc_hw *hw);
+void igc_config_collision_dist(struct igc_hw *hw);
+int igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index);
+u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr);
+void igc_update_mc_addr_list(struct igc_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count);
+s32 igc_check_reset_block(struct igc_hw *hw);
+s32 igc_get_cable_length(struct igc_hw *hw);
+s32 igc_validate_mdi_setting(struct igc_hw *hw);
+s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data);
+s32 igc_write_phy_reg(struct igc_hw *hw, u32 offset, u16 data);
+s32 igc_get_phy_info(struct igc_hw *hw);
+void igc_release_phy(struct igc_hw *hw);
+s32 igc_acquire_phy(struct igc_hw *hw);
+s32 igc_phy_hw_reset(struct igc_hw *hw);
+s32 igc_phy_commit(struct igc_hw *hw);
+void igc_power_up_phy(struct igc_hw *hw);
+void igc_power_down_phy(struct igc_hw *hw);
+s32 igc_read_mac_addr(struct igc_hw *hw);
+s32 igc_read_pba_string(struct igc_hw *hw, u8 *pba_num, u32 pba_num_size);
+void igc_reload_nvm(struct igc_hw *hw);
+s32 igc_update_nvm_checksum(struct igc_hw *hw);
+s32 igc_validate_nvm_checksum(struct igc_hw *hw);
+s32 igc_read_nvm(struct igc_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igc_write_nvm(struct igc_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igc_set_d3_lplu_state(struct igc_hw *hw, bool active);
+s32 igc_set_d0_lplu_state(struct igc_hw *hw, bool active);
+
+#endif /* _IGC_API_H_ */
diff --git a/sys/dev/igc/igc_base.c b/sys/dev/igc/igc_base.c
new file mode 100644
index 000000000000..2029184ce66a
--- /dev/null
+++ b/sys/dev/igc/igc_base.c
@@ -0,0 +1,188 @@
+/*-
+ * Copyright 2021 Intel Corp
+ * Copyright 2021 Rubicon Communications, LLC (Netgate)
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "igc_hw.h"
+#include "igc_i225.h"
+#include "igc_mac.h"
+#include "igc_base.h"
+
+/**
+ * igc_acquire_phy_base - Acquire rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * Acquire access rights to the correct PHY.
+ **/
+s32 igc_acquire_phy_base(struct igc_hw *hw)
+{
+ u16 mask = IGC_SWFW_PHY0_SM;
+
+ DEBUGFUNC("igc_acquire_phy_base");
+
+ if (hw->bus.func == IGC_FUNC_1)
+ mask = IGC_SWFW_PHY1_SM;
+
+ return hw->mac.ops.acquire_swfw_sync(hw, mask);
+}
+
+/**
+ * igc_release_phy_base - Release rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to release access rights to the correct PHY.
+ **/
+void igc_release_phy_base(struct igc_hw *hw)
+{
+ u16 mask = IGC_SWFW_PHY0_SM;
+
+ DEBUGFUNC("igc_release_phy_base");
+
+ if (hw->bus.func == IGC_FUNC_1)
+ mask = IGC_SWFW_PHY1_SM;
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
+/**
+ * igc_init_hw_base - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+s32 igc_init_hw_base(struct igc_hw *hw)
+{
+ struct igc_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ u16 i, rar_count = mac->rar_entry_count;
+
+ DEBUGFUNC("igc_init_hw_base");
+
+ /* Setup the receive address */
+ igc_init_rx_addrs_generic(hw, rar_count);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ IGC_WRITE_REG_ARRAY(hw, IGC_MTA, i, 0);
+
+ /* Zero out the Unicast HASH table */
+ DEBUGOUT("Zeroing the UTA\n");
+ for (i = 0; i < mac->uta_reg_count; i++)
+ IGC_WRITE_REG_ARRAY(hw, IGC_UTA, i, 0);
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ igc_clear_hw_cntrs_base_generic(hw);
+
+ return ret_val;
+}
+
+/**
+ * igc_power_down_phy_copper_base - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when wake on LAN is not enabled, remove the link.
+ **/
+void igc_power_down_phy_copper_base(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+
+ if (!(phy->ops.check_reset_block))
+ return;
+
+ /* If the management interface is not enabled, then power down */
+ if (phy->ops.check_reset_block(hw))
+ igc_power_down_phy_copper(hw);
+
+ return;
+}
+
+/**
+ * igc_rx_fifo_flush_base - Clean Rx FIFO after Rx enable
+ * @hw: pointer to the HW structure
+ *
+ * After Rx enable, if manageability is enabled then there is likely some
+ * bad data at the start of the FIFO and possibly in the DMA FIFO. This
+ * function clears the FIFOs and flushes any packets that came in as Rx was
+ * being enabled.
+ **/
+void igc_rx_fifo_flush_base(struct igc_hw *hw)
+{
+ u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
+ int i, ms_wait;
+
+ DEBUGFUNC("igc_rx_fifo_flush_base");
+
+ /* disable IPv6 options as per hardware errata */
+ rfctl = IGC_READ_REG(hw, IGC_RFCTL);
+ rfctl |= IGC_RFCTL_IPV6_EX_DIS;
+ IGC_WRITE_REG(hw, IGC_RFCTL, rfctl);
+
+ if (!(IGC_READ_REG(hw, IGC_MANC) & IGC_MANC_RCV_TCO_EN))
+ return;
+
+ /* Disable all Rx queues */
+ for (i = 0; i < 4; i++) {
+ rxdctl[i] = IGC_READ_REG(hw, IGC_RXDCTL(i));
+ IGC_WRITE_REG(hw, IGC_RXDCTL(i),
+ rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
+ }
+ /* Poll all queues to verify they have shut down */
+ for (ms_wait = 0; ms_wait < 10; ms_wait++) {
+ msec_delay(1);
+ rx_enabled = 0;
+ for (i = 0; i < 4; i++)
+ rx_enabled |= IGC_READ_REG(hw, IGC_RXDCTL(i));
+ if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
+ break;
+ }
+
+ if (ms_wait == 10)
+ DEBUGOUT("Queue disable timed out after 10ms\n");
+
+ /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
+ * incoming packets are rejected. Set RCTL.EN and wait 2ms so that
+ * any packet that was arriving while RCTL.EN was set is flushed
+ */
+ IGC_WRITE_REG(hw, IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);
+
+ rlpml = IGC_READ_REG(hw, IGC_RLPML);
+ IGC_WRITE_REG(hw, IGC_RLPML, 0);
+
+ rctl = IGC_READ_REG(hw, IGC_RCTL);
+ temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
+ temp_rctl |= IGC_RCTL_LPE;
+
+ IGC_WRITE_REG(hw, IGC_RCTL, temp_rctl);
+ IGC_WRITE_REG(hw, IGC_RCTL, temp_rctl | IGC_RCTL_EN);
+ IGC_WRITE_FLUSH(hw);
+ msec_delay(2);
+
+ /* Enable Rx queues that were previously enabled and restore our
+ * previous state
+ */
+ for (i = 0; i < 4; i++)
+ IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl[i]);
+ IGC_WRITE_REG(hw, IGC_RCTL, rctl);
+ IGC_WRITE_FLUSH(hw);
+
+ IGC_WRITE_REG(hw, IGC_RLPML, rlpml);
+ IGC_WRITE_REG(hw, IGC_RFCTL, rfctl);
+
+ /* Flush receive errors generated by workaround */
+ IGC_READ_REG(hw, IGC_ROC);
+ IGC_READ_REG(hw, IGC_RNBC);
+ IGC_READ_REG(hw, IGC_MPC);
+}
diff --git a/sys/dev/igc/igc_base.h b/sys/dev/igc/igc_base.h
new file mode 100644
index 000000000000..fa5356baf096
--- /dev/null
+++ b/sys/dev/igc/igc_base.h
@@ -0,0 +1,131 @@
+/*-
+ * Copyright 2021 Intel Corp
+ * Copyright 2021 Rubicon Communications, LLC (Netgate)
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IGC_BASE_H_
+#define _IGC_BASE_H_
+
+/* forward declaration */
+s32 igc_init_hw_base(struct igc_hw *hw);
+void igc_power_down_phy_copper_base(struct igc_hw *hw);
+extern void igc_rx_fifo_flush_base(struct igc_hw *hw);
+s32 igc_acquire_phy_base(struct igc_hw *hw);
+void igc_release_phy_base(struct igc_hw *hw);
+
+/* Transmit Descriptor - Advanced */
+union igc_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Context descriptors */
+struct igc_adv_tx_context_desc {
+ __le32 vlan_macip_lens;
+ union {
+ __le32 launch_time;
+ __le32 seqnum_seed;
+ };
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
+#define IGC_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IGC_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
+#define IGC_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
+#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
+#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
+#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IGC_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */
+#define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */
+#define IGC_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */
+#define IGC_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IGC_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
+#define IGC_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
+#define IGC_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+/* 1st & Last TSO-full iSCSI PDU*/
+#define IGC_ADVTXD_POPTS_ISCO_FULL 0x00001800
+#define IGC_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
+#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+
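+/*
+ * Example (illustrative only): a single-segment data descriptor for a frame
+ * with no offloads would typically combine the masks above as
+ *
+ *    cmd_type_len  = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ *        IGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DCMD_EOP | seg_len;
+ *    olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;
+ *
+ * where seg_len and paylen stand for the buffer and total payload lengths
+ * supplied by the transmit path.
+ */
+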
+/* Advanced Transmit Context Descriptor Config */
+#define IGC_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IGC_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define IGC_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IGC_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IGC_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define IGC_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
+/* IPSec Encrypt Enable for ESP */
+#define IGC_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
+/* Req requires Markers and CRC */
+#define IGC_ADVTXD_TUCMD_MKRREQ 0x00002000
+#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+/* Adv ctxt IPSec SA IDX mask */
+#define IGC_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF
+/* Adv ctxt IPSec ESP len mask */
+#define IGC_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF
+
+#define IGC_RAR_ENTRIES_BASE 16
+
+/* Receive Descriptor - Advanced */
+union igc_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /*RSS type, Pkt type*/
+ /* Split Header, header buffer len */
+ __le16 hdr_info;
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+/* Additional Transmit Descriptor Control definitions */
+#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
+
+/* Additional Receive Descriptor Control definitions */
+#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
+
+/* SRRCTL bit definitions */
+#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
+#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
+#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
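+
+/*
+ * Example (illustrative only): an SRRCTL value for a 2 KB, single-buffer,
+ * advanced-descriptor receive queue could be built from the above as
+ *
+ *    srrctl = (2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT) |
+ *        IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ */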
+
+#endif /* _IGC_BASE_H_ */
diff --git a/sys/dev/igc/igc_defines.h b/sys/dev/igc/igc_defines.h
new file mode 100644
index 000000000000..6ac9d480e7ba
--- /dev/null
+++ b/sys/dev/igc/igc_defines.h
@@ -0,0 +1,1347 @@
+/*-
+ * Copyright 2021 Intel Corp
+ * Copyright 2021 Rubicon Communications, LLC (Netgate)
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IGC_DEFINES_H_
+#define _IGC_DEFINES_H_
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define IGC_WUC_APME 0x00000001 /* APM Enable */
+#define IGC_WUC_PME_EN 0x00000002 /* PME Enable */
+#define IGC_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define IGC_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
+#define IGC_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
+
+/* Wake Up Filter Control */
+#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define IGC_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define IGC_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+
+/* Wake Up Status */
+#define IGC_WUS_LNKC IGC_WUFC_LNKC
+#define IGC_WUS_MAG IGC_WUFC_MAG
+#define IGC_WUS_EX IGC_WUFC_EX
+#define IGC_WUS_MC IGC_WUFC_MC
+#define IGC_WUS_BC IGC_WUFC_BC
+
+/* Packet types that are enabled for wake packet delivery */
+#define WAKE_PKT_WUS ( \
+ IGC_WUS_EX | \
+ IGC_WUS_ARPD | \
+ IGC_WUS_IPV4 | \
+ IGC_WUS_IPV6 | \
+ IGC_WUS_NSD)
+
+/* Wake Up Packet Length */
+#define IGC_WUPL_MASK 0x00000FFF
+
+/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */
+#define IGC_WUPM_BYTES 128
+
+#define IGC_WUS_ARPD 0x00000020 /* Directed ARP Request */
+#define IGC_WUS_IPV4 0x00000040 /* Directed IPv4 */
+#define IGC_WUS_IPV6 0x00000080 /* Directed IPv6 */
+#define IGC_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */
+
+/* Extended Device Control */
+#define IGC_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */
+#define IGC_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */
+#define IGC_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */
+#define IGC_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable Pin 3 data */
+#define IGC_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
+#define IGC_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */
+#define IGC_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */
+#define IGC_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+#define IGC_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
+#define IGC_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define IGC_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */
+#define IGC_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
+#define IGC_CTRL_EXT_EIAME 0x01000000
+#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
+#define IGC_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
+#define IGC_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
+#define IGC_CTRL_EXT_PHYPDEN 0x00100000
+#define IGC_IVAR_VALID 0x80
+#define IGC_GPIE_NSICR 0x00000001
+#define IGC_GPIE_MSIX_MODE 0x00000010
+#define IGC_GPIE_EIAME 0x40000000
+#define IGC_GPIE_PBA 0x80000000
+
+/* Receive Descriptor bit definitions */
+#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define IGC_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define IGC_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define IGC_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define IGC_RXD_STAT_IPIDV 0x200 /* IP identification valid */
+#define IGC_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define IGC_RXD_ERR_CE 0x01 /* CRC Error */
+#define IGC_RXD_ERR_SE 0x02 /* Symbol Error */
+#define IGC_RXD_ERR_SEQ 0x04 /* Sequence Error */
+#define IGC_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
+#define IGC_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define IGC_RXD_ERR_IPE 0x40 /* IP Checksum Error */
+#define IGC_RXD_ERR_RXE 0x80 /* Rx Data Error */
+#define IGC_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+
+#define IGC_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */
+#define IGC_RXDEXT_STATERR_LB 0x00040000
+#define IGC_RXDEXT_STATERR_L4E 0x20000000
+#define IGC_RXDEXT_STATERR_IPE 0x40000000
+#define IGC_RXDEXT_STATERR_RXE 0x80000000
+
+/* Same mask, but for extended and packet split descriptors */
+#define IGC_RXDEXT_ERR_FRAME_ERR_MASK ( \
+ IGC_RXDEXT_STATERR_CE | \
+ IGC_RXDEXT_STATERR_SE | \
+ IGC_RXDEXT_STATERR_SEQ | \
+ IGC_RXDEXT_STATERR_CXE | \
+ IGC_RXDEXT_STATERR_RXE)
+
+#if !defined(EXTERNAL_RELEASE) || defined(IGCE_MQ)
+#define IGC_MRQC_ENABLE_RSS_2Q 0x00000001
+#endif /* !EXTERNAL_RELEASE || IGCE_MQ */
+#define IGC_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000
+#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
+#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000
+#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+
+#define IGC_RXDPS_HDRSTAT_HDRSP 0x00008000
+
+/* Management Control */
+#define IGC_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define IGC_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+#define IGC_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
+#define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define IGC_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+/* Enable MAC address filtering */
+#define IGC_MANC_EN_MAC_ADDR_FILTER 0x00100000
+/* Enable MNG packets to host memory */
+#define IGC_MANC_EN_MNG2HOST 0x00200000
+
+#define IGC_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
+#define IGC_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
+#define IGC_MDEF_PORT_623 0x00000800 /* Port 0x26f */
+#define IGC_MDEF_PORT_664 0x00000400 /* Port 0x298 */
+
+/* Receive Control */
+#define IGC_RCTL_RST 0x00000001 /* Software reset */
+#define IGC_RCTL_EN 0x00000002 /* enable */
+#define IGC_RCTL_SBP 0x00000004 /* store bad packet */
+#define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */
+#define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */
+#define IGC_RCTL_LPE 0x00000020 /* long packet enable */
+#define IGC_RCTL_LBM_NO 0x00000000 /* no loopback mode */
+#define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+#define IGC_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
+#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
+#define IGC_RCTL_RDMTS_HEX 0x00010000
+#define IGC_RCTL_RDMTS1_HEX IGC_RCTL_RDMTS_HEX
+#define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define IGC_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
+#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */
+/* these buffer sizes are valid if IGC_RCTL_BSEX is 0 */
+#define IGC_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
+#define IGC_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */
+#define IGC_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */
+#define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
+/* these buffer sizes are valid if IGC_RCTL_BSEX is 1 */
+#define IGC_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */
+#define IGC_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */
+#define IGC_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */
+#define IGC_RCTL_VFE 0x00040000 /* vlan filter enable */
+#define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define IGC_RCTL_CFI 0x00100000 /* canonical form indicator */
+#define IGC_RCTL_DPF 0x00400000 /* discard pause frames */
+#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+#define IGC_RCTL_BSEX 0x02000000 /* Buffer size extension */
+#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+
+/* Use byte values for the following shift parameters
+ * Usage:
+ * psrctl |= (((ROUNDUP(value0, 128) >> IGC_PSRCTL_BSIZE0_SHIFT) &
+ * IGC_PSRCTL_BSIZE0_MASK) |
+ * ((ROUNDUP(value1, 1024) >> IGC_PSRCTL_BSIZE1_SHIFT) &
+ * IGC_PSRCTL_BSIZE1_MASK) |
+ * ((ROUNDUP(value2, 1024) << IGC_PSRCTL_BSIZE2_SHIFT) &
+ * IGC_PSRCTL_BSIZE2_MASK) |
+ * ((ROUNDUP(value3, 1024) << IGC_PSRCTL_BSIZE3_SHIFT) &
+ * IGC_PSRCTL_BSIZE3_MASK));
+ * where value0 = [128..16256], default=256
+ * value1 = [1024..64512], default=4096
+ * value2 = [0..64512], default=4096
+ * value3 = [0..64512], default=0
+ */
+
+#define IGC_PSRCTL_BSIZE0_MASK 0x0000007F
+#define IGC_PSRCTL_BSIZE1_MASK 0x00003F00
+#define IGC_PSRCTL_BSIZE2_MASK 0x003F0000
+#define IGC_PSRCTL_BSIZE3_MASK 0x3F000000
+
+#define IGC_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
+#define IGC_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
+#define IGC_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
+#define IGC_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
+
+/* SWFW_SYNC Definitions */
+#define IGC_SWFW_EEP_SM 0x01
+#define IGC_SWFW_PHY0_SM 0x02
+#define IGC_SWFW_PHY1_SM 0x04
+#define IGC_SWFW_CSR_SM 0x08
+#define IGC_SWFW_SW_MNG_SM 0x400
+
+/* Device Control */
+#define IGC_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
+#define IGC_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
+#define IGC_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master reqs */
+#define IGC_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
+#define IGC_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define IGC_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+#define IGC_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+#define IGC_CTRL_SPD_10 0x00000000 /* Force 10Mb */
+#define IGC_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+#define IGC_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define IGC_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+#define IGC_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define IGC_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
+#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define IGC_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
+#define IGC_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define IGC_CTRL_DEV_RST 0x20000000 /* Device reset */
+#define IGC_CTRL_RST 0x04000000 /* Global reset */
+#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define IGC_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+
+
+#define IGC_CONNSW_AUTOSENSE_EN 0x1
+#define IGC_PCS_LCTL_FORCE_FCTRL 0x80
+
+#define IGC_PCS_LSTS_AN_COMPLETE 0x10000
+
+/* Device Status */
+#define IGC_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */
+#define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define IGC_STATUS_FUNC_SHIFT 2
+#define IGC_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define IGC_STATUS_SPEED_MASK 0x000000C0
+#define IGC_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
+#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define IGC_STATUS_SPEED_2500 0x00400000 /* Speed 2.5Gb/s indication for I225 */
+#define IGC_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */
+#define IGC_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
+#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */
+#define IGC_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */
+#define IGC_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */
+#define IGC_STATUS_PCIM_STATE 0x40000000 /* PCIm function state */
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL 0x0020
+#define ADVERTISE_2500_HALF 0x0040 /* NOT used, just FYI */
+#define ADVERTISE_2500_FULL 0x0080
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define IGC_ALL_SPEED_DUPLEX ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL | ADVERTISE_1000_FULL)
+#define IGC_ALL_SPEED_DUPLEX_2500 ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL)
+#define IGC_ALL_NOT_GIG ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL)
+#define IGC_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define IGC_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
+#define IGC_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT IGC_ALL_SPEED_DUPLEX
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500 IGC_ALL_SPEED_DUPLEX_2500
+
+/* LED Control */
+#define IGC_LEDCTL_LED0_MODE_MASK 0x0000000F
+#define IGC_LEDCTL_LED0_MODE_SHIFT 0
+#define IGC_LEDCTL_LED0_IVRT 0x00000040
+#define IGC_LEDCTL_LED0_BLINK 0x00000080
+
+#define IGC_LEDCTL_MODE_LED_ON 0xE
+#define IGC_LEDCTL_MODE_LED_OFF 0xF
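+
+/*
+ * Example (illustrative only): forcing LED0 on uses the mask/shift pair above
+ * to replace the mode field:
+ *
+ *    ledctl = (ledctl & ~IGC_LEDCTL_LED0_MODE_MASK) |
+ *        (IGC_LEDCTL_MODE_LED_ON << IGC_LEDCTL_LED0_MODE_SHIFT);
+ */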
+
+/* Transmit Descriptor bit definitions */
+#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */
+#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */
+#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define IGC_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define IGC_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IGC_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
+#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
+#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IGC_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
+#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */
+#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */
+#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
+#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
+
+/* Transmit Control */
+#define IGC_TCTL_EN 0x00000002 /* enable Tx */
+#define IGC_TCTL_PSP 0x00000008 /* pad short packets */
+#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */
+#define IGC_TCTL_COLD 0x003ff000 /* collision distance */
+#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+#define IGC_TCTL_MULR 0x10000000 /* Multiple request support */
+
+/* Transmit Arbitration Count */
+#define IGC_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */
+
+/* SerDes Control */
+#define IGC_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+#define IGC_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
+
+/* Receive Checksum Control */
+#define IGC_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
+#define IGC_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
+#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
+#define IGC_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
+/* GPY211 - I225 defines */
+#define GPY_MMD_MASK 0xFFFF0000
+#define GPY_MMD_SHIFT 16
+#define GPY_REG_MASK 0x0000FFFF
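+
+/*
+ * Example (illustrative only): a combined GPY211 register address packs the
+ * MMD (device) number in the upper word and the register in the lower word,
+ * so it can be split as
+ *
+ *    mmd = (addr & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
+ *    reg = addr & GPY_REG_MASK;
+ */
+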
+/* Header split receive */
+#define IGC_RFCTL_NFSW_DIS 0x00000040
+#define IGC_RFCTL_NFSR_DIS 0x00000080
+#define IGC_RFCTL_ACK_DIS 0x00001000
+#define IGC_RFCTL_EXTEN 0x00008000
+#define IGC_RFCTL_IPV6_EX_DIS 0x00010000
+#define IGC_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+#define IGC_RFCTL_LEF 0x00040000
+
+/* Collision related configuration parameters */
+#define IGC_CT_SHIFT 4
+#define IGC_COLLISION_THRESHOLD 15
+#define IGC_COLLISION_DISTANCE 63
+#define IGC_COLD_SHIFT 12
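+
+/*
+ * Example (illustrative only): the collision distance is programmed into
+ * TCTL.COLD with the shift above, e.g.
+ *
+ *    tctl &= ~IGC_TCTL_COLD;
+ *    tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT;
+ */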
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82543_TIPG_IPGT_FIBER 9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define IGC_TIPG_IPGT_MASK 0x000003FF
+
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define IGC_TIPG_IPGR1_SHIFT 10
+
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define IGC_TIPG_IPGR2_SHIFT 20
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
+
+#define ETHERNET_FCS_SIZE 4
+#define MAX_JUMBO_FRAME_SIZE MJUM9BYTES
+#define IGC_TX_PTR_GAP 0x1F
+
+/* Extended Configuration Control and Size */
+#define IGC_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
+#define IGC_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
+#define IGC_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
+#define IGC_EXTCNF_CTRL_SWFLAG 0x00000020
+#define IGC_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080
+#define IGC_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
+#define IGC_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
+#define IGC_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
+#define IGC_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16
+
+#define IGC_PHY_CTRL_D0A_LPLU 0x00000002
+#define IGC_PHY_CTRL_NOND0A_LPLU 0x00000004
+#define IGC_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define IGC_PHY_CTRL_GBE_DISABLE 0x00000040
+
+#define IGC_KABGTXD_BGSQLBIAS 0x00050000
+
+/* PBA constants */
+#define IGC_PBA_8K 0x0008 /* 8KB */
+#define IGC_PBA_10K 0x000A /* 10KB */
+#define IGC_PBA_12K 0x000C /* 12KB */
+#define IGC_PBA_14K 0x000E /* 14KB */
+#define IGC_PBA_16K 0x0010 /* 16KB */
+#define IGC_PBA_18K 0x0012
+#define IGC_PBA_20K 0x0014
+#define IGC_PBA_22K 0x0016
+#define IGC_PBA_24K 0x0018
+#define IGC_PBA_26K 0x001A
+#define IGC_PBA_30K 0x001E
+#define IGC_PBA_32K 0x0020
+#define IGC_PBA_34K 0x0022
+#define IGC_PBA_35K 0x0023
+#define IGC_PBA_38K 0x0026
+#define IGC_PBA_40K 0x0028
+#define IGC_PBA_48K 0x0030 /* 48KB */
+#define IGC_PBA_64K 0x0040 /* 64KB */
+
+#define IGC_PBA_RXA_MASK 0xFFFF
+
+#define IGC_PBS_16K IGC_PBA_16K
+
+/* Uncorrectable/correctable ECC Error counts and enable bits */
+#define IGC_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF
+#define IGC_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00
+#define IGC_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8
+#define IGC_PBECCSTS_ECC_ENABLE 0x00010000
+
+#define IFS_MAX 80
+#define IFS_MIN 40
+#define IFS_RATIO 4
+#define IFS_STEP 10
+#define MIN_NUM_XMITS 1000
+
+/* SW Semaphore Register */
+#define IGC_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define IGC_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define IGC_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
+
+#define IGC_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
+
+/* Interrupt Cause Read */
+#define IGC_ICR_TXDW 0x00000001 /* Transmit desc written back */
+#define IGC_ICR_TXQE 0x00000002 /* Transmit Queue empty */
+#define IGC_ICR_LSC 0x00000004 /* Link Status Change */
+#define IGC_ICR_RXSEQ 0x00000008 /* Rx sequence error */
+#define IGC_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
+#define IGC_ICR_RXO 0x00000040 /* Rx overrun */
+#define IGC_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
+#define IGC_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */
+#define IGC_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
+#define IGC_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
+#define IGC_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
+#define IGC_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
+#define IGC_ICR_TXD_LOW 0x00008000
+#define IGC_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
+#define IGC_ICR_TS 0x00080000 /* Time Sync Interrupt */
+#define IGC_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
+/* If this bit is asserted, the driver should claim the interrupt */
+#define IGC_ICR_INT_ASSERTED 0x80000000
+#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
+#define IGC_ICR_FER 0x00400000 /* Fatal Error */
+
+
+
+/* Extended Interrupt Cause Read */
+#define IGC_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
+#define IGC_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
+#define IGC_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
+#define IGC_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
+#define IGC_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
+#define IGC_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
+#define IGC_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
+#define IGC_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
+#define IGC_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
+#define IGC_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+/* TCP Timer */
+#define IGC_TCPTIMER_KS 0x00000100 /* KickStart */
+#define IGC_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */
+#define IGC_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */
+#define IGC_TCPTIMER_LOOP 0x00000800 /* Loop */
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXT0 = Receiver Timer Interrupt (ring 0)
+ * o TXDW = Transmit Descriptor Written Back
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ * o LSC = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+ IGC_IMS_RXT0 | \
+ IGC_IMS_TXDW | \
+ IGC_IMS_RXDMT0 | \
+ IGC_IMS_RXSEQ | \
+ IGC_IMS_LSC)
+
+/* Interrupt Mask Set */
+#define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */
+#define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */
+#define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */
+#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */
+#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */
+#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */
+#define IGC_IMS_RXO IGC_ICR_RXO /* Rx overrun */
+#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */
+#define IGC_IMS_TXD_LOW IGC_ICR_TXD_LOW
+#define IGC_IMS_ECCER IGC_ICR_ECCER /* Uncorrectable ECC Error */
+#define IGC_IMS_TS IGC_ICR_TS /* Time Sync Interrupt */
+#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */
+#define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */
+#define IGC_IMS_FER IGC_ICR_FER /* Fatal Error */
+
+#define IGC_IMS_THS IGC_ICR_THS /* ICR.TS: Thermal Sensor Event */
+#define IGC_IMS_MDDET IGC_ICR_MDDET /* Malicious Driver Detect */
+/* Extended Interrupt Mask Set */
+#define IGC_EIMS_RX_QUEUE0 IGC_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define IGC_EIMS_RX_QUEUE1 IGC_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define IGC_EIMS_RX_QUEUE2 IGC_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define IGC_EIMS_RX_QUEUE3 IGC_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define IGC_EIMS_TX_QUEUE0 IGC_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define IGC_EIMS_TX_QUEUE1 IGC_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define IGC_EIMS_TX_QUEUE2 IGC_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define IGC_EIMS_TX_QUEUE3 IGC_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define IGC_EIMS_TCP_TIMER IGC_EICR_TCP_TIMER /* TCP Timer */
+#define IGC_EIMS_OTHER IGC_EICR_OTHER /* Interrupt Cause Active */
+
+/* Interrupt Cause Set */
+#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */
+#define IGC_ICS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */
+#define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */
+
+/* Extended Interrupt Cause Set */
+#define IGC_EICS_RX_QUEUE0 IGC_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define IGC_EICS_RX_QUEUE1 IGC_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define IGC_EICS_RX_QUEUE2 IGC_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define IGC_EICS_RX_QUEUE3 IGC_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define IGC_EICS_TX_QUEUE0 IGC_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define IGC_EICS_TX_QUEUE1 IGC_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define IGC_EICS_TX_QUEUE2 IGC_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define IGC_EICS_TX_QUEUE3 IGC_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define IGC_EICS_TCP_TIMER IGC_EICR_TCP_TIMER /* TCP Timer */
+#define IGC_EICS_OTHER IGC_EICR_OTHER /* Interrupt Cause Active */
+
+#define IGC_EITR_ITR_INT_MASK 0x0000FFFF
+#define IGC_EITR_INTERVAL 0x00007FFC
+/* IGC_EITR_CNT_IGNR is only for 82576 and newer */
+#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
+
+/* Transmit Descriptor Control */
+#define IGC_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define IGC_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define IGC_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define IGC_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
+#define IGC_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define IGC_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
+/* Enable the counting of descriptors still to be processed. */
+#define IGC_TXDCTL_COUNT_DESC 0x00400000
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE 0x8808
+
+/* 802.1q VLAN Packet Size */
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
+#define IGC_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots. However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define IGC_RAR_ENTRIES 15
+#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */
+#define IGC_RAL_MAC_ADDR_LEN 4
+#define IGC_RAH_MAC_ADDR_LEN 2
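+
+/*
+ * Example (illustrative only): a RAR entry stores the first four bytes of the
+ * MAC address in RAL and the remaining two, plus the Address Valid bit, in
+ * RAH:
+ *
+ *    ral = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
+ *    rah = addr[4] | (addr[5] << 8) | IGC_RAH_AV;
+ */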
+
+/* Error Codes */
+#define IGC_SUCCESS 0
+#define IGC_ERR_NVM 1
+#define IGC_ERR_PHY 2
+#define IGC_ERR_CONFIG 3
+#define IGC_ERR_PARAM 4
+#define IGC_ERR_MAC_INIT 5
+#define IGC_ERR_PHY_TYPE 6
+#define IGC_ERR_RESET 9
+#define IGC_ERR_MASTER_REQUESTS_PENDING 10
+#define IGC_ERR_HOST_INTERFACE_COMMAND 11
+#define IGC_BLK_PHY_RESET 12
+#define IGC_ERR_SWFW_SYNC 13
+#define IGC_NOT_IMPLEMENTED 14
+#define IGC_ERR_MBX 15
+#define IGC_ERR_INVALID_ARGUMENT 16
+#define IGC_ERR_NO_SPACE 17
+#define IGC_ERR_NVM_PBA_SECTION 18
+#define IGC_ERR_INVM_VALUE_NOT_FOUND 20
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define COPPER_LINK_UP_LIMIT 10
+#define PHY_AUTO_NEG_LIMIT 45
+/* Number of 100-microsecond intervals we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT 800
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT 100
+/* Number of 2-millisecond intervals we wait when acquiring MDIO ownership. */
+#define MDIO_OWNERSHIP_TIMEOUT 10
+/* Number of milliseconds for NVM auto read done after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT 10
+
+/* Flow Control */
+#define IGC_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
+#define IGC_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
+#define IGC_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
+
+/* Transmit Configuration Word */
+#define IGC_TXCW_FD 0x00000020 /* TXCW full duplex */
+#define IGC_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
+#define IGC_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
+#define IGC_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
+#define IGC_TXCW_ANE 0x80000000 /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define IGC_RXCW_CW 0x0000ffff /* RxConfigWord mask */
+#define IGC_RXCW_IV 0x08000000 /* Receive config invalid */
+#define IGC_RXCW_C 0x20000000 /* Receive config */
+#define IGC_RXCW_SYNCH 0x40000000 /* Receive config synch */
+
+#define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */
+#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
+
+#define IGC_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define IGC_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define IGC_TSYNCRXCTL_TYPE_ALL 0x08
+#define IGC_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define IGC_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
+#define IGC_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
+
+#define IGC_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
+#define IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
+#define IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
+#define IGC_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
+#define IGC_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
+#define IGC_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
+
+#define IGC_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
+#define IGC_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
+#define IGC_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
+#define IGC_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
+#define IGC_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
+#define IGC_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
+#define IGC_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
+#define IGC_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
+#define IGC_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
+#define IGC_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
+#define IGC_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
+
+#define IGC_TIMINCA_16NS_SHIFT 24
+#define IGC_TIMINCA_INCPERIOD_SHIFT 24
+#define IGC_TIMINCA_INCVALUE_MASK 0x00FFFFFF
+
+/* Time Sync Interrupt Cause/Mask Register Bits */
+#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */
+#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */
+#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */
+#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */
+#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */
+#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */
+
+#define TSYNC_INTERRUPTS TSINTR_TXTS
+
+/* TSAUXC Configuration Bits */
+#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */
+#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */
+#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */
+#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */
+#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */
+#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */
+#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */
+#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 1. */
+
+/* SDP Configuration Bits */
+#define AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */
+#define AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */
+#define AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */
+#define AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */
+#define TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */
+#define TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */
+#define TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */
+#define TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. */
+#define TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */
+#define TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */
+#define TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */
+#define TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */
+#define TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */
+#define TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */
+#define TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */
+#define TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */
+#define TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */
+#define TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */
+#define TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */
+#define TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */
+#define TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */
+#define TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */
+#define TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */
+#define TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */
+
+#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
+#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
+
+/* Extended Device Control */
+#define IGC_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */
+
+/* ETQF register bit definitions */
+#define IGC_ETQF_1588 (1 << 30)
+#define IGC_FTQF_VF_BP 0x00008000
+#define IGC_FTQF_1588_TIME_STAMP 0x08000000
+#define IGC_FTQF_MASK 0xF0000000
+#define IGC_FTQF_MASK_PROTO_BP 0x10000000
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define IGC_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
+#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+
+#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
+#define IGC_TSICR_TXTS 0x00000002
+#define IGC_TSIM_TXTS 0x00000002
+/* TUPLE Filtering Configuration */
+#define IGC_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */
+#define IGC_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */
+#define IGC_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */
+/* TTQF TCP Bit, shift with IGC_TTQF_PROTOCOL SHIFT */
+#define IGC_TTQF_PROTOCOL_TCP 0x0
+/* TTQF UDP Bit, shift with IGC_TTQF_PROTOCOL_SHIFT */
+#define IGC_TTQF_PROTOCOL_UDP 0x1
+/* TTQF SCTP Bit, shift with IGC_TTQF_PROTOCOL_SHIFT */
+#define IGC_TTQF_PROTOCOL_SCTP 0x2
+#define IGC_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */
+#define IGC_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shift */
+#define IGC_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */
+#define IGC_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */
+#define IGC_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */
+#define IGC_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */
+#define IGC_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */
+#define IGC_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */
+
+#define IGC_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
+#define IGC_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
+#define IGC_MDICNFG_PHY_MASK 0x03E00000
+#define IGC_MDICNFG_PHY_SHIFT 21
+
+#define IGC_MEDIA_PORT_COPPER 1
+#define IGC_MEDIA_PORT_OTHER 2
+#define IGC_M88E1112_AUTO_COPPER_SGMII 0x2
+#define IGC_M88E1112_AUTO_COPPER_BASEX 0x3
+#define IGC_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */
+#define IGC_M88E1112_MAC_CTRL_1 0x10
+#define IGC_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
+#define IGC_M88E1112_MAC_CTRL_1_MODE_SHIFT 7
+#define IGC_M88E1112_PAGE_ADDR 0x16
+#define IGC_M88E1112_STATUS 0x01
+
+#define IGC_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */
+#define IGC_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */
+#define IGC_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */
+#define IGC_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */
+#define IGC_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */
+
+/* EEE defines */
+#define IGC_IPCNFG_EEE_2_5G_AN 0x00000010 /* IPCNFG EEE Ena 2.5G AN */
+#define IGC_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */
+#define IGC_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */
+#define IGC_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */
+#define IGC_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */
+#define IGC_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */
+/* EEE status */
+#define IGC_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
+#define IGC_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */
+#define IGC_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */
+#define IGC_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */
+#define IGC_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */
+#define IGC_M88E1543_EEE_CTRL_1 0x0
+#define IGC_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
+#define IGC_M88E1543_FIBER_CTRL 0x0 /* Fiber Control Register */
+#define IGC_EEE_ADV_DEV_I354 7
+#define IGC_EEE_ADV_ADDR_I354 60
+#define IGC_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
+#define IGC_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
+#define IGC_PCS_STATUS_DEV_I354 3
+#define IGC_PCS_STATUS_ADDR_I354 1
+#define IGC_PCS_STATUS_RX_LPI_RCVD 0x0400
+#define IGC_PCS_STATUS_TX_LPI_RCVD 0x0800
+#define IGC_M88E1512_CFG_REG_1 0x0010
+#define IGC_M88E1512_CFG_REG_2 0x0011
+#define IGC_M88E1512_CFG_REG_3 0x0007
+#define IGC_M88E1512_MODE 0x0014
+#define IGC_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */
+#define IGC_EEE_LP_ADV_DEV_I225 7 /* EEE LP Adv Device */
+#define IGC_EEE_LP_ADV_ADDR_I225 61 /* EEE LP Adv Register */
+
+#define IGC_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */
+#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */
+#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */
+#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */
+#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD 0x0002 /* New page received from LP */
+#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel Detection Fault */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+/* 1=Repeater/switch device port 0=DTE device */
+#define CR_1000T_REPEATER_DTE 0x0400
+/* 1=Configure PHY as Master 0=Configure PHY as Slave */
+#define CR_1000T_MS_VALUE 0x0800
+/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */
+#define CR_1000T_MS_ENABLE 0x1000
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */
+#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */
+#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
+
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL 0x00 /* Control Register */
+#define PHY_STATUS 0x01 /* Status Register */
+#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
+
+/* PHY GPY 211 registers */
+#define STANDARD_AN_REG_MASK 0x0007 /* MMD */
+#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */
+#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */
+#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */
+
+#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
+
+/* NVM Control */
+#define IGC_EECD_SK 0x00000001 /* NVM Clock */
+#define IGC_EECD_CS 0x00000002 /* NVM Chip Select */
+#define IGC_EECD_DI 0x00000004 /* NVM Data In */
+#define IGC_EECD_DO 0x00000008 /* NVM Data Out */
+#define IGC_EECD_REQ 0x00000040 /* NVM Access Request */
+#define IGC_EECD_GNT 0x00000080 /* NVM Access Grant */
+#define IGC_EECD_PRES 0x00000100 /* NVM Present */
+#define IGC_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
+/* NVM Addressing bits based on type 0=small, 1=large */
+#define IGC_EECD_ADDR_BITS 0x00000400
+#define IGC_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
+#define IGC_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
+#define IGC_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
+#define IGC_EECD_SIZE_EX_SHIFT 11
+#define IGC_EECD_FLUPD 0x00080000 /* Update FLASH */
+#define IGC_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */
+#define IGC_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
+#define IGC_EECD_SEC1VAL_VALID_MASK (IGC_EECD_AUTO_RD | IGC_EECD_PRES)
+
+#define IGC_EECD_FLUPD_I225 0x00800000 /* Update FLASH */
+#define IGC_EECD_FLUDONE_I225 0x04000000 /* Update FLASH done */
+#define IGC_EECD_FLASH_DETECTED_I225 0x00080000 /* FLASH detected */
+#define IGC_FLUDONE_ATTEMPTS 20000
+#define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
+#define IGC_EECD_SEC1VAL_I225 0x02000000 /* Sector One Valid */
+#define IGC_FLSECU_BLK_SW_ACCESS_I225 0x00000004 /* Block SW access */
+#define IGC_FWSM_FW_VALID_I225 0x8000 /* FW valid bit */
+
+#define IGC_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */
+#define IGC_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define IGC_NVM_RW_REG_START 1 /* Start operation */
+#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define IGC_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */
+#define IGC_FLASH_UPDATES 2000
+
+/* NVM Word Offsets */
+#define NVM_COMPAT 0x0003
+#define NVM_ID_LED_SETTINGS 0x0004
+#define NVM_FUTURE_INIT_WORD1 0x0019
+#define NVM_COMPAT_VALID_CSUM 0x0001
+#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040
+
+#define NVM_INIT_CONTROL2_REG 0x000F
+#define NVM_INIT_CONTROL3_PORT_B 0x0014
+#define NVM_INIT_3GIO_3 0x001A
+#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define NVM_INIT_CONTROL3_PORT_A 0x0024
+#define NVM_CFG 0x0012
+#define NVM_ALT_MAC_ADDR_PTR 0x0037
+#define NVM_CHECKSUM_REG 0x003F
+
+#define IGC_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
+#define IGC_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK 0x3000
+#define NVM_WORD0F_PAUSE 0x1000
+#define NVM_WORD0F_ASM_DIR 0x2000
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+#define NVM_WORD1A_ASPM_MASK 0x000C
+
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM 0x0800
+
+/* length of string needed to store PBA number */
+#define IGC_PBANUM_LENGTH 11
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM 0xBABA
+
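As a concrete illustration of the rule above: the word at NVM_CHECKSUM_REG (0x3F, defined earlier) is chosen so that the words 0x00 through 0x3F sum to NVM_SUM (0xBABA). The sketch below is illustrative only and assumes hw->nvm.ops has already been populated; it reads one word at a time for clarity, whereas the real igc_validate_nvm_checksum_i225() further down in this commit delegates to the generic validation helper.

/*
 * Illustrative sketch: verify that words 0x00..NVM_CHECKSUM_REG sum to
 * NVM_SUM (0xBABA).  Not the driver's actual validation routine.
 */
static bool
nvm_checksum_ok(struct igc_hw *hw)
{
	u16 i, word, sum = 0;

	for (i = 0; i <= NVM_CHECKSUM_REG; i++) {
		if (hw->nvm.ops.read(hw, i, 1, &word) != IGC_SUCCESS)
			return (false);
		sum += word;
	}
	return (sum == (u16)NVM_SUM);
}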
+/* PBA (printed board assembly) number words */
+#define NVM_PBA_OFFSET_0 8
+#define NVM_PBA_OFFSET_1 9
+#define NVM_PBA_PTR_GUARD 0xFAFA
+#define NVM_WORD_SIZE_BASE_SHIFT 6
+
+/* NVM Commands - Microwire */
+#define NVM_READ_OPCODE_MICROWIRE 0x6 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_MICROWIRE 0x5 /* NVM write opcode */
+#define NVM_ERASE_OPCODE_MICROWIRE 0x7 /* NVM erase opcode */
+#define NVM_EWEN_OPCODE_MICROWIRE 0x13 /* NVM erase/write enable */
+#define NVM_EWDS_OPCODE_MICROWIRE 0x10 /* NVM erase/write disable */
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
+#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI 0x01
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2 0x1
+#define ID_LED_DEF1_ON2 0x2
+#define ID_LED_DEF1_OFF2 0x3
+#define ID_LED_ON1_DEF2 0x4
+#define ID_LED_ON1_ON2 0x5
+#define ID_LED_ON1_OFF2 0x6
+#define ID_LED_OFF1_DEF2 0x7
+#define ID_LED_OFF1_ON2 0x8
+#define ID_LED_OFF1_OFF2 0x9
+
+#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE 0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCIX_COMMAND_REGISTER 0xE6
+#define PCIX_STATUS_REGISTER_LO 0xE8
+#define PCIX_STATUS_REGISTER_HI 0xEA
+#define PCI_HEADER_TYPE_REGISTER 0x0E
+#define PCIE_LINK_STATUS 0x12
+
+#define PCIX_COMMAND_MMRBC_MASK 0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT 0x2
+#define PCIX_STATUS_HI_MMRBC_MASK 0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5
+#define PCIX_STATUS_HI_MMRBC_4K 0x3
+#define PCIX_STATUS_HI_MMRBC_2K 0x2
+#define PCIX_STATUS_LO_FUNC_MASK 0x7
+#define PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define PCIE_LINK_WIDTH_MASK 0x3F0
+#define PCIE_LINK_WIDTH_SHIFT 4
+#define PCIE_LINK_SPEED_MASK 0x0F
+#define PCIE_LINK_SPEED_2500 0x01
+#define PCIE_LINK_SPEED_5000 0x02
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN 6
+#endif
+
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs.
+ * I = Integrated
+ * E = External
+ */
+#define M88IGC_E_PHY_ID 0x01410C50
+#define M88IGC_I_PHY_ID 0x01410C30
+#define M88E1011_I_PHY_ID 0x01410C20
+#define IGP01IGC_I_PHY_ID 0x02A80380
+#define M88E1111_I_PHY_ID 0x01410CC0
+#define GG82563_E_PHY_ID 0x01410CA0
+#define IGP03IGC_E_PHY_ID 0x02A80390
+#define IFE_E_PHY_ID 0x02A80330
+#define IFE_PLUS_E_PHY_ID 0x02A80320
+#define IFE_C_E_PHY_ID 0x02A80310
+#define I225_I_PHY_ID 0x67C9DC00
+
+/* M88IGC Specific Registers */
+#define M88IGC_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */
+#define M88IGC_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */
+#define M88IGC_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */
+#define M88IGC_RX_ERR_CNTR 0x15 /* Receive Error Counter */
+
+#define M88IGC_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */
+#define M88IGC_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */
+
+/* M88IGC PHY Specific Control Register */
+#define M88IGC_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */
+/* MDI Crossover Mode bits 6:5 Manual MDI configuration */
+#define M88IGC_PSCR_MDI_MANUAL_MODE 0x0000
+#define M88IGC_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88IGC_PSCR_AUTO_X_1000T 0x0040
+/* Auto crossover enabled all speeds */
+#define M88IGC_PSCR_AUTO_X_MODE 0x0060
+#define M88IGC_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */
+
+/* M88IGC PHY Specific Status Register */
+#define M88IGC_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
+#define M88IGC_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
+#define M88IGC_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
+/* 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define M88IGC_PSSR_CABLE_LENGTH 0x0380
+#define M88IGC_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
+#define M88IGC_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
+#define M88IGC_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
+#define M88IGC_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
+
+#define M88IGC_PSSR_CABLE_LENGTH_SHIFT 7
+
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define M88IGC_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88IGC_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
+#define M88IGC_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
+#define M88IGC_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
+#define M88IGC_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
+
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT 5
+#define GG82563_REG(page, reg) \
+ (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG 30
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */
+#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */
+#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */
+
+/* MAC Specific Control Register */
+#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21)
+
+#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+/* Kumeran Mode Control */
+#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16)
+#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */
+
+/* MDI Control */
+#define IGC_MDIC_DATA_MASK 0x0000FFFF
+#define IGC_MDIC_INT_EN 0x20000000
+#define IGC_MDIC_REG_MASK 0x001F0000
+#define IGC_MDIC_REG_SHIFT 16
+#define IGC_MDIC_PHY_SHIFT 21
+#define IGC_MDIC_OP_WRITE 0x04000000
+#define IGC_MDIC_OP_READ 0x08000000
+#define IGC_MDIC_READY 0x10000000
+#define IGC_MDIC_ERROR 0x40000000
+
+#define IGC_N0_QUEUE -1
+
+#define IGC_MAX_MAC_HDR_LEN 127
+#define IGC_MAX_NETWORK_HDR_LEN 511
+
+#define IGC_VLANPQF_QUEUE_SEL(_n, q_idx) ((q_idx) << ((_n) * 4))
+#define IGC_VLANPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4))
+#define IGC_VLANPQF_QUEUE_MASK 0x03
+#define IGC_VFTA_BLOCK_SIZE 8
+/* SerDes Control */
+#define IGC_GEN_POLL_TIMEOUT 640
+
+/* DMA Coalescing register fields */
+/* DMA Coalescing Watchdog Timer */
+#define IGC_DMACR_DMACWT_MASK 0x00003FFF
+/* DMA Coalescing Rx Threshold */
+#define IGC_DMACR_DMACTHR_MASK 0x00FF0000
+#define IGC_DMACR_DMACTHR_SHIFT 16
+/* Lx when no PCIe transactions */
+#define IGC_DMACR_DMAC_LX_MASK 0x30000000
+#define IGC_DMACR_DMAC_LX_SHIFT 28
+#define IGC_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
+/* DMA Coalescing BMC-to-OS Watchdog Enable */
+#define IGC_DMACR_DC_BMC2OSW_EN 0x00008000
+
+/* DMA Coalescing Transmit Threshold */
+#define IGC_DMCTXTH_DMCTTHR_MASK 0x00000FFF
+
+#define IGC_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
+
+/* Rx Traffic Rate Threshold */
+#define IGC_DMCRTRH_UTRESH_MASK 0x0007FFFF
+/* Rx packet rate in current window */
+#define IGC_DMCRTRH_LRPRCW 0x80000000
+
+/* DMA Coal Rx Traffic Current Count */
+#define IGC_DMCCNT_CCOUNT_MASK 0x01FFFFFF
+
+/* Flow ctrl Rx Threshold High val */
+#define IGC_FCRTC_RTH_COAL_MASK 0x0003FFF0
+#define IGC_FCRTC_RTH_COAL_SHIFT 4
+/* Lx power decision based on DMA coal */
+#define IGC_PCIEMISC_LX_DECISION 0x00000080
+
+#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
+#define IGC_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */
+#define IGC_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */
+#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
+#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+
+#define IGC_LTRC_EEEMS_EN 0x00000020 /* Enable EEE LTR max send */
+/* Minimum time for 1000BASE-T during which no data will be transmitted
+ * following a move out of the EEE LPI Tx state
+ */
+#define IGC_TW_SYSTEM_1000_MASK 0x000000FF
+/* Minimum time for 100BASE-T during which no data will be transmitted
+ * following a move out of the EEE LPI Tx state
+ */
+#define IGC_TW_SYSTEM_100_MASK 0x0000FF00
+#define IGC_TW_SYSTEM_100_SHIFT 8
+#define IGC_LTRMINV_LTRV_MASK 0x000003FF /* LTR minimum value */
+#define IGC_LTRMAXV_LTRV_MASK 0x000003FF /* LTR maximum value */
+#define IGC_LTRMINV_SCALE_MASK 0x00001C00 /* LTR minimum scale */
+#define IGC_LTRMINV_SCALE_SHIFT 10
+/* Reg val to set scale to 1024 nsec */
+#define IGC_LTRMINV_SCALE_1024 2
+/* Reg val to set scale to 32768 nsec */
+#define IGC_LTRMINV_SCALE_32768 3
+#define IGC_LTRMINV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */
+#define IGC_LTRMAXV_SCALE_MASK 0x00001C00 /* LTR maximum scale */
+#define IGC_LTRMAXV_SCALE_SHIFT 10
+/* Reg val to set scale to 1024 nsec */
+#define IGC_LTRMAXV_SCALE_1024 2
+/* Reg val to set scale to 32768 nsec */
+#define IGC_LTRMAXV_SCALE_32768 3
+#define IGC_LTRMAXV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */
+
+#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
+#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+#define IGC_RXPBS_SIZE_I225_MASK 0x0000003F /* Rx packet buffer size */
+#define IGC_TXPB0S_SIZE_I225_MASK 0x0000003F /* Tx packet buffer 0 size */
+#define IGC_STM_OPCODE 0xDB00
+#define IGC_EEPROM_FLASH_SIZE_WORD 0x11
+#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \
+ (u8)((invm_dword) & 0x7)
+#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \
+ (u8)(((invm_dword) & 0x0000FE00) >> 9)
+#define INVM_DWORD_TO_WORD_DATA(invm_dword) \
+ (u16)(((invm_dword) & 0xFFFF0000) >> 16)
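A hypothetical example of how the three macros above pick apart an iNVM record dword (the value 0x12340A01 is made up purely for illustration):

/* Hypothetical decode of a made-up iNVM record dword. */
u8 rec_type = INVM_DWORD_TO_RECORD_TYPE(0x12340A01);   /* 0x01 & 0x7 = 1 */
u8 word_addr = INVM_DWORD_TO_WORD_ADDRESS(0x12340A01); /* (0x0A00) >> 9 = 5 */
u16 word_data = INVM_DWORD_TO_WORD_DATA(0x12340A01);   /* upper 16 bits = 0x1234 */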
+#define IGC_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
+#define IGC_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
+#define IGC_INVM_ULT_BYTES_SIZE 8
+#define IGC_INVM_RECORD_SIZE_IN_BYTES 4
+#define IGC_INVM_VER_FIELD_ONE 0x1FF8
+#define IGC_INVM_VER_FIELD_TWO 0x7FE000
+#define IGC_INVM_IMGTYPE_FIELD 0x1F800000
+
+#define IGC_INVM_MAJOR_MASK 0x3F0
+#define IGC_INVM_MINOR_MASK 0xF
+#define IGC_INVM_MAJOR_SHIFT 4
+
+/* PLL Defines */
+#define IGC_PCI_PMCSR 0x44
+#define IGC_PCI_PMCSR_D3 0x03
+#define IGC_MAX_PLL_TRIES 5
+#define IGC_PHY_PLL_UNCONF 0xFF
+#define IGC_PHY_PLL_FREQ_PAGE 0xFC0000
+#define IGC_PHY_PLL_FREQ_REG 0x000E
+#define IGC_INVM_DEFAULT_AL 0x202F
+#define IGC_INVM_AUTOLOAD 0x0A
+#define IGC_INVM_PLL_WO_VAL 0x0010
+
+/* Proxy Filter Control Extended */
+#define IGC_PROXYFCEX_MDNS 0x00000001 /* mDNS */
+#define IGC_PROXYFCEX_MDNS_M 0x00000002 /* mDNS Multicast */
+#define IGC_PROXYFCEX_MDNS_U 0x00000004 /* mDNS Unicast */
+#define IGC_PROXYFCEX_IPV4_M 0x00000008 /* IPv4 Multicast */
+#define IGC_PROXYFCEX_IPV6_M 0x00000010 /* IPv6 Multicast */
+#define IGC_PROXYFCEX_IGMP 0x00000020 /* IGMP */
+#define IGC_PROXYFCEX_IGMP_M 0x00000040 /* IGMP Multicast */
+#define IGC_PROXYFCEX_ARPRES 0x00000080 /* ARP Response */
+#define IGC_PROXYFCEX_ARPRES_D 0x00000100 /* ARP Response Directed */
+#define IGC_PROXYFCEX_ICMPV4 0x00000200 /* ICMPv4 */
+#define IGC_PROXYFCEX_ICMPV4_D 0x00000400 /* ICMPv4 Directed */
+#define IGC_PROXYFCEX_ICMPV6 0x00000800 /* ICMPv6 */
+#define IGC_PROXYFCEX_ICMPV6_D 0x00001000 /* ICMPv6 Directed */
+#define IGC_PROXYFCEX_DNS 0x00002000 /* DNS */
+
+/* Proxy Filter Control */
+#define IGC_PROXYFC_D0 0x00000001 /* Enable offload in D0 */
+#define IGC_PROXYFC_EX 0x00000004 /* Directed exact proxy */
+#define IGC_PROXYFC_MC 0x00000008 /* Directed MC Proxy */
+#define IGC_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */
+#define IGC_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */
+#define IGC_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */
+#define IGC_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */
+#define IGC_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
+#define IGC_PROXYFC_NS_DIRECTED 0x00000400 /* Directed NS Proxy Ena */
+#define IGC_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */
+/* Proxy Status */
+#define IGC_PROXYS_CLEAR 0xFFFFFFFF /* Clear */
+
+/* Firmware Status */
+#define IGC_FWSTS_FWRI 0x80000000 /* FW Reset Indication */
+/* VF Control */
+#define IGC_VTCTRL_RST 0x04000000 /* Reset VF */
+
+#define IGC_STATUS_LAN_ID_MASK 0x0000000C /* Mask for Lan ID field */
+/* Lan ID bit field offset in status register */
+#define IGC_STATUS_LAN_ID_OFFSET 2
+#define IGC_VFTA_ENTRIES 128
+
+#define IGC_UNUSEDARG
+#ifndef ERROR_REPORT
+#define ERROR_REPORT(fmt) do { } while (0)
+#endif /* ERROR_REPORT */
+#endif /* _IGC_DEFINES_H_ */
diff --git a/sys/dev/igc/igc_hw.h b/sys/dev/igc/igc_hw.h
new file mode 100644
index 000000000000..a07d2894f97a
--- /dev/null
+++ b/sys/dev/igc/igc_hw.h
@@ -0,0 +1,548 @@
+/*-
+ * Copyright 2021 Intel Corp
+ * Copyright 2021 Rubicon Communications, LLC (Netgate)
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IGC_HW_H_
+#define _IGC_HW_H_
+
+#include "igc_osdep.h"
+#include "igc_regs.h"
+#include "igc_defines.h"
+
+struct igc_hw;
+
+#define IGC_DEV_ID_I225_LM 0x15F2
+#define IGC_DEV_ID_I225_V 0x15F3
+#define IGC_DEV_ID_I225_K 0x3100
+#define IGC_DEV_ID_I225_I 0x15F8
+#define IGC_DEV_ID_I220_V 0x15F7
+#define IGC_DEV_ID_I225_K2 0x3101
+#define IGC_DEV_ID_I225_LMVP 0x5502
+#define IGC_DEV_ID_I226_K 0x5504
+#define IGC_DEV_ID_I225_IT 0x0D9F
+#define IGC_DEV_ID_I226_LM 0x125B
+#define IGC_DEV_ID_I226_V 0x125C
+#define IGC_DEV_ID_I226_IT 0x125D
+#define IGC_DEV_ID_I221_V 0x125E
+#define IGC_DEV_ID_I226_BLANK_NVM 0x125F
+#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD
+
+#define IGC_REVISION_0 0
+#define IGC_REVISION_1 1
+#define IGC_REVISION_2 2
+#define IGC_REVISION_3 3
+#define IGC_REVISION_4 4
+
+#define IGC_FUNC_1 1
+
+#define IGC_ALT_MAC_ADDRESS_OFFSET_LAN0 0
+#define IGC_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+
+enum igc_mac_type {
+ igc_undefined = 0,
+ igc_i225,
+ igc_num_macs /* List is 1-based, so subtract 1 for TRUE count. */
+};
+
+enum igc_media_type {
+ igc_media_type_unknown = 0,
+ igc_media_type_copper = 1,
+ igc_num_media_types
+};
+
+enum igc_nvm_type {
+ igc_nvm_unknown = 0,
+ igc_nvm_eeprom_spi,
+ igc_nvm_flash_hw,
+ igc_nvm_invm,
+};
+
+enum igc_phy_type {
+ igc_phy_unknown = 0,
+ igc_phy_none,
+ igc_phy_i225,
+};
+
+enum igc_bus_type {
+ igc_bus_type_unknown = 0,
+ igc_bus_type_pci,
+ igc_bus_type_pcix,
+ igc_bus_type_pci_express,
+ igc_bus_type_reserved
+};
+
+enum igc_bus_speed {
+ igc_bus_speed_unknown = 0,
+ igc_bus_speed_33,
+ igc_bus_speed_66,
+ igc_bus_speed_100,
+ igc_bus_speed_120,
+ igc_bus_speed_133,
+ igc_bus_speed_2500,
+ igc_bus_speed_5000,
+ igc_bus_speed_reserved
+};
+
+enum igc_bus_width {
+ igc_bus_width_unknown = 0,
+ igc_bus_width_pcie_x1,
+ igc_bus_width_pcie_x2,
+ igc_bus_width_pcie_x4 = 4,
+ igc_bus_width_pcie_x8 = 8,
+ igc_bus_width_32,
+ igc_bus_width_64,
+ igc_bus_width_reserved
+};
+
+enum igc_fc_mode {
+ igc_fc_none = 0,
+ igc_fc_rx_pause,
+ igc_fc_tx_pause,
+ igc_fc_full,
+ igc_fc_default = 0xFF
+};
+
+enum igc_ms_type {
+ igc_ms_hw_default = 0,
+ igc_ms_force_master,
+ igc_ms_force_slave,
+ igc_ms_auto
+};
+
+enum igc_smart_speed {
+ igc_smart_speed_default = 0,
+ igc_smart_speed_on,
+ igc_smart_speed_off
+};
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+/* Receive Descriptor */
+struct igc_rx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 length; /* Length of data DMAed into data buffer */
+ __le16 csum; /* Packet checksum */
+ u8 status; /* Descriptor status */
+ u8 errors; /* Descriptor Errors */
+ __le16 special;
+};
+
+/* Receive Descriptor - Extended */
+union igc_rx_desc_extended {
+ struct {
+ __le64 buffer_addr;
+ __le64 reserved;
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length;
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
+/* Receive Descriptor - Packet Split */
+union igc_rx_desc_packet_split {
+ struct {
+ /* one buffer for protocol header(s), three data buffers */
+ __le64 buffer_addr[MAX_PS_BUFFERS];
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length0; /* length of buffer 0 */
+ __le16 vlan; /* VLAN tag */
+ } middle;
+ struct {
+ __le16 header_status;
+ /* length of buffers 1-3 */
+ __le16 length[PS_PAGE_BUFFERS];
+ } upper;
+ __le64 reserved;
+ } wb; /* writeback */
+};
+
+/* Transmit Descriptor */
+struct igc_tx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 cso; /* Checksum offset */
+ u8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 css; /* Checksum start */
+ __le16 special;
+ } fields;
+ } upper;
+};
+
+/* Offload Context Descriptor */
+struct igc_context_desc {
+ union {
+ __le32 ip_config;
+ struct {
+ u8 ipcss; /* IP checksum start */
+ u8 ipcso; /* IP checksum offset */
+ __le16 ipcse; /* IP checksum end */
+ } ip_fields;
+ } lower_setup;
+ union {
+ __le32 tcp_config;
+ struct {
+ u8 tucss; /* TCP checksum start */
+ u8 tucso; /* TCP checksum offset */
+ __le16 tucse; /* TCP checksum end */
+ } tcp_fields;
+ } upper_setup;
+ __le32 cmd_and_length;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 hdr_len; /* Header length */
+ __le16 mss; /* Maximum segment size */
+ } fields;
+ } tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct igc_data_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 typ_len_ext;
+ u8 cmd;
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 popts; /* Packet Options */
+ __le16 special;
+ } fields;
+ } upper;
+};
+
+/* Statistics counters collected by the MAC */
+struct igc_hw_stats {
+ u64 crcerrs;
+ u64 algnerrc;
+ u64 symerrs;
+ u64 rxerrc;
+ u64 mpc;
+ u64 scc;
+ u64 ecol;
+ u64 mcc;
+ u64 latecol;
+ u64 colc;
+ u64 dc;
+ u64 tncrs;
+ u64 sec;
+ u64 rlec;
+ u64 xonrxc;
+ u64 xontxc;
+ u64 xoffrxc;
+ u64 xofftxc;
+ u64 fcruc;
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 tlpic;
+ u64 rlpic;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 tor;
+ u64 tot;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 iac;
+ u64 rxdmtc;
+ u64 htdpmc;
+ u64 rpthc;
+ u64 hgptc;
+ u64 hgorc;
+ u64 hgotc;
+ u64 lenerrs;
+ u64 scvpc;
+ u64 hrmpc;
+ u64 doosync;
+ u64 o2bgptc;
+ u64 o2bspc;
+ u64 b2ospc;
+ u64 b2ogprc;
+};
+
+#include "igc_mac.h"
+#include "igc_phy.h"
+#include "igc_nvm.h"
+
+/* Function pointers for the MAC. */
+struct igc_mac_operations {
+ s32 (*init_params)(struct igc_hw *);
+ s32 (*check_for_link)(struct igc_hw *);
+ void (*clear_hw_cntrs)(struct igc_hw *);
+ void (*clear_vfta)(struct igc_hw *);
+ s32 (*get_bus_info)(struct igc_hw *);
+ void (*set_lan_id)(struct igc_hw *);
+ s32 (*get_link_up_info)(struct igc_hw *, u16 *, u16 *);
+ void (*update_mc_addr_list)(struct igc_hw *, u8 *, u32);
+ s32 (*reset_hw)(struct igc_hw *);
+ s32 (*init_hw)(struct igc_hw *);
+ s32 (*setup_link)(struct igc_hw *);
+ s32 (*setup_physical_interface)(struct igc_hw *);
+ void (*write_vfta)(struct igc_hw *, u32, u32);
+ void (*config_collision_dist)(struct igc_hw *);
+ int (*rar_set)(struct igc_hw *, u8*, u32);
+ s32 (*read_mac_addr)(struct igc_hw *);
+ s32 (*validate_mdi_setting)(struct igc_hw *);
+ s32 (*acquire_swfw_sync)(struct igc_hw *, u16);
+ void (*release_swfw_sync)(struct igc_hw *, u16);
+};
+
+/* When to use various PHY register access functions:
+ *
+ * Func Caller
+ * Function Does Does When to use
+ * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * X_reg L,P,A n/a for simple PHY reg accesses
+ * X_reg_locked P,A L for multiple accesses of different regs
+ * on different pages
+ * X_reg_page A L,P for multiple accesses of different regs
+ * on the same page
+ *
+ * Where X=[read|write], L=locking, P=sets page, A=register access
+ *
+ */
+struct igc_phy_operations {
+ s32 (*init_params)(struct igc_hw *);
+ s32 (*acquire)(struct igc_hw *);
+ s32 (*check_reset_block)(struct igc_hw *);
+ s32 (*commit)(struct igc_hw *);
+ s32 (*force_speed_duplex)(struct igc_hw *);
+ s32 (*get_info)(struct igc_hw *);
+ s32 (*set_page)(struct igc_hw *, u16);
+ s32 (*read_reg)(struct igc_hw *, u32, u16 *);
+ s32 (*read_reg_locked)(struct igc_hw *, u32, u16 *);
+ s32 (*read_reg_page)(struct igc_hw *, u32, u16 *);
+ void (*release)(struct igc_hw *);
+ s32 (*reset)(struct igc_hw *);
+ s32 (*set_d0_lplu_state)(struct igc_hw *, bool);
+ s32 (*set_d3_lplu_state)(struct igc_hw *, bool);
+ s32 (*write_reg)(struct igc_hw *, u32, u16);
+ s32 (*write_reg_locked)(struct igc_hw *, u32, u16);
+ s32 (*write_reg_page)(struct igc_hw *, u32, u16);
+ void (*power_up)(struct igc_hw *);
+ void (*power_down)(struct igc_hw *);
+};
+
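To make the table above concrete, a multi-register access on one page would roughly follow the pattern below. This is a hypothetical sketch (read_ctrl_and_status() is not part of the driver); PHY_CONTROL and PHY_STATUS are the IEEE register offsets defined earlier in this commit.

/* Hypothetical sketch of the "X_reg_locked" pattern described above. */
static s32
read_ctrl_and_status(struct igc_hw *hw, u16 *ctrl, u16 *status)
{
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);	/* caller takes the lock (L) */
	if (ret_val)
		return (ret_val);

	/* Each locked access still sets the page (P) and does the access (A). */
	ret_val = hw->phy.ops.read_reg_locked(hw, PHY_CONTROL, ctrl);
	if (ret_val == IGC_SUCCESS)
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_STATUS, status);

	hw->phy.ops.release(hw);
	return (ret_val);
}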
+/* Function pointers for the NVM. */
+struct igc_nvm_operations {
+ s32 (*init_params)(struct igc_hw *);
+ s32 (*acquire)(struct igc_hw *);
+ s32 (*read)(struct igc_hw *, u16, u16, u16 *);
+ void (*release)(struct igc_hw *);
+ void (*reload)(struct igc_hw *);
+ s32 (*update)(struct igc_hw *);
+ s32 (*validate)(struct igc_hw *);
+ s32 (*write)(struct igc_hw *, u16, u16, u16 *);
+};
+
+struct igc_info {
+ s32 (*get_invariants)(struct igc_hw *hw);
+ struct igc_mac_operations *mac_ops;
+ const struct igc_phy_operations *phy_ops;
+ struct igc_nvm_operations *nvm_ops;
+};
+
+extern const struct igc_info igc_i225_info;
+
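Because the shared code is driven entirely through these ops tables (igc_i225_info above supplies the I225 variants), a caller with a populated struct igc_hw only ever goes through the pointers. A hypothetical bring-up fragment, not the driver's actual attach path:

/* Hypothetical fragment; assumes the mac/phy/nvm ops tables are installed. */
static s32
bring_up(struct igc_hw *hw)
{
	s32 ret_val;

	ret_val = hw->mac.ops.reset_hw(hw);
	if (ret_val == IGC_SUCCESS)
		ret_val = hw->mac.ops.init_hw(hw);
	if (ret_val == IGC_SUCCESS)
		ret_val = hw->mac.ops.check_for_link(hw);
	return (ret_val);
}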
+struct igc_mac_info {
+ struct igc_mac_operations ops;
+ u8 addr[ETH_ADDR_LEN];
+ u8 perm_addr[ETH_ADDR_LEN];
+
+ enum igc_mac_type type;
+
+ u32 mc_filter_type;
+
+ u16 current_ifs_val;
+ u16 ifs_max_val;
+ u16 ifs_min_val;
+ u16 ifs_ratio;
+ u16 ifs_step_size;
+ u16 mta_reg_count;
+ u16 uta_reg_count;
+
+ /* Maximum size of the MTA register table in all supported adapters */
+#define MAX_MTA_REG 128
+ u32 mta_shadow[MAX_MTA_REG];
+ u16 rar_entry_count;
+
+ u8 forced_speed_duplex;
+
+ bool asf_firmware_present;
+ bool autoneg;
+ bool get_link_status;
+ u32 max_frame_size;
+};
+
+struct igc_phy_info {
+ struct igc_phy_operations ops;
+ enum igc_phy_type type;
+
+ enum igc_smart_speed smart_speed;
+
+ u32 addr;
+ u32 id;
+ u32 reset_delay_us; /* in usec */
+ u32 revision;
+
+ enum igc_media_type media_type;
+
+ u16 autoneg_advertised;
+ u16 autoneg_mask;
+
+ u8 mdix;
+
+ bool polarity_correction;
+ bool speed_downgraded;
+ bool autoneg_wait_to_complete;
+};
+
+struct igc_nvm_info {
+ struct igc_nvm_operations ops;
+ enum igc_nvm_type type;
+
+ u16 word_size;
+ u16 delay_usec;
+ u16 address_bits;
+ u16 opcode_bits;
+ u16 page_size;
+};
+
+struct igc_bus_info {
+ enum igc_bus_type type;
+ enum igc_bus_speed speed;
+ enum igc_bus_width width;
+
+ u16 func;
+ u16 pci_cmd_word;
+};
+
+struct igc_fc_info {
+ u32 high_water; /* Flow control high-water mark */
+ u32 low_water; /* Flow control low-water mark */
+ u16 pause_time; /* Flow control pause timer */
+ u16 refresh_time; /* Flow control refresh timer */
+ bool send_xon; /* Flow control send XON */
+ bool strict_ieee; /* Strict IEEE mode */
+ enum igc_fc_mode current_mode; /* FC mode in effect */
+ enum igc_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+struct igc_dev_spec_i225 {
+ bool eee_disable;
+ bool clear_semaphore_once;
+ u32 mtu;
+};
+
+struct igc_hw {
+ void *back;
+
+ u8 *hw_addr;
+ u8 *flash_address;
+ unsigned long io_base;
+
+ struct igc_mac_info mac;
+ struct igc_fc_info fc;
+ struct igc_phy_info phy;
+ struct igc_nvm_info nvm;
+ struct igc_bus_info bus;
+
+ union {
+ struct igc_dev_spec_i225 _i225;
+ } dev_spec;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+};
+
+#include "igc_i225.h"
+#include "igc_base.h"
+
+/* These functions must be implemented by drivers */
+s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
+s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
+void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
+void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
+
+#endif
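The four prototypes at the end of this header are the OS glue each port of the shared code has to provide. A minimal FreeBSD-flavoured sketch of the PCI config accessors might look like the following; igc_dev() is a made-up helper that resolves hw->back to the attached device_t, and the in-tree if_igc.c implementation may differ.

#include <dev/pci/pcivar.h>	/* pci_read_config()/pci_write_config() */

void
igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
{
	/* igc_dev() is hypothetical: maps hw->back to the device_t. */
	*value = pci_read_config(igc_dev(hw), reg, 2);	/* 2-byte read */
}

void
igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
{
	pci_write_config(igc_dev(hw), reg, *value, 2);	/* 2-byte write */
}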
diff --git a/sys/dev/igc/igc_i225.c b/sys/dev/igc/igc_i225.c
new file mode 100644
index 000000000000..75c4b5125a97
--- /dev/null
+++ b/sys/dev/igc/igc_i225.c
@@ -0,0 +1,1232 @@
+/*-
+ * Copyright 2021 Intel Corp
+ * Copyright 2021 Rubicon Communications, LLC (Netgate)
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "igc_api.h"
+
+static s32 igc_init_nvm_params_i225(struct igc_hw *hw);
+static s32 igc_init_mac_params_i225(struct igc_hw *hw);
+static s32 igc_init_phy_params_i225(struct igc_hw *hw);
+static s32 igc_reset_hw_i225(struct igc_hw *hw);
+static s32 igc_acquire_nvm_i225(struct igc_hw *hw);
+static void igc_release_nvm_i225(struct igc_hw *hw);
+static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw);
+static s32 __igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw);
+
+/**
+ * igc_init_nvm_params_i225 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 igc_init_nvm_params_i225(struct igc_hw *hw)
+{
+ struct igc_nvm_info *nvm = &hw->nvm;
+ u32 eecd = IGC_READ_REG(hw, IGC_EECD);
+ u16 size;
+
+ DEBUGFUNC("igc_init_nvm_params_i225");
+
+ size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
+ IGC_EECD_SIZE_EX_SHIFT);
+ /*
+ * Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
+
+ /* Just in case size is out of range, cap it to the largest
+ * EEPROM size supported
+ */
+ if (size > 15)
+ size = 15;
+
+ nvm->word_size = 1 << size;
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+ nvm->type = igc_nvm_eeprom_spi;
+
+
+ nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ?
+ 16 : 8;
+
+ if (nvm->word_size == (1 << 15))
+ nvm->page_size = 128;
+
+ nvm->ops.acquire = igc_acquire_nvm_i225;
+ nvm->ops.release = igc_release_nvm_i225;
+ if (igc_get_flash_presence_i225(hw)) {
+ hw->nvm.type = igc_nvm_flash_hw;
+ nvm->ops.read = igc_read_nvm_srrd_i225;
+ nvm->ops.write = igc_write_nvm_srwr_i225;
+ nvm->ops.validate = igc_validate_nvm_checksum_i225;
+ nvm->ops.update = igc_update_nvm_checksum_i225;
+ } else {
+ hw->nvm.type = igc_nvm_invm;
+ nvm->ops.write = igc_null_write_nvm;
+ nvm->ops.validate = igc_null_ops_generic;
+ nvm->ops.update = igc_null_ops_generic;
+ }
+
+ return IGC_SUCCESS;
+}
+
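As a worked example of the sizing logic above: NVM_WORD_SIZE_BASE_SHIFT is 6, so an EECD size field of 3 gives word_size = 1 << (3 + 6) = 512 words; the shift is capped at 15, so word_size never exceeds 1 << 15 = 32768 words, the case in which page_size is forced to 128.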
+/**
+ * igc_init_mac_params_i225 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 igc_init_mac_params_i225(struct igc_hw *hw)
+{
+ struct igc_mac_info *mac = &hw->mac;
+ struct igc_dev_spec_i225 *dev_spec = &hw->dev_spec._i225;
+
+ DEBUGFUNC("igc_init_mac_params_i225");
+
+ /* Initialize function pointer */
+ igc_init_mac_ops_generic(hw);
+
+ /* Set media type */
+ hw->phy.media_type = igc_media_type_copper;
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = IGC_RAR_ENTRIES_BASE;
+
+ /* reset */
+ mac->ops.reset_hw = igc_reset_hw_i225;
+ /* hw initialization */
+ mac->ops.init_hw = igc_init_hw_i225;
+ /* link setup */
+ mac->ops.setup_link = igc_setup_link_generic;
+ /* check for link */
+ mac->ops.check_for_link = igc_check_for_link_i225;
+ /* link info */
+ mac->ops.get_link_up_info = igc_get_speed_and_duplex_copper_generic;
+ /* acquire SW_FW sync */
+ mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225;
+ /* release SW_FW sync */
+ mac->ops.release_swfw_sync = igc_release_swfw_sync_i225;
+
+ /* Allow a single clear of the SW semaphore on I225 */
+ dev_spec->clear_semaphore_once = true;
+ mac->ops.setup_physical_interface = igc_setup_copper_link_i225;
+
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = igc_update_mc_addr_list_generic;
+
+ mac->ops.write_vfta = igc_write_vfta_generic;
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_init_phy_params_i225 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 igc_init_phy_params_i225(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ s32 ret_val = IGC_SUCCESS;
+ u32 ctrl_ext;
+
+ DEBUGFUNC("igc_init_phy_params_i225");
+
+
+ if (hw->phy.media_type != igc_media_type_copper) {
+ phy->type = igc_phy_none;
+ goto out;
+ }
+
+ phy->ops.power_up = igc_power_up_phy_copper;
+ phy->ops.power_down = igc_power_down_phy_copper_base;
+
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
+
+ phy->reset_delay_us = 100;
+
+ phy->ops.acquire = igc_acquire_phy_base;
+ phy->ops.check_reset_block = igc_check_reset_block_generic;
+ phy->ops.commit = igc_phy_sw_reset_generic;
+ phy->ops.release = igc_release_phy_base;
+
+ ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
+
+ /* Make sure the PHY is in a good state. Several people have reported
+ * firmware leaving the PHY's page select register set to something
+ * other than the default of zero, which causes the PHY ID read to
+ * access something other than the intended register.
+ */
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val)
+ goto out;
+
+ IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext);
+ phy->ops.read_reg = igc_read_phy_reg_gpy;
+ phy->ops.write_reg = igc_write_phy_reg_gpy;
+
+ ret_val = igc_get_phy_id(hw);
+ /* Verify phy id and set remaining function pointers */
+ switch (phy->id) {
+ case I225_I_PHY_ID:
+ phy->type = igc_phy_i225;
+ phy->ops.set_d0_lplu_state = igc_set_d0_lplu_state_i225;
+ phy->ops.set_d3_lplu_state = igc_set_d3_lplu_state_i225;
+ /* TODO - complete with GPY PHY information */
+ break;
+ default:
+ ret_val = -IGC_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_reset_hw_i225 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+static s32 igc_reset_hw_i225(struct igc_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("igc_reset_hw_i225");
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = igc_disable_pcie_master_generic(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
+
+ IGC_WRITE_REG(hw, IGC_RCTL, 0);
+ IGC_WRITE_REG(hw, IGC_TCTL, IGC_TCTL_PSP);
+ IGC_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ ctrl = IGC_READ_REG(hw, IGC_CTRL);
+
+ DEBUGOUT("Issuing a global reset to MAC\n");
+ IGC_WRITE_REG(hw, IGC_CTRL, ctrl | IGC_CTRL_DEV_RST);
+
+ ret_val = igc_get_auto_rd_done_generic(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen when there is no eeprom,
+ * and returning an error here would prevent getting link.
+ */
+ DEBUGOUT("Auto Read Done did not complete\n");
+ }
+
+ /* Clear any pending interrupt events. */
+ IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
+ IGC_READ_REG(hw, IGC_ICR);
+
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = igc_check_alt_mac_addr_generic(hw);
+
+ return ret_val;
+}
+
+/* igc_acquire_nvm_i225 - Request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the necessary semaphores for exclusive access to the EEPROM.
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -IGC_ERR_NVM (-1).
+ */
+static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("igc_acquire_nvm_i225");
+
+ ret_val = igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
+
+ return ret_val;
+}
+
+/* igc_release_nvm_i225 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ * then release the semaphores acquired.
+ */
+static void igc_release_nvm_i225(struct igc_hw *hw)
+{
+ DEBUGFUNC("igc_release_nvm_i225");
+
+ igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
+}
+
+/* igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ */
+s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+ s32 ret_val = IGC_SUCCESS;
+ s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+
+ DEBUGFUNC("igc_acquire_swfw_sync_i225");
+
+ while (i < timeout) {
+ if (igc_get_hw_semaphore_i225(hw)) {
+ ret_val = -IGC_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync = IGC_READ_REG(hw, IGC_SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /* Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+ igc_put_hw_semaphore_generic(hw);
+ msec_delay_irq(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ ret_val = -IGC_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync |= swmask;
+ IGC_WRITE_REG(hw, IGC_SW_FW_SYNC, swfw_sync);
+
+ igc_put_hw_semaphore_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/* igc_release_swfw_sync_i225 - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ */
+void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+
+ DEBUGFUNC("igc_release_swfw_sync_i225");
+
+ while (igc_get_hw_semaphore_i225(hw) != IGC_SUCCESS)
+ ; /* Empty */
+
+ swfw_sync = IGC_READ_REG(hw, IGC_SW_FW_SYNC);
+ swfw_sync &= ~mask;
+ IGC_WRITE_REG(hw, IGC_SW_FW_SYNC, swfw_sync);
+
+ igc_put_hw_semaphore_generic(hw);
+}
+
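NVM and PHY accesses are meant to be bracketed by the acquire/release pair above; a hypothetical caller using the EEPROM mask looks roughly like the sketch below, which is essentially what igc_acquire_nvm_i225()/igc_release_nvm_i225() boil down to. The function name is made up for illustration.

/* Hypothetical sketch of the acquire/access/release pattern. */
static s32
eeprom_do_something(struct igc_hw *hw)
{
	s32 ret_val;

	ret_val = igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
	if (ret_val != IGC_SUCCESS)
		return (ret_val);

	/* ... exclusive EEPROM access goes here ... */

	igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
	return (IGC_SUCCESS);
}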
+/*
+ * igc_setup_copper_link_i225 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Configures the link for auto-neg or forced speed and duplex. We then
+ * check for link; once link is established, the routines that configure
+ * the collision distance and flow control are called.
+ */
+s32 igc_setup_copper_link_i225(struct igc_hw *hw)
+{
+ u32 phpm_reg;
+ s32 ret_val;
+ u32 ctrl;
+
+ DEBUGFUNC("igc_setup_copper_link_i225");
+
+ ctrl = IGC_READ_REG(hw, IGC_CTRL);
+ ctrl |= IGC_CTRL_SLU;
+ ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX);
+ IGC_WRITE_REG(hw, IGC_CTRL, ctrl);
+
+ phpm_reg = IGC_READ_REG(hw, IGC_I225_PHPM);
+ phpm_reg &= ~IGC_I225_PHPM_GO_LINKD;
+ IGC_WRITE_REG(hw, IGC_I225_PHPM, phpm_reg);
+
+ ret_val = igc_setup_copper_link_generic(hw);
+
+ return ret_val;
+}
+
+/* igc_get_hw_semaphore_i225 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ */
+static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw)
+{
+ u32 swsm;
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ DEBUGFUNC("igc_get_hw_semaphore_i225");
+
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = IGC_READ_REG(hw, IGC_SWSM);
+ if (!(swsm & IGC_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ i++;
+ }
+
+ if (i == timeout) {
+ /* In rare circumstances, the SW semaphore may already be held
+ * unintentionally. Clear the semaphore once before giving up.
+ */
+ if (hw->dev_spec._i225.clear_semaphore_once) {
+ hw->dev_spec._i225.clear_semaphore_once = false;
+ igc_put_hw_semaphore_generic(hw);
+ for (i = 0; i < timeout; i++) {
+ swsm = IGC_READ_REG(hw, IGC_SWSM);
+ if (!(swsm & IGC_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ }
+ }
+
+ /* If we do not have the semaphore here, we have to give up. */
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access device -\n");
+ DEBUGOUT("SMBI bit is set.\n");
+ return -IGC_ERR_NVM;
+ }
+ }
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = IGC_READ_REG(hw, IGC_SWSM);
+ IGC_WRITE_REG(hw, IGC_SWSM, swsm | IGC_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (IGC_READ_REG(hw, IGC_SWSM) & IGC_SWSM_SWESMBI)
+ break;
+
+ usec_delay(50);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ igc_put_hw_semaphore_generic(hw);
+ DEBUGOUT("Driver can't access the NVM\n");
+ return -IGC_ERR_NVM;
+ }
+
+ return IGC_SUCCESS;
+}
+
+/* igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the Shadow Ram to read
+ * @words: number of words to read
+ * @data: word read from the Shadow Ram
+ *
+ * Reads a 16 bit word from the Shadow Ram using the EERD register.
+ * Uses necessary synchronization semaphores.
+ */
+s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = IGC_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("igc_read_nvm_srrd_i225");
+
+ /* We cannot hold synchronization semaphores for too long,
+ * because of the forceful takeover procedure. However, it is more
+ * efficient to read in bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
+ IGC_EERD_EEWR_MAX_COUNT : (words - i);
+ if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
+ status = igc_read_nvm_eerd(hw, offset, count,
+ data + i);
+ hw->nvm.ops.release(hw);
+ } else {
+ status = IGC_ERR_SWFW_SYNC;
+ }
+
+ if (status != IGC_SUCCESS)
+ break;
+ }
+
+ return status;
+}
+
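For example, a hypothetical caller could fetch both PBA words (NVM_PBA_OFFSET_0 and NVM_PBA_OFFSET_1 from the defines earlier) in a single call and let this routine worry about semaphores and bursting. The wrapper name below is made up for illustration.

/* Hypothetical usage sketch; igc_read_pba_words() is not a real function. */
static s32
igc_read_pba_words(struct igc_hw *hw, u16 pba[2])
{
	/* One call covers words 0x8 and 0x9; bursting and semaphore
	 * handling happen inside igc_read_nvm_srrd_i225(). */
	return (igc_read_nvm_srrd_i225(hw, NVM_PBA_OFFSET_0, 2, pba));
}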
+/* igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow RAM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow RAM
+ *
+ * Writes data to Shadow RAM at offset using EEWR register.
+ *
+ * If igc_update_nvm_checksum is not called after this function, the
+ * data will not be committed to FLASH and the Shadow RAM will most
+ * likely contain an invalid checksum.
+ *
+ * If error code is returned, data and Shadow RAM may be inconsistent - buffer
+ * partially written.
+ */
+s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = IGC_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("igc_write_nvm_srwr_i225");
+
+ /* We cannot hold synchronization semaphores for too long,
+ * because of the forceful takeover procedure. However, it is more
+ * efficient to write in bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
+ IGC_EERD_EEWR_MAX_COUNT : (words - i);
+ if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
+ status = __igc_write_nvm_srwr(hw, offset, count,
+ data + i);
+ hw->nvm.ops.release(hw);
+ } else {
+ status = IGC_ERR_SWFW_SYNC;
+ }
+
+ if (status != IGC_SUCCESS)
+ break;
+ }
+
+ return status;
+}
+
+/* __igc_write_nvm_srwr - Write to Shadow Ram using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow Ram to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow Ram
+ *
+ * Writes data to Shadow Ram at offset using EEWR register.
+ *
+ * If igc_update_nvm_checksum is not called after this function, the
+ * Shadow Ram will most likely contain an invalid checksum.
+ */
+static s32 __igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct igc_nvm_info *nvm = &hw->nvm;
+ u32 i, k, eewr = 0;
+ u32 attempts = 100000;
+ s32 ret_val = IGC_SUCCESS;
+
+ DEBUGFUNC("__igc_write_nvm_srwr");
+
+ /* A check for invalid values: offset too large, too many words
+ * for the given offset, or zero words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ ret_val = -IGC_ERR_NVM;
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |
+ (data[i] << IGC_NVM_RW_REG_DATA) |
+ IGC_NVM_RW_REG_START;
+
+ IGC_WRITE_REG(hw, IGC_SRWR, eewr);
+
+ for (k = 0; k < attempts; k++) {
+ if (IGC_NVM_RW_REG_DONE &
+ IGC_READ_REG(hw, IGC_SRWR)) {
+ ret_val = IGC_SUCCESS;
+ break;
+ }
+ usec_delay(5);
+ }
+
+ if (ret_val != IGC_SUCCESS) {
+ DEBUGOUT("Shadow RAM write EEWR timed out\n");
+ break;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/* igc_validate_nvm_checksum_i225 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ */
+s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw)
+{
+ s32 status = IGC_SUCCESS;
+ s32 (*read_op_ptr)(struct igc_hw *, u16, u16, u16 *);
+
+ DEBUGFUNC("igc_validate_nvm_checksum_i225");
+
+ if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
+ /* Replace the read function that grabs the semaphore with one
+ * that skips it; we already hold the semaphore here.
+ */
+ read_op_ptr = hw->nvm.ops.read;
+ hw->nvm.ops.read = igc_read_nvm_eerd;
+
+ status = igc_validate_nvm_checksum_generic(hw);
+
+ /* Revert original read operation. */
+ hw->nvm.ops.read = read_op_ptr;
+
+ hw->nvm.ops.release(hw);
+ } else {
+ status = IGC_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/* igc_update_nvm_checksum_i225 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM. Finally, it commits the EEPROM data to the flash.
+ */
+s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("igc_update_nvm_checksum_i225");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data);
+ if (ret_val != IGC_SUCCESS) {
+ DEBUGOUT("EEPROM read failed\n");
+ goto out;
+ }
+
+ if (hw->nvm.ops.acquire(hw) == IGC_SUCCESS) {
+ /* Do not use hw->nvm.ops.write, hw->nvm.ops.read
+ * because we do not want to take the synchronization
+ * semaphores twice here.
+ */
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw->nvm.ops.release(hw);
+ DEBUGOUT("NVM Read Error while updating\n");
+ DEBUGOUT("checksum.\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
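+ /* The checksum word is chosen so that the 16-bit sum of words
+ * 0x00 through NVM_CHECKSUM_REG (inclusive) equals NVM_SUM (0xBABA),
+ * which is what the validate routine checks for.
+ */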
+ checksum = (u16)NVM_SUM - checksum;
+ ret_val = __igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
+ &checksum);
+ if (ret_val != IGC_SUCCESS) {
+ hw->nvm.ops.release(hw);
+ DEBUGOUT("NVM Write Error while updating checksum.\n");
+ goto out;
+ }
+
+ hw->nvm.ops.release(hw);
+
+ ret_val = igc_update_flash_i225(hw);
+ } else {
+ ret_val = IGC_ERR_SWFW_SYNC;
+ }
+out:
+ return ret_val;
+}
+
+/* igc_get_flash_presence_i225 - Check if flash device is detected.
+ * @hw: pointer to the HW structure
+ */
+bool igc_get_flash_presence_i225(struct igc_hw *hw)
+{
+ u32 eec = 0;
+ bool ret_val = false;
+
+ DEBUGFUNC("igc_get_flash_presence_i225");
+
+ eec = IGC_READ_REG(hw, IGC_EECD);
+
+ if (eec & IGC_EECD_FLASH_DETECTED_I225)
+ ret_val = true;
+
+ return ret_val;
+}
+
+/* igc_set_flsw_flash_burst_counter_i225 - sets FLSW NVM Burst
+ * Counter in FLSWCNT register.
+ *
+ * @hw: pointer to the HW structure
+ * @burst_counter: size in bytes of the Flash burst to read or write
+ */
+s32 igc_set_flsw_flash_burst_counter_i225(struct igc_hw *hw,
+ u32 burst_counter)
+{
+ s32 ret_val = IGC_SUCCESS;
+
+ DEBUGFUNC("igc_set_flsw_flash_burst_counter_i225");
+
+ /* Validate input data */
+ if (burst_counter < IGC_I225_SHADOW_RAM_SIZE) {
+ /* Write FLSWCNT - burst counter */
+ IGC_WRITE_REG(hw, IGC_I225_FLSWCNT, burst_counter);
+ } else {
+ ret_val = IGC_ERR_INVALID_ARGUMENT;
+ }
+
+ return ret_val;
+}
+
+/* igc_write_erase_flash_command_i225 - write/erase to a sector
+ * region on a given address.
+ *
+ * @hw: pointer to the HW structure
+ * @opcode: opcode to be used for the write command
+ * @address: the offset to write into the FLASH image
+ */
+s32 igc_write_erase_flash_command_i225(struct igc_hw *hw, u32 opcode,
+ u32 address)
+{
+ u32 flswctl = 0;
+ s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
+ s32 ret_val = IGC_SUCCESS;
+
+ DEBUGFUNC("igc_write_erase_flash_command_i225");
+
+ flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
+ /* Polling done bit on FLSWCTL register */
+ while (timeout) {
+ if (flswctl & IGC_FLSWCTL_DONE)
+ break;
+ usec_delay(5);
+ flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Flash transaction was not done\n");
+ return -IGC_ERR_NVM;
+ }
+
+ /* Build and issue command on FLSWCTL register */
+ flswctl = address | opcode;
+ IGC_WRITE_REG(hw, IGC_I225_FLSWCTL, flswctl);
+
+ /* Check if issued command is valid on FLSWCTL register */
+ flswctl = IGC_READ_REG(hw, IGC_I225_FLSWCTL);
+ if (!(flswctl & IGC_FLSWCTL_CMDV)) {
+ DEBUGOUT("Write flash command failed\n");
+ ret_val = IGC_ERR_INVALID_ARGUMENT;
+ }
+
+ return ret_val;
+}
+
+/* igc_update_flash_i225 - Commit EEPROM to the flash
+ * If fw_valid_bit is set, FW is active, and setting the FLUPD bit in the
+ * EEC register makes the FW load the internal shadow RAM into the flash.
+ * Otherwise, fw_valid_bit is 0; if FL_SECU.block_protected_sw = 0, FW is
+ * not active and SW is responsible for the shadow RAM dump.
+ *
+ * @hw: pointer to the HW structure
+ */
+s32 igc_update_flash_i225(struct igc_hw *hw)
+{
+ u16 current_offset_data = 0;
+ u32 block_sw_protect = 1;
+ u16 base_address = 0x0;
+ u32 i, fw_valid_bit;
+ u16 current_offset;
+ s32 ret_val = 0;
+ u32 flup;
+
+ DEBUGFUNC("igc_update_flash_i225");
+
+ block_sw_protect = IGC_READ_REG(hw, IGC_I225_FLSECU) &
+ IGC_FLSECU_BLK_SW_ACCESS_I225;
+ fw_valid_bit = IGC_READ_REG(hw, IGC_FWSM) &
+ IGC_FWSM_FW_VALID_I225;
+ if (fw_valid_bit) {
+ ret_val = igc_pool_flash_update_done_i225(hw);
+ if (ret_val == -IGC_ERR_NVM) {
+ DEBUGOUT("Flash update time out\n");
+ goto out;
+ }
+
+ flup = IGC_READ_REG(hw, IGC_EECD) | IGC_EECD_FLUPD_I225;
+ IGC_WRITE_REG(hw, IGC_EECD, flup);
+
+ ret_val = igc_pool_flash_update_done_i225(hw);
+ if (ret_val == IGC_SUCCESS)
+ DEBUGOUT("Flash update complete\n");
+ else
+ DEBUGOUT("Flash update time out\n");
+ } else if (!block_sw_protect) {
+ /* FW is not active and security protection is disabled;
+ * therefore, SW is in charge of the shadow RAM dump.
+ * Check which sector is valid. If sector 0 is valid, the base
+ * address remains 0x0; otherwise, sector 1 is valid and its
+ * base address is 0x1000.
+ */
+ if (IGC_READ_REG(hw, IGC_EECD) & IGC_EECD_SEC1VAL_I225)
+ base_address = 0x1000;
+
+ /* Valid sector erase */
+ ret_val = igc_write_erase_flash_command_i225(hw,
+ IGC_I225_ERASE_CMD_OPCODE,
+ base_address);
+ if (ret_val != IGC_SUCCESS) {
+ DEBUGOUT("Sector erase failed\n");
+ goto out;
+ }
+
+ current_offset = base_address;
+
+ /* Write */
+ for (i = 0; i < IGC_I225_SHADOW_RAM_SIZE / 2; i++) {
+ /* Set burst write length */
+ ret_val = igc_set_flsw_flash_burst_counter_i225(hw,
+ 0x2);
+ if (ret_val != IGC_SUCCESS)
+ break;
+
+ /* Set address and opcode */
+ ret_val = igc_write_erase_flash_command_i225(hw,
+ IGC_I225_WRITE_CMD_OPCODE,
+ 2 * current_offset);
+ if (ret_val != IGC_SUCCESS)
+ break;
+
+ ret_val = igc_read_nvm_eerd(hw, current_offset,
+ 1, &current_offset_data);
+ if (ret_val) {
+ DEBUGOUT("Failed to read from EEPROM\n");
+ goto out;
+ }
+
+ /* Write current_offset_data to the FLSWDATA register */
+ IGC_WRITE_REG(hw, IGC_I225_FLSWDATA,
+ current_offset_data);
+ current_offset++;
+
+ /* Wait till operation has finished */
+ ret_val = igc_poll_eerd_eewr_done(hw,
+ IGC_NVM_POLL_READ);
+ if (ret_val)
+ break;
+
+ usec_delay(1000);
+ }
+ }
+out:
+ return ret_val;
+}
+
+/* igc_pool_flash_update_done_i225 - Poll FLUDONE status.
+ * @hw: pointer to the HW structure
+ */
+s32 igc_pool_flash_update_done_i225(struct igc_hw *hw)
+{
+ s32 ret_val = -IGC_ERR_NVM;
+ u32 i, reg;
+
+ DEBUGFUNC("igc_pool_flash_update_done_i225");
+
+ for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) {
+ reg = IGC_READ_REG(hw, IGC_EECD);
+ if (reg & IGC_EECD_FLUDONE_I225) {
+ ret_val = IGC_SUCCESS;
+ break;
+ }
+ usec_delay(5);
+ }
+
+ return ret_val;
+}
+
+/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds.
+ * @hw: pointer to the HW structure
+ * @link: bool indicating link status
+ *
+ * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC
+ * settings, otherwise specify that there is no LTR requirement.
+ */
+static s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
+{
+ u16 speed, duplex;
+ u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max;
+ s32 size;
+
+ DEBUGFUNC("igc_set_ltr_i225");
+
+ /* If we do not have link, LTR thresholds are zero. */
+ if (link) {
+ hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
+
+ /* Check if using copper interface with EEE enabled or if the
+ * link speed is 10 Mbps.
+ */
+ if ((hw->phy.media_type == igc_media_type_copper) &&
+ !(hw->dev_spec._i225.eee_disable) &&
+ (speed != SPEED_10)) {
+ /* EEE enabled, so send LTRMAX threshold. */
+ ltrc = IGC_READ_REG(hw, IGC_LTRC) |
+ IGC_LTRC_EEEMS_EN;
+ IGC_WRITE_REG(hw, IGC_LTRC, ltrc);
+
+ /* Calculate tw_system (nsec). */
+ if (speed == SPEED_100) {
+ tw_system = ((IGC_READ_REG(hw, IGC_EEE_SU) &
+ IGC_TW_SYSTEM_100_MASK) >>
+ IGC_TW_SYSTEM_100_SHIFT) * 500;
+ } else {
+ tw_system = (IGC_READ_REG(hw, IGC_EEE_SU) &
+ IGC_TW_SYSTEM_1000_MASK) * 500;
+ }
+ } else {
+ tw_system = 0;
+ }
+
+ /* Get the Rx packet buffer size. */
+ size = IGC_READ_REG(hw, IGC_RXPBS) &
+ IGC_RXPBS_SIZE_I225_MASK;
+
+ /* Calculations vary based on DMAC settings. */
+ if (IGC_READ_REG(hw, IGC_DMACR) & IGC_DMACR_DMAC_EN) {
+ size -= (IGC_READ_REG(hw, IGC_DMACR) &
+ IGC_DMACR_DMACTHR_MASK) >>
+ IGC_DMACR_DMACTHR_SHIFT;
+ /* Convert size to bits. */
+ size *= 1024 * 8;
+ } else {
+ /* Convert size to bytes, subtract the MTU, and then
+ * convert the size to bits.
+ */
+ size *= 1024;
+ size -= hw->dev_spec._i225.mtu;
+ size *= 8;
+ }
+
+ if (size < 0) {
+ DEBUGOUT1("Invalid effective Rx buffer size %d\n",
+ size);
+ return -IGC_ERR_CONFIG;
+ }
+
+ /* Calculate the thresholds. Since speed is in Mbps, simplify
+ * the calculation by multiplying size/speed by 1000 for result
+ * to be in nsec before dividing by the scale in nsec. Set the
+ * scale such that the LTR threshold fits in the register.
+ */
+ ltr_min = (1000 * size) / speed;
+ ltr_max = ltr_min + tw_system;
+ scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 :
+ IGC_LTRMINV_SCALE_32768;
+ scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 :
+ IGC_LTRMAXV_SCALE_32768;
+ ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768;
+ ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768;
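+ /* Illustrative example (hypothetical numbers): with an effective Rx
+ * buffer of 24 KB (196608 bits) at 1000 Mbps and tw_system == 0,
+ * ltr_min = (1000 * 196608) / 1000 = 196608 nsec; 196608 / 1024 = 192,
+ * which is below 1024, so the 1024 nsec scale is selected and the
+ * encoded threshold is 192.
+ */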
+
+ /* Only write the LTR thresholds if they differ from before. */
+ ltrv = IGC_READ_REG(hw, IGC_LTRMINV);
+ if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) {
+ ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min |
+ (scale_min << IGC_LTRMINV_SCALE_SHIFT);
+ IGC_WRITE_REG(hw, IGC_LTRMINV, ltrv);
+ }
+
+ ltrv = IGC_READ_REG(hw, IGC_LTRMAXV);
+ if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
+ ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
+ (scale_max << IGC_LTRMAXV_SCALE_SHIFT);
+ IGC_WRITE_REG(hw, IGC_LTRMAXV, ltrv);
+ }
+ }
+
+ return IGC_SUCCESS;
+}
+
+/* igc_check_for_link_i225 - Check for link
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ */
+s32 igc_check_for_link_i225(struct igc_hw *hw)
+{
+ struct igc_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link = false;
+
+ DEBUGFUNC("igc_check_for_link_i225");
+
+ /* We only want to go out to the PHY registers to see if
+ * Auto-Neg has completed and/or if our link status has
+ * changed. The get_link_status flag is set upon receiving
+ * a Link Status Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status) {
+ ret_val = IGC_SUCCESS;
+ goto out;
+ }
+
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = igc_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link)
+ goto out; /* No link detected */
+
+ mac->get_link_status = false;
+
+ /* Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+ igc_check_downshift_generic(hw);
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg)
+ goto out;
+
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ mac->ops.config_collision_dist(hw);
+
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = igc_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
+out:
+ /* Now that we are aware of our link settings, we can set the LTR
+ * thresholds.
+ */
+ ret_val = igc_set_ltr_i225(hw, link);
+
+ return ret_val;
+}
+
+/* igc_init_function_pointers_i225 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ */
+void igc_init_function_pointers_i225(struct igc_hw *hw)
+{
+ igc_init_mac_ops_generic(hw);
+ igc_init_phy_ops_generic(hw);
+ igc_init_nvm_ops_generic(hw);
+ hw->mac.ops.init_params = igc_init_mac_params_i225;
+ hw->nvm.ops.init_params = igc_init_nvm_params_i225;
+ hw->phy.ops.init_params = igc_init_phy_params_i225;
+}
+
+/* igc_init_hw_i225 - Init hw for I225
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize hw for i225 hw family.
+ */
+s32 igc_init_hw_i225(struct igc_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("igc_init_hw_i225");
+
+ ret_val = igc_init_hw_base(hw);
+ return ret_val;
+}
+
+/*
+ * igc_set_d0_lplu_state_i225 - Set Low-Power-Link-Up (LPLU) D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Note: since I225 does not actually support LPLU, this function
+ * simply enables/disables 1G and 2.5G speeds in D0.
+ */
+s32 igc_set_d0_lplu_state_i225(struct igc_hw *hw, bool active)
+{
+ u32 data;
+
+ DEBUGFUNC("igc_set_d0_lplu_state_i225");
+
+ data = IGC_READ_REG(hw, IGC_I225_PHPM);
+
+ if (active) {
+ data |= IGC_I225_PHPM_DIS_1000;
+ data |= IGC_I225_PHPM_DIS_2500;
+ } else {
+ data &= ~IGC_I225_PHPM_DIS_1000;
+ data &= ~IGC_I225_PHPM_DIS_2500;
+ }
+
+ IGC_WRITE_REG(hw, IGC_I225_PHPM, data);
+ return IGC_SUCCESS;
+}
+
+/*
+ * igc_set_d3_lplu_state_i225 - Set Low-Power-Link-Up (LPLU) D3 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Note: since I225 does not actually support LPLU, this function
+ * simply enables/disables 100M, 1G and 2.5G speeds in D3.
+ */
+s32 igc_set_d3_lplu_state_i225(struct igc_hw *hw, bool active)
+{
+ u32 data;
+
+ DEBUGFUNC("igc_set_d3_lplu_state_i225");
+
+ data = IGC_READ_REG(hw, IGC_I225_PHPM);
+
+ if (active) {
+ data |= IGC_I225_PHPM_DIS_100_D3;
+ data |= IGC_I225_PHPM_DIS_1000_D3;
+ data |= IGC_I225_PHPM_DIS_2500_D3;
+ } else {
+ data &= ~IGC_I225_PHPM_DIS_100_D3;
+ data &= ~IGC_I225_PHPM_DIS_1000_D3;
+ data &= ~IGC_I225_PHPM_DIS_2500_D3;
+ }
+
+ IGC_WRITE_REG(hw, IGC_I225_PHPM, data);
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_set_eee_i225 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @adv2p5G: boolean flag enabling 2.5G EEE advertisement
+ * @adv1G: boolean flag enabling 1G EEE advertisement
+ * @adv100M: boolean flag enabling 100M EEE advertisement
+ *
+ * Enable/disable EEE based on setting in dev_spec structure.
+ *
+ **/
+s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
+ bool adv100M)
+{
+ u32 ipcnfg, eeer;
+
+ DEBUGFUNC("igc_set_eee_i225");
+
+ if (hw->mac.type != igc_i225 ||
+ hw->phy.media_type != igc_media_type_copper)
+ goto out;
+ ipcnfg = IGC_READ_REG(hw, IGC_IPCNFG);
+ eeer = IGC_READ_REG(hw, IGC_EEER);
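+ /* IPCNFG selects the speeds at which EEE is advertised during
+ * auto-negotiation, while EEER enables LPI entry on Tx/Rx and allows
+ * flow control during LPI.
+ */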
+
+ /* enable or disable per user setting */
+ if (!(hw->dev_spec._i225.eee_disable)) {
+ u32 eee_su = IGC_READ_REG(hw, IGC_EEE_SU);
+
+ if (adv100M)
+ ipcnfg |= IGC_IPCNFG_EEE_100M_AN;
+ else
+ ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN;
+
+ if (adv1G)
+ ipcnfg |= IGC_IPCNFG_EEE_1G_AN;
+ else
+ ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN;
+
+ if (adv2p5G)
+ ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN;
+ else
+ ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN;
+
+ eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
+ IGC_EEER_LPI_FC);
+
+ /* This bit should not be set in normal operation. */
+ if (eee_su & IGC_EEE_SU_LPI_CLK_STP)
+ DEBUGOUT("LPI Clock Stop Bit should not be set!\n");
+ } else {
+ ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN |
+ IGC_IPCNFG_EEE_100M_AN);
+ eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
+ IGC_EEER_LPI_FC);
+ }
+ IGC_WRITE_REG(hw, IGC_IPCNFG, ipcnfg);
+ IGC_WRITE_REG(hw, IGC_EEER, eeer);
+ IGC_READ_REG(hw, IGC_IPCNFG);
+ IGC_READ_REG(hw, IGC_EEER);
+out:
+
+ return IGC_SUCCESS;
+}
+
diff --git a/sys/dev/igc/igc_i225.h b/sys/dev/igc/igc_i225.h
new file mode 100644
index 000000000000..816f98691ac2
--- /dev/null
+++ b/sys/dev/igc/igc_i225.h
@@ -0,0 +1,112 @@
+/*-
+ * Copyright 2021 Intel Corp
+ * Copyright 2021 Rubicon Communications, LLC (Netgate)
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IGC_I225_H_
+#define _IGC_I225_H_
+
+bool igc_get_flash_presence_i225(struct igc_hw *hw);
+s32 igc_update_flash_i225(struct igc_hw *hw);
+s32 igc_update_nvm_checksum_i225(struct igc_hw *hw);
+s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw);
+s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 igc_set_flsw_flash_burst_counter_i225(struct igc_hw *hw,
+ u32 burst_counter);
+s32 igc_write_erase_flash_command_i225(struct igc_hw *hw, u32 opcode,
+ u32 address);
+s32 igc_check_for_link_i225(struct igc_hw *hw);
+s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask);
+void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask);
+s32 igc_init_hw_i225(struct igc_hw *hw);
+s32 igc_setup_copper_link_i225(struct igc_hw *hw);
+s32 igc_set_d0_lplu_state_i225(struct igc_hw *hw, bool active);
+s32 igc_set_d3_lplu_state_i225(struct igc_hw *hw, bool active);
+s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
+ bool adv100M);
+
+#define ID_LED_DEFAULT_I225 ((ID_LED_OFF1_ON2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_OFF2))
+#define ID_LED_DEFAULT_I225_SERDES ((ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
+
+/* NVM offset defaults for I225 devices */
+#define NVM_INIT_CTRL_2_DEFAULT_I225 0X7243
+#define NVM_INIT_CTRL_4_DEFAULT_I225 0x00C1
+#define NVM_LED_1_CFG_DEFAULT_I225 0x0184
+#define NVM_LED_0_2_CFG_DEFAULT_I225 0x200C
+
+#define IGC_MRQC_ENABLE_RSS_4Q 0x00000002
+#define IGC_MRQC_ENABLE_VMDQ 0x00000003
+#define IGC_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
+#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define IGC_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
+#define IGC_I225_SHADOW_RAM_SIZE 4096
+#define IGC_I225_ERASE_CMD_OPCODE 0x02000000
+#define IGC_I225_WRITE_CMD_OPCODE 0x01000000
+#define IGC_FLSWCTL_DONE 0x40000000
+#define IGC_FLSWCTL_CMDV 0x10000000
+
+/* SRRCTL bit definitions */
+#define IGC_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
+#define IGC_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define IGC_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define IGC_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IGC_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000
+#define IGC_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define IGC_SRRCTL_DESCTYPE_MASK 0x0E000000
+#define IGC_SRRCTL_DROP_EN 0x80000000
+#define IGC_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define IGC_SRRCTL_BSIZEHDR_MASK 0x00003F00
+
+#define IGC_RXDADV_RSSTYPE_MASK 0x0000000F
+#define IGC_RXDADV_RSSTYPE_SHIFT 12
+#define IGC_RXDADV_HDRBUFLEN_MASK 0x7FE0
+#define IGC_RXDADV_HDRBUFLEN_SHIFT 5
+#define IGC_RXDADV_SPLITHEADER_EN 0x00001000
+#define IGC_RXDADV_SPH 0x8000
+#define IGC_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
+#define IGC_RXDADV_ERR_HBO 0x00800000
+
+/* RSS Hash results */
+#define IGC_RXDADV_RSSTYPE_NONE 0x00000000
+#define IGC_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
+#define IGC_RXDADV_RSSTYPE_IPV4 0x00000002
+#define IGC_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
+#define IGC_RXDADV_RSSTYPE_IPV6_EX 0x00000004
+#define IGC_RXDADV_RSSTYPE_IPV6 0x00000005
+#define IGC_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define IGC_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
+#define IGC_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
+#define IGC_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
+/* RSS Packet Types as indicated in the receive descriptor */
+#define IGC_RXDADV_PKTTYPE_ILMASK 0x000000F0
+#define IGC_RXDADV_PKTTYPE_TLMASK 0x00000F00
+#define IGC_RXDADV_PKTTYPE_NONE 0x00000000
+#define IGC_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
+#define IGC_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */
+#define IGC_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */
+#define IGC_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */
+#define IGC_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
+#define IGC_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
+#define IGC_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
+#define IGC_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+
+#define IGC_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
+#define IGC_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
+#define IGC_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
+#define IGC_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
+#define IGC_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
+#define IGC_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
+
+#endif
diff --git a/sys/dev/igc/igc_mac.c b/sys/dev/igc/igc_mac.c
new file mode 100644
index 000000000000..0355e54682bc
--- /dev/null
+++ b/sys/dev/igc/igc_mac.c
@@ -0,0 +1,1050 @@
+/*-
+ * Copyright 2021 Intel Corp
+ * Copyright 2021 Rubicon Communications, LLC (Netgate)
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "igc_api.h"
+
+static void igc_config_collision_dist_generic(struct igc_hw *hw);
+
+/**
+ * igc_init_mac_ops_generic - Initialize MAC function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the function pointers to no-op functions
+ **/
+void igc_init_mac_ops_generic(struct igc_hw *hw)
+{
+ struct igc_mac_info *mac = &hw->mac;
+ DEBUGFUNC("igc_init_mac_ops_generic");
+
+ /* General Setup */
+ mac->ops.init_params = igc_null_ops_generic;
+ mac->ops.config_collision_dist = igc_config_collision_dist_generic;
+ mac->ops.rar_set = igc_rar_set_generic;
+}
+
+/**
+ * igc_null_ops_generic - No-op function, returns 0
+ * @hw: pointer to the HW structure
+ **/
+s32 igc_null_ops_generic(struct igc_hw IGC_UNUSEDARG *hw)
+{
+ DEBUGFUNC("igc_null_ops_generic");
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_null_mac_generic - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void igc_null_mac_generic(struct igc_hw IGC_UNUSEDARG *hw)
+{
+ DEBUGFUNC("igc_null_mac_generic");
+ return;
+}
+
+/**
+ * igc_null_link_info - No-op function, return 0
+ * @hw: pointer to the HW structure
+ * @s: dummy variable
+ * @d: dummy variable
+ **/
+s32 igc_null_link_info(struct igc_hw IGC_UNUSEDARG *hw,
+ u16 IGC_UNUSEDARG *s, u16 IGC_UNUSEDARG *d)
+{
+ DEBUGFUNC("igc_null_link_info");
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_null_mng_mode - No-op function, return false
+ * @hw: pointer to the HW structure
+ **/
+bool igc_null_mng_mode(struct igc_hw IGC_UNUSEDARG *hw)
+{
+ DEBUGFUNC("igc_null_mng_mode");
+ return false;
+}
+
+/**
+ * igc_null_update_mc - No-op function, return void
+ * @hw: pointer to the HW structure
+ * @h: dummy variable
+ * @a: dummy variable
+ **/
+void igc_null_update_mc(struct igc_hw IGC_UNUSEDARG *hw,
+ u8 IGC_UNUSEDARG *h, u32 IGC_UNUSEDARG a)
+{
+ DEBUGFUNC("igc_null_update_mc");
+ return;
+}
+
+/**
+ * igc_null_write_vfta - No-op function, return void
+ * @hw: pointer to the HW structure
+ * @a: dummy variable
+ * @b: dummy variable
+ **/
+void igc_null_write_vfta(struct igc_hw IGC_UNUSEDARG *hw,
+ u32 IGC_UNUSEDARG a, u32 IGC_UNUSEDARG b)
+{
+ DEBUGFUNC("igc_null_write_vfta");
+ return;
+}
+
+/**
+ * igc_null_rar_set - No-op function, return 0
+ * @hw: pointer to the HW structure
+ * @h: dummy variable
+ * @a: dummy variable
+ **/
+int igc_null_rar_set(struct igc_hw IGC_UNUSEDARG *hw,
+ u8 IGC_UNUSEDARG *h, u32 IGC_UNUSEDARG a)
+{
+ DEBUGFUNC("igc_null_rar_set");
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_set_lan_id_single_port - Set LAN id for a single port device
+ * @hw: pointer to the HW structure
+ *
+ * Sets the LAN function id to zero for a single port device.
+ **/
+void igc_set_lan_id_single_port(struct igc_hw *hw)
+{
+ struct igc_bus_info *bus = &hw->bus;
+
+ bus->func = 0;
+}
+
+/**
+ * igc_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void igc_clear_vfta_generic(struct igc_hw *hw)
+{
+ u32 offset;
+
+ DEBUGFUNC("igc_clear_vfta_generic");
+
+ for (offset = 0; offset < IGC_VLAN_FILTER_TBL_SIZE; offset++) {
+ IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, offset, 0);
+ IGC_WRITE_FLUSH(hw);
+ }
+}
+
+/**
+ * igc_write_vfta_generic - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+void igc_write_vfta_generic(struct igc_hw *hw, u32 offset, u32 value)
+{
+ DEBUGFUNC("igc_write_vfta_generic");
+
+ IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, offset, value);
+ IGC_WRITE_FLUSH(hw);
+}
+
+/**
+ * igc_init_rx_addrs_generic - Initialize receive addresses
+ * @hw: pointer to the HW structure
+ * @rar_count: receive address registers
+ *
+ * Sets up the receive address registers by setting the base receive address
+ * register to the device's MAC address and clearing all the other receive
+ * address registers to 0.
+ **/
+void igc_init_rx_addrs_generic(struct igc_hw *hw, u16 rar_count)
+{
+ u32 i;
+ u8 mac_addr[ETH_ADDR_LEN] = {0};
+
+ DEBUGFUNC("igc_init_rx_addrs_generic");
+
+ /* Setup the receive address */
+ DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+ hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+ /* Zero out the other (rar_entry_count - 1) receive addresses */
+ DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
+ for (i = 1; i < rar_count; i++)
+ hw->mac.ops.rar_set(hw, mac_addr, i);
+}
+
+/**
+ * igc_check_alt_mac_addr_generic - Check for alternate MAC addr
+ * @hw: pointer to the HW structure
+ *
+ * Checks the nvm for an alternate MAC address. An alternate MAC address
+ * can be set up by pre-boot software and must be treated like a permanent
+ * address and must override the actual permanent MAC address. If an
+ * alternate MAC address is found it is programmed into RAR0, replacing
+ * the permanent address that was installed into RAR0 by the Si on reset.
+ * This function will return SUCCESS unless it encounters an error while
+ * reading the EEPROM.
+ **/
+s32 igc_check_alt_mac_addr_generic(struct igc_hw *hw)
+{
+ u32 i;
+ s32 ret_val;
+ u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+ u8 alt_mac_addr[ETH_ADDR_LEN];
+
+ DEBUGFUNC("igc_check_alt_mac_addr_generic");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data);
+ if (ret_val)
+ return ret_val;
+
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+ &nvm_alt_mac_addr_offset);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+ (nvm_alt_mac_addr_offset == 0x0000))
+ /* There is no Alternate MAC Address */
+ return IGC_SUCCESS;
+
+ if (hw->bus.func == IGC_FUNC_1)
+ nvm_alt_mac_addr_offset += IGC_ALT_MAC_ADDRESS_OFFSET_LAN1;
+ for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+ offset = nvm_alt_mac_addr_offset + (i >> 1);
+ ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+ alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+ }
+
+ /* if multicast bit is set, the alternate address will not be used */
+ if (alt_mac_addr[0] & 0x01) {
+ DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n");
+ return IGC_SUCCESS;
+ }
+
+ /* We have a valid alternate MAC address, and we want to treat it the
+ * same as the normal permanent MAC address stored by the HW into the
+ * RAR. Do this by mapping this address into RAR0.
+ */
+ hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_rar_set_generic - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr.
+ **/
+int igc_rar_set_generic(struct igc_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ DEBUGFUNC("igc_rar_set_generic");
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
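+ /* For example (illustrative address only): 00:1b:21:3c:4d:5e packs
+ * as rar_low = 0x3c211b00 and rar_high = 0x00005e4d before the AV
+ * bit is set below.
+ */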
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= IGC_RAH_AV;
+
+ /* Some bridges will combine consecutive 32-bit writes into
+ * a single burst write, which will malfunction on some parts.
+ * The flushes avoid this.
+ */
+ IGC_WRITE_REG(hw, IGC_RAL(index), rar_low);
+ IGC_WRITE_FLUSH(hw);
+ IGC_WRITE_REG(hw, IGC_RAH(index), rar_high);
+ IGC_WRITE_FLUSH(hw);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_hash_mc_addr_generic - Generate a multicast hash value
+ * @hw: pointer to the HW structure
+ * @mc_addr: pointer to a multicast address
+ *
+ * Generates a multicast address hash value which is used to determine
+ * the multicast filter table array address and new table value.
+ **/
+u32 igc_hash_mc_addr_generic(struct igc_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value, hash_mask;
+ u8 bit_shift = 0;
+
+ DEBUGFUNC("igc_hash_mc_addr_generic");
+
+ /* Register count multiplied by bits per register */
+ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+ /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
+ * where 0xFF would still fall within the hash mask.
+ */
+ while (hash_mask >> bit_shift != 0xFF)
+ bit_shift++;
+
+ /* The portion of the address that is used for the hash table
+ * is determined by the mc_filter_type setting.
+ * The algorithm is such that there is a total of 8 bits of shifting.
+ * The bit_shift for a mc_filter_type of 0 represents the number of
+ * left-shifts where the MSB of mc_addr[5] would still fall within
+ * the hash_mask. Case 0 does this exactly. Since there are a total
+ * of 8 bits of shifting, then mc_addr[4] will shift right the
+ * remaining number of bits. Thus 8 - bit_shift. The rest of the
+ * cases are a variation of this algorithm...essentially raising the
+ * number of bits to shift mc_addr[5] left, while still keeping the
+ * 8-bit shifting total.
+ *
+ * For example, given the following Destination MAC Address and an
+ * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+ * we can see that the bit_shift for case 0 is 4. These are the hash
+ * values resulting from each mc_filter_type...
+ * [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ *
+ * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+ * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+ * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+ * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+ */
+ switch (hw->mac.mc_filter_type) {
+ default:
+ case 0:
+ break;
+ case 1:
+ bit_shift += 1;
+ break;
+ case 2:
+ bit_shift += 2;
+ break;
+ case 3:
+ bit_shift += 4;
+ break;
+ }
+
+ hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+ (((u16) mc_addr[5]) << bit_shift)));
+
+ return hash_value;
+}
+
+/**
+ * igc_update_mc_addr_list_generic - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates entire Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void igc_update_mc_addr_list_generic(struct igc_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count)
+{
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
+
+ DEBUGFUNC("igc_update_mc_addr_list_generic");
+
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32) i < mc_addr_count; i++) {
+ hash_value = igc_hash_mc_addr_generic(hw, mc_addr_list);
+
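+ /* The hash selects a single bit in the multicast table array: the
+ * upper bits pick one of the 32-bit MTA registers and the low five
+ * bits pick the bit within that register.
+ */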
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+
+ hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ mc_addr_list += (ETH_ADDR_LEN);
+ }
+
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ IGC_WRITE_REG_ARRAY(hw, IGC_MTA, i, hw->mac.mta_shadow[i]);
+ IGC_WRITE_FLUSH(hw);
+}
+
+/**
+ * igc_clear_hw_cntrs_base_generic - Clear base hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the base hardware counters by reading the counter registers.
+ **/
+void igc_clear_hw_cntrs_base_generic(struct igc_hw *hw)
+{
+ DEBUGFUNC("igc_clear_hw_cntrs_base_generic");
+
+ IGC_READ_REG(hw, IGC_CRCERRS);
+ IGC_READ_REG(hw, IGC_MPC);
+ IGC_READ_REG(hw, IGC_SCC);
+ IGC_READ_REG(hw, IGC_ECOL);
+ IGC_READ_REG(hw, IGC_MCC);
+ IGC_READ_REG(hw, IGC_LATECOL);
+ IGC_READ_REG(hw, IGC_COLC);
+ IGC_READ_REG(hw, IGC_RERC);
+ IGC_READ_REG(hw, IGC_DC);
+ IGC_READ_REG(hw, IGC_RLEC);
+ IGC_READ_REG(hw, IGC_XONRXC);
+ IGC_READ_REG(hw, IGC_XONTXC);
+ IGC_READ_REG(hw, IGC_XOFFRXC);
+ IGC_READ_REG(hw, IGC_XOFFTXC);
+ IGC_READ_REG(hw, IGC_FCRUC);
+ IGC_READ_REG(hw, IGC_GPRC);
+ IGC_READ_REG(hw, IGC_BPRC);
+ IGC_READ_REG(hw, IGC_MPRC);
+ IGC_READ_REG(hw, IGC_GPTC);
+ IGC_READ_REG(hw, IGC_GORCL);
+ IGC_READ_REG(hw, IGC_GORCH);
+ IGC_READ_REG(hw, IGC_GOTCL);
+ IGC_READ_REG(hw, IGC_GOTCH);
+ IGC_READ_REG(hw, IGC_RNBC);
+ IGC_READ_REG(hw, IGC_RUC);
+ IGC_READ_REG(hw, IGC_RFC);
+ IGC_READ_REG(hw, IGC_ROC);
+ IGC_READ_REG(hw, IGC_RJC);
+ IGC_READ_REG(hw, IGC_TORL);
+ IGC_READ_REG(hw, IGC_TORH);
+ IGC_READ_REG(hw, IGC_TOTL);
+ IGC_READ_REG(hw, IGC_TOTH);
+ IGC_READ_REG(hw, IGC_TPR);
+ IGC_READ_REG(hw, IGC_TPT);
+ IGC_READ_REG(hw, IGC_MPTC);
+ IGC_READ_REG(hw, IGC_BPTC);
+ IGC_READ_REG(hw, IGC_TLPIC);
+ IGC_READ_REG(hw, IGC_RLPIC);
+ IGC_READ_REG(hw, IGC_RXDMTC);
+}
+
+/**
+ * igc_check_for_copper_link_generic - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+s32 igc_check_for_copper_link_generic(struct igc_hw *hw)
+{
+ struct igc_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link = false;
+
+ DEBUGFUNC("igc_check_for_copper_link");
+
+ /* We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status)
+ return IGC_SUCCESS;
+
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = igc_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ return IGC_SUCCESS; /* No link detected */
+
+ mac->get_link_status = false;
+
+ /* Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+ igc_check_downshift_generic(hw);
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg)
+ return -IGC_ERR_CONFIG;
+
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ mac->ops.config_collision_dist(hw);
+
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = igc_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
+
+ return ret_val;
+}
+
+/**
+ * igc_setup_link_generic - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+s32 igc_setup_link_generic(struct igc_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("igc_setup_link_generic");
+
+ /* In the case of the phy reset being blocked, we already have a link.
+ * We do not need to set it up again.
+ */
+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
+ return IGC_SUCCESS;
+
+ /* If requested flow control is set to default, set flow control
+ * for both 'rx' and 'tx' pause frames.
+ */
+ if (hw->fc.requested_mode == igc_fc_default) {
+ hw->fc.requested_mode = igc_fc_full;
+ }
+
+ /* Save off the requested flow control mode for use later. Depending
+ * on the link partner's capabilities, we may or may not use this mode.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
+ hw->fc.current_mode);
+
+ /* Call the necessary media_type subroutine to configure the link. */
+ ret_val = hw->mac.ops.setup_physical_interface(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
+ IGC_WRITE_REG(hw, IGC_FCT, FLOW_CONTROL_TYPE);
+ IGC_WRITE_REG(hw, IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ IGC_WRITE_REG(hw, IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+ IGC_WRITE_REG(hw, IGC_FCTTV, hw->fc.pause_time);
+
+ return igc_set_fc_watermarks_generic(hw);
+}
+
+/**
+ * igc_config_collision_dist_generic - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
+ **/
+static void igc_config_collision_dist_generic(struct igc_hw *hw)
+{
+ u32 tctl;
+
+ DEBUGFUNC("igc_config_collision_dist_generic");
+
+ tctl = IGC_READ_REG(hw, IGC_TCTL);
+
+ tctl &= ~IGC_TCTL_COLD;
+ tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT;
+
+ IGC_WRITE_REG(hw, IGC_TCTL, tctl);
+ IGC_WRITE_FLUSH(hw);
+}
+
+/**
+ * igc_set_fc_watermarks_generic - Set flow control high/low watermarks
+ * @hw: pointer to the HW structure
+ *
+ * Sets the flow control high/low threshold (watermark) registers. If
+ * flow control XON frame transmission is enabled, then set XON frame
+ * transmission as well.
+ **/
+s32 igc_set_fc_watermarks_generic(struct igc_hw *hw)
+{
+ u32 fcrtl = 0, fcrth = 0;
+
+ DEBUGFUNC("igc_set_fc_watermarks_generic");
+
+ /* Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+ * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if (hw->fc.current_mode & igc_fc_tx_pause) {
+ /* We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames.
+ */
+ fcrtl = hw->fc.low_water;
+ if (hw->fc.send_xon)
+ fcrtl |= IGC_FCRTL_XONE;
+
+ fcrth = hw->fc.high_water;
+ }
+ IGC_WRITE_REG(hw, IGC_FCRTL, fcrtl);
+ IGC_WRITE_REG(hw, IGC_FCRTH, fcrth);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_force_mac_fc_generic - Force the MAC's flow control settings
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
+ * device control register to reflect the adapter settings. TFCE and RFCE
+ * need to be explicitly set by software when a copper PHY is used because
+ * autonegotiation is managed by the PHY rather than the MAC. Software must
+ * also configure these bits when link is forced on a fiber connection.
+ **/
+s32 igc_force_mac_fc_generic(struct igc_hw *hw)
+{
+ u32 ctrl;
+
+ DEBUGFUNC("igc_force_mac_fc_generic");
+
+ ctrl = IGC_READ_REG(hw, IGC_CTRL);
+
+ /* Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disables flow control
+ * according to the "hw->fc.current_mode" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not receive pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) is enabled.
+ * other: No other values should be possible at this point.
+ */
+ DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+ switch (hw->fc.current_mode) {
+ case igc_fc_none:
+ ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE));
+ break;
+ case igc_fc_rx_pause:
+ ctrl &= (~IGC_CTRL_TFCE);
+ ctrl |= IGC_CTRL_RFCE;
+ break;
+ case igc_fc_tx_pause:
+ ctrl &= (~IGC_CTRL_RFCE);
+ ctrl |= IGC_CTRL_TFCE;
+ break;
+ case igc_fc_full:
+ ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -IGC_ERR_CONFIG;
+ }
+
+ IGC_WRITE_REG(hw, IGC_CTRL, ctrl);
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_config_fc_after_link_up_generic - Configures flow control after link
+ * @hw: pointer to the HW structure
+ *
+ * Checks the status of auto-negotiation after link up to ensure that the
+ * speed and duplex were not forced. If the link needed to be forced, then
+ * flow control needs to be forced also. If auto-negotiation is enabled
+ * and did not fail, then we configure flow control based on our link
+ * partner.
+ **/
+s32 igc_config_fc_after_link_up_generic(struct igc_hw *hw)
+{
+ struct igc_mac_info *mac = &hw->mac;
+ s32 ret_val = IGC_SUCCESS;
+ u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+ u16 speed, duplex;
+
+ DEBUGFUNC("igc_config_fc_after_link_up_generic");
+
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+
+ /* Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if (mac->autoneg) {
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+ DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (Address 4) and the Auto_Negotiation Base
+ * Page Ability Register (Address 5) to determine how
+ * flow control was negotiated.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
+ &mii_nway_adv_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
+ &mii_nway_lp_ability_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | igc_fc_none
+ * 0 | 1 | 0 | DC | igc_fc_none
+ * 0 | 1 | 1 | 0 | igc_fc_none
+ * 0 | 1 | 1 | 1 | igc_fc_tx_pause
+ * 1 | 0 | 0 | DC | igc_fc_none
+ * 1 | DC | 1 | DC | igc_fc_full
+ * 1 | 1 | 0 | 0 | igc_fc_none
+ * 1 | 1 | 0 | 1 | igc_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | IGC_fc_full
+ *
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ /* Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == igc_fc_full) {
+ hw->fc.current_mode = igc_fc_full;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = igc_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | igc_fc_tx_pause
+ */
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = igc_fc_tx_pause;
+ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | igc_fc_rx_pause
+ */
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = igc_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = igc_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
+ }
+
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if (duplex == HALF_DUPLEX)
+ hw->fc.current_mode = igc_fc_none;
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = igc_force_mac_fc_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Read the status register for the current speed/duplex and store the current
+ * speed and duplex for copper connections.
+ **/
+s32 igc_get_speed_and_duplex_copper_generic(struct igc_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ u32 status;
+
+ DEBUGFUNC("igc_get_speed_and_duplex_copper_generic");
+
+ status = IGC_READ_REG(hw, IGC_STATUS);
+ if (status & IGC_STATUS_SPEED_1000) {
+ /* For I225, STATUS will indicate 1G speed in both 1 Gbps
+ * and 2.5 Gbps link modes. An additional bit is used
+ * to differentiate between 1 Gbps and 2.5 Gbps.
+ */
+ if ((hw->mac.type == igc_i225) &&
+ (status & IGC_STATUS_SPEED_2500)) {
+ *speed = SPEED_2500;
+ DEBUGOUT("2500 Mbs, ");
+ } else {
+ *speed = SPEED_1000;
+ DEBUGOUT("1000 Mbs, ");
+ }
+ } else if (status & IGC_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ DEBUGOUT("100 Mbs, ");
+ } else {
+ *speed = SPEED_10;
+ DEBUGOUT("10 Mbs, ");
+ }
+
+ if (status & IGC_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ DEBUGOUT("Half Duplex\n");
+ }
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_get_hw_semaphore_generic - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+s32 igc_get_hw_semaphore_generic(struct igc_hw *hw)
+{
+ u32 swsm;
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ DEBUGFUNC("igc_get_hw_semaphore_generic");
+
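+ /* Acquisition is two-stage: wait for the hardware-owned SMBI bit to
+ * clear, then set SWESMBI and read it back to confirm software
+ * ownership. Both bits are cleared again by
+ * igc_put_hw_semaphore_generic().
+ */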
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = IGC_READ_REG(hw, IGC_SWSM);
+ if (!(swsm & IGC_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ i++;
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+ return -IGC_ERR_NVM;
+ }
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = IGC_READ_REG(hw, IGC_SWSM);
+ IGC_WRITE_REG(hw, IGC_SWSM, swsm | IGC_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (IGC_READ_REG(hw, IGC_SWSM) & IGC_SWSM_SWESMBI)
+ break;
+
+ usec_delay(50);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ igc_put_hw_semaphore_generic(hw);
+ DEBUGOUT("Driver can't access the NVM\n");
+ return -IGC_ERR_NVM;
+ }
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_put_hw_semaphore_generic - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ **/
+void igc_put_hw_semaphore_generic(struct igc_hw *hw)
+{
+ u32 swsm;
+
+ DEBUGFUNC("igc_put_hw_semaphore_generic");
+
+ swsm = IGC_READ_REG(hw, IGC_SWSM);
+
+ swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI);
+
+ IGC_WRITE_REG(hw, IGC_SWSM, swsm);
+}
+
+/**
+ * igc_get_auto_rd_done_generic - Check for auto read completion
+ * @hw: pointer to the HW structure
+ *
+ * Check EEPROM for Auto Read done bit.
+ **/
+s32 igc_get_auto_rd_done_generic(struct igc_hw *hw)
+{
+ s32 i = 0;
+
+ DEBUGFUNC("igc_get_auto_rd_done_generic");
+
+ while (i < AUTO_READ_DONE_TIMEOUT) {
+ if (IGC_READ_REG(hw, IGC_EECD) & IGC_EECD_AUTO_RD)
+ break;
+ msec_delay(1);
+ i++;
+ }
+
+ if (i == AUTO_READ_DONE_TIMEOUT) {
+ DEBUGOUT("Auto read by HW from NVM has not completed.\n");
+ return -IGC_ERR_RESET;
+ }
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_disable_pcie_master_generic - Disables PCI-express master access
+ * @hw: pointer to the HW structure
+ *
+ * Returns IGC_SUCCESS if successful, else returns -10
+ * (-IGC_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
+ * the master requests to be disabled.
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests.
+ **/
+s32 igc_disable_pcie_master_generic(struct igc_hw *hw)
+{
+ u32 ctrl;
+ s32 timeout = MASTER_DISABLE_TIMEOUT;
+
+ DEBUGFUNC("igc_disable_pcie_master_generic");
+
+ ctrl = IGC_READ_REG(hw, IGC_CTRL);
+ ctrl |= IGC_CTRL_GIO_MASTER_DISABLE;
+ IGC_WRITE_REG(hw, IGC_CTRL, ctrl);
+
+ while (timeout) {
+ if (!(IGC_READ_REG(hw, IGC_STATUS) &
+ IGC_STATUS_GIO_MASTER_ENABLE))
+ break;
+ usec_delay(100);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Master requests are pending.\n");
+ return -IGC_ERR_MASTER_REQUESTS_PENDING;
+ }
+
+ return IGC_SUCCESS;
+}
diff --git a/sys/dev/igc/igc_mac.h b/sys/dev/igc/igc_mac.h
new file mode 100644
index 000000000000..d010788c1aad
--- /dev/null
+++ b/sys/dev/igc/igc_mac.h
@@ -0,0 +1,48 @@
+/*-
+ * Copyright 2021 Intel Corp
+ * Copyright 2021 Rubicon Communications, LLC (Netgate)
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IGC_MAC_H_
+#define _IGC_MAC_H_
+
+void igc_init_mac_ops_generic(struct igc_hw *hw);
+void igc_null_mac_generic(struct igc_hw *hw);
+s32 igc_null_ops_generic(struct igc_hw *hw);
+s32 igc_null_link_info(struct igc_hw *hw, u16 *s, u16 *d);
+bool igc_null_mng_mode(struct igc_hw *hw);
+void igc_null_update_mc(struct igc_hw *hw, u8 *h, u32 a);
+void igc_null_write_vfta(struct igc_hw *hw, u32 a, u32 b);
+int igc_null_rar_set(struct igc_hw *hw, u8 *h, u32 a);
+s32 igc_check_for_copper_link_generic(struct igc_hw *hw);
+s32 igc_config_fc_after_link_up_generic(struct igc_hw *hw);
+s32 igc_disable_pcie_master_generic(struct igc_hw *hw);
+s32 igc_force_mac_fc_generic(struct igc_hw *hw);
+s32 igc_get_auto_rd_done_generic(struct igc_hw *hw);
+s32 igc_get_bus_info_pcie_generic(struct igc_hw *hw);
+void igc_set_lan_id_single_port(struct igc_hw *hw);
+s32 igc_get_hw_semaphore_generic(struct igc_hw *hw);
+s32 igc_get_speed_and_duplex_copper_generic(struct igc_hw *hw, u16 *speed,
+ u16 *duplex);
+void igc_update_mc_addr_list_generic(struct igc_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count);
+int igc_rar_set_generic(struct igc_hw *hw, u8 *addr, u32 index);
+s32 igc_set_fc_watermarks_generic(struct igc_hw *hw);
+s32 igc_setup_link_generic(struct igc_hw *hw);
+s32 igc_validate_mdi_setting_crossover_generic(struct igc_hw *hw);
+
+u32 igc_hash_mc_addr_generic(struct igc_hw *hw, u8 *mc_addr);
+
+void igc_clear_hw_cntrs_base_generic(struct igc_hw *hw);
+void igc_clear_vfta_generic(struct igc_hw *hw);
+void igc_init_rx_addrs_generic(struct igc_hw *hw, u16 rar_count);
+void igc_pcix_mmrbc_workaround_generic(struct igc_hw *hw);
+void igc_put_hw_semaphore_generic(struct igc_hw *hw);
+s32 igc_check_alt_mac_addr_generic(struct igc_hw *hw);
+void igc_set_pcie_no_snoop_generic(struct igc_hw *hw, u32 no_snoop);
+void igc_write_vfta_generic(struct igc_hw *hw, u32 offset, u32 value);
+
+#endif
diff --git a/sys/dev/igc/igc_nvm.c b/sys/dev/igc/igc_nvm.c
new file mode 100644
index 000000000000..f5de77ae49c0
--- /dev/null
+++ b/sys/dev/igc/igc_nvm.c
@@ -0,0 +1,721 @@
+/*-
+ * Copyright 2021 Intel Corp
+ * Copyright 2021 Rubicon Communications, LLC (Netgate)
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "igc_api.h"
+
+static void igc_reload_nvm_generic(struct igc_hw *hw);
+
+/**
+ * igc_init_nvm_ops_generic - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the function pointers to no-op functions
+ **/
+void igc_init_nvm_ops_generic(struct igc_hw *hw)
+{
+ struct igc_nvm_info *nvm = &hw->nvm;
+ DEBUGFUNC("igc_init_nvm_ops_generic");
+
+ /* Initialize function pointers */
+ nvm->ops.init_params = igc_null_ops_generic;
+ nvm->ops.acquire = igc_null_ops_generic;
+ nvm->ops.read = igc_null_read_nvm;
+ nvm->ops.release = igc_null_nvm_generic;
+ nvm->ops.reload = igc_reload_nvm_generic;
+ nvm->ops.update = igc_null_ops_generic;
+ nvm->ops.validate = igc_null_ops_generic;
+ nvm->ops.write = igc_null_write_nvm;
+}
+
+/**
+ * igc_null_read_nvm - No-op function, return 0
+ * @hw: pointer to the HW structure
+ * @a: dummy variable
+ * @b: dummy variable
+ * @c: dummy variable
+ **/
+s32 igc_null_read_nvm(struct igc_hw IGC_UNUSEDARG *hw,
+ u16 IGC_UNUSEDARG a, u16 IGC_UNUSEDARG b,
+ u16 IGC_UNUSEDARG *c)
+{
+ DEBUGFUNC("igc_null_read_nvm");
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_null_nvm_generic - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void igc_null_nvm_generic(struct igc_hw IGC_UNUSEDARG *hw)
+{
+ DEBUGFUNC("igc_null_nvm_generic");
+ return;
+}
+
+/**
+ * igc_null_write_nvm - No-op function, return 0
+ * @hw: pointer to the HW structure
+ * @a: dummy variable
+ * @b: dummy variable
+ * @c: dummy variable
+ **/
+s32 igc_null_write_nvm(struct igc_hw IGC_UNUSEDARG *hw,
+ u16 IGC_UNUSEDARG a, u16 IGC_UNUSEDARG b,
+ u16 IGC_UNUSEDARG *c)
+{
+ DEBUGFUNC("igc_null_write_nvm");
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_raise_eec_clk - Raise EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Enable/Raise the EEPROM clock bit.
+ **/
+static void igc_raise_eec_clk(struct igc_hw *hw, u32 *eecd)
+{
+ *eecd = *eecd | IGC_EECD_SK;
+ IGC_WRITE_REG(hw, IGC_EECD, *eecd);
+ IGC_WRITE_FLUSH(hw);
+ usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ * igc_lower_eec_clk - Lower EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Clear/Lower the EEPROM clock bit.
+ **/
+static void igc_lower_eec_clk(struct igc_hw *hw, u32 *eecd)
+{
+ *eecd = *eecd & ~IGC_EECD_SK;
+ IGC_WRITE_REG(hw, IGC_EECD, *eecd);
+ IGC_WRITE_FLUSH(hw);
+ usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ * igc_shift_out_eec_bits - Shift data bits out to the EEPROM
+ * @hw: pointer to the HW structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the EEPROM. So, the value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+static void igc_shift_out_eec_bits(struct igc_hw *hw, u16 data, u16 count)
+{
+ struct igc_nvm_info *nvm = &hw->nvm;
+ u32 eecd = IGC_READ_REG(hw, IGC_EECD);
+ u32 mask;
+
+ DEBUGFUNC("igc_shift_out_eec_bits");
+
+ mask = 0x01 << (count - 1);
+ if (nvm->type == igc_nvm_eeprom_spi)
+ eecd |= IGC_EECD_DO;
+
+ do {
+ eecd &= ~IGC_EECD_DI;
+
+ if (data & mask)
+ eecd |= IGC_EECD_DI;
+
+ IGC_WRITE_REG(hw, IGC_EECD, eecd);
+ IGC_WRITE_FLUSH(hw);
+
+ usec_delay(nvm->delay_usec);
+
+ igc_raise_eec_clk(hw, &eecd);
+ igc_lower_eec_clk(hw, &eecd);
+
+ mask >>= 1;
+ } while (mask);
+
+ eecd &= ~IGC_EECD_DI;
+ IGC_WRITE_REG(hw, IGC_EECD, eecd);
+}
+
+/**
+ * igc_shift_in_eec_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to the HW structure
+ * @count: number of bits to shift in
+ *
+ * In order to read a register from the EEPROM, we need to shift 'count' bits
+ * in from the EEPROM. Bits are "shifted in" by raising the clock input to
+ * the EEPROM (setting the SK bit), and then reading the value of the data out
+ * "DO" bit. During this "shifting in" process the data in "DI" bit should
+ * always be clear.
+ **/
+static u16 igc_shift_in_eec_bits(struct igc_hw *hw, u16 count)
+{
+ u32 eecd;
+ u32 i;
+ u16 data;
+
+ DEBUGFUNC("igc_shift_in_eec_bits");
+
+ eecd = IGC_READ_REG(hw, IGC_EECD);
+
+ eecd &= ~(IGC_EECD_DO | IGC_EECD_DI);
+ data = 0;
+
+ for (i = 0; i < count; i++) {
+ data <<= 1;
+ igc_raise_eec_clk(hw, &eecd);
+
+ eecd = IGC_READ_REG(hw, IGC_EECD);
+
+ eecd &= ~IGC_EECD_DI;
+ if (eecd & IGC_EECD_DO)
+ data |= 1;
+
+ igc_lower_eec_clk(hw, &eecd);
+ }
+
+ return data;
+}
+
+/**
+ * igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ * @hw: pointer to the HW structure
+ * @ee_reg: EEPROM flag for polling
+ *
+ * Polls the EEPROM status bit for either read or write completion based
+ * upon the value of 'ee_reg'.
+ **/
+s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg)
+{
+ u32 attempts = 100000;
+ u32 i, reg = 0;
+
+ DEBUGFUNC("igc_poll_eerd_eewr_done");
+
+ for (i = 0; i < attempts; i++) {
+ if (ee_reg == IGC_NVM_POLL_READ)
+ reg = IGC_READ_REG(hw, IGC_EERD);
+ else
+ reg = IGC_READ_REG(hw, IGC_EEWR);
+
+ if (reg & IGC_NVM_RW_REG_DONE)
+ return IGC_SUCCESS;
+
+ usec_delay(5);
+ }
+
+ return -IGC_ERR_NVM;
+}
+
+/**
+ * igc_acquire_nvm_generic - Generic request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -IGC_ERR_NVM (-1).
+ **/
+s32 igc_acquire_nvm_generic(struct igc_hw *hw)
+{
+ u32 eecd = IGC_READ_REG(hw, IGC_EECD);
+ s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
+
+ DEBUGFUNC("igc_acquire_nvm_generic");
+
+ IGC_WRITE_REG(hw, IGC_EECD, eecd | IGC_EECD_REQ);
+ eecd = IGC_READ_REG(hw, IGC_EECD);
+
+ while (timeout) {
+ if (eecd & IGC_EECD_GNT)
+ break;
+ usec_delay(5);
+ eecd = IGC_READ_REG(hw, IGC_EECD);
+ timeout--;
+ }
+
+ if (!timeout) {
+ eecd &= ~IGC_EECD_REQ;
+ IGC_WRITE_REG(hw, IGC_EECD, eecd);
+ DEBUGOUT("Could not acquire NVM grant\n");
+ return -IGC_ERR_NVM;
+ }
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_standby_nvm - Return EEPROM to standby state
+ * @hw: pointer to the HW structure
+ *
+ * Return the EEPROM to a standby state.
+ **/
+static void igc_standby_nvm(struct igc_hw *hw)
+{
+ struct igc_nvm_info *nvm = &hw->nvm;
+ u32 eecd = IGC_READ_REG(hw, IGC_EECD);
+
+ DEBUGFUNC("igc_standby_nvm");
+
+ if (nvm->type == igc_nvm_eeprom_spi) {
+ /* Toggle CS to flush commands */
+ eecd |= IGC_EECD_CS;
+ IGC_WRITE_REG(hw, IGC_EECD, eecd);
+ IGC_WRITE_FLUSH(hw);
+ usec_delay(nvm->delay_usec);
+ eecd &= ~IGC_EECD_CS;
+ IGC_WRITE_REG(hw, IGC_EECD, eecd);
+ IGC_WRITE_FLUSH(hw);
+ usec_delay(nvm->delay_usec);
+ }
+}
+
+/**
+ * igc_stop_nvm - Terminate EEPROM command
+ * @hw: pointer to the HW structure
+ *
+ * Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+static void igc_stop_nvm(struct igc_hw *hw)
+{
+ u32 eecd;
+
+ DEBUGFUNC("igc_stop_nvm");
+
+ eecd = IGC_READ_REG(hw, IGC_EECD);
+ if (hw->nvm.type == igc_nvm_eeprom_spi) {
+ /* Pull CS high */
+ eecd |= IGC_EECD_CS;
+ igc_lower_eec_clk(hw, &eecd);
+ }
+}
+
+/**
+ * igc_release_nvm_generic - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void igc_release_nvm_generic(struct igc_hw *hw)
+{
+ u32 eecd;
+
+ DEBUGFUNC("igc_release_nvm_generic");
+
+ igc_stop_nvm(hw);
+
+ eecd = IGC_READ_REG(hw, IGC_EECD);
+ eecd &= ~IGC_EECD_REQ;
+ IGC_WRITE_REG(hw, IGC_EECD, eecd);
+}
+
+/**
+ * igc_ready_nvm_eeprom - Prepares EEPROM for read/write
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the EEPROM for reading and writing.
+ **/
+static s32 igc_ready_nvm_eeprom(struct igc_hw *hw)
+{
+ struct igc_nvm_info *nvm = &hw->nvm;
+ u32 eecd = IGC_READ_REG(hw, IGC_EECD);
+ u8 spi_stat_reg;
+
+ DEBUGFUNC("igc_ready_nvm_eeprom");
+
+ if (nvm->type == igc_nvm_eeprom_spi) {
+ u16 timeout = NVM_MAX_RETRY_SPI;
+
+ /* Clear SK and CS */
+ eecd &= ~(IGC_EECD_CS | IGC_EECD_SK);
+ IGC_WRITE_REG(hw, IGC_EECD, eecd);
+ IGC_WRITE_FLUSH(hw);
+ usec_delay(1);
+
+ /* Read "Status Register" repeatedly until the LSB is cleared.
+ * The EEPROM will signal that the command has been completed
+ * by clearing bit 0 of the internal status register. If it's
+ * not cleared within 'timeout', then error out.
+ */
+ while (timeout) {
+ igc_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+ hw->nvm.opcode_bits);
+ spi_stat_reg = (u8)igc_shift_in_eec_bits(hw, 8);
+ if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+ break;
+
+ usec_delay(5);
+ igc_standby_nvm(hw);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("SPI NVM Status error\n");
+ return -IGC_ERR_NVM;
+ }
+ }
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_read_nvm_eerd - Reads EEPROM using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct igc_nvm_info *nvm = &hw->nvm;
+ u32 i, eerd = 0;
+ s32 ret_val = IGC_SUCCESS;
+
+ DEBUGFUNC("igc_read_nvm_eerd");
+
+ /* A check for invalid values: offset too large, too many words,
+ * too many words for the offset, and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -IGC_ERR_NVM;
+ }
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) +
+ IGC_NVM_RW_REG_START;
+
+ IGC_WRITE_REG(hw, IGC_EERD, eerd);
+ ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ);
+ if (ret_val)
+ break;
+
+ data[i] = (IGC_READ_REG(hw, IGC_EERD) >>
+ IGC_NVM_RW_REG_DATA);
+ }
+
+ if (ret_val)
+ DEBUGOUT1("NVM read error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
+ * igc_write_nvm_spi - Write to EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes data to EEPROM at offset using SPI interface.
+ *
+ * If igc_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 igc_write_nvm_spi(struct igc_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct igc_nvm_info *nvm = &hw->nvm;
+ s32 ret_val = -IGC_ERR_NVM;
+ u16 widx = 0;
+
+ DEBUGFUNC("igc_write_nvm_spi");
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -IGC_ERR_NVM;
+ }
+
+ while (widx < words) {
+ u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = igc_ready_nvm_eeprom(hw);
+ if (ret_val) {
+ nvm->ops.release(hw);
+ return ret_val;
+ }
+
+ igc_standby_nvm(hw);
+
+ /* Send the WRITE ENABLE command (8 bit opcode) */
+ igc_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+ nvm->opcode_bits);
+
+ igc_standby_nvm(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
+ if ((nvm->address_bits == 8) && (offset >= 128))
+ write_opcode |= NVM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ igc_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+ igc_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+ nvm->address_bits);
+
+ /* Loop to allow for up to whole page write of eeprom */
+ while (widx < words) {
+ u16 word_out = data[widx];
+ word_out = (word_out >> 8) | (word_out << 8);
+ igc_shift_out_eec_bits(hw, word_out, 16);
+ widx++;
+
+ if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+ igc_standby_nvm(hw);
+ break;
+ }
+ }
+ msec_delay(10);
+ nvm->ops.release(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * igc_read_pba_string_generic - Read device part number
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ **/
+s32 igc_read_pba_string_generic(struct igc_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ DEBUGFUNC("igc_read_pba_string_generic");
+
+ if (pba_num == NULL) {
+ DEBUGOUT("PBA string buffer was null\n");
+ return -IGC_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ /* If nvm_data is not the pointer guard, the PBA is in legacy format,
+ * which means pba_ptr is actually the second data word of the PBA number
+ * and can be decoded into an ASCII string.
+ */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ DEBUGOUT("NVM PBA number is not stored as string\n");
+
+ /* make sure the caller's buffer is big enough to store the PBA */
+ if (pba_num_size < IGC_PBANUM_LENGTH) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return -IGC_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (nvm_data >> 12) & 0xF;
+ pba_num[1] = (nvm_data >> 8) & 0xF;
+ pba_num[2] = (nvm_data >> 4) & 0xF;
+ pba_num[3] = nvm_data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ return IGC_SUCCESS;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ return -IGC_ERR_NVM_PBA_SECTION;
+ }
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return -IGC_ERR_NO_SPACE;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+ pba_num[offset * 2] = (u8)(nvm_data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_read_mac_addr_generic - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the receive address registers RAL(0)/
+ * RAH(0), which the hardware loads from the NVM at reset, and stores it in
+ * both the permanent and current address fields.
+ **/
+s32 igc_read_mac_addr_generic(struct igc_hw *hw)
+{
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+ rar_high = IGC_READ_REG(hw, IGC_RAH(0));
+ rar_low = IGC_READ_REG(hw, IGC_RAL(0));
+
+ for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
+
+ for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
+
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_validate_nvm_checksum_generic - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 igc_validate_nvm_checksum_generic(struct igc_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("igc_validate_nvm_checksum_generic");
+
+ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+ checksum += nvm_data;
+ }
+
+ if (checksum != (u16) NVM_SUM) {
+ DEBUGOUT("NVM Checksum Invalid\n");
+ return -IGC_ERR_NVM;
+ }
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_update_nvm_checksum_generic - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+s32 igc_update_nvm_checksum_generic(struct igc_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("igc_update_nvm_checksum");
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error while updating checksum.\n");
+ return ret_val;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16) NVM_SUM - checksum;
+ ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
+ if (ret_val)
+ DEBUGOUT("NVM Write Error while updating checksum.\n");
+
+ return ret_val;
+}
+
+/**
+ * igc_reload_nvm_generic - Reloads EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ * extended control register.
+ **/
+static void igc_reload_nvm_generic(struct igc_hw *hw)
+{
+ u32 ctrl_ext;
+
+ DEBUGFUNC("igc_reload_nvm_generic");
+
+ usec_delay(10);
+ ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
+ ctrl_ext |= IGC_CTRL_EXT_EE_RST;
+ IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext);
+ IGC_WRITE_FLUSH(hw);
+}
+
+
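A minimal sketch (illustrative only, not part of the committed diff) of how the NVM helpers above might be consumed once the device-specific init code elsewhere in this commit has wired real implementations into hw->nvm.ops; the wrapper name and flow are assumptions, while the igc_* helpers, IGC_SUCCESS and IGC_PBANUM_LENGTH come from the driver sources:

#include "igc_api.h"    /* igc_hw, NVM prototypes and defines */

static s32
example_nvm_probe(struct igc_hw *hw)
{
    u8 pba[IGC_PBANUM_LENGTH];  /* legacy-format size; string-format PBAs
                                 * may need a larger buffer */
    s32 ret_val;

    /* Verify that the EEPROM words through NVM_CHECKSUM_REG sum to 0xBABA. */
    ret_val = igc_validate_nvm_checksum_generic(hw);
    if (ret_val)
        return ret_val;

    /* Latch the permanent MAC address from RAL(0)/RAH(0). */
    ret_val = igc_read_mac_addr_generic(hw);
    if (ret_val)
        return ret_val;

    /* Decode the board assembly number for logging. */
    if (igc_read_pba_string_generic(hw, pba, sizeof(pba)) == IGC_SUCCESS)
        printf("igc: PBA %s\n", (char *)pba);

    return IGC_SUCCESS;
}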
diff --git a/sys/dev/igc/igc_nvm.h b/sys/dev/igc/igc_nvm.h
new file mode 100644
index 000000000000..abe2d3e95a76
--- /dev/null
+++ b/sys/dev/igc/igc_nvm.h
@@ -0,0 +1,32 @@
+/*-
+ * Copyright 2021 Intel Corp
+ * Copyright 2021 Rubicon Communications, LLC (Netgate)
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IGC_NVM_H_
+#define _IGC_NVM_H_
+
+void igc_init_nvm_ops_generic(struct igc_hw *hw);
+s32 igc_null_read_nvm(struct igc_hw *hw, u16 a, u16 b, u16 *c);
+void igc_null_nvm_generic(struct igc_hw *hw);
+s32 igc_null_led_default(struct igc_hw *hw, u16 *data);
+s32 igc_null_write_nvm(struct igc_hw *hw, u16 a, u16 b, u16 *c);
+s32 igc_acquire_nvm_generic(struct igc_hw *hw);
+
+s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg);
+s32 igc_read_mac_addr_generic(struct igc_hw *hw);
+s32 igc_read_pba_string_generic(struct igc_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 igc_valid_led_default_generic(struct igc_hw *hw, u16 *data);
+s32 igc_validate_nvm_checksum_generic(struct igc_hw *hw);
+s32 igc_write_nvm_spi(struct igc_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 igc_update_nvm_checksum_generic(struct igc_hw *hw);
+void igc_release_nvm_generic(struct igc_hw *hw);
+
+#endif
diff --git a/sys/dev/igc/igc_osdep.h b/sys/dev/igc/igc_osdep.h
new file mode 100644
index 000000000000..596108e94246
--- /dev/null
+++ b/sys/dev/igc/igc_osdep.h
@@ -0,0 +1,133 @@
+/*-
+ * Copyright 2021 Intel Corp
+ * Copyright 2021 Rubicon Communications, LLC (Netgate)
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _FREEBSD_OS_H_
+#define _FREEBSD_OS_H_
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/mbuf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/iflib.h>
+
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/clock.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#define usec_delay(x) DELAY(x)
+#define usec_delay_irq(x) usec_delay(x)
+#define msec_delay(x) DELAY(1000*(x))
+#define msec_delay_irq(x) DELAY(1000*(x))
+
+/* Enable/disable debugging statements in shared code */
+#define DBG 0
+
+#define DEBUGOUT(...) \
+ do { if (DBG) printf(__VA_ARGS__); } while (0)
+#define DEBUGOUT1(...) DEBUGOUT(__VA_ARGS__)
+#define DEBUGOUT2(...) DEBUGOUT(__VA_ARGS__)
+#define DEBUGOUT3(...) DEBUGOUT(__VA_ARGS__)
+#define DEBUGOUT7(...) DEBUGOUT(__VA_ARGS__)
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+
+typedef uint64_t u64;
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+typedef int64_t s64;
+typedef int32_t s32;
+typedef int16_t s16;
+typedef int8_t s8;
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+
+struct igc_osdep
+{
+ bus_space_tag_t mem_bus_space_tag;
+ bus_space_handle_t mem_bus_space_handle;
+ bus_space_tag_t io_bus_space_tag;
+ bus_space_handle_t io_bus_space_handle;
+ bus_space_tag_t flash_bus_space_tag;
+ bus_space_handle_t flash_bus_space_handle;
+ device_t dev;
+ if_ctx_t ctx;
+};
+
+#define IGC_REGISTER(hw, reg) reg
+
+#define IGC_WRITE_FLUSH(a) IGC_READ_REG(a, IGC_STATUS)
+
+/* Read from an absolute offset in the adapter's memory space */
+#define IGC_READ_OFFSET(hw, offset) \
+ bus_space_read_4(((struct igc_osdep *)(hw)->back)->mem_bus_space_tag, \
+ ((struct igc_osdep *)(hw)->back)->mem_bus_space_handle, offset)
+
+/* Write to an absolute offset in the adapter's memory space */
+#define IGC_WRITE_OFFSET(hw, offset, value) \
+ bus_space_write_4(((struct igc_osdep *)(hw)->back)->mem_bus_space_tag, \
+ ((struct igc_osdep *)(hw)->back)->mem_bus_space_handle, offset, value)
+
+/* Register READ/WRITE macros */
+
+#define IGC_READ_REG(hw, reg) \
+ bus_space_read_4(((struct igc_osdep *)(hw)->back)->mem_bus_space_tag, \
+ ((struct igc_osdep *)(hw)->back)->mem_bus_space_handle, \
+ IGC_REGISTER(hw, reg))
+
+#define IGC_WRITE_REG(hw, reg, value) \
+ bus_space_write_4(((struct igc_osdep *)(hw)->back)->mem_bus_space_tag, \
+ ((struct igc_osdep *)(hw)->back)->mem_bus_space_handle, \
+ IGC_REGISTER(hw, reg), value)
+
+#define IGC_READ_REG_ARRAY(hw, reg, index) \
+ bus_space_read_4(((struct igc_osdep *)(hw)->back)->mem_bus_space_tag, \
+ ((struct igc_osdep *)(hw)->back)->mem_bus_space_handle, \
+ IGC_REGISTER(hw, reg) + ((index) << 2))
+
+#define IGC_WRITE_REG_ARRAY(hw, reg, index, value) \
+ bus_space_write_4(((struct igc_osdep *)(hw)->back)->mem_bus_space_tag, \
+ ((struct igc_osdep *)(hw)->back)->mem_bus_space_handle, \
+ IGC_REGISTER(hw, reg) + ((index) << 2), value)
+
+#define IGC_READ_REG_ARRAY_DWORD IGC_READ_REG_ARRAY
+#define IGC_WRITE_REG_ARRAY_DWORD IGC_WRITE_REG_ARRAY
+
+#define IGC_READ_REG_ARRAY_BYTE(hw, reg, index) \
+ bus_space_read_1(((struct igc_osdep *)(hw)->back)->mem_bus_space_tag, \
+ ((struct igc_osdep *)(hw)->back)->mem_bus_space_handle, \
+ IGC_REGISTER(hw, reg) + index)
+
+#define IGC_WRITE_REG_ARRAY_BYTE(hw, reg, index, value) \
+ bus_space_write_1(((struct igc_osdep *)(hw)->back)->mem_bus_space_tag, \
+ ((struct igc_osdep *)(hw)->back)->mem_bus_space_handle, \
+ IGC_REGISTER(hw, reg) + index, value)
+
+#define IGC_WRITE_REG_ARRAY_WORD(hw, reg, index, value) \
+ bus_space_write_2(((struct igc_osdep *)(hw)->back)->mem_bus_space_tag, \
+ ((struct igc_osdep *)(hw)->back)->mem_bus_space_handle, \
+ IGC_REGISTER(hw, reg) + (index << 1), value)
+
+#endif /* _FREEBSD_OS_H_ */
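The register access macros above all resolve hw->back to the driver's struct igc_osdep, so the attach path must populate that structure before the first IGC_READ_REG()/IGC_WRITE_REG(). A rough sketch of that glue (illustrative only, not part of the committed diff; the function name is hypothetical, rman_get_bustag()/rman_get_bushandle() are the standard newbus resource accessors, and IGC_STATUS comes from igc_regs.h):

#include "igc_api.h"    /* igc_hw, igc_osdep and the register macros */

static void
example_osdep_setup(struct igc_osdep *osdep, struct igc_hw *hw,
    struct resource *mem_res)
{
    osdep->mem_bus_space_tag = rman_get_bustag(mem_res);
    osdep->mem_bus_space_handle = rman_get_bushandle(mem_res);
    hw->back = osdep;

    /* From here on, every IGC_READ_REG()/IGC_WRITE_REG() expands to a
     * bus_space_read_4()/bus_space_write_4() on this tag/handle pair. */
    if (IGC_READ_REG(hw, IGC_STATUS) == 0xffffffff)
        printf("igc: device is not responding\n");
}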
diff --git a/sys/dev/igc/igc_phy.c b/sys/dev/igc/igc_phy.c
new file mode 100644
index 000000000000..a1d71ab15829
--- /dev/null
+++ b/sys/dev/igc/igc_phy.c
@@ -0,0 +1,1109 @@
+/*-
+ * Copyright 2021 Intel Corp
+ * Copyright 2021 Rubicon Communications, LLC (Netgate)
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "igc_api.h"
+
+static s32 igc_wait_autoneg(struct igc_hw *hw);
+
+/**
+ * igc_init_phy_ops_generic - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the function pointers to no-op functions
+ **/
+void igc_init_phy_ops_generic(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ DEBUGFUNC("igc_init_phy_ops_generic");
+
+ /* Initialize function pointers */
+ phy->ops.init_params = igc_null_ops_generic;
+ phy->ops.acquire = igc_null_ops_generic;
+ phy->ops.check_reset_block = igc_null_ops_generic;
+ phy->ops.commit = igc_null_ops_generic;
+ phy->ops.force_speed_duplex = igc_null_ops_generic;
+ phy->ops.get_info = igc_null_ops_generic;
+ phy->ops.set_page = igc_null_set_page;
+ phy->ops.read_reg = igc_null_read_reg;
+ phy->ops.read_reg_locked = igc_null_read_reg;
+ phy->ops.read_reg_page = igc_null_read_reg;
+ phy->ops.release = igc_null_phy_generic;
+ phy->ops.reset = igc_null_ops_generic;
+ phy->ops.set_d0_lplu_state = igc_null_lplu_state;
+ phy->ops.set_d3_lplu_state = igc_null_lplu_state;
+ phy->ops.write_reg = igc_null_write_reg;
+ phy->ops.write_reg_locked = igc_null_write_reg;
+ phy->ops.write_reg_page = igc_null_write_reg;
+ phy->ops.power_up = igc_null_phy_generic;
+ phy->ops.power_down = igc_null_phy_generic;
+}
+
+/**
+ * igc_null_set_page - No-op function, return 0
+ * @hw: pointer to the HW structure
+ * @data: dummy variable
+ **/
+s32 igc_null_set_page(struct igc_hw IGC_UNUSEDARG *hw,
+ u16 IGC_UNUSEDARG data)
+{
+ DEBUGFUNC("igc_null_set_page");
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_null_read_reg - No-op function, return 0
+ * @hw: pointer to the HW structure
+ * @offset: dummy variable
+ * @data: dummy variable
+ **/
+s32 igc_null_read_reg(struct igc_hw IGC_UNUSEDARG *hw,
+ u32 IGC_UNUSEDARG offset, u16 IGC_UNUSEDARG *data)
+{
+ DEBUGFUNC("igc_null_read_reg");
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_null_phy_generic - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void igc_null_phy_generic(struct igc_hw IGC_UNUSEDARG *hw)
+{
+ DEBUGFUNC("igc_null_phy_generic");
+ return;
+}
+
+/**
+ * igc_null_lplu_state - No-op function, return 0
+ * @hw: pointer to the HW structure
+ * @active: dummy variable
+ **/
+s32 igc_null_lplu_state(struct igc_hw IGC_UNUSEDARG *hw,
+ bool IGC_UNUSEDARG active)
+{
+ DEBUGFUNC("igc_null_lplu_state");
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_null_write_reg - No-op function, return 0
+ * @hw: pointer to the HW structure
+ * @offset: dummy variable
+ * @data: dummy variable
+ **/
+s32 igc_null_write_reg(struct igc_hw IGC_UNUSEDARG *hw,
+ u32 IGC_UNUSEDARG offset, u16 IGC_UNUSEDARG data)
+{
+ DEBUGFUNC("igc_null_write_reg");
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_check_reset_block_generic - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Read the PHY management control register and check whether a PHY reset
+ * is blocked. If a reset is not blocked return IGC_SUCCESS, otherwise
+ * return IGC_BLK_PHY_RESET (12).
+ **/
+s32 igc_check_reset_block_generic(struct igc_hw *hw)
+{
+ u32 manc;
+
+ DEBUGFUNC("igc_check_reset_block");
+
+ manc = IGC_READ_REG(hw, IGC_MANC);
+
+ return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ?
+ IGC_BLK_PHY_RESET : IGC_SUCCESS;
+}
+
+/**
+ * igc_get_phy_id - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ **/
+s32 igc_get_phy_id(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ s32 ret_val = IGC_SUCCESS;
+ u16 phy_id;
+
+ DEBUGFUNC("igc_get_phy_id");
+
+ if (!phy->ops.read_reg)
+ return IGC_SUCCESS;
+
+ ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id = (u32)(phy_id << 16);
+ usec_delay(20);
+ ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_read_phy_reg_mdic - Read MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+
+ DEBUGFUNC("igc_read_phy_reg_mdic");
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ return -IGC_ERR_PARAM;
+ }
+
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ mdic = ((offset << IGC_MDIC_REG_SHIFT) |
+ (phy->addr << IGC_MDIC_PHY_SHIFT) |
+ (IGC_MDIC_OP_READ));
+
+ IGC_WRITE_REG(hw, IGC_MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed.
+ * The timeout is increased because testing showed failures with
+ * the lower value.
+ */
+ for (i = 0; i < (IGC_GEN_POLL_TIMEOUT * 3); i++) {
+ usec_delay_irq(50);
+ mdic = IGC_READ_REG(hw, IGC_MDIC);
+ if (mdic & IGC_MDIC_READY)
+ break;
+ }
+ if (!(mdic & IGC_MDIC_READY)) {
+ DEBUGOUT("MDI Read did not complete\n");
+ return -IGC_ERR_PHY;
+ }
+ if (mdic & IGC_MDIC_ERROR) {
+ DEBUGOUT("MDI Error\n");
+ return -IGC_ERR_PHY;
+ }
+ if (((mdic & IGC_MDIC_REG_MASK) >> IGC_MDIC_REG_SHIFT) != offset) {
+ DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & IGC_MDIC_REG_MASK) >> IGC_MDIC_REG_SHIFT);
+ return -IGC_ERR_PHY;
+ }
+ *data = (u16) mdic;
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_write_phy_reg_mdic - Write MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ **/
+s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+
+ DEBUGFUNC("igc_write_phy_reg_mdic");
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ return -IGC_ERR_PARAM;
+ }
+
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ mdic = (((u32)data) |
+ (offset << IGC_MDIC_REG_SHIFT) |
+ (phy->addr << IGC_MDIC_PHY_SHIFT) |
+ (IGC_MDIC_OP_WRITE));
+
+ IGC_WRITE_REG(hw, IGC_MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI write completed.
+ * The timeout is increased because testing showed failures with
+ * the lower value.
+ */
+ for (i = 0; i < (IGC_GEN_POLL_TIMEOUT * 3); i++) {
+ usec_delay_irq(50);
+ mdic = IGC_READ_REG(hw, IGC_MDIC);
+ if (mdic & IGC_MDIC_READY)
+ break;
+ }
+ if (!(mdic & IGC_MDIC_READY)) {
+ DEBUGOUT("MDI Write did not complete\n");
+ return -IGC_ERR_PHY;
+ }
+ if (mdic & IGC_MDIC_ERROR) {
+ DEBUGOUT("MDI Error\n");
+ return -IGC_ERR_PHY;
+ }
+ if (((mdic & IGC_MDIC_REG_MASK) >> IGC_MDIC_REG_SHIFT) != offset) {
+ DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & IGC_MDIC_REG_MASK) >> IGC_MDIC_REG_SHIFT);
+ return -IGC_ERR_PHY;
+ }
+
+ return IGC_SUCCESS;
+}
+
+/**
+ * igc_phy_setup_autoneg - Configure PHY for auto-negotiation
+ * @hw: pointer to the HW structure
+ *
+ * Reads the MII auto-neg advertisement register and/or the 1000T control
+ * register and if the PHY is already setup for auto-negotiation, then
+ * return successful. Otherwise, setup advertisement and flow control to
+ * the appropriate values for the wanted auto-negotiation.
+ **/
+static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg = 0;
+ u16 aneg_multigbt_an_ctrl = 0;
+
+ DEBUGFUNC("igc_phy_setup_autoneg");
+
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
+ &mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
+ hw->phy.id == I225_I_PHY_ID) {
+ /* Read the MULTI GBT AN Control Register - reg 7.32 */
+ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ ANEG_MULTIGBT_AN_CTRL,
+ &aneg_multigbt_an_ctrl);
+
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Need to parse both autoneg_advertised and fc and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+ NWAY_AR_100TX_HD_CAPS |
+ NWAY_AR_10T_FD_CAPS |
+ NWAY_AR_10T_HD_CAPS);
+ mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+ DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+ /* Do we want to advertise 10 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+ DEBUGOUT("Advertise 10mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ }
+
+ /* Do we want to advertise 10 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+ DEBUGOUT("Advertise 10mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+ DEBUGOUT("Advertise 100mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+ DEBUGOUT("Advertise 100mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ }
+
+ /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+ if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
+ DEBUGOUT("Advertise 1000mb Half duplex request denied!\n");
+
+ /* Do we want to advertise 1000 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+ DEBUGOUT("Advertise 1000mb Full duplex\n");
+ mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ }
+
+ /* We do not allow the Phy to advertise 2500 Mb Half Duplex */
+ if (phy->autoneg_advertised & ADVERTISE_2500_HALF)
+ DEBUGOUT("Advertise 2500mb Half duplex request denied!\n");
+
+ /* Do we want to advertise 2500 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_2500_FULL) {
+ DEBUGOUT("Advertise 2500mb Full duplex\n");
+ aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS;
+ } else {
+ aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS;
+ }
+
+ /* Check for a software override of the flow control settings, and
+ * setup the PHY advertisement registers accordingly. If
+ * auto-negotiation is enabled, then software will have to set the
+ * "PAUSE" bits to the correct value in the Auto-Negotiation
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+ * negotiation.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: No software override. The flow control configuration
+ * in the EEPROM is used.
+ */
+ switch (hw->fc.current_mode) {
+ case igc_fc_none:
+ /* Flow control (Rx & Tx) is completely disabled by a
+ * software over-ride.
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case igc_fc_rx_pause:
+ /* Rx Flow control is enabled, and Tx Flow control is
+ * disabled, by a software over-ride.
+ *
+ * Since there really isn't a way to advertise that we are
+ * capable of Rx Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric Rx PAUSE. Later
+ * (in igc_config_fc_after_link_up) we will disable the
+ * hw's ability to send PAUSE frames.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case igc_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is
+ * disabled, by a software over-ride.
+ */
+ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+ mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ break;
+ case igc_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -IGC_ERR_CONFIG;
+ }
+
+ ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL)
+ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
+ mii_1000t_ctrl_reg);
+
+ if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
+ hw->phy.id == I225_I_PHY_ID)
+ ret_val = phy->ops.write_reg(hw,
+ (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ ANEG_MULTIGBT_AN_CTRL,
+ aneg_multigbt_an_ctrl);
+
+ return ret_val;
+}
+
+/**
+ * igc_copper_link_autoneg - Setup/Enable autoneg for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Performs initial bounds checking on the autoneg advertisement parameter,
+ * then configures the PHY to advertise the full capability. Sets up the PHY
+ * for autoneg and restarts the negotiation process with the link partner.
+ * If autoneg_wait_to_complete is set, waits for autoneg to complete before
+ * exiting.
+ **/
+static s32 igc_copper_link_autoneg(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_ctrl;
+
+ DEBUGFUNC("igc_copper_link_autoneg");
+
+ /* Perform some bounds checking on the autoneg advertisement
+ * parameter.
+ */
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* If autoneg_advertised is zero, we assume it was not defaulted
+ * by the calling code so we set to advertise full capability.
+ */
+ if (!phy->autoneg_advertised)
+ phy->autoneg_advertised = phy->autoneg_mask;
+
+ DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+ ret_val = igc_phy_setup_autoneg(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Setting up Auto-Negotiation\n");
+ return ret_val;
+ }
+ DEBUGOUT("Restarting Auto-Neg\n");
+
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+ * the Auto Neg Restart bit in the PHY control register.
+ */
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ /* Does the user want to wait for Auto-Neg to complete here, or
+ * check at a later time (for example, callback routine).
+ */
+ if (phy->autoneg_wait_to_complete) {
+ ret_val = igc_wait_autoneg(hw);
+ if (ret_val) {
+ DEBUGOUT("Error while waiting for autoneg to complete\n");
+ return ret_val;
+ }
+ }
+
+ hw->mac.get_link_status = true;
+
+ return ret_val;
+}
+
+/**
+ * igc_setup_copper_link_generic - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Calls the appropriate function to configure the link for auto-neg or forced
+ * speed and duplex. Then we check for link; once link is established, the
+ * collision distance and flow control are configured. If link cannot be
+ * established, a debug message is logged.
+ **/
+s32 igc_setup_copper_link_generic(struct igc_hw *hw)
+{
+ s32 ret_val;
+ bool link;
+
+ DEBUGFUNC("igc_setup_copper_link_generic");
+
+ if (hw->mac.autoneg) {
+ /* Setup autoneg and flow control advertisement and perform
+ * autonegotiation.
+ */
+ ret_val = igc_copper_link_autoneg(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* PHY will be set to 10H, 10F, 100H or 100F
+ * depending on user settings.
+ */
+ DEBUGOUT("Forcing Speed and Duplex\n");
+ ret_val = hw->phy.ops.force_speed_duplex(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Forcing Speed and Duplex\n");
+ return ret_val;
+ }
+ }
+
+ /* Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+ ret_val = igc_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
+ &link);
+ if (ret_val)
+ return ret_val;
+
+ if (link) {
+ DEBUGOUT("Valid link established!!!\n");
+ hw->mac.ops.config_collision_dist(hw);
+ ret_val = igc_config_fc_after_link_up_generic(hw);
+ } else {
+ DEBUGOUT("Unable to establish link!!!\n");
+ }
+
+ return ret_val;
+}
+
+/**
+ * igc_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ * @hw: pointer to the HW structure
+ * @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ * Forces speed and duplex on the PHY by doing the following: disable flow
+ * control, force speed/duplex on the MAC, disable auto speed detection,
+ * disable auto-negotiation, configure duplex, configure speed, configure
+ * the collision distance, write configuration to CTRL register. The
+ * caller must write to the PHY_CONTROL register for these settings to
+ * take effect.
+ **/
+void igc_phy_force_speed_duplex_setup(struct igc_hw *hw, u16 *phy_ctrl)
+{
+ struct igc_mac_info *mac = &hw->mac;
+ u32 ctrl;
+
+ DEBUGFUNC("igc_phy_force_speed_duplex_setup");
+
+ /* Turn off flow control when forcing speed/duplex */
+ hw->fc.current_mode = igc_fc_none;
+
+ /* Force speed/duplex on the mac */
+ ctrl = IGC_READ_REG(hw, IGC_CTRL);
+ ctrl |= (IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX);
+ ctrl &= ~IGC_CTRL_SPD_SEL;
+
+ /* Disable Auto Speed Detection */
+ ctrl &= ~IGC_CTRL_ASDE;
+
+ /* Disable autoneg on the phy */
+ *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+ /* Forcing Full or Half Duplex? */
+ if (mac->forced_speed_duplex & IGC_ALL_HALF_DUPLEX) {
+ ctrl &= ~IGC_CTRL_FD;
+ *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+ DEBUGOUT("Half Duplex\n");
+ } else {
+ ctrl |= IGC_CTRL_FD;
+ *phy_ctrl |= MII_CR_FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ }
+
+ /* Forcing 10mb or 100mb? */
+ if (mac->forced_speed_duplex & IGC_ALL_100_SPEED) {
+ ctrl |= IGC_CTRL_SPD_100;
+ *phy_ctrl |= MII_CR_SPEED_100;
+ *phy_ctrl &= ~MII_CR_SPEED_1000;
+ DEBUGOUT("Forcing 100mb\n");
+ } else {
+ ctrl &= ~(IGC_CTRL_SPD_1000 | IGC_CTRL_SPD_100);
+ *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+ DEBUGOUT("Forcing 10mb\n");
+ }
+
+ hw->mac.ops.config_collision_dist(hw);
+
+ IGC_WRITE_REG(hw, IGC_CTRL, ctrl);
+}
+
+/**
+ * igc_set_d3_lplu_state_generic - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true, else clear lplu for D3
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+s32 igc_set_d3_lplu_state_generic(struct igc_hw *hw, bool active)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("igc_set_d3_lplu_state_generic");
+
+ if (!hw->phy.ops.read_reg)
+ return IGC_SUCCESS;
+
+ ret_val = phy->ops.read_reg(hw, IGP02IGC_PHY_POWER_MGMT, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (!active) {
+ data &= ~IGP02IGC_PM_D3_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02IGC_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+ return ret_val;
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == igc_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01IGC_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01IGC_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01IGC_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == igc_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01IGC_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01IGC_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01IGC_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ } else if ((phy->autoneg_advertised == IGC_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == IGC_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == IGC_ALL_10_SPEED)) {
+ data |= IGP02IGC_PM_D3_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02IGC_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw, IGP01IGC_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01IGC_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw, IGP01IGC_PHY_PORT_CONFIG,
+ data);
+ }
+
+ return ret_val;
+}
+
+/**
+ * igc_check_downshift_generic - Checks whether a downshift in speed occurred
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * Speed downshift is not supported on the I225 PHY, so this simply clears
+ * the speed_downgraded flag.
+ **/
+s32 igc_check_downshift_generic(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ DEBUGFUNC("igc_check_downshift_generic");
+
+ switch (phy->type) {
+ case igc_phy_i225:
+ default:
+ /* speed downshift not supported */
+ phy->speed_downgraded = false;
+ return IGC_SUCCESS;
+ }
+
+ return ret_val;
+}
+
+/**
+ * igc_wait_autoneg - Wait for auto-neg completion
+ * @hw: pointer to the HW structure
+ *
+ * Waits for auto-negotiation to complete or for the auto-negotiation time
+ * limit to expire, whichever happens first.
+ **/
+static s32 igc_wait_autoneg(struct igc_hw *hw)
+{
+ s32 ret_val = IGC_SUCCESS;
+ u16 i, phy_status;
+
+ DEBUGFUNC("igc_wait_autoneg");
+
+ if (!hw->phy.ops.read_reg)
+ return IGC_SUCCESS;
+
+ /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+ for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & MII_SR_AUTONEG_COMPLETE)
+ break;
+ msec_delay(100);
+ }
+
+ /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+ * has completed.
+ */
+ return ret_val;
+}
+
+/**
+ * igc_phy_has_link_generic - Polls PHY for link
+ * @hw: pointer to the HW structure
+ * @iterations: number of times to poll for link
+ * @usec_interval: delay between polling attempts
+ * @success: pointer to whether polling was successful or not
+ *
+ * Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 igc_phy_has_link_generic(struct igc_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success)
+{
+ s32 ret_val = IGC_SUCCESS;
+ u16 i, phy_status;
+
+ DEBUGFUNC("igc_phy_has_link_generic");
+
+ if (!hw->phy.ops.read_reg)
+ return IGC_SUCCESS;
+
+ for (i = 0; i < iterations; i++) {
+ /* Some PHYs require the PHY_STATUS register to be read
+ * twice due to the link bit being sticky. No harm doing
+ * it across the board.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val) {
+ /* If the first read fails, another entity may have
+ * ownership of the resources, wait and try again to
+ * see if they have relinquished the resources yet.
+ */
+ if (usec_interval >= 1000)
+ msec_delay(usec_interval/1000);
+ else
+ usec_delay(usec_interval);
+ }
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & MII_SR_LINK_STATUS)
+ break;
+ if (usec_interval >= 1000)
+ msec_delay(usec_interval/1000);
+ else
+ usec_delay(usec_interval);
+ }
+
+ *success = (i < iterations);
+
+ return ret_val;
+}
+
+/**
+ * igc_phy_sw_reset_generic - PHY software reset
+ * @hw: pointer to the HW structure
+ *
+ * Does a software reset of the PHY by reading the PHY control register and
+ * setting/writing the control register reset bit to the PHY.
+ **/
+s32 igc_phy_sw_reset_generic(struct igc_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_ctrl;
+
+ DEBUGFUNC("igc_phy_sw_reset_generic");
+
+ if (!hw->phy.ops.read_reg)
+ return IGC_SUCCESS;
+
+ ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ phy_ctrl |= MII_CR_RESET;
+ ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ usec_delay(1);
+
+ return ret_val;
+}
+
+/**
+ * igc_phy_hw_reset_generic - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting. Acquire
+ * semaphore (if necessary) and read/set/write the device control reset
+ * bit in the PHY. Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
+ **/
+s32 igc_phy_hw_reset_generic(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 ctrl, timeout = 10000, phpm = 0;
+
+ DEBUGFUNC("igc_phy_hw_reset_generic");
+
+ if (phy->ops.check_reset_block) {
+ ret_val = phy->ops.check_reset_block(hw);
+ if (ret_val)
+ return IGC_SUCCESS;
+ }
+
+ ret_val = phy->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ phpm = IGC_READ_REG(hw, IGC_I225_PHPM);
+
+ ctrl = IGC_READ_REG(hw, IGC_CTRL);
+ IGC_WRITE_REG(hw, IGC_CTRL, ctrl | IGC_CTRL_PHY_RST);
+ IGC_WRITE_FLUSH(hw);
+
+ usec_delay(phy->reset_delay_us);
+
+ IGC_WRITE_REG(hw, IGC_CTRL, ctrl);
+ IGC_WRITE_FLUSH(hw);
+
+ usec_delay(150);
+
+ do {
+ phpm = IGC_READ_REG(hw, IGC_I225_PHPM);
+ timeout--;
+ usec_delay(1);
+ } while (!(phpm & IGC_I225_PHPM_RST_COMPL) && timeout);
+
+ if (!timeout)
+ DEBUGOUT("Timeout expired after a phy reset\n");
+
+ phy->ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * igc_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * Restores the copper link after the PHY was powered down to save power,
+ * to turn off link during a driver unload, or because wake on LAN was not
+ * enabled.
+ **/
+void igc_power_up_phy_copper(struct igc_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+ usec_delay(300);
+}
+
+/**
+ * igc_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Powers down the PHY to save power when the link is taken down, during a
+ * driver unload, or when wake on LAN is not enabled.
+ **/
+void igc_power_down_phy_copper(struct igc_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+ msec_delay(1);
+}
+
+/**
+ * igc_write_phy_reg_gpy - Write GPY PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+ u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
+
+ DEBUGFUNC("igc_write_phy_reg_gpy");
+
+ offset = offset & GPY_REG_MASK;
+
+ if (!dev_addr) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = igc_write_phy_reg_mdic(hw, offset, data);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.release(hw);
+ } else {
+ ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
+ data);
+ }
+ return ret_val;
+}
+
+/**
+ * igc_read_phy_reg_gpy - Read GPY PHY register
+ * @hw: pointer to the HW structure
+ * @offset: lower half is register offset to read from;
+ * upper half is MMD to use.
+ * @data: data to read at register offset
+ *
+ * Acquires semaphore, if necessary, then reads the data in the PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
+
+ DEBUGFUNC("igc_read_phy_reg_gpy");
+
+ offset = offset & GPY_REG_MASK;
+
+ if (!dev_addr) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = igc_read_phy_reg_mdic(hw, offset, data);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.release(hw);
+ } else {
+ ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
+ data);
+ }
+ return ret_val;
+}
+
+
+/**
+ * __igc_access_xmdio_reg - Read/write XMDIO register
+ * @hw: pointer to the HW structure
+ * @address: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: pointer to value to read/write from/to the XMDIO address
+ * @read: boolean flag to indicate read or write
+ **/
+static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address,
+ u8 dev_addr, u16 *data, bool read)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("__igc_access_xmdio_reg");
+
+ ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA |
+ dev_addr);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data);
+ else
+ ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data);
+ if (ret_val)
+ return ret_val;
+
+ /* Recalibrate the device back to 0 */
+ ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0);
+ if (ret_val)
+ return ret_val;
+
+ return ret_val;
+}
+
+/**
+ * igc_read_xmdio_reg - Read XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be read from the XMDIO address
+ **/
+s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr, u8 dev_addr, u16 *data)
+{
+ DEBUGFUNC("igc_read_xmdio_reg");
+
+ return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true);
+}
+
+/**
+ * igc_write_xmdio_reg - Write XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be written to the XMDIO address
+ **/
+s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr, u8 dev_addr, u16 data)
+{
+ DEBUGFUNC("igc_write_xmdio_reg");
+
+ return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false);
+}
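igc_read_phy_reg_gpy() and igc_write_phy_reg_gpy() expect the MMD device address in the upper 16 bits of the offset and the register number in the lower 16 bits, which is how igc_phy_setup_autoneg() above addresses the multi-gig AN control register. A hedged usage sketch (illustrative only, not part of the committed diff; the wrapper name is hypothetical, while STANDARD_AN_REG_MASK, MMD_DEVADDR_SHIFT and ANEG_MULTIGBT_AN_CTRL come from igc_defines.h earlier in this commit):

#include "igc_api.h"

static s32
example_read_multigbt_ctrl(struct igc_hw *hw, u16 *val)
{
    u32 offset;

    /* Auto-negotiation MMD in the upper half, multi-gig AN control register
     * in the lower half; igc_read_phy_reg_gpy() splits them again with
     * GPY_MMD_MASK/GPY_REG_MASK and routes the access through the XMDIO
     * helpers. */
    offset = (STANDARD_AN_REG_MASK << MMD_DEVADDR_SHIFT) |
        ANEG_MULTIGBT_AN_CTRL;

    return igc_read_phy_reg_gpy(hw, offset, val);
}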
diff --git a/sys/dev/igc/igc_phy.h b/sys/dev/igc/igc_phy.h
new file mode 100644
index 000000000000..61cc46cdc583
--- /dev/null
+++ b/sys/dev/igc/igc_phy.h
@@ -0,0 +1,134 @@
+/*-
+ * Copyright 2021 Intel Corp
+ * Copyright 2021 Rubicon Communications, LLC (Netgate)
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IGC_PHY_H_
+#define _IGC_PHY_H_
+
+void igc_init_phy_ops_generic(struct igc_hw *hw);
+s32 igc_null_read_reg(struct igc_hw *hw, u32 offset, u16 *data);
+void igc_null_phy_generic(struct igc_hw *hw);
+s32 igc_null_lplu_state(struct igc_hw *hw, bool active);
+s32 igc_null_write_reg(struct igc_hw *hw, u32 offset, u16 data);
+s32 igc_null_set_page(struct igc_hw *hw, u16 data);
+s32 igc_check_downshift_generic(struct igc_hw *hw);
+s32 igc_check_reset_block_generic(struct igc_hw *hw);
+s32 igc_get_phy_id(struct igc_hw *hw);
+s32 igc_phy_sw_reset_generic(struct igc_hw *hw);
+void igc_phy_force_speed_duplex_setup(struct igc_hw *hw, u16 *phy_ctrl);
+s32 igc_phy_hw_reset_generic(struct igc_hw *hw);
+s32 igc_phy_reset_dsp_generic(struct igc_hw *hw);
+s32 igc_set_d3_lplu_state_generic(struct igc_hw *hw, bool active);
+s32 igc_setup_copper_link_generic(struct igc_hw *hw);
+s32 igc_phy_has_link_generic(struct igc_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success);
+enum igc_phy_type igc_get_phy_type_from_id(u32 phy_id);
+s32 igc_determine_phy_address(struct igc_hw *hw);
+s32 igc_enable_phy_wakeup_reg_access_bm(struct igc_hw *hw, u16 *phy_reg);
+s32 igc_disable_phy_wakeup_reg_access_bm(struct igc_hw *hw, u16 *phy_reg);
+void igc_power_up_phy_copper(struct igc_hw *hw);
+void igc_power_down_phy_copper(struct igc_hw *hw);
+s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data);
+s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data);
+
+s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr, u8 dev_addr,
+ u16 *data);
+s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr, u8 dev_addr,
+ u16 data);
+s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data);
+s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data);
+
+#define IGC_MAX_PHY_ADDR 8
+
+/* IGP01IGC Specific Registers */
+#define IGP01IGC_PHY_PORT_CONFIG 0x10 /* Port Config */
+#define IGP01IGC_PHY_PORT_STATUS 0x11 /* Status */
+#define IGP01IGC_PHY_PORT_CTRL 0x12 /* Control */
+#define IGP01IGC_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
+#define IGP02IGC_PHY_POWER_MGMT 0x19 /* Power Management */
+#define IGP01IGC_PHY_PAGE_SELECT 0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
+#define IGP_PAGE_SHIFT 5
+#define PHY_REG_MASK 0x1F
+#define IGC_I225_PHPM 0x0E14 /* I225 PHY Power Management */
+#define IGC_I225_PHPM_DIS_1000_D3 0x0008 /* Disable 1G in D3 */
+#define IGC_I225_PHPM_LINK_ENERGY 0x0010 /* Link Energy Detect */
+#define IGC_I225_PHPM_GO_LINKD 0x0020 /* Go Link Disconnect */
+#define IGC_I225_PHPM_DIS_1000 0x0040 /* Disable 1G globally */
+#define IGC_I225_PHPM_SPD_B2B_EN 0x0080 /* Smart Power Down Back2Back */
+#define IGC_I225_PHPM_RST_COMPL 0x0100 /* PHY Reset Completed */
+#define IGC_I225_PHPM_DIS_100_D3 0x0200 /* Disable 100M in D3 */
+#define IGC_I225_PHPM_ULP 0x0400 /* Ultra Low-Power Mode */
+#define IGC_I225_PHPM_DIS_2500 0x0800 /* Disable 2.5G globally */
+#define IGC_I225_PHPM_DIS_2500_D3 0x1000 /* Disable 2.5G in D3 */
+/* GPY211 - I225 defines */
+#define GPY_MMD_MASK 0xFFFF0000
+#define GPY_MMD_SHIFT 16
+#define GPY_REG_MASK 0x0000FFFF
+#define IGP01IGC_PHY_PCS_INIT_REG 0x00B4
+#define IGP01IGC_PHY_POLARITY_MASK 0x0078
+
+#define IGP01IGC_PSCR_AUTO_MDIX 0x1000
+#define IGP01IGC_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01IGC_PSCFR_SMART_SPEED 0x0080
+
+#define IGP02IGC_PM_SPD 0x0001 /* Smart Power Down */
+#define IGP02IGC_PM_D0_LPLU 0x0002 /* For D0a states */
+#define IGP02IGC_PM_D3_LPLU 0x0004 /* For all other states */
+
+#define IGP01IGC_PLHR_SS_DOWNGRADE 0x8000
+
+#define IGP01IGC_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01IGC_PSSR_MDIX 0x0800
+#define IGP01IGC_PSSR_SPEED_MASK 0xC000
+#define IGP01IGC_PSSR_SPEED_1000MBPS 0xC000
+
+#define IGP02IGC_PHY_CHANNEL_NUM 4
+#define IGP02IGC_PHY_AGC_A 0x11B1
+#define IGP02IGC_PHY_AGC_B 0x12B1
+#define IGP02IGC_PHY_AGC_C 0x14B1
+#define IGP02IGC_PHY_AGC_D 0x18B1
+
+#define IGP02IGC_AGC_LENGTH_SHIFT	9   /* Coarse=15:13, Fine=12:9 */
+#define IGP02IGC_AGC_LENGTH_MASK 0x7F
+#define IGP02IGC_AGC_RANGE 15
+
+#define IGC_CABLE_LENGTH_UNDEFINED 0xFF
+
+#define IGC_KMRNCTRLSTA_OFFSET 0x001F0000
+#define IGC_KMRNCTRLSTA_OFFSET_SHIFT 16
+#define IGC_KMRNCTRLSTA_REN 0x00200000
+#define IGC_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
+#define IGC_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
+#define IGC_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
+#define IGC_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
+#define IGC_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED 0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
+#define IFE_PSC_FORCE_POLARITY 0x0020
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE 0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */
+
+#endif
diff --git a/sys/dev/igc/igc_regs.h b/sys/dev/igc/igc_regs.h
new file mode 100644
index 000000000000..d2b15957dcca
--- /dev/null
+++ b/sys/dev/igc/igc_regs.h
@@ -0,0 +1,424 @@
+/*-
+ * Copyright 2021 Intel Corp
+ * Copyright 2021 Rubicon Communications, LLC (Netgate)
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IGC_REGS_H_
+#define _IGC_REGS_H_
+
+/* General Register Descriptions */
+#define IGC_CTRL 0x00000 /* Device Control - RW */
+#define IGC_STATUS 0x00008 /* Device Status - RO */
+#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */
+/* NVM Register Descriptions */
+#define IGC_EERD 0x12014 /* EEprom mode read - RW */
+#define IGC_EEWR 0x12018 /* EEprom mode write - RW */
+#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define IGC_MDIC 0x00020 /* MDI Control - RW */
+#define IGC_MDICNFG 0x00E04 /* MDI Config - RW */
+#define IGC_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define IGC_FCAH 0x0002C /* Flow Control Address High -RW */
+#define IGC_I225_FLSWCTL 0x12048 /* FLASH control register */
+#define IGC_I225_FLSWDATA 0x1204C /* FLASH data register */
+#define IGC_I225_FLSWCNT 0x12050 /* FLASH Access Counter */
+#define IGC_I225_FLSECU 0x12114 /* FLASH Security */
+#define IGC_FCT 0x00030 /* Flow Control Type - RW */
+#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
+#define IGC_VET 0x00038 /* VLAN Ether Type - RW */
+#define IGC_ICR 0x01500 /* Intr Cause Read - RC/W1C */
+#define IGC_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define IGC_ICS 0x01504 /* Intr Cause Set - WO */
+#define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */
+#define IGC_IMC 0x0150C /* Intr Mask Clear - WO */
+#define IGC_IAM 0x01510 /* Intr Ack Auto Mask- RW */
+#define IGC_RCTL 0x00100 /* Rx Control - RW */
+#define IGC_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define IGC_TXCW 0x00178 /* Tx Configuration Word - RW */
+#define IGC_RXCW 0x00180 /* Rx Configuration Word - RO */
+#define IGC_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
+#define IGC_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define IGC_EICS	0x01520  /* Ext. Interrupt Cause Set - WO */
+#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
+#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
+#define IGC_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
+#define IGC_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define IGC_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
+#define IGC_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
+#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+#define IGC_TCTL 0x00400 /* Tx Control - RW */
+#define IGC_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
+#define IGC_TIPG 0x00410 /* Tx Inter-packet gap -RW */
+#define IGC_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define IGC_LEDCTL 0x00E00 /* LED Control - RW */
+#define IGC_LEDMUX 0x08130 /* LED MUX Control */
+#define IGC_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
+#define IGC_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
+#define IGC_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define IGC_PBA 0x01000 /* Packet Buffer Allocation - RW */
+#define IGC_PBS 0x01008 /* Packet Buffer Size */
+#define IGC_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define IGC_EEMNGCTL_I225 0x01010 /* i225 MNG EEprom Mode Control */
+#define IGC_EEARBC_I225 0x12024 /* EEPROM Auto Read Bus Control */
+#define IGC_FLOP 0x0103C /* FLASH Opcode Register */
+#define IGC_WDSTP 0x01040 /* Watchdog Setup - RW */
+#define IGC_SWDSTS 0x01044 /* SW Device Status - RW */
+#define IGC_FRTIMER 0x01048 /* Free Running Timer - RW */
+#define IGC_TCPTIMER 0x0104C /* TCP Timer - RW */
+#define IGC_ERT 0x02008 /* Early Rx Threshold - RW */
+#define IGC_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define IGC_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define IGC_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
+#define IGC_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define IGC_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define IGC_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define IGC_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define IGC_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+#define IGC_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */
+#define IGC_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
+/* Split and Replication Rx Control - RW */
+#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+#define IGC_RDTR 0x02820 /* Rx Delay Timer - RW */
+#define IGC_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
+/* Shadow Ram Write Register - RW */
+#define IGC_SRWR 0x12018
+#define IGC_EEC_REG 0x12010
+
+
+#define IGC_SHADOWINF 0x12068
+#define IGC_FLFWUPDATE 0x12108
+
+#define IGC_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
+#define IGC_INVM_SIZE 64 /* Number of INVM Data Registers */
+
+#define IGC_MMDAC 13 /* MMD Access Control */
+#define IGC_MMDAAD 14 /* MMD Access Address/Data */
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ * Queues 0-3 use the original register block (0x100 stride); queues 4 and
+ * above use the alternate block (0x40 stride).
+ *
+ * Example usage:
+ * IGC_RDBAL(current_rx_queue)
+ */
+#define IGC_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+ (0x0C000 + ((_n) * 0x40)))
+#define IGC_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+ (0x0C004 + ((_n) * 0x40)))
+#define IGC_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+ (0x0C008 + ((_n) * 0x40)))
+#define IGC_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
+ (0x0C00C + ((_n) * 0x40)))
+#define IGC_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+ (0x0C010 + ((_n) * 0x40)))
+#define IGC_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+ (0x0C018 + ((_n) * 0x40)))
+#define IGC_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+ (0x0C028 + ((_n) * 0x40)))
+#define IGC_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \
+ (0x0C030 + ((_n) * 0x40)))
+#define IGC_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+ (0x0E000 + ((_n) * 0x40)))
+#define IGC_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+ (0x0E004 + ((_n) * 0x40)))
+#define IGC_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+ (0x0E008 + ((_n) * 0x40)))
+#define IGC_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+ (0x0E010 + ((_n) * 0x40)))
+#define IGC_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+ (0x0E018 + ((_n) * 0x40)))
+#define IGC_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+ (0x0E028 + ((_n) * 0x40)))
+#define IGC_TARC(_n) (0x03840 + ((_n) * 0x100))
+#define IGC_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */
+#define IGC_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
+#define IGC_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define IGC_PSRTYPE(_i) (0x05480 + ((_i) * 4))
+#define IGC_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x054E0 + ((_i - 16) * 8)))
+#define IGC_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x054E4 + ((_i - 16) * 8)))
+#define IGC_VLANPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */
+
+#define IGC_SHRAL(_i) (0x05438 + ((_i) * 8))
+#define IGC_SHRAH(_i) (0x0543C + ((_i) * 8))
+#define IGC_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
+#define IGC_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
+#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
+#define IGC_FFMT_REG(_i) (0x09000 + ((_i) * 8))
+#define IGC_FFVT_REG(_i) (0x09800 + ((_i) * 8))
+#define IGC_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
+#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+#define IGC_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
+#define IGC_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
+/* Statistics Register Descriptions */
+#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */
+#define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define IGC_COLC 0x04028 /* Collision Count - R/clr */
+#define IGC_RERC 0x0402C /* Receive Error Count - R/clr */
+#define IGC_DC 0x04030 /* Defer Count - R/clr */
+#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */
+#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded by MAC - R/clr */
+#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */
+#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */
+#define IGC_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
+#define IGC_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
+#define IGC_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */
+#define IGC_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
+#define IGC_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
+#define IGC_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
+#define IGC_PRC511	0x04068  /* Packets Rx (256-511 bytes) - R/clr */
+#define IGC_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
+#define IGC_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
+#define IGC_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
+#define IGC_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
+#define IGC_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
+#define IGC_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
+#define IGC_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
+#define IGC_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
+#define IGC_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
+#define IGC_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
+#define IGC_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
+#define IGC_RUC 0x040A4 /* Rx Undersize Count - R/clr */
+#define IGC_RFC 0x040A8 /* Rx Fragment Count - R/clr */
+#define IGC_ROC 0x040AC /* Rx Oversize Count - R/clr */
+#define IGC_RJC 0x040B0 /* Rx Jabber Count - R/clr */
+#define IGC_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
+#define IGC_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define IGC_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
+#define IGC_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
+#define IGC_TORH 0x040C4 /* Total Octets Rx High - R/clr */
+#define IGC_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
+#define IGC_TOTH 0x040CC /* Total Octets Tx High - R/clr */
+#define IGC_TPR 0x040D0 /* Total Packets Rx - R/clr */
+#define IGC_TPT 0x040D4 /* Total Packets Tx - R/clr */
+#define IGC_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
+#define IGC_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
+#define IGC_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
+#define IGC_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
+#define IGC_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
+#define IGC_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */
+#define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
+#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
+#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
+#define IGC_IAC 0x04100 /* Interrupt Assertion Count */
+#define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */
+
+#define IGC_VFGPRC 0x00F10
+#define IGC_VFGORC 0x00F18
+#define IGC_VFMPRC 0x00F3C
+#define IGC_VFGPTC 0x00F14
+#define IGC_VFGOTC 0x00F34
+#define IGC_VFGOTLBC 0x00F50
+#define IGC_VFGPTLBC 0x00F44
+#define IGC_VFGORLBC 0x00F48
+#define IGC_VFGPRLBC 0x00F40
+#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */
+#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */
+#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
+#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
+#define IGC_LENERRS 0x04138 /* Length Errors Count */
+#define IGC_PCS_ANADV 0x04218 /* AN advertisement - RW */
+#define IGC_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
+#define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */
+#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */
+#define IGC_RFCTL	0x05008  /* Receive Filter Control */
+#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define IGC_RA 0x05400 /* Receive Address - RW Array */
+#define IGC_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define IGC_WUC 0x05800 /* Wakeup Control - RW */
+#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define IGC_WUS 0x05810 /* Wakeup Status - RO */
+/* Management registers */
+#define IGC_MANC 0x05820 /* Management Control - RW */
+#define IGC_IPAV 0x05838 /* IP Address Valid - RW */
+#define IGC_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
+#define IGC_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
+#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */
+#define IGC_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
+#define IGC_WUPM_EXT 0x0B800 /* Wakeup Packet Memory Extended - RO Array */
+#define IGC_WUFC_EXT 0x0580C /* Wakeup Filter Control Extended - RW */
+#define IGC_WUS_EXT 0x05814 /* Wakeup Status Extended - RW1C */
+#define IGC_FHFTSL 0x05804 /* Flex Filter Indirect Table Select - RW */
+#define IGC_PROXYFCEX 0x05590 /* Proxy Filter Control Extended - RW1C */
+#define IGC_PROXYEXS 0x05594 /* Proxy Extended Status - RO */
+#define IGC_WFUTPF 0x05500 /* Wake Flex UDP TCP Port Filter - RW Array */
+#define IGC_RFUTPF 0x05580 /* Range Flex UDP TCP Port Filter - RW */
+#define IGC_RWPFC 0x05584 /* Range Wake Port Filter Control - RW */
+#define IGC_WFUTPS 0x05588 /* Wake Filter UDP TCP Status - RW1C */
+#define IGC_WCS 0x0558C /* Wake Control Status - RW1C */
+/* MSI-X Table Register Descriptions */
+#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */
+#define IGC_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
+#define IGC_HOST_IF 0x08800 /* Host Interface */
+/* Flexible Host Filter Table */
+#define IGC_FHFT(_n) (0x09000 + ((_n) * 0x100))
+/* Ext Flexible Host Filter Table */
+#define IGC_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100))
+
+
+#define IGC_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define IGC_MANC2H 0x05860 /* Management Control To Host - RW */
+/* Management Decision Filters */
+#define IGC_MDEF(_n) (0x05890 + (4 * (_n)))
+/* Semaphore registers */
+#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
+/* Function Active and Power State to MNG */
+#define IGC_FACTPS 0x05B30
+#define IGC_SWSM 0x05B50 /* SW Semaphore */
+#define IGC_FWSM 0x05B54 /* FW Semaphore */
+/* Driver-only SW semaphore (not used by BOOT agents) */
+#define IGC_SWSM2 0x05B58
+#define IGC_FFLT_DBG 0x05F04 /* Debug Register */
+#define IGC_HICR 0x08F00 /* Host Interface Control */
+#define IGC_FWSTS 0x08F0C /* FW Status */
+
+/* RSS registers */
+#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
+#define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/
+#define IGC_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */
+#define IGC_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */
+/* Redirection Table - RW Array */
+#define IGC_RETA(_i) (0x05C00 + ((_i) * 4))
+/* RSS Random Key - RW Array */
+#define IGC_RSSRK(_i) (0x05C80 + ((_i) * 4))
+#define IGC_RSSIM 0x05864 /* RSS Interrupt Mask */
+#define IGC_RSSIR 0x05868 /* RSS Interrupt Request */
+#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */
+#define IGC_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+#define IGC_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
+#define IGC_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
+#define IGC_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
+#define IGC_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
+#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
+#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */
+#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */
+#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define IGC_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */
+#define IGC_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */
+#define IGC_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
+#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */
+#define IGC_TSICR 0x0B66C /* Interrupt Cause Register */
+#define IGC_TSIM 0x0B674 /* Interrupt Mask Register */
+
+/* Filtering Registers */
+#define IGC_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */
+#define IGC_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */
+#define IGC_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */
+#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
+#define IGC_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */
+#define IGC_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
+#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+
+/* ETQF register bit definitions */
+#define IGC_ETQF_FILTER_ENABLE (1 << 26)
+#define IGC_ETQF_IMM_INT (1 << 29)
+#define IGC_ETQF_QUEUE_ENABLE (1 << 31)
+#define IGC_ETQF_QUEUE_SHIFT 16
+#define IGC_ETQF_QUEUE_MASK 0x00070000
+#define IGC_ETQF_ETYPE_MASK 0x0000FFFF
+
+#define IGC_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */
+#define IGC_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */
+#define IGC_RTRPCS 0x2474 /* Rx packet plane control and status */
+#define IGC_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */
+#define IGC_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */
+/* Tx Desc plane TC Rate-scheduler config */
+#define IGC_RTTDTCRC(_n) (0x3610 + ((_n) * 4))
+/* Tx Packet plane TC Rate-Scheduler Config */
+#define IGC_RTTPTCRC(_n) (0x3480 + ((_n) * 4))
+/* Rx Packet plane TC Rate-Scheduler Config */
+#define IGC_RTRPTCRC(_n) (0x2480 + ((_n) * 4))
+/* Tx Desc Plane TC Rate-Scheduler Status */
+#define IGC_RTTDTCRS(_n) (0x3630 + ((_n) * 4))
+/* Tx Desc Plane TC Rate-Scheduler MMW */
+#define IGC_RTTDTCRM(_n) (0x3650 + ((_n) * 4))
+/* Tx Packet plane TC Rate-Scheduler Status */
+#define IGC_RTTPTCRS(_n) (0x34A0 + ((_n) * 4))
+/* Tx Packet plane TC Rate-scheduler MMW */
+#define IGC_RTTPTCRM(_n) (0x34C0 + ((_n) * 4))
+/* Rx Packet plane TC Rate-Scheduler Status */
+#define IGC_RTRPTCRS(_n) (0x24A0 + ((_n) * 4))
+/* Rx Packet plane TC Rate-Scheduler MMW */
+#define IGC_RTRPTCRM(_n) (0x24C0 + ((_n) * 4))
+/* Tx Desc plane VM Rate-Scheduler MMW*/
+#define IGC_RTTDVMRM(_n) (0x3670 + ((_n) * 4))
+/* Tx BCN Rate-Scheduler MMW */
+#define IGC_RTTBCNRM(_n) (0x3690 + ((_n) * 4))
+#define IGC_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */
+#define IGC_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */
+#define IGC_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */
+#define IGC_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */
+#define IGC_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */
+#define IGC_RTTBCNCR 0xB200 /* Tx BCN Control Register */
+#define IGC_RTTBCNTG 0x35A4 /* Tx BCN Tagging */
+#define IGC_RTTBCNCP 0xB208 /* Tx BCN Congestion point */
+#define IGC_RTRBCNCR 0xB20C /* Rx BCN Control Register */
+#define IGC_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */
+#define IGC_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */
+#define IGC_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */
+#define IGC_RTTBCNACH 0x0B214 /* Tx BCN Control High */
+#define IGC_RTTBCNACL 0x0B210 /* Tx BCN Control Low */
+
+/* DMA Coalescing registers */
+#define IGC_DMACR 0x02508 /* Control Register */
+#define IGC_DMCTXTH 0x03550 /* Transmit Threshold */
+#define IGC_DMCTLX 0x02514 /* Time to Lx Request */
+#define IGC_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
+#define IGC_DMCCNT 0x05DD4 /* Current Rx Count */
+#define IGC_FCRTC 0x02170 /* Flow Control Rx high watermark */
+#define IGC_PCIEMISC 0x05BB8 /* PCIE misc config register */
+
+/* PCIe Parity Status Register */
+#define IGC_PCIEERRSTS 0x05BA8
+
+#define IGC_PROXYS 0x5F64 /* Proxying Status */
+#define IGC_PROXYFC 0x5F60 /* Proxying Filter Control */
+/* Thermal sensor configuration and status registers */
+#define IGC_THMJT 0x08100 /* Junction Temperature */
+#define IGC_THLOWTC 0x08104 /* Low Threshold Control */
+#define IGC_THMIDTC 0x08108 /* Mid Threshold Control */
+#define IGC_THHIGHTC 0x0810C /* High Threshold Control */
+#define IGC_THSTAT 0x08110 /* Thermal Sensor Status */
+
+/* Energy Efficient Ethernet "EEE" registers */
+#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */
+#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */
+#define IGC_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/
+#define IGC_EEE_SU 0x0E34 /* EEE Setup */
+#define IGC_EEE_SU_2P5 0x0E3C /* EEE 2.5G Setup */
+#define IGC_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */
+#define IGC_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */
+
+/* OS2BMC Registers */
+#define IGC_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
+#define IGC_B2OGPRC 0x04158 /* BMC2OS packets received by host */
+#define IGC_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
+#define IGC_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
+
+#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */
+#define IGC_LTRMAXV 0x5BB4 /* LTR Maximum Value */
+
+
+/* IEEE 1588 TIMESYNCH */
+#define IGC_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */
+#define IGC_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */
+#define IGC_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */
+#define IGC_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */
+#define IGC_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */
+#define IGC_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */
+#define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */
+
+
+#endif
diff --git a/sys/dev/igc/igc_txrx.c b/sys/dev/igc/igc_txrx.c
new file mode 100644
index 000000000000..2636aa77069a
--- /dev/null
+++ b/sys/dev/igc/igc_txrx.c
@@ -0,0 +1,580 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2016 Matthew Macy <mmacy@mattmacy.io>
+ * All rights reserved.
+ * Copyright (c) 2021 Rubicon Communications, LLC (Netgate)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "if_igc.h"
+
+#ifdef RSS
+#include <net/rss_config.h>
+#include <netinet/in_rss.h>
+#endif
+
+#ifdef VERBOSE_DEBUG
+#define DPRINTF device_printf
+#else
+#define DPRINTF(...)
+#endif
+
+/*********************************************************************
+ * Local Function prototypes
+ *********************************************************************/
+static int igc_isc_txd_encap(void *arg, if_pkt_info_t pi);
+static void igc_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
+static int igc_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);
+
+static void igc_isc_rxd_refill(void *arg, if_rxd_update_t iru);
+
+static void igc_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx);
+static int igc_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget);
+
+static int igc_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);
+
+static int igc_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status);
+static int igc_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status);
+
+static void igc_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype);
+static int igc_determine_rsstype(u16 pkt_info);
+
+extern void igc_if_enable_intr(if_ctx_t ctx);
+extern int igc_intr(void *arg);
+
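+/*
+ * iflib transmit/receive method table for igc(4).  iflib calls these
+ * routines to encapsulate and flush transmit descriptors, reclaim
+ * completed work, and refill and drain the receive rings.
+ */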
+struct if_txrx igc_txrx = {
+ .ift_txd_encap = igc_isc_txd_encap,
+ .ift_txd_flush = igc_isc_txd_flush,
+ .ift_txd_credits_update = igc_isc_txd_credits_update,
+ .ift_rxd_available = igc_isc_rxd_available,
+ .ift_rxd_pkt_get = igc_isc_rxd_pkt_get,
+ .ift_rxd_refill = igc_isc_rxd_refill,
+ .ift_rxd_flush = igc_isc_rxd_flush,
+ .ift_legacy_intr = igc_intr
+};
+
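+/*
+ * Debug helper: dump the report-status (RS) tracking state and the DD
+ * (descriptor done) bits of every transmit descriptor on each queue.
+ */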
+void
+igc_dump_rs(struct igc_adapter *adapter)
+{
+ if_softc_ctx_t scctx = adapter->shared;
+ struct igc_tx_queue *que;
+ struct tx_ring *txr;
+ qidx_t i, ntxd, qid, cur;
+ int16_t rs_cidx;
+ uint8_t status;
+
+ printf("\n");
+ ntxd = scctx->isc_ntxd[0];
+ for (qid = 0; qid < adapter->tx_num_queues; qid++) {
+ que = &adapter->tx_queues[qid];
+ txr = &que->txr;
+ rs_cidx = txr->tx_rs_cidx;
+ if (rs_cidx != txr->tx_rs_pidx) {
+ cur = txr->tx_rsq[rs_cidx];
+ status = txr->tx_base[cur].upper.fields.status;
+ if (!(status & IGC_TXD_STAT_DD))
+ printf("qid[%d]->tx_rsq[%d]: %d clear ", qid, rs_cidx, cur);
+ } else {
+ rs_cidx = (rs_cidx-1)&(ntxd-1);
+ cur = txr->tx_rsq[rs_cidx];
+ printf("qid[%d]->tx_rsq[rs_cidx-1=%d]: %d ", qid, rs_cidx, cur);
+ }
+ printf("cidx_prev=%d rs_pidx=%d ",txr->tx_cidx_processed, txr->tx_rs_pidx);
+ for (i = 0; i < ntxd; i++) {
+ if (txr->tx_base[i].upper.fields.status & IGC_TXD_STAT_DD)
+ printf("%d set ", i);
+ }
+ printf("\n");
+ }
+}
+
+/**********************************************************************
+ *
+ * Setup work for hardware segmentation offload (TSO) on
+ * adapters using advanced tx descriptors
+ *
+ **********************************************************************/
+static int
+igc_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status)
+{
+ struct igc_adv_tx_context_desc *TXD;
+ u32 type_tucmd_mlhl = 0, vlan_macip_lens = 0;
+ u32 mss_l4len_idx = 0;
+ u32 paylen;
+
+ switch(pi->ipi_etype) {
+ case ETHERTYPE_IPV6:
+ type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV6;
+ break;
+ case ETHERTYPE_IP:
+ type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV4;
+ /* Tell transmit desc to also do IPv4 checksum. */
+ *olinfo_status |= IGC_TXD_POPTS_IXSM << 8;
+ break;
+ default:
+ panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
+ __func__, ntohs(pi->ipi_etype));
+ break;
+ }
+
+ TXD = (struct igc_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];
+
+ /* This is used in the transmit desc in encap */
+ paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
+
+ /* VLAN MACLEN IPLEN */
+ if (pi->ipi_mflags & M_VLANTAG) {
+ vlan_macip_lens |= (pi->ipi_vtag << IGC_ADVTXD_VLAN_SHIFT);
+ }
+
+ vlan_macip_lens |= pi->ipi_ehdrlen << IGC_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= pi->ipi_ip_hlen;
+ TXD->vlan_macip_lens = htole32(vlan_macip_lens);
+
+ /* ADV DTYPE TUCMD */
+ type_tucmd_mlhl |= IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
+ type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
+ TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx |= (pi->ipi_tso_segsz << IGC_ADVTXD_MSS_SHIFT);
+ mss_l4len_idx |= (pi->ipi_tcp_hlen << IGC_ADVTXD_L4LEN_SHIFT);
+ TXD->mss_l4len_idx = htole32(mss_l4len_idx);
+
+ TXD->seqnum_seed = htole32(0);
+ *cmd_type_len |= IGC_ADVTXD_DCMD_TSE;
+ *olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
+ *olinfo_status |= paylen << IGC_ADVTXD_PAYLEN_SHIFT;
+
+ return (1);
+}
+
+/*********************************************************************
+ *
+ * Advanced Context Descriptor setup for VLAN, CSUM or TSO
+ *
+ **********************************************************************/
+static int
+igc_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status)
+{
+ struct igc_adv_tx_context_desc *TXD;
+ u32 vlan_macip_lens, type_tucmd_mlhl;
+ u32 mss_l4len_idx;
+ mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
+
+ /* First check if TSO is to be used */
+ if (pi->ipi_csum_flags & CSUM_TSO)
+ return (igc_tso_setup(txr, pi, cmd_type_len, olinfo_status));
+
+ /* Indicate the whole packet as payload when not doing TSO */
+ *olinfo_status |= pi->ipi_len << IGC_ADVTXD_PAYLEN_SHIFT;
+
+ /* Now ready a context descriptor */
+ TXD = (struct igc_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];
+
+ /*
+ ** In advanced descriptors the vlan tag must
+ ** be placed into the context descriptor. Hence
+ ** we need to make one even if not doing offloads.
+ */
+ if (pi->ipi_mflags & M_VLANTAG) {
+ vlan_macip_lens |= (pi->ipi_vtag << IGC_ADVTXD_VLAN_SHIFT);
+ } else if ((pi->ipi_csum_flags & IGC_CSUM_OFFLOAD) == 0) {
+ return (0);
+ }
+
+ /* Set the ether header length */
+ vlan_macip_lens |= pi->ipi_ehdrlen << IGC_ADVTXD_MACLEN_SHIFT;
+
+ switch(pi->ipi_etype) {
+ case ETHERTYPE_IP:
+ type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV4;
+ break;
+ case ETHERTYPE_IPV6:
+ type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV6;
+ break;
+ default:
+ break;
+ }
+
+ vlan_macip_lens |= pi->ipi_ip_hlen;
+ type_tucmd_mlhl |= IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
+
+ switch (pi->ipi_ipproto) {
+ case IPPROTO_TCP:
+ if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) {
+ type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
+ *olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
+ }
+ break;
+ case IPPROTO_UDP:
+ if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)) {
+ type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP;
+ *olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
+ }
+ break;
+ case IPPROTO_SCTP:
+ if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP)) {
+ type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_SCTP;
+ *olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Now copy bits into descriptor */
+ TXD->vlan_macip_lens = htole32(vlan_macip_lens);
+ TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
+ TXD->seqnum_seed = htole32(0);
+ TXD->mss_l4len_idx = htole32(mss_l4len_idx);
+
+ return (1);
+}
+
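+/*
+ * Encapsulate one packet: emit a context descriptor when VLAN, checksum
+ * offload or TSO requires it, then write one advanced data descriptor per
+ * DMA segment.  The final descriptor is marked EOP, and RS when the caller
+ * requested a completion interrupt, in which case its index is recorded in
+ * tx_rsq for later reclamation.
+ */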
+static int
+igc_isc_txd_encap(void *arg, if_pkt_info_t pi)
+{
+ struct igc_adapter *sc = arg;
+ if_softc_ctx_t scctx = sc->shared;
+ struct igc_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
+ struct tx_ring *txr = &que->txr;
+ int nsegs = pi->ipi_nsegs;
+ bus_dma_segment_t *segs = pi->ipi_segs;
+ union igc_adv_tx_desc *txd = NULL;
+ int i, j, pidx_last;
+ u32 olinfo_status, cmd_type_len, txd_flags;
+ qidx_t ntxd;
+
+ pidx_last = olinfo_status = 0;
+ /* Basic descriptor defines */
+ cmd_type_len = (IGC_ADVTXD_DTYP_DATA |
+ IGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DCMD_DEXT);
+
+ if (pi->ipi_mflags & M_VLANTAG)
+ cmd_type_len |= IGC_ADVTXD_DCMD_VLE;
+
+ i = pi->ipi_pidx;
+ ntxd = scctx->isc_ntxd[0];
+ txd_flags = pi->ipi_flags & IPI_TX_INTR ? IGC_ADVTXD_DCMD_RS : 0;
+ /* Consume the first descriptor */
+ i += igc_tx_ctx_setup(txr, pi, &cmd_type_len, &olinfo_status);
+ if (i == scctx->isc_ntxd[0])
+ i = 0;
+
+ for (j = 0; j < nsegs; j++) {
+ bus_size_t seglen;
+ bus_addr_t segaddr;
+
+ txd = (union igc_adv_tx_desc *)&txr->tx_base[i];
+ seglen = segs[j].ds_len;
+ segaddr = htole64(segs[j].ds_addr);
+
+ txd->read.buffer_addr = segaddr;
+ txd->read.cmd_type_len = htole32(IGC_ADVTXD_DCMD_IFCS |
+ cmd_type_len | seglen);
+ txd->read.olinfo_status = htole32(olinfo_status);
+ pidx_last = i;
+ if (++i == scctx->isc_ntxd[0]) {
+ i = 0;
+ }
+ }
+ if (txd_flags) {
+ txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
+ txr->tx_rs_pidx = (txr->tx_rs_pidx+1) & (ntxd-1);
+ MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
+ }
+
+ txd->read.cmd_type_len |= htole32(IGC_ADVTXD_DCMD_EOP | txd_flags);
+ pi->ipi_new_pidx = i;
+
+ return (0);
+}
+
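+/* Advance the transmit tail (TDT) to hand new descriptors to hardware. */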
+static void
+igc_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
+{
+ struct igc_adapter *adapter = arg;
+ struct igc_tx_queue *que = &adapter->tx_queues[txqid];
+ struct tx_ring *txr = &que->txr;
+
+ IGC_WRITE_REG(&adapter->hw, IGC_TDT(txr->me), pidx);
+}
+
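+/*
+ * Reclaim transmit descriptors: walk the report-status ring and count how
+ * many descriptors the hardware has marked done (DD).  When "clear" is
+ * false, only report whether any completed work is pending.
+ */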
+static int
+igc_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
+{
+ struct igc_adapter *adapter = arg;
+ if_softc_ctx_t scctx = adapter->shared;
+ struct igc_tx_queue *que = &adapter->tx_queues[txqid];
+ struct tx_ring *txr = &que->txr;
+
+ qidx_t processed = 0;
+ int updated;
+ qidx_t cur, prev, ntxd, rs_cidx;
+ int32_t delta;
+ uint8_t status;
+
+ rs_cidx = txr->tx_rs_cidx;
+ if (rs_cidx == txr->tx_rs_pidx)
+ return (0);
+ cur = txr->tx_rsq[rs_cidx];
+ status = ((union igc_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
+ updated = !!(status & IGC_TXD_STAT_DD);
+
+ if (!updated)
+ return (0);
+
+ /* If clear is false, just let the caller know that there
+ * are descriptors to reclaim */
+ if (!clear)
+ return (1);
+
+ prev = txr->tx_cidx_processed;
+ ntxd = scctx->isc_ntxd[0];
+ do {
+ MPASS(prev != cur);
+ delta = (int32_t)cur - (int32_t)prev;
+ if (delta < 0)
+ delta += ntxd;
+ MPASS(delta > 0);
+
+ processed += delta;
+ prev = cur;
+ rs_cidx = (rs_cidx + 1) & (ntxd-1);
+ if (rs_cidx == txr->tx_rs_pidx)
+ break;
+ cur = txr->tx_rsq[rs_cidx];
+ status = ((union igc_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
+ } while ((status & IGC_TXD_STAT_DD));
+
+ txr->tx_rs_cidx = rs_cidx;
+ txr->tx_cidx_processed = prev;
+ return (processed);
+}
+
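+/* Post fresh receive buffer physical addresses into the descriptor ring. */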
+static void
+igc_isc_rxd_refill(void *arg, if_rxd_update_t iru)
+{
+ struct igc_adapter *sc = arg;
+ if_softc_ctx_t scctx = sc->shared;
+ uint16_t rxqid = iru->iru_qsidx;
+ struct igc_rx_queue *que = &sc->rx_queues[rxqid];
+ union igc_adv_rx_desc *rxd;
+ struct rx_ring *rxr = &que->rxr;
+ uint64_t *paddrs;
+ uint32_t next_pidx, pidx;
+ uint16_t count;
+ int i;
+
+ paddrs = iru->iru_paddrs;
+ pidx = iru->iru_pidx;
+ count = iru->iru_count;
+
+ for (i = 0, next_pidx = pidx; i < count; i++) {
+ rxd = (union igc_adv_rx_desc *)&rxr->rx_base[next_pidx];
+
+ rxd->read.pkt_addr = htole64(paddrs[i]);
+ if (++next_pidx == scctx->isc_nrxd[0])
+ next_pidx = 0;
+ }
+}
+
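+/* Advance the receive tail (RDT) after new buffers have been posted. */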
+static void
+igc_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
+{
+ struct igc_adapter *sc = arg;
+ struct igc_rx_queue *que = &sc->rx_queues[rxqid];
+ struct rx_ring *rxr = &que->rxr;
+
+ IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me), pidx);
+}
+
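+/*
+ * Report how many complete (EOP) frames are ready in the receive ring,
+ * starting at idx and bounded by budget.
+ */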
+static int
+igc_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
+{
+ struct igc_adapter *sc = arg;
+ if_softc_ctx_t scctx = sc->shared;
+ struct igc_rx_queue *que = &sc->rx_queues[rxqid];
+ struct rx_ring *rxr = &que->rxr;
+ union igc_adv_rx_desc *rxd;
+ u32 staterr = 0;
+ int cnt, i;
+
+ for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
+ rxd = (union igc_adv_rx_desc *)&rxr->rx_base[i];
+ staterr = le32toh(rxd->wb.upper.status_error);
+
+ if ((staterr & IGC_RXD_STAT_DD) == 0)
+ break;
+ if (++i == scctx->isc_nrxd[0])
+ i = 0;
+ if (staterr & IGC_RXD_STAT_EOP)
+ cnt++;
+ }
+ return (cnt);
+}
+
+/****************************************************************
+ * Pass a received frame that has been DMA'd into host memory up to
+ * the stack, initializing the ri structure along the way.
+ *
+ * Returns 0 upon success, errno on failure
+ ***************************************************************/
+
+static int
+igc_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
+{
+ struct igc_adapter *adapter = arg;
+ if_softc_ctx_t scctx = adapter->shared;
+ struct igc_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
+ struct rx_ring *rxr = &que->rxr;
+ struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
+ union igc_adv_rx_desc *rxd;
+
+ u16 pkt_info, len;
+ u16 vtag = 0;
+ u32 ptype;
+ u32 staterr = 0;
+ bool eop;
+ int i = 0;
+ int cidx = ri->iri_cidx;
+
+ do {
+ rxd = (union igc_adv_rx_desc *)&rxr->rx_base[cidx];
+ staterr = le32toh(rxd->wb.upper.status_error);
+ pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);
+
+ MPASS ((staterr & IGC_RXD_STAT_DD) != 0);
+
+ len = le16toh(rxd->wb.upper.length);
+ ptype = le32toh(rxd->wb.lower.lo_dword.data) & IGC_PKTTYPE_MASK;
+
+ ri->iri_len += len;
+ rxr->rx_bytes += ri->iri_len;
+
+ rxd->wb.upper.status_error = 0;
+ eop = ((staterr & IGC_RXD_STAT_EOP) == IGC_RXD_STAT_EOP);
+
+ vtag = le16toh(rxd->wb.upper.vlan);
+
+ /* Make sure bad packets are discarded */
+ if (eop && ((staterr & IGC_RXDEXT_STATERR_RXE) != 0)) {
+ adapter->dropped_pkts++;
+ ++rxr->rx_discarded;
+ return (EBADMSG);
+ }
+ ri->iri_frags[i].irf_flid = 0;
+ ri->iri_frags[i].irf_idx = cidx;
+ ri->iri_frags[i].irf_len = len;
+
+ if (++cidx == scctx->isc_nrxd[0])
+ cidx = 0;
+#ifdef notyet
+ if (rxr->hdr_split == true) {
+ ri->iri_frags[i].irf_flid = 1;
+ ri->iri_frags[i].irf_idx = cidx;
+ if (++cidx == scctx->isc_nrxd[0])
+ cidx = 0;
+ }
+#endif
+ i++;
+ } while (!eop);
+
+ rxr->rx_packets++;
+
+ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+ igc_rx_checksum(staterr, ri, ptype);
+
+ if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
+ (staterr & IGC_RXD_STAT_VP) != 0) {
+ ri->iri_vtag = vtag;
+ ri->iri_flags |= M_VLANTAG;
+ }
+ ri->iri_flowid =
+ le32toh(rxd->wb.lower.hi_dword.rss);
+ ri->iri_rsstype = igc_determine_rsstype(pkt_info);
+ ri->iri_nfrags = i;
+
+ return (0);
+}
+
+/*********************************************************************
+ *
+ * Verify that the hardware indicated that the checksum is valid.
+ * Inform the stack about the checksum status so that the stack
+ * does not spend time verifying it.
+ *
+ *********************************************************************/
+static void
+igc_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype)
+{
+ u16 status = (u16)staterr;
+ u8 errors = (u8) (staterr >> 24);
+
+ /* Ignore Checksum bit is set */
+ if (status & IGC_RXD_STAT_IXSM) {
+ ri->iri_csum_flags = 0;
+ return;
+ }
+
+ if (status & (IGC_RXD_STAT_TCPCS | IGC_RXD_STAT_UDPCS)) {
+ u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+ /* Did it pass? */
+ if (!(errors & IGC_RXD_ERR_TCPE)) {
+ ri->iri_csum_flags |= type;
+ ri->iri_csum_data = htons(0xffff);
+ }
+ }
+ return;
+}
+
+/********************************************************************
+ *
+ * Parse the packet type to determine the appropriate hash
+ *
+ ******************************************************************/
+static int
+igc_determine_rsstype(u16 pkt_info)
+{
+ switch (pkt_info & IGC_RXDADV_RSSTYPE_MASK) {
+ case IGC_RXDADV_RSSTYPE_IPV4_TCP:
+ return M_HASHTYPE_RSS_TCP_IPV4;
+ case IGC_RXDADV_RSSTYPE_IPV4:
+ return M_HASHTYPE_RSS_IPV4;
+ case IGC_RXDADV_RSSTYPE_IPV6_TCP:
+ return M_HASHTYPE_RSS_TCP_IPV6;
+ case IGC_RXDADV_RSSTYPE_IPV6_EX:
+ return M_HASHTYPE_RSS_IPV6_EX;
+ case IGC_RXDADV_RSSTYPE_IPV6:
+ return M_HASHTYPE_RSS_IPV6;
+ case IGC_RXDADV_RSSTYPE_IPV6_TCP_EX:
+ return M_HASHTYPE_RSS_TCP_IPV6_EX;
+ default:
+ return M_HASHTYPE_OPAQUE;
+ }
+}