aboutsummaryrefslogtreecommitdiff
path: root/sys/dev/e1000/if_em.c
diff options
context:
space:
mode:
Diffstat (limited to 'sys/dev/e1000/if_em.c')
-rw-r--r--sys/dev/e1000/if_em.c3189
1 files changed, 1398 insertions, 1791 deletions
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index b45e4784014d..1db90121ddb7 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -35,7 +35,6 @@
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#include "opt_inet.h"
-#include "opt_altq.h"
#endif
#include <sys/param.h>
@@ -55,9 +54,7 @@
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
-#if __FreeBSD_version >= 700029
#include <sys/eventhandler.h>
-#endif
#include <machine/bus.h>
#include <machine/resource.h>
@@ -80,6 +77,7 @@
#include <netinet/udp.h>
#include <machine/in_cksum.h>
+#include <dev/led/led.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
@@ -95,7 +93,7 @@ int em_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
-char em_driver_version[] = "6.9.25";
+char em_driver_version[] = "7.0.5";
/*********************************************************************
@@ -111,51 +109,6 @@ char em_driver_version[] = "6.9.25";
static em_vendor_info_t em_vendor_info_array[] =
{
/* Intel(R) PRO/1000 Network Connection */
- { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
-
- { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
-
- { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
-
- { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
-
- { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
-
- { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
-
- { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
- PCI_ANY_ID, PCI_ANY_ID, 0},
-
- { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
-
{ 0x8086, E1000_DEV_ID_82571EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82571EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
@@ -238,10 +191,11 @@ static int em_shutdown(device_t);
static int em_suspend(device_t);
static int em_resume(device_t);
static void em_start(struct ifnet *);
-static void em_start_locked(struct ifnet *ifp);
-#if __FreeBSD_version >= 800000
+static void em_start_locked(struct ifnet *, struct tx_ring *);
+#ifdef EM_MULTIQUEUE
static int em_mq_start(struct ifnet *, struct mbuf *);
-static int em_mq_start_locked(struct ifnet *, struct mbuf *);
+static int em_mq_start_locked(struct ifnet *,
+ struct tx_ring *, struct mbuf *);
static void em_qflush(struct ifnet *);
#endif
static int em_ioctl(struct ifnet *, u_long, caddr_t);
@@ -252,55 +206,49 @@ static void em_media_status(struct ifnet *, struct ifmediareq *);
static int em_media_change(struct ifnet *);
static void em_identify_hardware(struct adapter *);
static int em_allocate_pci_resources(struct adapter *);
-static int em_allocate_legacy(struct adapter *adapter);
-static int em_allocate_msix(struct adapter *adapter);
+static int em_allocate_legacy(struct adapter *);
+static int em_allocate_msix(struct adapter *);
+static int em_allocate_queues(struct adapter *);
static int em_setup_msix(struct adapter *);
static void em_free_pci_resources(struct adapter *);
static void em_local_timer(void *);
-static int em_hardware_init(struct adapter *);
+static void em_reset(struct adapter *);
static void em_setup_interface(device_t, struct adapter *);
+
static void em_setup_transmit_structures(struct adapter *);
static void em_initialize_transmit_unit(struct adapter *);
+static int em_allocate_transmit_buffers(struct tx_ring *);
+static void em_free_transmit_structures(struct adapter *);
+static void em_free_transmit_buffers(struct tx_ring *);
+
static int em_setup_receive_structures(struct adapter *);
+static int em_allocate_receive_buffers(struct rx_ring *);
static void em_initialize_receive_unit(struct adapter *);
+static void em_free_receive_structures(struct adapter *);
+static void em_free_receive_buffers(struct rx_ring *);
+
static void em_enable_intr(struct adapter *);
static void em_disable_intr(struct adapter *);
-static void em_free_transmit_structures(struct adapter *);
-static void em_free_receive_structures(struct adapter *);
static void em_update_stats_counters(struct adapter *);
-static void em_txeof(struct adapter *);
-static void em_tx_purge(struct adapter *);
-static int em_allocate_receive_structures(struct adapter *);
-static int em_allocate_transmit_structures(struct adapter *);
-static int em_rxeof(struct adapter *, int);
+static bool em_txeof(struct tx_ring *);
+static int em_rxeof(struct rx_ring *, int);
#ifndef __NO_STRICT_ALIGNMENT
-static int em_fixup_rx(struct adapter *);
+static int em_fixup_rx(struct rx_ring *);
#endif
-static void em_receive_checksum(struct adapter *, struct e1000_rx_desc *,
- struct mbuf *);
-static void em_transmit_checksum_setup(struct adapter *, struct mbuf *,
- u32 *, u32 *);
-#if __FreeBSD_version >= 700000
-static bool em_tso_setup(struct adapter *, struct mbuf *,
+static void em_receive_checksum(struct e1000_rx_desc *, struct mbuf *);
+static void em_transmit_checksum_setup(struct tx_ring *, struct mbuf *,
u32 *, u32 *);
-#endif /* FreeBSD_version >= 700000 */
+static bool em_tso_setup(struct tx_ring *, struct mbuf *, u32 *, u32 *);
static void em_set_promisc(struct adapter *);
static void em_disable_promisc(struct adapter *);
static void em_set_multi(struct adapter *);
static void em_print_hw_stats(struct adapter *);
static void em_update_link_status(struct adapter *);
-static int em_get_buf(struct adapter *, int);
-#if __FreeBSD_version >= 700029
+static void em_refresh_mbufs(struct rx_ring *, int);
static void em_register_vlan(void *, struct ifnet *, u16);
static void em_unregister_vlan(void *, struct ifnet *, u16);
static void em_setup_vlan_hw_support(struct adapter *);
-#endif
-static int em_xmit(struct adapter *, struct mbuf **);
-static void em_smartspeed(struct adapter *);
-static int em_82547_fifo_workaround(struct adapter *, int);
-static void em_82547_update_fifo_head(struct adapter *, int);
-static int em_82547_tx_fifo_reset(struct adapter *);
-static void em_82547_move_tail(void *);
+static int em_xmit(struct tx_ring *, struct mbuf **);
static int em_dma_malloc(struct adapter *, bus_size_t,
struct em_dma_alloc *, int);
static void em_dma_free(struct adapter *, struct em_dma_alloc *);
@@ -309,8 +257,6 @@ static void em_print_nvm_info(struct adapter *);
static int em_is_valid_ether_addr(u8 *);
static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
-static u32 em_fill_descriptors (bus_addr_t address, u32 length,
- PDESC_ARRAY desc_array);
static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void em_add_int_delay_sysctl(struct adapter *, const char *,
const char *, struct em_int_delay_info *, int, int);
@@ -322,28 +268,20 @@ static void em_release_hw_control(struct adapter *);
static void em_get_wakeup(device_t);
static void em_enable_wakeup(device_t);
static int em_enable_phy_wakeup(struct adapter *);
+static void em_led_func(void *, int);
-#ifdef EM_LEGACY_IRQ
-static void em_intr(void *);
-#else /* FAST IRQ */
-#if __FreeBSD_version < 700000
-static void em_irq_fast(void *);
-#else
static int em_irq_fast(void *);
-#endif
/* MSIX handlers */
static void em_msix_tx(void *);
static void em_msix_rx(void *);
static void em_msix_link(void *);
-static void em_handle_rx(void *context, int pending);
static void em_handle_tx(void *context, int pending);
-
-static void em_handle_rxtx(void *context, int pending);
+static void em_handle_rx(void *context, int pending);
static void em_handle_link(void *context, int pending);
+
static void em_add_rx_process_limit(struct adapter *, const char *,
const char *, int *, int);
-#endif /* ~EM_LEGACY_IRQ */
#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
@@ -368,7 +306,7 @@ static driver_t em_driver = {
"em", em_methods, sizeof(struct adapter),
};
-static devclass_t em_devclass;
+devclass_t em_devclass;
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);
@@ -388,31 +326,35 @@ MODULE_DEPEND(em, ether, 1, 1, 1);
static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
-static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
-static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
-static int em_rxd = EM_DEFAULT_RXD;
-static int em_txd = EM_DEFAULT_TXD;
-static int em_smart_pwr_down = FALSE;
-/* Controls whether promiscuous also shows bad packets */
-static int em_debug_sbp = FALSE;
-/* Local switch for MSI/MSIX */
-static int em_enable_msi = TRUE;
-
TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
+
+static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
+static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
+
+static int em_rxd = EM_DEFAULT_RXD;
+static int em_txd = EM_DEFAULT_TXD;
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
+
+static int em_smart_pwr_down = FALSE;
TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
+
+/* Controls whether promiscuous also shows bad packets */
+static int em_debug_sbp = FALSE;
TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
-TUNABLE_INT("hw.em.enable_msi", &em_enable_msi);
-#ifndef EM_LEGACY_IRQ
+/* Local controls for MSI/MSIX */
+static int em_enable_msix = TRUE;
+static int em_msix_queues = 2; /* for 82574, can be 1 or 2 */
+TUNABLE_INT("hw.em.enable_msix", &em_enable_msix);
+TUNABLE_INT("hw.em.msix_queues", &em_msix_queues);
+
/* How many packets rxeof tries to clean at a time */
static int em_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
-#endif
/* Flow control setting - default to FULL */
static int em_fc_setting = e1000_fc_full;
@@ -494,7 +436,6 @@ static int
em_attach(device_t dev)
{
struct adapter *adapter;
- int tsize, rsize;
int error = 0;
INIT_DEBUGOUT("em_attach: begin");
@@ -502,8 +443,6 @@ em_attach(device_t dev)
adapter = device_get_softc(dev);
adapter->dev = adapter->osdep.dev = dev;
EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
- EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
- EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
/* SYSCTL stuff */
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
@@ -517,7 +456,6 @@ em_attach(device_t dev)
em_sysctl_stats, "I", "Statistics");
callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
- callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
/* Determine hardware and mac info */
em_identify_hardware(adapter);
@@ -571,25 +509,21 @@ em_attach(device_t dev)
em_add_int_delay_sysctl(adapter, "tx_int_delay",
"transmit interrupt delay in usecs", &adapter->tx_int_delay,
E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt);
- if (adapter->hw.mac.type >= e1000_82540) {
- em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
- "receive interrupt delay limit in usecs",
- &adapter->rx_abs_int_delay,
- E1000_REGISTER(&adapter->hw, E1000_RADV),
- em_rx_abs_int_delay_dflt);
- em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
- "transmit interrupt delay limit in usecs",
- &adapter->tx_abs_int_delay,
- E1000_REGISTER(&adapter->hw, E1000_TADV),
- em_tx_abs_int_delay_dflt);
- }
+ em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
+ "receive interrupt delay limit in usecs",
+ &adapter->rx_abs_int_delay,
+ E1000_REGISTER(&adapter->hw, E1000_RADV),
+ em_rx_abs_int_delay_dflt);
+ em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
+ "transmit interrupt delay limit in usecs",
+ &adapter->tx_abs_int_delay,
+ E1000_REGISTER(&adapter->hw, E1000_TADV),
+ em_tx_abs_int_delay_dflt);
-#ifndef EM_LEGACY_IRQ
/* Sysctls for limiting the amount of work done in the taskqueue */
em_add_rx_process_limit(adapter, "rx_processing_limit",
"max number of rx packets to process", &adapter->rx_process_limit,
em_rx_process_limit);
-#endif
/*
* Validate number of transmit and receive descriptors. It
@@ -597,18 +531,15 @@ em_attach(device_t dev)
* of E1000_DBA_ALIGN.
*/
if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
- (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
- (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
- (em_txd < EM_MIN_TXD)) {
+ (em_txd > EM_MAX_TXD) || (em_txd < EM_MIN_TXD)) {
device_printf(dev, "Using %d TX descriptors instead of %d!\n",
EM_DEFAULT_TXD, em_txd);
adapter->num_tx_desc = EM_DEFAULT_TXD;
} else
adapter->num_tx_desc = em_txd;
+
if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
- (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
- (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
- (em_rxd < EM_MIN_RXD)) {
+ (em_rxd > EM_MAX_RXD) || (em_rxd < EM_MIN_RXD)) {
device_printf(dev, "Using %d RX descriptors instead of %d!\n",
EM_DEFAULT_RXD, em_rxd);
adapter->num_rx_desc = EM_DEFAULT_RXD;
@@ -618,10 +549,6 @@ em_attach(device_t dev)
adapter->hw.mac.autoneg = DO_AUTO_NEG;
adapter->hw.phy.autoneg_wait_to_complete = FALSE;
adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
- adapter->rx_buffer_len = 2048;
-
- e1000_init_script_state_82541(&adapter->hw, TRUE);
- e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
/* Copper options */
if (adapter->hw.phy.media_type == e1000_media_type_copper) {
@@ -643,29 +570,13 @@ em_attach(device_t dev)
*/
adapter->hw.mac.report_tx_early = 1;
- tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
- EM_DBA_ALIGN);
-
- /* Allocate Transmit Descriptor ring */
- if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
- device_printf(dev, "Unable to allocate tx_desc memory\n");
- error = ENOMEM;
- goto err_tx_desc;
- }
- adapter->tx_desc_base =
- (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
-
- rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
- EM_DBA_ALIGN);
-
- /* Allocate Receive Descriptor ring */
- if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
- device_printf(dev, "Unable to allocate rx_desc memory\n");
+ /*
+ ** Get queue/ring memory
+ */
+ if (em_allocate_queues(adapter)) {
error = ENOMEM;
- goto err_rx_desc;
+ goto err_pci;
}
- adapter->rx_desc_base =
- (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
/*
** Start from a known state, this is
@@ -685,7 +596,7 @@ em_attach(device_t dev)
device_printf(dev,
"The EEPROM Checksum Is Not Valid\n");
error = EIO;
- goto err_hw_init;
+ goto err_late;
}
}
@@ -694,45 +605,24 @@ em_attach(device_t dev)
device_printf(dev, "EEPROM read error while reading MAC"
" address\n");
error = EIO;
- goto err_hw_init;
+ goto err_late;
}
if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) {
device_printf(dev, "Invalid MAC address\n");
error = EIO;
- goto err_hw_init;
- }
-
- /* Initialize the hardware */
- if (em_hardware_init(adapter)) {
- device_printf(dev, "Unable to initialize the hardware\n");
- error = EIO;
- goto err_hw_init;
- }
-
- /* Allocate transmit descriptors and buffers */
- if (em_allocate_transmit_structures(adapter)) {
- device_printf(dev, "Could not setup transmit structures\n");
- error = ENOMEM;
- goto err_tx_struct;
- }
-
- /* Allocate receive descriptors and buffers */
- if (em_allocate_receive_structures(adapter)) {
- device_printf(dev, "Could not setup receive structures\n");
- error = ENOMEM;
- goto err_rx_struct;
+ goto err_late;
}
/*
** Do interrupt configuration
*/
- if (adapter->msi > 1) /* Do MSI/X */
+ if (adapter->msix > 1) /* Do MSIX */
error = em_allocate_msix(adapter);
else /* MSI or Legacy */
error = em_allocate_legacy(adapter);
if (error)
- goto err_rx_struct;
+ goto err_late;
/*
* Get Wake-on-Lan and Management info for later use
@@ -742,6 +632,8 @@ em_attach(device_t dev)
/* Setup OS specific network interface */
em_setup_interface(dev, adapter);
+ em_reset(adapter);
+
/* Initialize statistics */
em_update_stats_counters(adapter);
@@ -753,20 +645,11 @@ em_attach(device_t dev)
device_printf(dev,
"PHY reset is blocked due to SOL/IDER session.\n");
- /* Do we need workaround for 82544 PCI-X adapter? */
- if (adapter->hw.bus.type == e1000_bus_type_pcix &&
- adapter->hw.mac.type == e1000_82544)
- adapter->pcix_82544 = TRUE;
- else
- adapter->pcix_82544 = FALSE;
-
-#if __FreeBSD_version >= 700029
/* Register for VLAN events */
adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
em_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
em_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
-#endif
/* Non-AMT based hardware can now take control from firmware */
if (adapter->has_manage && !adapter->has_amt)
@@ -775,23 +658,19 @@ em_attach(device_t dev)
/* Tell the stack that the interface is not active */
adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ adapter->led_dev = led_create(em_led_func, adapter,
+ device_get_nameunit(dev));
+
INIT_DEBUGOUT("em_attach: end");
return (0);
-err_rx_struct:
+err_late:
em_free_transmit_structures(adapter);
-err_tx_struct:
-err_hw_init:
+ em_free_receive_structures(adapter);
em_release_hw_control(adapter);
- em_dma_free(adapter, &adapter->rxdma);
-err_rx_desc:
- em_dma_free(adapter, &adapter->txdma);
-err_tx_desc:
err_pci:
em_free_pci_resources(adapter);
- EM_TX_LOCK_DESTROY(adapter);
- EM_RX_LOCK_DESTROY(adapter);
EM_CORE_LOCK_DESTROY(adapter);
return (error);
@@ -816,11 +695,7 @@ em_detach(device_t dev)
INIT_DEBUGOUT("em_detach: begin");
/* Make sure VLANS are not using driver */
-#if __FreeBSD_version >= 700000
if (adapter->ifp->if_vlantrunk != NULL) {
-#else
- if (adapter->ifp->if_nvlans != 0) {
-#endif
device_printf(dev,"Vlan in use, detach first\n");
return (EBUSY);
}
@@ -831,27 +706,24 @@ em_detach(device_t dev)
#endif
EM_CORE_LOCK(adapter);
- EM_TX_LOCK(adapter);
adapter->in_detach = 1;
em_stop(adapter);
+ EM_CORE_UNLOCK(adapter);
+ EM_CORE_LOCK_DESTROY(adapter);
+
e1000_phy_hw_reset(&adapter->hw);
em_release_manageability(adapter);
+ em_release_hw_control(adapter);
- EM_TX_UNLOCK(adapter);
- EM_CORE_UNLOCK(adapter);
-
-#if __FreeBSD_version >= 700029
/* Unregister VLAN events */
if (adapter->vlan_attach != NULL)
EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
if (adapter->vlan_detach != NULL)
EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
-#endif
ether_ifdetach(adapter->ifp);
callout_drain(&adapter->timer);
- callout_drain(&adapter->tx_fifo_timer);
em_free_pci_resources(adapter);
bus_generic_detach(dev);
@@ -860,22 +732,7 @@ em_detach(device_t dev)
em_free_transmit_structures(adapter);
em_free_receive_structures(adapter);
- /* Free Transmit Descriptor ring */
- if (adapter->tx_desc_base) {
- em_dma_free(adapter, &adapter->txdma);
- adapter->tx_desc_base = NULL;
- }
-
- /* Free Receive Descriptor ring */
- if (adapter->rx_desc_base) {
- em_dma_free(adapter, &adapter->rxdma);
- adapter->rx_desc_base = NULL;
- }
-
em_release_hw_control(adapter);
- EM_TX_LOCK_DESTROY(adapter);
- EM_RX_LOCK_DESTROY(adapter);
- EM_CORE_LOCK_DESTROY(adapter);
return (0);
}
@@ -917,6 +774,9 @@ em_resume(device_t dev)
struct adapter *adapter = device_get_softc(dev);
struct ifnet *ifp = adapter->ifp;
+ if (adapter->led_dev != NULL)
+ led_destroy(adapter->led_dev);
+
EM_CORE_LOCK(adapter);
em_init_locked(adapter);
em_init_manageability(adapter);
@@ -937,71 +797,60 @@ em_resume(device_t dev)
* the packet is requeued.
**********************************************************************/
-#if __FreeBSD_version >= 800000
+#ifdef EM_MULTIQUEUE
static int
-em_mq_start_locked(struct ifnet *ifp, struct mbuf *m)
+em_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
- struct adapter *adapter = ifp->if_softc;
- struct mbuf *next;
- int error = E1000_SUCCESS;
+ struct adapter *adapter = txr->adapter;
+ struct mbuf *next;
+ int err = 0, enq = 0;
- EM_TX_LOCK_ASSERT(adapter);
- /* To allow being called from a tasklet */
- if (m == NULL)
- goto process;
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING || adapter->link_active == 0) {
+ if (m != NULL)
+ err = drbr_enqueue(ifp, txr->br, m);
+ return (err);
+ }
- if (((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
- IFF_DRV_RUNNING)
- || (!adapter->link_active)) {
- error = drbr_enqueue(ifp, adapter->br, m);
- return (error);
- } else if (!drbr_needs_enqueue(ifp, adapter->br) &&
- (adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)) {
- if ((error = em_xmit(adapter, &m)) != 0) {
- if (m)
- error = drbr_enqueue(ifp, adapter->br, m);
- return (error);
- } else {
- /*
- * We've bypassed the buf ring so we need to update
- * ifp directly
- */
- drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
- /*
- ** Send a copy of the frame to the BPF
- ** listener and set the watchdog on.
- */
- ETHER_BPF_MTAP(ifp, m);
- adapter->watchdog_check = TRUE;
- }
- } else if ((error = drbr_enqueue(ifp, adapter->br, m)) != 0)
- return (error);
-
-process:
- if (drbr_empty(ifp, adapter->br))
- return(error);
- /* Process the queue */
- while (TRUE) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- break;
- next = drbr_dequeue(ifp, adapter->br);
- if (next == NULL)
- break;
- if ((error = em_xmit(adapter, &next)) != 0) {
- if (next != NULL)
- error = drbr_enqueue(ifp, adapter->br, next);
+ /* Call cleanup if number of TX descriptors low */
+ if (txr->tx_avail <= EM_TX_CLEANUP_THRESHOLD)
+ em_txeof(txr);
+
+ enq = 0;
+ if (m == NULL) {
+ next = drbr_dequeue(ifp, txr->br);
+ } else if (drbr_needs_enqueue(ifp, txr->br)) {
+ if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
+ return (err);
+ next = drbr_dequeue(ifp, txr->br);
+ } else
+ next = m;
+
+ /* Process the queue */
+ while (next != NULL) {
+ if ((err = em_xmit(txr, &next)) != 0) {
+ if (next != NULL)
+ err = drbr_enqueue(ifp, txr->br, next);
break;
}
+ enq++;
drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
- ETHER_BPF_MTAP(ifp, next);
- /* Set the watchdog */
- adapter->watchdog_check = TRUE;
- }
-
- if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ ETHER_BPF_MTAP(ifp, next);
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ break;
+ if (txr->tx_avail < EM_MAX_SCATTER) {
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ break;
+ }
+ next = drbr_dequeue(ifp, txr->br);
+ }
- return (error);
+ if (enq > 0) {
+ /* Set the watchdog */
+ txr->watchdog_check = TRUE;
+ txr->watchdog_time = ticks;
+ }
+ return (err);
}
/*
@@ -1011,50 +860,72 @@ process:
static int
em_mq_start(struct ifnet *ifp, struct mbuf *m)
{
-
- struct adapter *adapter = ifp->if_softc;
- int error = 0;
+ struct adapter *adapter = ifp->if_softc;
+ struct tx_ring *txr;
+ int i, error = 0;
+
+ /* Which queue to use */
+ if ((m->m_flags & M_FLOWID) != 0)
+ i = m->m_pkthdr.flowid % adapter->num_queues;
+ else
+ i = curcpu % adapter->num_queues;
+
+ txr = &adapter->tx_rings[i];
- if (EM_TX_TRYLOCK(adapter)) {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- error = em_mq_start_locked(ifp, m);
- EM_TX_UNLOCK(adapter);
+ if (EM_TX_TRYLOCK(txr)) {
+ error = em_mq_start_locked(ifp, txr, m);
+ EM_TX_UNLOCK(txr);
} else
- error = drbr_enqueue(ifp, adapter->br, m);
+ error = drbr_enqueue(ifp, txr->br, m);
return (error);
}
+/*
+** Flush all ring buffers
+*/
static void
em_qflush(struct ifnet *ifp)
{
- struct mbuf *m;
- struct adapter *adapter = (struct adapter *)ifp->if_softc;
+ struct adapter *adapter = ifp->if_softc;
+ struct tx_ring *txr = adapter->tx_rings;
+ struct mbuf *m;
- EM_TX_LOCK(adapter);
- while ((m = buf_ring_dequeue_sc(adapter->br)) != NULL)
- m_freem(m);
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ EM_TX_LOCK(txr);
+ while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
+ m_freem(m);
+ EM_TX_UNLOCK(txr);
+ }
if_qflush(ifp);
- EM_TX_UNLOCK(adapter);
}
-#endif /* FreeBSD_version */
+
+#endif /* EM_MULTIQUEUE */
static void
-em_start_locked(struct ifnet *ifp)
+em_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
struct adapter *adapter = ifp->if_softc;
struct mbuf *m_head;
- EM_TX_LOCK_ASSERT(adapter);
+ EM_TX_LOCK_ASSERT(txr);
if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING)
return;
+
if (!adapter->link_active)
return;
- while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+ /* Call cleanup if number of TX descriptors low */
+ if (txr->tx_avail <= EM_TX_CLEANUP_THRESHOLD)
+ em_txeof(txr);
+ while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+ if (txr->tx_avail < EM_MAX_SCATTER) {
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ break;
+ }
IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
if (m_head == NULL)
break;
@@ -1062,7 +933,7 @@ em_start_locked(struct ifnet *ifp)
* Encapsulation can modify our pointer, and or make it
* NULL on failure. In that event, we can't requeue.
*/
- if (em_xmit(adapter, &m_head)) {
+ if (em_xmit(txr, &m_head)) {
if (m_head == NULL)
break;
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
@@ -1074,10 +945,9 @@ em_start_locked(struct ifnet *ifp)
ETHER_BPF_MTAP(ifp, m_head);
/* Set timeout in case hardware has problems transmitting. */
- adapter->watchdog_check = TRUE;
+ txr->watchdog_time = ticks;
+ txr->watchdog_check = TRUE;
}
- if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
return;
}
@@ -1085,12 +955,15 @@ em_start_locked(struct ifnet *ifp)
static void
em_start(struct ifnet *ifp)
{
- struct adapter *adapter = ifp->if_softc;
+ struct adapter *adapter = ifp->if_softc;
+ struct tx_ring *txr = adapter->tx_rings;
- EM_TX_LOCK(adapter);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- em_start_locked(ifp);
- EM_TX_UNLOCK(adapter);
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ EM_TX_LOCK(txr);
+ em_start_locked(ifp, txr);
+ EM_TX_UNLOCK(txr);
+ }
+ return;
}
/*********************************************************************
@@ -1140,37 +1013,23 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
case SIOCSIFMTU:
{
int max_frame_size;
- u16 eeprom_data = 0;
IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
EM_CORE_LOCK(adapter);
switch (adapter->hw.mac.type) {
- case e1000_82573:
- /*
- * 82573 only supports jumbo frames
- * if ASPM is disabled.
- */
- e1000_read_nvm(&adapter->hw,
- NVM_INIT_3GIO_3, 1, &eeprom_data);
- if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
- max_frame_size = ETHER_MAX_LEN;
- break;
- }
- /* Allow Jumbo frames - fall thru */
case e1000_82571:
case e1000_82572:
case e1000_ich9lan:
case e1000_ich10lan:
case e1000_82574:
- case e1000_80003es2lan: /* Limit Jumbo Frame size */
+ case e1000_80003es2lan: /* 9K Jumbo Frame size */
max_frame_size = 9234;
break;
case e1000_pchlan:
max_frame_size = 4096;
break;
/* Adapters that do not support jumbo frames */
- case e1000_82542:
case e1000_82583:
case e1000_ich8lan:
max_frame_size = ETHER_MAX_LEN;
@@ -1206,11 +1065,8 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
} else
em_init_locked(adapter);
} else
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- EM_TX_LOCK(adapter);
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
em_stop(adapter);
- EM_TX_UNLOCK(adapter);
- }
adapter->if_flags = ifp->if_flags;
EM_CORE_UNLOCK(adapter);
break;
@@ -1221,10 +1077,6 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
EM_CORE_LOCK(adapter);
em_disable_intr(adapter);
em_set_multi(adapter);
- if (adapter->hw.mac.type == e1000_82542 &&
- adapter->hw.revision_id == E1000_REVISION_2) {
- em_initialize_receive_unit(adapter);
- }
#ifdef DEVICE_POLLING
if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
@@ -1278,22 +1130,18 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
ifp->if_capenable ^= IFCAP_HWCSUM;
reinit = 1;
}
-#if __FreeBSD_version >= 700000
if (mask & IFCAP_TSO4) {
ifp->if_capenable ^= IFCAP_TSO4;
reinit = 1;
}
-#endif
if (mask & IFCAP_VLAN_HWTAGGING) {
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
reinit = 1;
}
-
if (mask & IFCAP_VLAN_HWFILTER) {
ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
reinit = 1;
}
-
if ((mask & IFCAP_WOL) &&
(ifp->if_capabilities & IFCAP_WOL) != 0) {
if (mask & IFCAP_WOL_MCAST)
@@ -1301,12 +1149,9 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
if (mask & IFCAP_WOL_MAGIC)
ifp->if_capenable ^= IFCAP_WOL_MAGIC;
}
-
if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
em_init(adapter);
-#if __FreeBSD_version >= 700000
VLAN_CAPABILITIES(ifp);
-#endif
break;
}
@@ -1341,33 +1186,15 @@ em_init_locked(struct adapter *adapter)
EM_CORE_LOCK_ASSERT(adapter);
- EM_TX_LOCK(adapter);
- em_stop(adapter);
- EM_TX_UNLOCK(adapter);
+ em_disable_intr(adapter);
+ callout_stop(&adapter->timer);
/*
* Packet Buffer Allocation (PBA)
* Writing PBA sets the receive portion of the buffer
* the remainder is used for the transmit buffer.
- *
- * Devices before the 82547 had a Packet Buffer of 64K.
- * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
- * After the 82547 the buffer was reduced to 40K.
- * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
- * Note: default does not leave enough room for Jumbo Frame >10k.
*/
switch (adapter->hw.mac.type) {
- case e1000_82547:
- case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
- if (adapter->max_frame_size > 8192)
- pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
- else
- pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
- adapter->tx_fifo_head = 0;
- adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
- adapter->tx_fifo_size =
- (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
- break;
/* Total Packet Buffer on these is 48K */
case e1000_82571:
case e1000_82572:
@@ -1390,7 +1217,6 @@ em_init_locked(struct adapter *adapter)
pba = E1000_PBA_8K;
break;
default:
- /* Devices before 82547 had a Packet Buffer of 64K. */
if (adapter->max_frame_size > 8192)
pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
else
@@ -1420,14 +1246,13 @@ em_init_locked(struct adapter *adapter)
}
/* Initialize the hardware */
- if (em_hardware_init(adapter)) {
- device_printf(dev, "Unable to initialize the hardware\n");
- return;
- }
+ em_reset(adapter);
em_update_link_status(adapter);
/* Setup VLAN support, basic and offload if available */
E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
+
+ /* Use real VLAN Filter support? */
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
/* Use real VLAN Filter support */
@@ -1442,14 +1267,10 @@ em_init_locked(struct adapter *adapter)
/* Set hardware offload abilities */
ifp->if_hwassist = 0;
- if (adapter->hw.mac.type >= e1000_82543) {
- if (ifp->if_capenable & IFCAP_TXCSUM)
- ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
-#if __FreeBSD_version >= 700000
- if (ifp->if_capenable & IFCAP_TSO4)
- ifp->if_hwassist |= CSUM_TSO;
-#endif
- }
+ if (ifp->if_capenable & IFCAP_TXCSUM)
+ ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
+ if (ifp->if_capenable & IFCAP_TSO4)
+ ifp->if_hwassist |= CSUM_TSO;
/* Configure for OS presence */
em_init_manageability(adapter);
@@ -1464,9 +1285,7 @@ em_init_locked(struct adapter *adapter)
/* Prepare receive descriptors and buffers */
if (em_setup_receive_structures(adapter)) {
device_printf(dev, "Could not setup receive structures\n");
- EM_TX_LOCK(adapter);
em_stop(adapter);
- EM_TX_UNLOCK(adapter);
return;
}
em_initialize_receive_unit(adapter);
@@ -1486,14 +1305,8 @@ em_init_locked(struct adapter *adapter)
tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
tmp |= E1000_CTRL_EXT_PBA_CLR;
E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
- /*
- ** Set the IVAR - interrupt vector routing.
- ** Each nibble represents a vector, high bit
- ** is enable, other 3 bits are the MSIX table
- ** entry, we map RXQ0 to 0, TXQ0 to 1, and
- ** Link (other) to 2, hence the magic number.
- */
- E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
+ /* Set the IVAR - interrupt vector routing. */
+ E1000_WRITE_REG(&adapter->hw, E1000_IVAR, adapter->ivars);
}
#ifdef DEVICE_POLLING
@@ -1529,13 +1342,15 @@ em_init(void *arg)
#ifdef DEVICE_POLLING
/*********************************************************************
*
- * Legacy polling routine
+ * Legacy polling routine: note this only works with single queue
*
*********************************************************************/
static int
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
struct adapter *adapter = ifp->if_softc;
+ struct tx_ring *txr = adapter->tx_rings;
+ struct rx_ring *rxr = adapter->rx_rings;
u32 reg_icr, rx_done = 0;
EM_CORE_LOCK(adapter);
@@ -1546,147 +1361,42 @@ em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
if (cmd == POLL_AND_CHECK_STATUS) {
reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
- /* Link status change */
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+ callout_stop(&adapter->timer);
adapter->hw.mac.get_link_status = 1;
em_update_link_status(adapter);
+ callout_reset(&adapter->timer, hz,
+ em_local_timer, adapter);
}
- if (reg_icr & E1000_ICR_RXO)
- adapter->rx_overruns++;
}
EM_CORE_UNLOCK(adapter);
- rx_done = em_rxeof(adapter, count);
+ EM_RX_LOCK(rxr);
+ rx_done = em_rxeof(rxr, count);
+ EM_RX_UNLOCK(rxr);
- EM_TX_LOCK(adapter);
- em_txeof(adapter);
-#if __FreeBSD_version >= 800000
- if (!drbr_empty(ifp, adapter->br))
- em_mq_start_locked(ifp, NULL);
+ EM_TX_LOCK(txr);
+ em_txeof(txr);
+#ifdef EM_MULTIQUEUE
+ if (!drbr_empty(ifp, txr->br))
+ em_mq_start_locked(ifp, txr, NULL);
#else
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- em_start_locked(ifp);
+ em_start_locked(ifp, txr);
#endif
- EM_TX_UNLOCK(adapter);
+ EM_TX_UNLOCK(txr);
+
return (rx_done);
}
#endif /* DEVICE_POLLING */
-#ifdef EM_LEGACY_IRQ
-/*********************************************************************
- *
- * Legacy Interrupt Service routine
- *
- *********************************************************************/
-
-static void
-em_intr(void *arg)
-{
- struct adapter *adapter = arg;
- struct ifnet *ifp = adapter->ifp;
- u32 reg_icr;
-
-
- if (ifp->if_capenable & IFCAP_POLLING)
- return;
-
- EM_CORE_LOCK(adapter);
- reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
- if (reg_icr & E1000_ICR_RXO)
- adapter->rx_overruns++;
- if ((reg_icr == 0xffffffff) || (reg_icr == 0)||
- (adapter->hw.mac.type >= e1000_82571 &&
- (reg_icr & E1000_ICR_INT_ASSERTED) == 0))
- goto out;
-
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- goto out;
-
- if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
- callout_stop(&adapter->timer);
- adapter->hw.mac.get_link_status = 1;
- em_update_link_status(adapter);
- /* Deal with TX cruft when link lost */
- em_tx_purge(adapter);
- callout_reset(&adapter->timer, hz,
- em_local_timer, adapter);
- goto out;
- }
-
- EM_TX_LOCK(adapter);
- em_txeof(adapter);
- em_rxeof(adapter, -1);
- em_txeof(adapter);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
- !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- em_start_locked(ifp);
- EM_TX_UNLOCK(adapter);
-
-out:
- EM_CORE_UNLOCK(adapter);
- return;
-}
-
-#else /* EM_FAST_IRQ, then fast interrupt routines only */
-
-static void
-em_handle_link(void *context, int pending)
-{
- struct adapter *adapter = context;
- struct ifnet *ifp = adapter->ifp;
-
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
- return;
-
- EM_CORE_LOCK(adapter);
- callout_stop(&adapter->timer);
- em_update_link_status(adapter);
- /* Deal with TX cruft when link lost */
- em_tx_purge(adapter);
- callout_reset(&adapter->timer, hz, em_local_timer, adapter);
- EM_CORE_UNLOCK(adapter);
-}
-
-
-/* Combined RX/TX handler, used by Legacy and MSI */
-static void
-em_handle_rxtx(void *context, int pending)
-{
- struct adapter *adapter = context;
- struct ifnet *ifp = adapter->ifp;
-
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- if (em_rxeof(adapter, adapter->rx_process_limit) != 0)
- taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
- EM_TX_LOCK(adapter);
- em_txeof(adapter);
-
-#if __FreeBSD_version >= 800000
- if (!drbr_empty(ifp, adapter->br))
- em_mq_start_locked(ifp, NULL);
-#else
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- em_start_locked(ifp);
-#endif
- EM_TX_UNLOCK(adapter);
- }
-
- em_enable_intr(adapter);
-}
/*********************************************************************
*
* Fast Legacy/MSI Combined Interrupt Service routine
*
*********************************************************************/
-#if __FreeBSD_version < 700000
-#define FILTER_STRAY
-#define FILTER_HANDLED
-static void
-#else
static int
-#endif
em_irq_fast(void *arg)
{
struct adapter *adapter = arg;
@@ -1713,13 +1423,8 @@ em_irq_fast(void *arg)
(reg_icr & E1000_ICR_INT_ASSERTED) == 0)
return FILTER_STRAY;
- /*
- * Mask interrupts until the taskqueue is finished running. This is
- * cheap, just assume that it is needed. This also works around the
- * MSI message reordering errata on certain systems.
- */
em_disable_intr(adapter);
- taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
+ taskqueue_enqueue(adapter->tq, &adapter->que_task);
/* Link status change */
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
@@ -1732,30 +1437,64 @@ em_irq_fast(void *arg)
return FILTER_HANDLED;
}
+/* Combined RX/TX handler, used by Legacy and MSI */
+static void
+em_handle_que(void *context, int pending)
+{
+ struct adapter *adapter = context;
+ struct ifnet *ifp = adapter->ifp;
+ struct tx_ring *txr = adapter->tx_rings;
+ struct rx_ring *rxr = adapter->rx_rings;
+ bool more_rx;
+
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ EM_RX_LOCK(rxr);
+ more_rx = em_rxeof(rxr, adapter->rx_process_limit);
+ EM_RX_UNLOCK(rxr);
+
+ EM_TX_LOCK(txr);
+ em_txeof(txr);
+#ifdef EM_MULTIQUEUE
+ if (!drbr_empty(ifp, txr->br))
+ em_mq_start_locked(ifp, txr, NULL);
+#else
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ em_start_locked(ifp, txr);
+#endif
+ EM_TX_UNLOCK(txr);
+ if (more_rx) {
+ taskqueue_enqueue(adapter->tq, &adapter->que_task);
+ return;
+ }
+ }
+
+ em_enable_intr(adapter);
+ return;
+}
+
+
/*********************************************************************
*
* MSIX Interrupt Service Routines
*
**********************************************************************/
-#define EM_MSIX_TX 0x00040000
-#define EM_MSIX_RX 0x00010000
-#define EM_MSIX_LINK 0x00100000
-
static void
em_msix_tx(void *arg)
{
- struct adapter *adapter = arg;
- struct ifnet *ifp = adapter->ifp;
-
- ++adapter->tx_irq;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- EM_TX_LOCK(adapter);
- em_txeof(adapter);
- EM_TX_UNLOCK(adapter);
- taskqueue_enqueue(adapter->tq, &adapter->tx_task);
- }
- /* Reenable this interrupt */
- E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_TX);
+ struct tx_ring *txr = arg;
+ struct adapter *adapter = txr->adapter;
+ bool more;
+
+ ++txr->tx_irq;
+ EM_TX_LOCK(txr);
+ more = em_txeof(txr);
+ EM_TX_UNLOCK(txr);
+ if (more)
+ taskqueue_enqueue(txr->tq, &txr->tx_task);
+ else
+ /* Reenable this interrupt */
+ E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
return;
}
@@ -1768,15 +1507,19 @@ em_msix_tx(void *arg)
static void
em_msix_rx(void *arg)
{
- struct adapter *adapter = arg;
- struct ifnet *ifp = adapter->ifp;
-
- ++adapter->rx_irq;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
- (em_rxeof(adapter, adapter->rx_process_limit) != 0))
- taskqueue_enqueue(adapter->tq, &adapter->rx_task);
- /* Reenable this interrupt */
- E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_RX);
+ struct rx_ring *rxr = arg;
+ struct adapter *adapter = rxr->adapter;
+ bool more;
+
+ EM_RX_LOCK(rxr);
+ ++rxr->rx_irq;
+ more = em_rxeof(rxr, adapter->rx_process_limit);
+ EM_RX_UNLOCK(rxr);
+ if (more)
+ taskqueue_enqueue(rxr->tq, &rxr->rx_task);
+ else
+ /* Reenable this interrupt */
+ E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims);
return;
}
@@ -1785,7 +1528,6 @@ em_msix_rx(void *arg)
* MSIX Link Fast Interrupt Service routine
*
**********************************************************************/
-
static void
em_msix_link(void *arg)
{
@@ -1798,45 +1540,70 @@ em_msix_link(void *arg)
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
adapter->hw.mac.get_link_status = 1;
taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
- }
- E1000_WRITE_REG(&adapter->hw, E1000_IMS,
- EM_MSIX_LINK | E1000_IMS_LSC);
+ } else
+ E1000_WRITE_REG(&adapter->hw, E1000_IMS,
+ EM_MSIX_LINK | E1000_IMS_LSC);
return;
}
static void
em_handle_rx(void *context, int pending)
{
- struct adapter *adapter = context;
- struct ifnet *ifp = adapter->ifp;
-
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
- (em_rxeof(adapter, adapter->rx_process_limit) != 0))
- taskqueue_enqueue(adapter->tq, &adapter->rx_task);
-
+ struct rx_ring *rxr = context;
+ struct adapter *adapter = rxr->adapter;
+ bool more;
+
+ EM_RX_LOCK(rxr);
+ more = em_rxeof(rxr, adapter->rx_process_limit);
+ EM_RX_UNLOCK(rxr);
+ if (more)
+ taskqueue_enqueue(rxr->tq, &rxr->rx_task);
+ else
+ /* Reenable this interrupt */
+ E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims);
}
static void
em_handle_tx(void *context, int pending)
{
- struct adapter *adapter = context;
+ struct tx_ring *txr = context;
+ struct adapter *adapter = txr->adapter;
struct ifnet *ifp = adapter->ifp;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- if (!EM_TX_TRYLOCK(adapter))
- return;
- em_txeof(adapter);
-#if __FreeBSD_version >= 800000
- if (!drbr_empty(ifp, adapter->br))
- em_mq_start_locked(ifp, NULL);
+ if (!EM_TX_TRYLOCK(txr))
+ return;
+
+ em_txeof(txr);
+
+#ifdef EM_MULTIQUEUE
+ if (!drbr_empty(ifp, txr->br))
+ em_mq_start_locked(ifp, txr, NULL);
#else
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- em_start_locked(ifp);
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ em_start_locked(ifp, txr);
#endif
- EM_TX_UNLOCK(adapter);
- }
+ E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
+ EM_TX_UNLOCK(txr);
}
-#endif /* EM_FAST_IRQ */
+
+static void
+em_handle_link(void *context, int pending)
+{
+ struct adapter *adapter = context;
+ struct ifnet *ifp = adapter->ifp;
+
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ return;
+
+ EM_CORE_LOCK(adapter);
+ callout_stop(&adapter->timer);
+ em_update_link_status(adapter);
+ callout_reset(&adapter->timer, hz, em_local_timer, adapter);
+ E1000_WRITE_REG(&adapter->hw, E1000_IMS,
+ EM_MSIX_LINK | E1000_IMS_LSC);
+ EM_CORE_UNLOCK(adapter);
+}
+
/*********************************************************************
*
@@ -1869,8 +1636,6 @@ em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
(adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
- if (adapter->hw.mac.type == e1000_82545)
- fiber_type = IFM_1000_LX;
ifmr->ifm_active |= fiber_type | IFM_FDX;
} else {
switch (adapter->link_speed) {
@@ -1962,8 +1727,9 @@ em_media_change(struct ifnet *ifp)
**********************************************************************/
static int
-em_xmit(struct adapter *adapter, struct mbuf **m_headp)
+em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
+ struct adapter *adapter = txr->adapter;
bus_dma_segment_t segs[EM_MAX_SCATTER];
bus_dmamap_t map;
struct em_buffer *tx_buffer, *tx_buffer_mapped;
@@ -1972,31 +1738,10 @@ em_xmit(struct adapter *adapter, struct mbuf **m_headp)
u32 txd_upper, txd_lower, txd_used, txd_saved;
int nsegs, i, j, first, last = 0;
int error, do_tso, tso_desc = 0;
-#if __FreeBSD_version < 700000
- struct m_tag *mtag;
-#endif
+
m_head = *m_headp;
txd_upper = txd_lower = txd_used = txd_saved = 0;
-
-#if __FreeBSD_version >= 700000
do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
-#else
- do_tso = 0;
-#endif
-
- /*
- * Force a cleanup if number of TX descriptors
- * available hits the threshold
- */
- if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
- em_txeof(adapter);
- /* Now do we at least have a minimal? */
- if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
- adapter->no_tx_desc_avail1++;
- return (ENOBUFS);
- }
- }
-
/*
* TSO workaround:
@@ -2018,12 +1763,12 @@ em_xmit(struct adapter *adapter, struct mbuf **m_headp)
* of the EOP which is the only one that
* now gets a DONE bit writeback.
*/
- first = adapter->next_avail_tx_desc;
- tx_buffer = &adapter->tx_buffer_area[first];
+ first = txr->next_avail_desc;
+ tx_buffer = &txr->tx_buffers[first];
tx_buffer_mapped = tx_buffer;
map = tx_buffer->map;
- error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
+ error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
*m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
/*
@@ -2048,7 +1793,7 @@ em_xmit(struct adapter *adapter, struct mbuf **m_headp)
*m_headp = m;
/* Try it again */
- error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
+ error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
*m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
if (error) {
@@ -2068,15 +1813,15 @@ em_xmit(struct adapter *adapter, struct mbuf **m_headp)
* it follows a TSO burst, then we need to add a
* sentinel descriptor to prevent premature writeback.
*/
- if ((do_tso == 0) && (adapter->tx_tso == TRUE)) {
+ if ((do_tso == 0) && (txr->tx_tso == TRUE)) {
if (nsegs == 1)
tso_desc = TRUE;
- adapter->tx_tso = FALSE;
+ txr->tx_tso = FALSE;
}
- if (nsegs > (adapter->num_tx_desc_avail - 2)) {
- adapter->no_tx_desc_avail2++;
- bus_dmamap_unload(adapter->txtag, map);
+ if (nsegs > (txr->tx_avail - 2)) {
+ txr->no_desc_avail++;
+ bus_dmamap_unload(txr->txtag, map);
return (ENOBUFS);
}
m_head = *m_headp;
@@ -2084,7 +1829,7 @@ em_xmit(struct adapter *adapter, struct mbuf **m_headp)
/* Do hardware assists */
#if __FreeBSD_version >= 700000
if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
- error = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower);
+ error = em_tso_setup(txr, m_head, &txd_upper, &txd_lower);
if (error != TRUE)
return (ENXIO); /* something foobar */
/* we need to make a final sentinel transmit desc */
@@ -2092,123 +1837,70 @@ em_xmit(struct adapter *adapter, struct mbuf **m_headp)
} else
#endif
if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
- em_transmit_checksum_setup(adapter, m_head,
+ em_transmit_checksum_setup(txr, m_head,
&txd_upper, &txd_lower);
- i = adapter->next_avail_tx_desc;
- if (adapter->pcix_82544)
- txd_saved = i;
+ i = txr->next_avail_desc;
/* Set up our transmit descriptors */
for (j = 0; j < nsegs; j++) {
bus_size_t seg_len;
bus_addr_t seg_addr;
- /* If adapter is 82544 and on PCIX bus */
- if(adapter->pcix_82544) {
- DESC_ARRAY desc_array;
- u32 array_elements, counter;
- /*
- * Check the Address and Length combination and
- * split the data accordingly
- */
- array_elements = em_fill_descriptors(segs[j].ds_addr,
- segs[j].ds_len, &desc_array);
- for (counter = 0; counter < array_elements; counter++) {
- if (txd_used == adapter->num_tx_desc_avail) {
- adapter->next_avail_tx_desc = txd_saved;
- adapter->no_tx_desc_avail2++;
- bus_dmamap_unload(adapter->txtag, map);
- return (ENOBUFS);
- }
- tx_buffer = &adapter->tx_buffer_area[i];
- ctxd = &adapter->tx_desc_base[i];
- ctxd->buffer_addr = htole64(
- desc_array.descriptor[counter].address);
- ctxd->lower.data = htole32(
- (adapter->txd_cmd | txd_lower | (u16)
- desc_array.descriptor[counter].length));
- ctxd->upper.data =
- htole32((txd_upper));
- last = i;
- if (++i == adapter->num_tx_desc)
- i = 0;
- tx_buffer->m_head = NULL;
- tx_buffer->next_eop = -1;
- txd_used++;
- }
+
+ tx_buffer = &txr->tx_buffers[i];
+ ctxd = &txr->tx_base[i];
+ seg_addr = segs[j].ds_addr;
+ seg_len = segs[j].ds_len;
+ /*
+ ** TSO Workaround:
+ ** If this is the last descriptor, we want to
+ ** split it so we have a small final sentinel
+ */
+ if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
+ seg_len -= 4;
+ ctxd->buffer_addr = htole64(seg_addr);
+ ctxd->lower.data = htole32(
+ adapter->txd_cmd | txd_lower | seg_len);
+ ctxd->upper.data =
+ htole32(txd_upper);
+ if (++i == adapter->num_tx_desc)
+ i = 0;
+ /* Now make the sentinel */
+ ++txd_used; /* using an extra txd */
+ ctxd = &txr->tx_base[i];
+ tx_buffer = &txr->tx_buffers[i];
+ ctxd->buffer_addr =
+ htole64(seg_addr + seg_len);
+ ctxd->lower.data = htole32(
+ adapter->txd_cmd | txd_lower | 4);
+ ctxd->upper.data =
+ htole32(txd_upper);
+ last = i;
+ if (++i == adapter->num_tx_desc)
+ i = 0;
} else {
- tx_buffer = &adapter->tx_buffer_area[i];
- ctxd = &adapter->tx_desc_base[i];
- seg_addr = segs[j].ds_addr;
- seg_len = segs[j].ds_len;
- /*
- ** TSO Workaround:
- ** If this is the last descriptor, we want to
- ** split it so we have a small final sentinel
- */
- if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
- seg_len -= 4;
- ctxd->buffer_addr = htole64(seg_addr);
- ctxd->lower.data = htole32(
- adapter->txd_cmd | txd_lower | seg_len);
- ctxd->upper.data =
- htole32(txd_upper);
- if (++i == adapter->num_tx_desc)
- i = 0;
- /* Now make the sentinel */
- ++txd_used; /* using an extra txd */
- ctxd = &adapter->tx_desc_base[i];
- tx_buffer = &adapter->tx_buffer_area[i];
- ctxd->buffer_addr =
- htole64(seg_addr + seg_len);
- ctxd->lower.data = htole32(
- adapter->txd_cmd | txd_lower | 4);
- ctxd->upper.data =
- htole32(txd_upper);
- last = i;
- if (++i == adapter->num_tx_desc)
- i = 0;
- } else {
- ctxd->buffer_addr = htole64(seg_addr);
- ctxd->lower.data = htole32(
- adapter->txd_cmd | txd_lower | seg_len);
- ctxd->upper.data =
- htole32(txd_upper);
- last = i;
- if (++i == adapter->num_tx_desc)
- i = 0;
- }
- tx_buffer->m_head = NULL;
- tx_buffer->next_eop = -1;
+ ctxd->buffer_addr = htole64(seg_addr);
+ ctxd->lower.data = htole32(
+ adapter->txd_cmd | txd_lower | seg_len);
+ ctxd->upper.data =
+ htole32(txd_upper);
+ last = i;
+ if (++i == adapter->num_tx_desc)
+ i = 0;
}
+ tx_buffer->m_head = NULL;
+ tx_buffer->next_eop = -1;
}
- adapter->next_avail_tx_desc = i;
- if (adapter->pcix_82544)
- adapter->num_tx_desc_avail -= txd_used;
- else {
- adapter->num_tx_desc_avail -= nsegs;
- if (tso_desc) /* TSO used an extra for sentinel */
- adapter->num_tx_desc_avail -= txd_used;
- }
+ txr->next_avail_desc = i;
+ txr->tx_avail -= nsegs;
+ if (tso_desc) /* TSO used an extra for sentinel */
+ txr->tx_avail -= txd_used;
- /*
- ** Handle VLAN tag, this is the
- ** biggest difference between
- ** 6.x and 7
- */
-#if __FreeBSD_version < 700000
- /* Find out if we are in vlan mode. */
- mtag = VLAN_OUTPUT_TAG(ifp, m_head);
- if (mtag != NULL) {
- ctxd->upper.fields.special =
- htole16(VLAN_TAG_VALUE(mtag));
-#else /* FreeBSD 7 */
if (m_head->m_flags & M_VLANTAG) {
/* Set the vlan id. */
ctxd->upper.fields.special =
htole16(m_head->m_pkthdr.ether_vtag);
-#endif
/* Tell hardware to add tag */
ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
}
@@ -2216,7 +1908,7 @@ em_xmit(struct adapter *adapter, struct mbuf **m_headp)
tx_buffer->m_head = m_head;
tx_buffer_mapped->map = tx_buffer->map;
tx_buffer->map = map;
- bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
/*
* Last Descriptor of Packet
@@ -2229,146 +1921,20 @@ em_xmit(struct adapter *adapter, struct mbuf **m_headp)
* Keep track in the first buffer which
* descriptor will be written back
*/
- tx_buffer = &adapter->tx_buffer_area[first];
+ tx_buffer = &txr->tx_buffers[first];
tx_buffer->next_eop = last;
- adapter->watchdog_time = ticks;
/*
* Advance the Transmit Descriptor Tail (TDT), this tells the E1000
* that this frame is available to transmit.
*/
- bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- if (adapter->hw.mac.type == e1000_82547 &&
- adapter->link_duplex == HALF_DUPLEX)
- em_82547_move_tail(adapter);
- else {
- E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
- if (adapter->hw.mac.type == e1000_82547)
- em_82547_update_fifo_head(adapter,
- m_head->m_pkthdr.len);
- }
+ E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i);
return (0);
}
-/*********************************************************************
- *
- * 82547 workaround to avoid controller hang in half-duplex environment.
- * The workaround is to avoid queuing a large packet that would span
- * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
- * in this case. We do that only when FIFO is quiescent.
- *
- **********************************************************************/
-static void
-em_82547_move_tail(void *arg)
-{
- struct adapter *adapter = arg;
- struct e1000_tx_desc *tx_desc;
- u16 hw_tdt, sw_tdt, length = 0;
- bool eop = 0;
-
- EM_TX_LOCK_ASSERT(adapter);
-
- hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
- sw_tdt = adapter->next_avail_tx_desc;
-
- while (hw_tdt != sw_tdt) {
- tx_desc = &adapter->tx_desc_base[hw_tdt];
- length += tx_desc->lower.flags.length;
- eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
- if (++hw_tdt == adapter->num_tx_desc)
- hw_tdt = 0;
-
- if (eop) {
- if (em_82547_fifo_workaround(adapter, length)) {
- adapter->tx_fifo_wrk_cnt++;
- callout_reset(&adapter->tx_fifo_timer, 1,
- em_82547_move_tail, adapter);
- break;
- }
- E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
- em_82547_update_fifo_head(adapter, length);
- length = 0;
- }
- }
-}
-
-static int
-em_82547_fifo_workaround(struct adapter *adapter, int len)
-{
- int fifo_space, fifo_pkt_len;
-
- fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
-
- if (adapter->link_duplex == HALF_DUPLEX) {
- fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
-
- if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
- if (em_82547_tx_fifo_reset(adapter))
- return (0);
- else
- return (1);
- }
- }
-
- return (0);
-}
-
-static void
-em_82547_update_fifo_head(struct adapter *adapter, int len)
-{
- int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
-
- /* tx_fifo_head is always 16 byte aligned */
- adapter->tx_fifo_head += fifo_pkt_len;
- if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
- adapter->tx_fifo_head -= adapter->tx_fifo_size;
- }
-}
-
-
-static int
-em_82547_tx_fifo_reset(struct adapter *adapter)
-{
- u32 tctl;
-
- if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
- E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
- (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
- E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
- (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
- E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
- (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
- /* Disable TX unit */
- tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
- E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
- tctl & ~E1000_TCTL_EN);
-
- /* Reset FIFO pointers */
- E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
- adapter->tx_head_addr);
- E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
- adapter->tx_head_addr);
- E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
- adapter->tx_head_addr);
- E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
- adapter->tx_head_addr);
-
- /* Re-enable TX unit */
- E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
- E1000_WRITE_FLUSH(&adapter->hw);
-
- adapter->tx_fifo_head = 0;
- adapter->tx_fifo_reset_cnt++;
-
- return (TRUE);
- }
- else {
- return (FALSE);
- }
-}
-
static void
em_set_promisc(struct adapter *adapter)
{
@@ -2492,13 +2058,10 @@ em_local_timer(void *arg)
{
struct adapter *adapter = arg;
struct ifnet *ifp = adapter->ifp;
+ struct tx_ring *txr = adapter->tx_rings;
EM_CORE_LOCK_ASSERT(adapter);
-#ifndef DEVICE_POLLING
- taskqueue_enqueue(adapter->tq,
- &adapter->rxtx_task);
-#endif
em_update_link_status(adapter);
em_update_stats_counters(adapter);
@@ -2509,26 +2072,31 @@ em_local_timer(void *arg)
if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
em_print_hw_stats(adapter);
- em_smartspeed(adapter);
-
/*
- * We check the watchdog: the time since
- * the last TX descriptor was cleaned.
- * This implies a functional TX engine.
- */
- if ((adapter->watchdog_check == TRUE) &&
- (ticks - adapter->watchdog_time > EM_WATCHDOG))
- goto hung;
+ ** Check for time since any descriptor was cleaned
+ */
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ EM_TX_LOCK(txr);
+ if (txr->watchdog_check == FALSE) {
+ EM_TX_UNLOCK(txr);
+ continue;
+ }
+ if ((ticks - txr->watchdog_time) > EM_WATCHDOG)
+ goto hung;
+ EM_TX_UNLOCK(txr);
+ }
callout_reset(&adapter->timer, hz, em_local_timer, adapter);
return;
hung:
device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
- adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
adapter->watchdog_events++;
+ EM_TX_UNLOCK(txr);
em_init_locked(adapter);
}
+
static void
em_update_link_status(struct adapter *adapter)
{
@@ -2592,7 +2160,8 @@ em_update_link_status(struct adapter *adapter)
device_printf(dev, "Link is Down\n");
adapter->link_active = 0;
/* Link down, disable watchdog */
- adapter->watchdog_check = FALSE;
+ // JFV change later
+ //adapter->watchdog_check = FALSE;
if_link_state_change(ifp, LINK_STATE_DOWN);
}
}
@@ -2611,22 +2180,30 @@ em_stop(void *arg)
{
struct adapter *adapter = arg;
struct ifnet *ifp = adapter->ifp;
+ struct tx_ring *txr = adapter->tx_rings;
EM_CORE_LOCK_ASSERT(adapter);
- EM_TX_LOCK_ASSERT(adapter);
INIT_DEBUGOUT("em_stop: begin");
em_disable_intr(adapter);
callout_stop(&adapter->timer);
- callout_stop(&adapter->tx_fifo_timer);
/* Tell the stack that the interface is no longer active */
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ /* Unarm watchdog timer. */
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ EM_TX_LOCK(txr);
+ txr->watchdog_check = FALSE;
+ EM_TX_UNLOCK(txr);
+ }
+
e1000_reset_hw(&adapter->hw);
- if (adapter->hw.mac.type >= e1000_82544)
- E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
+ E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
+
+ e1000_led_off(&adapter->hw);
+ e1000_cleanup_led(&adapter->hw);
}
@@ -2672,7 +2249,7 @@ static int
em_allocate_pci_resources(struct adapter *adapter)
{
device_t dev = adapter->dev;
- int val, rid, error = E1000_SUCCESS;
+ int rid;
rid = PCIR_BAR(0);
adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
@@ -2687,58 +2264,17 @@ em_allocate_pci_resources(struct adapter *adapter)
rman_get_bushandle(adapter->memory);
adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
- /* Only older adapters use IO mapping */
- if ((adapter->hw.mac.type > e1000_82543) &&
- (adapter->hw.mac.type < e1000_82571)) {
- /* Figure our where our IO BAR is ? */
- for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
- val = pci_read_config(dev, rid, 4);
- if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
- adapter->io_rid = rid;
- break;
- }
- rid += 4;
- /* check for 64bit BAR */
- if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
- rid += 4;
- }
- if (rid >= PCIR_CIS) {
- device_printf(dev, "Unable to locate IO BAR\n");
- return (ENXIO);
- }
- adapter->ioport = bus_alloc_resource_any(dev,
- SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
- if (adapter->ioport == NULL) {
- device_printf(dev, "Unable to allocate bus resource: "
- "ioport\n");
- return (ENXIO);
- }
- adapter->hw.io_base = 0;
- adapter->osdep.io_bus_space_tag =
- rman_get_bustag(adapter->ioport);
- adapter->osdep.io_bus_space_handle =
- rman_get_bushandle(adapter->ioport);
- }
-
- /*
- ** Init the resource arrays
- ** used by MSIX setup
- */
- for (int i = 0; i < 3; i++) {
- adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
- adapter->tag[i] = NULL;
- adapter->res[i] = NULL;
- }
+ /* Default to a single queue */
+ adapter->num_queues = 1;
/*
* Setup MSI/X or MSI if PCI Express
*/
- if (em_enable_msi)
- adapter->msi = em_setup_msix(adapter);
+ adapter->msix = em_setup_msix(adapter);
adapter->hw.back = &adapter->osdep;
- return (error);
+ return (0);
}
/*********************************************************************
@@ -2750,63 +2286,40 @@ int
em_allocate_legacy(struct adapter *adapter)
{
device_t dev = adapter->dev;
- int error;
+ int error, rid = 0;
/* Manually turn off all interrupts */
E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
- /* Legacy RID is 0 */
- if (adapter->msi == 0)
- adapter->rid[0] = 0;
-
+ if (adapter->msix == 1) /* using MSI */
+ rid = 1;
/* We allocate a single interrupt resource */
- adapter->res[0] = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
- if (adapter->res[0] == NULL) {
+ adapter->res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (adapter->res == NULL) {
device_printf(dev, "Unable to allocate bus resource: "
"interrupt\n");
return (ENXIO);
}
-#ifdef EM_LEGACY_IRQ
- /* We do Legacy setup */
- if ((error = bus_setup_intr(dev, adapter->res[0],
-#if __FreeBSD_version > 700000
- INTR_TYPE_NET | INTR_MPSAFE, NULL, em_intr, adapter,
-#else /* 6.X */
- INTR_TYPE_NET | INTR_MPSAFE, em_intr, adapter,
-#endif
- &adapter->tag[0])) != 0) {
- device_printf(dev, "Failed to register interrupt handler");
- return (error);
- }
-
-#else /* FAST_IRQ */
/*
- * Try allocating a fast interrupt and the associated deferred
- * processing contexts.
+ * Allocate a fast interrupt and the associated
+ * deferred processing contexts.
*/
- TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
+ TASK_INIT(&adapter->que_task, 0, em_handle_que, adapter);
TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
taskqueue_thread_enqueue, &adapter->tq);
taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
device_get_nameunit(adapter->dev));
-#if __FreeBSD_version < 700000
- if ((error = bus_setup_intr(dev, adapter->res[0],
- INTR_TYPE_NET | INTR_FAST, em_irq_fast, adapter,
-#else
- if ((error = bus_setup_intr(dev, adapter->res[0],
- INTR_TYPE_NET, em_irq_fast, NULL, adapter,
-#endif
- &adapter->tag[0])) != 0) {
+ if ((error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET,
+ em_irq_fast, NULL, adapter, &adapter->tag)) != 0) {
device_printf(dev, "Failed to register fast interrupt "
"handler: %d\n", error);
taskqueue_free(adapter->tq);
adapter->tq = NULL;
return (error);
}
-#endif /* EM_LEGACY_IRQ */
return (0);
}
@@ -2821,80 +2334,109 @@ em_allocate_legacy(struct adapter *adapter)
int
em_allocate_msix(struct adapter *adapter)
{
- device_t dev = adapter->dev;
- int error;
+ device_t dev = adapter->dev;
+ struct tx_ring *txr = adapter->tx_rings;
+ struct rx_ring *rxr = adapter->rx_rings;
+ int error, rid, vector = 0;
+
/* Make sure all interrupts are disabled */
E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
- /* First get the resources */
- for (int i = 0; i < adapter->msi; i++) {
- adapter->res[i] = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &adapter->rid[i], RF_ACTIVE);
- if (adapter->res[i] == NULL) {
+ /* First set up ring resources */
+ for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
+
+ /* RX ring */
+ rid = vector + 1;
+
+ rxr->res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_ACTIVE);
+ if (rxr->res == NULL) {
device_printf(dev,
"Unable to allocate bus resource: "
- "MSIX Interrupt\n");
+ "RX MSIX Interrupt %d\n", i);
return (ENXIO);
}
+ if ((error = bus_setup_intr(dev, rxr->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx,
+ rxr, &rxr->tag)) != 0) {
+ device_printf(dev, "Failed to register RX handler");
+ return (error);
+ }
+ rxr->msix = vector++; /* NOTE increment vector for TX */
+ TASK_INIT(&rxr->rx_task, 0, em_handle_rx, rxr);
+ rxr->tq = taskqueue_create_fast("em_rxq", M_NOWAIT,
+ taskqueue_thread_enqueue, &rxr->tq);
+ taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
+ device_get_nameunit(adapter->dev));
+ /*
+ ** Set the bit to enable interrupt
+ ** in E1000_IMS -- bits 20 and 21
+ ** are for RX0 and RX1, note this has
+ ** NOTHING to do with the MSIX vector
+ */
+ rxr->ims = 1 << (20 + i);
+ adapter->ivars |= (8 | rxr->msix) << (i * 4);
+
+ /* TX ring */
+ rid = vector + 1;
+ txr->res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_ACTIVE);
+ if (txr->res == NULL) {
+ device_printf(dev,
+ "Unable to allocate bus resource: "
+ "TX MSIX Interrupt %d\n", i);
+ return (ENXIO);
+ }
+ if ((error = bus_setup_intr(dev, txr->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx,
+ txr, &txr->tag)) != 0) {
+ device_printf(dev, "Failed to register TX handler");
+ return (error);
+ }
+ txr->msix = vector++; /* Increment vector for next pass */
+ TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr);
+ txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT,
+ taskqueue_thread_enqueue, &txr->tq);
+ taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
+ device_get_nameunit(adapter->dev));
+ /*
+ ** Set the bit to enable interrupt
+ ** in E1000_IMS -- bits 22 and 23
+ ** are for TX0 and TX1, note this has
+ ** NOTHING to do with the MSIX vector
+ */
+ txr->ims = 1 << (22 + i);
+ adapter->ivars |= (8 | txr->msix) << (8 + (i * 4));
}
- /*
- * Now allocate deferred processing contexts.
- */
- TASK_INIT(&adapter->rx_task, 0, em_handle_rx, adapter);
- TASK_INIT(&adapter->tx_task, 0, em_handle_tx, adapter);
- /*
- * Handle compatibility for msi case for deferral due to
- * trylock failure
- */
- TASK_INIT(&adapter->rxtx_task, 0, em_handle_tx, adapter);
+ /* Link interrupt */
+ ++rid;
+ adapter->res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_ACTIVE);
+ if (!adapter->res) {
+ device_printf(dev,"Unable to allocate "
+ "bus resource: Link interrupt [%d]\n", rid);
+ return (ENXIO);
+ }
+ /* Set the link handler function */
+ error = bus_setup_intr(dev, adapter->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ em_msix_link, adapter, &adapter->tag);
+ if (error) {
+ adapter->res = NULL;
+ device_printf(dev, "Failed to register LINK handler");
+ return (error);
+ }
+ adapter->linkvec = vector;
+ adapter->ivars |= (8 | vector) << 16;
+ adapter->ivars |= 0x80000000;
TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
- adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
+ adapter->tq = taskqueue_create_fast("em_link", M_NOWAIT,
taskqueue_thread_enqueue, &adapter->tq);
- taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
+ taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
device_get_nameunit(adapter->dev));
- /*
- * And setup the interrupt handlers
- */
-
- /* First slot to RX */
- if ((error = bus_setup_intr(dev, adapter->res[0],
-#if __FreeBSD_version > 700000
- INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx, adapter,
-#else /* 6.X */
- INTR_TYPE_NET | INTR_MPSAFE, em_msix_rx, adapter,
-#endif
- &adapter->tag[0])) != 0) {
- device_printf(dev, "Failed to register RX handler");
- return (error);
- }
-
- /* Next TX */
- if ((error = bus_setup_intr(dev, adapter->res[1],
-#if __FreeBSD_version > 700000
- INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx, adapter,
-#else /* 6.X */
- INTR_TYPE_NET | INTR_MPSAFE, em_msix_tx, adapter,
-#endif
- &adapter->tag[1])) != 0) {
- device_printf(dev, "Failed to register TX handler");
- return (error);
- }
-
- /* And Link */
- if ((error = bus_setup_intr(dev, adapter->res[2],
-#if __FreeBSD_version > 700000
- INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_link, adapter,
-#else /* 6.X */
- INTR_TYPE_NET | INTR_MPSAFE, em_msix_link, adapter,
-#endif
- &adapter->tag[2])) != 0) {
- device_printf(dev, "Failed to register TX handler");
- return (error);
- }
-
return (0);
}
@@ -2902,36 +2444,56 @@ em_allocate_msix(struct adapter *adapter)
static void
em_free_pci_resources(struct adapter *adapter)
{
- device_t dev = adapter->dev;
+ device_t dev = adapter->dev;
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+ int rid;
- /* Make sure the for loop below runs once */
- if (adapter->msi == 0)
- adapter->msi = 1;
/*
- * First release all the interrupt resources:
- * notice that since these are just kept
- * in an array we can do the same logic
- * whether its MSIX or just legacy.
- */
- for (int i = 0; i < adapter->msi; i++) {
- if (adapter->tag[i] != NULL) {
- bus_teardown_intr(dev, adapter->res[i],
- adapter->tag[i]);
- adapter->tag[i] = NULL;
+ ** Release all the queue interrupt resources:
+ */
+ for (int i = 0; i < adapter->num_queues; i++) {
+ txr = &adapter->tx_rings[i];
+ rxr = &adapter->rx_rings[i];
+ rid = txr->msix +1;
+ if (txr->tag != NULL) {
+ bus_teardown_intr(dev, txr->res, txr->tag);
+ txr->tag = NULL;
}
- if (adapter->res[i] != NULL) {
+ if (txr->res != NULL)
bus_release_resource(dev, SYS_RES_IRQ,
- adapter->rid[i], adapter->res[i]);
+ rid, txr->res);
+ rid = rxr->msix +1;
+ if (rxr->tag != NULL) {
+ bus_teardown_intr(dev, rxr->res, rxr->tag);
+ rxr->tag = NULL;
}
+ if (rxr->res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ,
+ rid, rxr->res);
}
- if (adapter->msi)
+ if (adapter->linkvec) /* we are doing MSIX */
+ rid = adapter->linkvec + 1;
+ else
+ (adapter->msix != 0) ? (rid = 1):(rid = 0);
+
+ if (adapter->tag != NULL) {
+ bus_teardown_intr(dev, adapter->res, adapter->tag);
+ adapter->tag = NULL;
+ }
+
+ if (adapter->res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
+
+
+ if (adapter->msix)
pci_release_msi(dev);
- if (adapter->msix != NULL)
+ if (adapter->msix_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
- PCIR_BAR(EM_MSIX_BAR), adapter->msix);
+ PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem);
if (adapter->memory != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
@@ -2940,10 +2502,6 @@ em_free_pci_resources(struct adapter *adapter)
if (adapter->flash != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
EM_FLASH, adapter->flash);
-
- if (adapter->ioport != NULL)
- bus_release_resource(dev, SYS_RES_IOPORT,
- adapter->io_rid, adapter->ioport);
}
/*
@@ -2955,73 +2513,81 @@ em_setup_msix(struct adapter *adapter)
device_t dev = adapter->dev;
int val = 0;
- if (adapter->hw.mac.type < e1000_82571)
- return (0);
/* Setup MSI/X for Hartwell */
- if (adapter->hw.mac.type == e1000_82574) {
+ if ((adapter->hw.mac.type == e1000_82574) &&
+ (em_enable_msix == TRUE)) {
/* Map the MSIX BAR */
int rid = PCIR_BAR(EM_MSIX_BAR);
- adapter->msix = bus_alloc_resource_any(dev,
+ adapter->msix_mem = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
- if (!adapter->msix) {
+ if (!adapter->msix_mem) {
/* May not be enabled */
device_printf(adapter->dev,
"Unable to map MSIX table \n");
goto msi;
}
val = pci_msix_count(dev);
- /*
- ** 82574 can be configured for 5 but
- ** we limit use to 3.
- */
- if (val > 3) val = 3;
- if ((val) && pci_alloc_msix(dev, &val) == 0) {
- device_printf(adapter->dev,"Using MSIX interrupts\n");
- return (val);
+ if (val != 5) {
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem);
+ adapter->msix_mem = NULL;
+ device_printf(adapter->dev,
+ "MSIX vectors wrong, using MSI \n");
+ goto msi;
}
+ if (em_msix_queues == 2) {
+ val = 5;
+ adapter->num_queues = 2;
+ } else {
+ val = 3;
+ adapter->num_queues = 1;
+ }
+ if (pci_alloc_msix(dev, &val) == 0) {
+ device_printf(adapter->dev,
+ "Using MSIX interrupts "
+ "with %d vectors\n", val);
+ }
+
+ return (val);
}
msi:
val = pci_msi_count(dev);
if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
- adapter->msi = 1;
+ adapter->msix = 1;
device_printf(adapter->dev,"Using MSI interrupt\n");
return (val);
}
+ /* Should only happen due to manual intervention */
+ device_printf(adapter->dev,"Setup MSIX failure\n");
return (0);
}
+
/*********************************************************************
*
* Initialize the hardware to a configuration
* as specified by the adapter structure.
*
**********************************************************************/
-static int
-em_hardware_init(struct adapter *adapter)
+static void
+em_reset(struct adapter *adapter)
{
- device_t dev = adapter->dev;
- u16 rx_buffer_size;
-
- INIT_DEBUGOUT("em_hardware_init: begin");
-
- /* Issue a global reset */
- e1000_reset_hw(&adapter->hw);
+ device_t dev = adapter->dev;
+ struct e1000_hw *hw = &adapter->hw;
+ u16 rx_buffer_size;
- /* When hardware is reset, fifo_head is also reset */
- adapter->tx_fifo_head = 0;
+ INIT_DEBUGOUT("em_reset: begin");
/* Set up smart power down as default off on newer adapters. */
- if (!em_smart_pwr_down && (adapter->hw.mac.type == e1000_82571 ||
- adapter->hw.mac.type == e1000_82572)) {
+ if (!em_smart_pwr_down && (hw->mac.type == e1000_82571 ||
+ hw->mac.type == e1000_82572)) {
u16 phy_tmp = 0;
/* Speed up time to link by disabling smart power down. */
- e1000_read_phy_reg(&adapter->hw,
- IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
+ e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
phy_tmp &= ~IGP02E1000_PM_SPD;
- e1000_write_phy_reg(&adapter->hw,
- IGP02E1000_PHY_POWER_MGMT, phy_tmp);
+ e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp);
}
/*
@@ -3038,37 +2604,42 @@ em_hardware_init(struct adapter *adapter)
* by 1500.
* - The pause time is fairly large at 1000 x 512ns = 512 usec.
*/
- rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
- 0xffff) << 10 );
+ rx_buffer_size = ((E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10 );
- adapter->hw.fc.high_water = rx_buffer_size -
+ hw->fc.high_water = rx_buffer_size -
roundup2(adapter->max_frame_size, 1024);
- adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
+ hw->fc.low_water = hw->fc.high_water - 1500;
- if (adapter->hw.mac.type == e1000_80003es2lan)
- adapter->hw.fc.pause_time = 0xFFFF;
+ if (hw->mac.type == e1000_80003es2lan)
+ hw->fc.pause_time = 0xFFFF;
else
- adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
- adapter->hw.fc.send_xon = TRUE;
+ hw->fc.pause_time = EM_FC_PAUSE_TIME;
+
+ hw->fc.send_xon = TRUE;
/* Set Flow control, use the tunable location if sane */
if ((em_fc_setting >= 0) || (em_fc_setting < 4))
- adapter->hw.fc.requested_mode = em_fc_setting;
- else
- adapter->hw.fc.requested_mode = e1000_fc_none;
+ hw->fc.requested_mode = em_fc_setting;
+ else
+ hw->fc.requested_mode = e1000_fc_none;
/* Override - workaround for PCHLAN issue */
- if (adapter->hw.mac.type == e1000_pchlan)
- adapter->hw.fc.requested_mode = e1000_fc_rx_pause;
+ if (hw->mac.type == e1000_pchlan)
+ hw->fc.requested_mode = e1000_fc_rx_pause;
- if (e1000_init_hw(&adapter->hw) < 0) {
+ /* Issue a global reset */
+ e1000_reset_hw(hw);
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+ if (e1000_init_hw(hw) < 0) {
device_printf(dev, "Hardware Initialization Failed\n");
- return (EIO);
+ return;
}
- e1000_check_for_link(&adapter->hw);
-
- return (0);
+ E1000_WRITE_REG(hw, E1000_VET, ETHERTYPE_VLAN);
+ e1000_get_phy_info(hw);
+ e1000_check_for_link(hw);
+ return;
}
/*********************************************************************
@@ -3101,42 +2672,26 @@ em_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_capabilities = ifp->if_capenable = 0;
-#if __FreeBSD_version >= 800000
+#ifdef EM_MULTIQUEUE
/* Multiqueue tx functions */
ifp->if_transmit = em_mq_start;
ifp->if_qflush = em_qflush;
- adapter->br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &adapter->tx_mtx);
#endif
- if (adapter->hw.mac.type >= e1000_82543) {
- int version_cap;
-#if __FreeBSD_version < 700000
- version_cap = IFCAP_HWCSUM;
-#else
- version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
-#endif
- ifp->if_capabilities |= version_cap;
- ifp->if_capenable |= version_cap;
- }
-#if __FreeBSD_version >= 700000
- /* Identify TSO capable adapters */
- if ((adapter->hw.mac.type > e1000_82544) &&
- (adapter->hw.mac.type != e1000_82547))
- ifp->if_capabilities |= IFCAP_TSO4;
- /*
- * By default only enable on PCI-E, this
- * can be overriden by ifconfig.
- */
- if (adapter->hw.mac.type >= e1000_82571)
- ifp->if_capenable |= IFCAP_TSO4;
-#endif
+ ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
+ ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
+
+ /* Enable TSO by default, can disable with ifconfig */
+ ifp->if_capabilities |= IFCAP_TSO4;
+ ifp->if_capenable |= IFCAP_TSO4;
+
/*
* Tell the upper layer(s) we
* support full VLAN capability
*/
ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
- ifp->if_capenable |= (IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING);
+ ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
/*
** Dont turn this on by default, if vlans are
@@ -3144,7 +2699,7 @@ em_setup_interface(device_t dev, struct adapter *adapter)
** then vlan events are not passed thru, breaking
** operation, but with HW FILTER off it works. If
** using vlans directly on the em driver you can
- ** enable this and get full hardware tag filtering.
+ ** enable this and get full hardware tag filtering.
*/
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
@@ -3152,10 +2707,10 @@ em_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_capabilities |= IFCAP_POLLING;
#endif
- /* Limit WOL to MAGIC, not clear others are used */
+ /* Enable All WOL methods by default */
if (adapter->wol) {
- ifp->if_capabilities |= IFCAP_WOL_MAGIC;
- ifp->if_capenable |= IFCAP_WOL_MAGIC;
+ ifp->if_capabilities |= IFCAP_WOL;
+ ifp->if_capenable |= IFCAP_WOL;
}
/*
@@ -3168,8 +2723,6 @@ em_setup_interface(device_t dev, struct adapter *adapter)
(adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
u_char fiber_type = IFM_1000_SX; /* default type */
- if (adapter->hw.mac.type == e1000_82545)
- fiber_type = IFM_1000_LX;
ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
0, NULL);
ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
@@ -3193,67 +2746,6 @@ em_setup_interface(device_t dev, struct adapter *adapter)
}
-/*********************************************************************
- *
- * Workaround for SmartSpeed on 82541 and 82547 controllers
- *
- **********************************************************************/
-static void
-em_smartspeed(struct adapter *adapter)
-{
- u16 phy_tmp;
-
- if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
- adapter->hw.mac.autoneg == 0 ||
- (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
- return;
-
- if (adapter->smartspeed == 0) {
- /* If Master/Slave config fault is asserted twice,
- * we assume back-to-back */
- e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
- if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
- return;
- e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
- if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
- e1000_read_phy_reg(&adapter->hw,
- PHY_1000T_CTRL, &phy_tmp);
- if(phy_tmp & CR_1000T_MS_ENABLE) {
- phy_tmp &= ~CR_1000T_MS_ENABLE;
- e1000_write_phy_reg(&adapter->hw,
- PHY_1000T_CTRL, phy_tmp);
- adapter->smartspeed++;
- if(adapter->hw.mac.autoneg &&
- !e1000_copper_link_autoneg(&adapter->hw) &&
- !e1000_read_phy_reg(&adapter->hw,
- PHY_CONTROL, &phy_tmp)) {
- phy_tmp |= (MII_CR_AUTO_NEG_EN |
- MII_CR_RESTART_AUTO_NEG);
- e1000_write_phy_reg(&adapter->hw,
- PHY_CONTROL, phy_tmp);
- }
- }
- }
- return;
- } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
- /* If still no link, perhaps using 2/3 pair cable */
- e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
- phy_tmp |= CR_1000T_MS_ENABLE;
- e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
- if(adapter->hw.mac.autoneg &&
- !e1000_copper_link_autoneg(&adapter->hw) &&
- !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
- phy_tmp |= (MII_CR_AUTO_NEG_EN |
- MII_CR_RESTART_AUTO_NEG);
- e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
- }
- }
- /* Restart process after EM_SMARTSPEED_MAX iterations */
- if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
- adapter->smartspeed = 0;
-}
-
-
/*
* Manage DMA'able memory.
*/
@@ -3271,11 +2763,7 @@ em_dma_malloc(struct adapter *adapter, bus_size_t size,
{
int error;
-#if __FreeBSD_version >= 700000
error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
-#else
- error = bus_dma_tag_create(NULL, /* parent */
-#endif
EM_DBA_ALIGN, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
@@ -3346,97 +2834,243 @@ em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
/*********************************************************************
*
+ * Allocate memory for the transmit and receive rings, and then
+ * the descriptors associated with each, called only once at attach.
+ *
+ **********************************************************************/
+static int
+em_allocate_queues(struct adapter *adapter)
+{
+ device_t dev = adapter->dev;
+ struct tx_ring *txr = NULL;
+ struct rx_ring *rxr = NULL;
+ int rsize, tsize, error = E1000_SUCCESS;
+ int txconf = 0, rxconf = 0;
+
+
+ /* Allocate the TX ring struct memory */
+ if (!(adapter->tx_rings =
+ (struct tx_ring *) malloc(sizeof(struct tx_ring) *
+ adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate TX ring memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ /* Now allocate the RX */
+ if (!(adapter->rx_rings =
+ (struct rx_ring *) malloc(sizeof(struct rx_ring) *
+ adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate RX ring memory\n");
+ error = ENOMEM;
+ goto rx_fail;
+ }
+
+ tsize = roundup2(adapter->num_tx_desc *
+ sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
+ /*
+ * Now set up the TX queues, txconf is needed to handle the
+ * possibility that things fail midcourse and we need to
+ * undo memory gracefully
+ */
+ for (int i = 0; i < adapter->num_queues; i++, txconf++) {
+ /* Set up some basics */
+ txr = &adapter->tx_rings[i];
+ txr->adapter = adapter;
+ txr->me = i;
+
+ /* Initialize the TX lock */
+ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
+ device_get_nameunit(dev), txr->me);
+ mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
+
+ if (em_dma_malloc(adapter, tsize,
+ &txr->txdma, BUS_DMA_NOWAIT)) {
+ device_printf(dev,
+ "Unable to allocate TX Descriptor memory\n");
+ error = ENOMEM;
+ goto err_tx_desc;
+ }
+ txr->tx_base = (struct e1000_tx_desc *)txr->txdma.dma_vaddr;
+ bzero((void *)txr->tx_base, tsize);
+
+ if (em_allocate_transmit_buffers(txr)) {
+ device_printf(dev,
+ "Critical Failure setting up transmit buffers\n");
+ error = ENOMEM;
+ goto err_tx_desc;
+ }
+#if __FreeBSD_version >= 800000
+ /* Allocate a buf ring */
+ txr->br = buf_ring_alloc(4096, M_DEVBUF,
+ M_WAITOK, &txr->tx_mtx);
+#endif
+ }
+
+ /*
+ * Next the RX queues...
+ */
+ rsize = roundup2(adapter->num_rx_desc *
+ sizeof(struct e1000_rx_desc), EM_DBA_ALIGN);
+ for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
+ rxr = &adapter->rx_rings[i];
+ rxr->adapter = adapter;
+ rxr->me = i;
+
+ /* Initialize the RX lock */
+ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
+ device_get_nameunit(dev), txr->me);
+ mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
+
+ if (em_dma_malloc(adapter, rsize,
+ &rxr->rxdma, BUS_DMA_NOWAIT)) {
+ device_printf(dev,
+ "Unable to allocate RxDescriptor memory\n");
+ error = ENOMEM;
+ goto err_rx_desc;
+ }
+ rxr->rx_base = (struct e1000_rx_desc *)rxr->rxdma.dma_vaddr;
+ bzero((void *)rxr->rx_base, rsize);
+
+ /* Allocate receive buffers for the ring*/
+ if (em_allocate_receive_buffers(rxr)) {
+ device_printf(dev,
+ "Critical Failure setting up receive buffers\n");
+ error = ENOMEM;
+ goto err_rx_desc;
+ }
+ }
+
+ return (0);
+
+err_rx_desc:
+ for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
+ em_dma_free(adapter, &rxr->rxdma);
+err_tx_desc:
+ for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
+ em_dma_free(adapter, &txr->txdma);
+ free(adapter->rx_rings, M_DEVBUF);
+rx_fail:
+ buf_ring_free(txr->br, M_DEVBUF);
+ free(adapter->tx_rings, M_DEVBUF);
+fail:
+ return (error);
+}
+
+
+/*********************************************************************
+ *
* Allocate memory for tx_buffer structures. The tx_buffer stores all
- * the information needed to transmit a packet on the wire.
+ * the information needed to transmit a packet on the wire. This is
+ * called only once at attach, setup is done every reset.
*
**********************************************************************/
static int
-em_allocate_transmit_structures(struct adapter *adapter)
+em_allocate_transmit_buffers(struct tx_ring *txr)
{
+ struct adapter *adapter = txr->adapter;
device_t dev = adapter->dev;
- struct em_buffer *tx_buffer;
- int error;
+ struct em_buffer *txbuf;
+ int error, i;
/*
- * Create DMA tags for tx descriptors
+ * Setup DMA descriptor areas.
*/
-#if __FreeBSD_version >= 700000
- if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
-#else
- if ((error = bus_dma_tag_create(NULL, /* parent */
-#endif
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- EM_TSO_SIZE, /* maxsize */
- EM_MAX_SCATTER, /* nsegments */
- EM_TSO_SEG_SIZE, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockarg */
- &adapter->txtag)) != 0) {
- device_printf(dev, "Unable to allocate TX DMA tag\n");
+ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ EM_TSO_SIZE, /* maxsize */
+ EM_MAX_SCATTER, /* nsegments */
+ PAGE_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &txr->txtag))) {
+ device_printf(dev,"Unable to allocate TX DMA tag\n");
goto fail;
}
- adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
- adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
- if (adapter->tx_buffer_area == NULL) {
+ if (!(txr->tx_buffers =
+ (struct em_buffer *) malloc(sizeof(struct em_buffer) *
+ adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
error = ENOMEM;
goto fail;
}
- /* Create the descriptor buffer dma maps */
- for (int i = 0; i < adapter->num_tx_desc; i++) {
- tx_buffer = &adapter->tx_buffer_area[i];
- error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
+ /* Create the descriptor buffer dma maps */
+ txbuf = txr->tx_buffers;
+ for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+ error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
if (error != 0) {
device_printf(dev, "Unable to create TX DMA map\n");
goto fail;
}
- tx_buffer->next_eop = -1;
}
- return (0);
+ return 0;
fail:
+ /* Free everything; this handles the case where allocation failed partway through */
em_free_transmit_structures(adapter);
return (error);
}
/*********************************************************************
*
- * (Re)Initialize transmit structures.
+ * Initialize a transmit ring.
*
**********************************************************************/
static void
-em_setup_transmit_structures(struct adapter *adapter)
+em_setup_transmit_ring(struct tx_ring *txr)
{
- struct em_buffer *tx_buffer;
+ struct adapter *adapter = txr->adapter;
+ struct em_buffer *txbuf;
+ int i;
- /* Clear the old ring contents */
- bzero(adapter->tx_desc_base,
- (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
-
- /* Free any existing TX buffers */
- for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
- tx_buffer = &adapter->tx_buffer_area[i];
- bus_dmamap_sync(adapter->txtag, tx_buffer->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(adapter->txtag, tx_buffer->map);
- m_freem(tx_buffer->m_head);
- tx_buffer->m_head = NULL;
- tx_buffer->next_eop = -1;
- }
+ /* Clear the old descriptor contents */
+ EM_TX_LOCK(txr);
+ bzero((void *)txr->tx_base,
+ (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
+ /* Reset indices */
+ txr->next_avail_desc = 0;
+ txr->next_to_clean = 0;
+
+ /* Free any existing tx buffers. */
+ txbuf = txr->tx_buffers;
+ for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+ if (txbuf->m_head != NULL) {
+ bus_dmamap_sync(txr->txtag, txbuf->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(txr->txtag, txbuf->map);
+ m_freem(txbuf->m_head);
+ txbuf->m_head = NULL;
+ }
+ /* clear the watch index */
+ txbuf->next_eop = -1;
+ }
- /* Reset state */
- adapter->next_avail_tx_desc = 0;
- adapter->next_tx_to_clean = 0;
- adapter->num_tx_desc_avail = adapter->num_tx_desc;
+ /* Set number of descriptors available */
+ txr->tx_avail = adapter->num_tx_desc;
- bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ EM_TX_UNLOCK(txr);
+}
+
+/*********************************************************************
+ *
+ * Initialize all transmit rings.
+ *
+ **********************************************************************/
+static void
+em_setup_transmit_structures(struct adapter *adapter)
+{
+ struct tx_ring *txr = adapter->tx_rings;
+
+ for (int i = 0; i < adapter->num_queues; i++, txr++)
+ em_setup_transmit_ring(txr);
return;
}
@@ -3449,25 +3083,31 @@ em_setup_transmit_structures(struct adapter *adapter)
static void
em_initialize_transmit_unit(struct adapter *adapter)
{
+ struct tx_ring *txr = adapter->tx_rings;
+ struct e1000_hw *hw = &adapter->hw;
u32 tctl, tarc, tipg = 0;
- u64 bus_addr;
INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
- /* Setup the Base and Length of the Tx Descriptor Ring */
- bus_addr = adapter->txdma.dma_paddr;
- E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
- adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
- E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
- (u32)(bus_addr >> 32));
- E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
- (u32)bus_addr);
- /* Setup the HW Tx Head and Tail descriptor pointers */
- E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
- E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
-
- HW_DEBUGOUT2("Base = %x, Length = %x\n",
- E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
- E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
+
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ u64 bus_addr = txr->txdma.dma_paddr;
+ /* Base and Len of TX Ring */
+ E1000_WRITE_REG(hw, E1000_TDLEN(i),
+ adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
+ E1000_WRITE_REG(hw, E1000_TDBAH(i),
+ (u32)(bus_addr >> 32));
+ E1000_WRITE_REG(hw, E1000_TDBAL(i),
+ (u32)bus_addr);
+ /* Init the HEAD/TAIL indices */
+ E1000_WRITE_REG(hw, E1000_TDT(i), 0);
+ E1000_WRITE_REG(hw, E1000_TDH(i), 0);
+
+ HW_DEBUGOUT2("Base = %x, Length = %x\n",
+ E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)),
+ E1000_READ_REG(&adapter->hw, E1000_TDLEN(i)));
+
+ txr->watchdog_check = FALSE;
+ }
/* Set the default values for the Tx Inter Packet Gap timer */
switch (adapter->hw.mac.type) {
@@ -3494,6 +3134,7 @@ em_initialize_transmit_unit(struct adapter *adapter)
E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
+
if(adapter->hw.mac.type >= e1000_82540)
E1000_WRITE_REG(&adapter->hw, E1000_TADV,
adapter->tx_abs_int_delay.value);
@@ -3512,6 +3153,10 @@ em_initialize_transmit_unit(struct adapter *adapter)
E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
}
+ adapter->txd_cmd = E1000_TXD_CMD_IFCS;
+ if (adapter->tx_int_delay.value > 0)
+ adapter->txd_cmd |= E1000_TXD_CMD_IDE;
+
/* Program the Transmit Control Register */
tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
tctl &= ~E1000_TCTL_CT;
@@ -3524,59 +3169,84 @@ em_initialize_transmit_unit(struct adapter *adapter)
/* This write will effectively turn on the transmit unit. */
E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
- /* Setup Transmit Descriptor Base Settings */
- adapter->txd_cmd = E1000_TXD_CMD_IFCS;
-
- if (adapter->tx_int_delay.value > 0)
- adapter->txd_cmd |= E1000_TXD_CMD_IDE;
}
+
/*********************************************************************
*
- * Free all transmit related data structures.
+ * Free all transmit rings.
*
**********************************************************************/
static void
em_free_transmit_structures(struct adapter *adapter)
{
- struct em_buffer *tx_buffer;
+ struct tx_ring *txr = adapter->tx_rings;
- INIT_DEBUGOUT("free_transmit_structures: begin");
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ EM_TX_LOCK(txr);
+ em_free_transmit_buffers(txr);
+ em_dma_free(adapter, &txr->txdma);
+ EM_TX_UNLOCK(txr);
+ EM_TX_LOCK_DESTROY(txr);
+ }
- if (adapter->tx_buffer_area != NULL) {
- for (int i = 0; i < adapter->num_tx_desc; i++) {
- tx_buffer = &adapter->tx_buffer_area[i];
- if (tx_buffer->m_head != NULL) {
- bus_dmamap_sync(adapter->txtag, tx_buffer->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(adapter->txtag,
- tx_buffer->map);
- m_freem(tx_buffer->m_head);
- tx_buffer->m_head = NULL;
- } else if (tx_buffer->map != NULL)
- bus_dmamap_unload(adapter->txtag,
- tx_buffer->map);
- if (tx_buffer->map != NULL) {
- bus_dmamap_destroy(adapter->txtag,
- tx_buffer->map);
- tx_buffer->map = NULL;
+ free(adapter->tx_rings, M_DEVBUF);
+}
+
+/*********************************************************************
+ *
+ * Free transmit ring related data structures.
+ *
+ **********************************************************************/
+static void
+em_free_transmit_buffers(struct tx_ring *txr)
+{
+ struct adapter *adapter = txr->adapter;
+ struct em_buffer *txbuf;
+
+ INIT_DEBUGOUT("free_transmit_ring: begin");
+
+ if (txr->tx_buffers == NULL)
+ return;
+
+ for (int i = 0; i < adapter->num_tx_desc; i++) {
+ txbuf = &txr->tx_buffers[i];
+ if (txbuf->m_head != NULL) {
+ bus_dmamap_sync(txr->txtag, txbuf->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(txr->txtag,
+ txbuf->map);
+ m_freem(txbuf->m_head);
+ txbuf->m_head = NULL;
+ if (txbuf->map != NULL) {
+ bus_dmamap_destroy(txr->txtag,
+ txbuf->map);
+ txbuf->map = NULL;
}
+ } else if (txbuf->map != NULL) {
+ bus_dmamap_unload(txr->txtag,
+ txbuf->map);
+ bus_dmamap_destroy(txr->txtag,
+ txbuf->map);
+ txbuf->map = NULL;
}
}
- if (adapter->tx_buffer_area != NULL) {
- free(adapter->tx_buffer_area, M_DEVBUF);
- adapter->tx_buffer_area = NULL;
- }
- if (adapter->txtag != NULL) {
- bus_dma_tag_destroy(adapter->txtag);
- adapter->txtag = NULL;
- }
#if __FreeBSD_version >= 800000
- if (adapter->br != NULL)
- buf_ring_free(adapter->br, M_DEVBUF);
+ if (txr->br != NULL)
+ buf_ring_free(txr->br, M_DEVBUF);
#endif
+ if (txr->tx_buffers != NULL) {
+ free(txr->tx_buffers, M_DEVBUF);
+ txr->tx_buffers = NULL;
+ }
+ if (txr->txtag != NULL) {
+ bus_dma_tag_destroy(txr->txtag);
+ txr->txtag = NULL;
+ }
+ return;
}
+
/*********************************************************************
*
* The offload context needs to be set when we transfer the first
@@ -3588,22 +3258,23 @@ em_free_transmit_structures(struct adapter *adapter)
* big performance win. -jfv
**********************************************************************/
static void
-em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
+em_transmit_checksum_setup(struct tx_ring *txr, struct mbuf *mp,
u32 *txd_upper, u32 *txd_lower)
{
- struct e1000_context_desc *TXD = NULL;
+ struct adapter *adapter = txr->adapter;
+ struct e1000_context_desc *TXD = NULL;
struct em_buffer *tx_buffer;
struct ether_vlan_header *eh;
struct ip *ip = NULL;
struct ip6_hdr *ip6;
- int curr_txd, ehdrlen;
+ int cur, ehdrlen;
u32 cmd, hdr_len, ip_hlen;
u16 etype;
u8 ipproto;
cmd = hdr_len = ipproto = 0;
- curr_txd = adapter->next_avail_tx_desc;
+ cur = txr->next_avail_desc;
/*
* Determine where frame payload starts.
@@ -3636,7 +3307,7 @@ em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
* Offset of place to put the checksum.
*/
TXD = (struct e1000_context_desc *)
- &adapter->tx_desc_base[curr_txd];
+ &txr->tx_base[cur];
TXD->lower_setup.ip_fields.ipcss = ehdrlen;
TXD->lower_setup.ip_fields.ipcse =
htole16(ehdrlen + ip_hlen);
@@ -3678,16 +3349,16 @@ em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
/* no need for context if already set */
- if (adapter->last_hw_offload == CSUM_TCP)
+ if (txr->last_hw_offload == CSUM_TCP)
return;
- adapter->last_hw_offload = CSUM_TCP;
+ txr->last_hw_offload = CSUM_TCP;
/*
* Start offset for payload checksum calculation.
* End offset for payload checksum calculation.
* Offset of place to put the checksum.
*/
TXD = (struct e1000_context_desc *)
- &adapter->tx_desc_base[curr_txd];
+ &txr->tx_base[cur];
TXD->upper_setup.tcp_fields.tucss = hdr_len;
TXD->upper_setup.tcp_fields.tucse = htole16(0);
TXD->upper_setup.tcp_fields.tucso =
@@ -3701,16 +3372,16 @@ em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
/* no need for context if already set */
- if (adapter->last_hw_offload == CSUM_UDP)
+ if (txr->last_hw_offload == CSUM_UDP)
return;
- adapter->last_hw_offload = CSUM_UDP;
+ txr->last_hw_offload = CSUM_UDP;
/*
* Start offset for header checksum calculation.
* End offset for header checksum calculation.
* Offset of place to put the checksum.
*/
TXD = (struct e1000_context_desc *)
- &adapter->tx_desc_base[curr_txd];
+ &txr->tx_base[cur];
TXD->upper_setup.tcp_fields.tucss = hdr_len;
TXD->upper_setup.tcp_fields.tucse = htole16(0);
TXD->upper_setup.tcp_fields.tucso =
@@ -3725,35 +3396,35 @@ em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
TXD->tcp_seg_setup.data = htole32(0);
TXD->cmd_and_length =
htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
- tx_buffer = &adapter->tx_buffer_area[curr_txd];
+ tx_buffer = &txr->tx_buffers[cur];
tx_buffer->m_head = NULL;
tx_buffer->next_eop = -1;
- if (++curr_txd == adapter->num_tx_desc)
- curr_txd = 0;
+ if (++cur == adapter->num_tx_desc)
+ cur = 0;
- adapter->num_tx_desc_avail--;
- adapter->next_avail_tx_desc = curr_txd;
+ txr->tx_avail--;
+ txr->next_avail_desc = cur;
}
-#if __FreeBSD_version >= 700000
/**********************************************************************
*
* Setup work for hardware segmentation offload (TSO)
*
**********************************************************************/
static bool
-em_tso_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper,
+em_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *txd_upper,
u32 *txd_lower)
{
- struct e1000_context_desc *TXD;
- struct em_buffer *tx_buffer;
- struct ether_vlan_header *eh;
- struct ip *ip;
- struct ip6_hdr *ip6;
- struct tcphdr *th;
- int curr_txd, ehdrlen, hdr_len, ip_hlen, isip6;
+ struct adapter *adapter = txr->adapter;
+ struct e1000_context_desc *TXD;
+ struct em_buffer *tx_buffer;
+ struct ether_vlan_header *eh;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ struct tcphdr *th;
+ int cur, ehdrlen, hdr_len, ip_hlen, isip6;
u16 etype;
/*
@@ -3833,9 +3504,9 @@ em_tso_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper,
*txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
E1000_TXD_POPTS_TXSM) << 8;
- curr_txd = adapter->next_avail_tx_desc;
- tx_buffer = &adapter->tx_buffer_area[curr_txd];
- TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
+ cur = txr->next_avail_desc;
+ tx_buffer = &txr->tx_buffers[cur];
+ TXD = (struct e1000_context_desc *) &txr->tx_base[cur];
/* IPv6 doesn't have a header checksum. */
if (!isip6) {
@@ -3870,24 +3541,23 @@ em_tso_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper,
TXD->cmd_and_length = htole32(adapter->txd_cmd |
E1000_TXD_CMD_DEXT | /* Extended descr */
E1000_TXD_CMD_TSE | /* TSE context */
- (isip6 ? 0 : E1000_TXD_CMD_IP) | /* Do IP csum */
+ (isip6 ? 0 : E1000_TXD_CMD_IP) |
E1000_TXD_CMD_TCP | /* Do TCP checksum */
(mp->m_pkthdr.len - (hdr_len))); /* Total len */
tx_buffer->m_head = NULL;
tx_buffer->next_eop = -1;
- if (++curr_txd == adapter->num_tx_desc)
- curr_txd = 0;
+ if (++cur == adapter->num_tx_desc)
+ cur = 0;
- adapter->num_tx_desc_avail--;
- adapter->next_avail_tx_desc = curr_txd;
- adapter->tx_tso = TRUE;
+ txr->tx_avail--;
+ txr->next_avail_desc = cur;
+ txr->tx_tso = TRUE;
return TRUE;
}
-#endif /* __FreeBSD_version >= 700000 */
/**********************************************************************
*
@@ -3896,25 +3566,26 @@ em_tso_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper,
* tx_buffer is put back on the free queue.
*
**********************************************************************/
-static void
-em_txeof(struct adapter *adapter)
+static bool
+em_txeof(struct tx_ring *txr)
{
+ struct adapter *adapter = txr->adapter;
int first, last, done, num_avail;
struct em_buffer *tx_buffer;
struct e1000_tx_desc *tx_desc, *eop_desc;
struct ifnet *ifp = adapter->ifp;
- EM_TX_LOCK_ASSERT(adapter);
+ EM_TX_LOCK_ASSERT(txr);
- if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
- return;
+ if (txr->tx_avail == adapter->num_tx_desc)
+ return (FALSE);
- num_avail = adapter->num_tx_desc_avail;
- first = adapter->next_tx_to_clean;
- tx_desc = &adapter->tx_desc_base[first];
- tx_buffer = &adapter->tx_buffer_area[first];
+ num_avail = txr->tx_avail;
+ first = txr->next_to_clean;
+ tx_desc = &txr->tx_base[first];
+ tx_buffer = &txr->tx_buffers[first];
last = tx_buffer->next_eop;
- eop_desc = &adapter->tx_desc_base[last];
+ eop_desc = &txr->tx_base[last];
/*
* What this does is get the index of the
@@ -3926,7 +3597,7 @@ em_txeof(struct adapter *adapter)
last = 0;
done = last;
- bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
BUS_DMASYNC_POSTREAD);
while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
@@ -3939,38 +3610,38 @@ em_txeof(struct adapter *adapter)
if (tx_buffer->m_head) {
ifp->if_opackets++;
- bus_dmamap_sync(adapter->txtag,
+ bus_dmamap_sync(txr->txtag,
tx_buffer->map,
BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(adapter->txtag,
+ bus_dmamap_unload(txr->txtag,
tx_buffer->map);
m_freem(tx_buffer->m_head);
tx_buffer->m_head = NULL;
}
tx_buffer->next_eop = -1;
- adapter->watchdog_time = ticks;
+ txr->watchdog_time = ticks;
if (++first == adapter->num_tx_desc)
first = 0;
- tx_buffer = &adapter->tx_buffer_area[first];
- tx_desc = &adapter->tx_desc_base[first];
+ tx_buffer = &txr->tx_buffers[first];
+ tx_desc = &txr->tx_base[first];
}
/* See if we can continue to the next packet */
last = tx_buffer->next_eop;
if (last != -1) {
- eop_desc = &adapter->tx_desc_base[last];
+ eop_desc = &txr->tx_base[last];
/* Get new done point */
if (++last == adapter->num_tx_desc) last = 0;
done = last;
} else
break;
}
- bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- adapter->next_tx_to_clean = first;
+ txr->next_to_clean = first;
/*
* If we have enough room, clear IFF_DRV_OACTIVE to
@@ -3980,88 +3651,87 @@ em_txeof(struct adapter *adapter)
if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
if (num_avail == adapter->num_tx_desc) {
- adapter->watchdog_check = FALSE;
- adapter->num_tx_desc_avail = num_avail;
- return;
+ txr->watchdog_check = FALSE;
+ txr->tx_avail = num_avail;
+ return (FALSE);
}
}
- adapter->num_tx_desc_avail = num_avail;
- return;
+ txr->tx_avail = num_avail;
+ return (TRUE);
}
-/*********************************************************************
- *
- * When Link is lost sometimes there is work still in the TX ring
- * which may result in a watchdog, rather than allow that we do an
- * attempted cleanup and then reinit here. Note that this has been
- * seens mostly with fiber adapters.
- *
- **********************************************************************/
-static void
-em_tx_purge(struct adapter *adapter)
-{
- if ((!adapter->link_active) && (adapter->watchdog_check)) {
- EM_TX_LOCK(adapter);
- em_txeof(adapter);
- EM_TX_UNLOCK(adapter);
- if (adapter->watchdog_check) /* Still outstanding? */
- em_init_locked(adapter);
- }
-}
/*********************************************************************
*
- * Get a buffer from system mbuf buffer pool.
+ * Refresh RX descriptor mbufs from system mbuf buffer pool.
*
**********************************************************************/
-static int
-em_get_buf(struct adapter *adapter, int i)
+static void
+em_refresh_mbufs(struct rx_ring *rxr, int limit)
{
+ struct adapter *adapter = rxr->adapter;
struct mbuf *m;
bus_dma_segment_t segs[1];
bus_dmamap_t map;
- struct em_buffer *rx_buffer;
- int error, nsegs;
-
- m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
- if (m == NULL) {
- adapter->mbuf_cluster_failed++;
- return (ENOBUFS);
- }
- m->m_len = m->m_pkthdr.len = MCLBYTES;
-
- if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
- m_adj(m, ETHER_ALIGN);
+ struct em_buffer *rxbuf;
+ int i, error, nsegs, cleaned;
- /*
- * Using memory from the mbuf cluster pool, invoke the
- * bus_dma machinery to arrange the memory mapping.
- */
- error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
- adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- m_free(m);
- return (error);
- }
+ i = rxr->next_to_refresh;
+ cleaned = -1;
+ while (i != limit) {
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (m == NULL)
+ goto update;
+ m->m_len = m->m_pkthdr.len = MCLBYTES;
- /* If nsegs is wrong then the stack is corrupt. */
- KASSERT(nsegs == 1, ("Too many segments returned!"));
+ if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
+ m_adj(m, ETHER_ALIGN);
- rx_buffer = &adapter->rx_buffer_area[i];
- if (rx_buffer->m_head != NULL)
- bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
+ /*
+ * Using memory from the mbuf cluster pool, invoke the
+ * bus_dma machinery to arrange the memory mapping.
+ */
+ error = bus_dmamap_load_mbuf_sg(rxr->rxtag, rxr->rx_sparemap,
+ m, segs, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ m_free(m);
+ goto update;
+ }
- map = rx_buffer->map;
- rx_buffer->map = adapter->rx_sparemap;
- adapter->rx_sparemap = map;
- bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
- rx_buffer->m_head = m;
+ /* If nsegs is wrong then the stack is corrupt. */
+ KASSERT(nsegs == 1, ("Too many segments returned!"));
+
+ rxbuf = &rxr->rx_buffers[i];
+ if (rxbuf->m_head != NULL)
+ bus_dmamap_unload(rxr->rxtag, rxbuf->map);
+
+ map = rxbuf->map;
+ rxbuf->map = rxr->rx_sparemap;
+ rxr->rx_sparemap = map;
+ bus_dmamap_sync(rxr->rxtag,
+ rxbuf->map, BUS_DMASYNC_PREREAD);
+ rxbuf->m_head = m;
+ rxr->rx_base[i].buffer_addr = htole64(segs[0].ds_addr);
+
+ cleaned = i;
+ /* Calculate next index */
+ if (++i == adapter->num_rx_desc)
+ i = 0;
+ /* This is the work marker for refresh */
+ rxr->next_to_refresh = i;
+ }
+update:
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ if (cleaned != -1) /* Update tail index */
+ E1000_WRITE_REG(&adapter->hw,
+ E1000_RDT(rxr->me), cleaned);
- adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
- return (0);
+ return;
}
+
/*********************************************************************
*
* Allocate memory for rx_buffer structures. Since we use one
@@ -4071,24 +3741,21 @@ em_get_buf(struct adapter *adapter, int i)
*
**********************************************************************/
static int
-em_allocate_receive_structures(struct adapter *adapter)
+em_allocate_receive_buffers(struct rx_ring *rxr)
{
- device_t dev = adapter->dev;
- struct em_buffer *rx_buffer;
- int i, error;
+ struct adapter *adapter = rxr->adapter;
+ device_t dev = adapter->dev;
+ struct em_buffer *rxbuf;
+ int error;
- adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
+ rxr->rx_buffers = malloc(sizeof(struct em_buffer) *
adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
- if (adapter->rx_buffer_area == NULL) {
+ if (rxr->rx_buffers == NULL) {
device_printf(dev, "Unable to allocate rx_buffer memory\n");
return (ENOMEM);
}
-#if __FreeBSD_version >= 700000
error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
-#else
- error = bus_dma_tag_create(NULL, /* parent */
-#endif
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
@@ -4099,7 +3766,7 @@ em_allocate_receive_structures(struct adapter *adapter)
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
- &adapter->rxtag);
+ &rxr->rxtag);
if (error) {
device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
__func__, error);
@@ -4107,18 +3774,19 @@ em_allocate_receive_structures(struct adapter *adapter)
}
/* Create the spare map (used by getbuf) */
- error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
- &adapter->rx_sparemap);
+ error = bus_dmamap_create(rxr->rxtag, BUS_DMA_NOWAIT,
+ &rxr->rx_sparemap);
if (error) {
device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
__func__, error);
goto fail;
}
- rx_buffer = adapter->rx_buffer_area;
- for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
- error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
- &rx_buffer->map);
+ rxbuf = rxr->rx_buffers;
+ for (int i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
+ rxbuf = &rxr->rx_buffers[i];
+ error = bus_dmamap_create(rxr->rxtag, BUS_DMA_NOWAIT,
+ &rxbuf->map);
if (error) {
device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
__func__, error);
@@ -4133,50 +3801,184 @@ fail:
return (error);
}
+
/*********************************************************************
*
- * (Re)initialize receive structures.
+ * Initialize a receive ring and its buffers.
*
**********************************************************************/
static int
-em_setup_receive_structures(struct adapter *adapter)
+em_setup_receive_ring(struct rx_ring *rxr)
{
- struct em_buffer *rx_buffer;
- int i, error;
-
- /* Reset descriptor ring */
- bzero(adapter->rx_desc_base,
- (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
-
- /* Free current RX buffers. */
- rx_buffer = adapter->rx_buffer_area;
- for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
- if (rx_buffer->m_head != NULL) {
- bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
+ struct adapter *adapter = rxr->adapter;
+ struct em_buffer *rxbuf;
+ bus_dma_segment_t seg[1];
+ int rsize, nsegs, error;
+
+
+ /* Clear the ring contents */
+ EM_RX_LOCK(rxr);
+ rsize = roundup2(adapter->num_rx_desc *
+ sizeof(struct e1000_rx_desc), EM_DBA_ALIGN);
+ bzero((void *)rxr->rx_base, rsize);
+
+ /*
+ ** Free current RX buffer structs and their mbufs
+ */
+ for (int i = 0; i < adapter->num_rx_desc; i++) {
+ rxbuf = &rxr->rx_buffers[i];
+ if (rxbuf->m_head != NULL) {
+ bus_dmamap_sync(rxr->rxtag, rxbuf->map,
BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
- m_freem(rx_buffer->m_head);
- rx_buffer->m_head = NULL;
+ bus_dmamap_unload(rxr->rxtag, rxbuf->map);
+ m_freem(rxbuf->m_head);
}
- }
+ }
- /* Allocate new ones. */
- for (i = 0; i < adapter->num_rx_desc; i++) {
- error = em_get_buf(adapter, i);
- if (error)
- return (error);
+ /* Now replenish the mbufs */
+ for (int j = 0; j != adapter->num_rx_desc; ++j) {
+
+ rxbuf = &rxr->rx_buffers[j];
+ rxbuf->m_head = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (rxbuf->m_head == NULL)
+ panic("RX ring hdr initialization failed!\n");
+ rxbuf->m_head->m_len = MCLBYTES;
+ rxbuf->m_head->m_flags &= ~M_HASFCS; /* we strip it */
+ rxbuf->m_head->m_pkthdr.len = MCLBYTES;
+
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
+ rxbuf->map, rxbuf->m_head, seg,
+ &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0)
+ panic("RX ring dma initialization failed!\n");
+ bus_dmamap_sync(rxr->rxtag,
+ rxbuf->map, BUS_DMASYNC_PREREAD);
+
+ /* Update descriptor */
+ rxr->rx_base[j].buffer_addr = htole64(seg[0].ds_addr);
}
- /* Setup our descriptor pointers */
- adapter->next_rx_desc_to_check = 0;
- bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
+
+ /* Setup our descriptor indices */
+ rxr->next_to_check = 0;
+ rxr->next_to_refresh = 0;
+
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ EM_RX_UNLOCK(rxr);
+ return (0);
+}
+
+/*********************************************************************
+ *
+ * Initialize all receive rings.
+ *
+ **********************************************************************/
+static int
+em_setup_receive_structures(struct adapter *adapter)
+{
+ struct rx_ring *rxr = adapter->rx_rings;
+ int j;
+
+ for (j = 0; j < adapter->num_queues; j++, rxr++)
+ if (em_setup_receive_ring(rxr))
+ goto fail;
+
return (0);
+fail:
+ /*
+ * Free RX buffers allocated so far, we will only handle
+ * the rings that completed, the failing case will have
+ * cleaned up for itself. 'j' failed, so it's the terminus.
+ */
+ for (int i = 0; i < j; ++i) {
+ rxr = &adapter->rx_rings[i];
+ for (int n = 0; n < adapter->num_rx_desc; n++) {
+ struct em_buffer *rxbuf;
+ rxbuf = &rxr->rx_buffers[n];
+ if (rxbuf->m_head != NULL) {
+ bus_dmamap_sync(rxr->rxtag, rxbuf->map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->rxtag, rxbuf->map);
+ m_freem(rxbuf->m_head);
+ rxbuf->m_head = NULL;
+ }
+ }
+ }
+
+ return (ENOBUFS);
}
/*********************************************************************
*
+ * Free all receive rings.
+ *
+ **********************************************************************/
+static void
+em_free_receive_structures(struct adapter *adapter)
+{
+ struct rx_ring *rxr = adapter->rx_rings;
+
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+ em_free_receive_buffers(rxr);
+ /* Free the ring memory as well */
+ em_dma_free(adapter, &rxr->rxdma);
+ EM_RX_LOCK_DESTROY(rxr);
+ }
+
+ free(adapter->rx_rings, M_DEVBUF);
+}
+
+
+/*********************************************************************
+ *
+ * Free receive ring data structures
+ *
+ **********************************************************************/
+static void
+em_free_receive_buffers(struct rx_ring *rxr)
+{
+ struct adapter *adapter = rxr->adapter;
+ struct em_buffer *rxbuf = NULL;
+
+ INIT_DEBUGOUT("free_receive_buffers: begin");
+
+ if (rxr->rx_sparemap) {
+ bus_dmamap_destroy(rxr->rxtag, rxr->rx_sparemap);
+ rxr->rx_sparemap = NULL;
+ }
+
+ if (rxr->rx_buffers != NULL) {
+ for (int i = 0; i < adapter->num_rx_desc; i++) {
+ rxbuf = &rxr->rx_buffers[i];
+ if (rxbuf->map != NULL) {
+ bus_dmamap_sync(rxr->rxtag, rxbuf->map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->rxtag, rxbuf->map);
+ bus_dmamap_destroy(rxr->rxtag, rxbuf->map);
+ }
+ if (rxbuf->m_head != NULL) {
+ m_freem(rxbuf->m_head);
+ rxbuf->m_head = NULL;
+ }
+ }
+ free(rxr->rx_buffers, M_DEVBUF);
+ rxr->rx_buffers = NULL;
+ }
+
+ if (rxr->rxtag != NULL) {
+ bus_dma_tag_destroy(rxr->rxtag);
+ rxr->rxtag = NULL;
+ }
+
+ return;
+}
+
+
+/*********************************************************************
+ *
* Enable receive unit.
*
**********************************************************************/
@@ -4186,96 +3988,46 @@ em_setup_receive_structures(struct adapter *adapter)
static void
em_initialize_receive_unit(struct adapter *adapter)
{
+ struct rx_ring *rxr = adapter->rx_rings;
struct ifnet *ifp = adapter->ifp;
+ struct e1000_hw *hw = &adapter->hw;
u64 bus_addr;
u32 rctl, rxcsum;
- INIT_DEBUGOUT("em_initialize_receive_unit: begin");
+ INIT_DEBUGOUT("em_initialize_receive_units: begin");
/*
* Make sure receives are disabled while setting
* up the descriptor ring
*/
- rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
- E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
- if (adapter->hw.mac.type >= e1000_82540) {
- E1000_WRITE_REG(&adapter->hw, E1000_RADV,
- adapter->rx_abs_int_delay.value);
- /*
- * Set the interrupt throttling rate. Value is calculated
- * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
- */
- E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
- }
+ E1000_WRITE_REG(&adapter->hw, E1000_RADV,
+ adapter->rx_abs_int_delay.value);
+ /*
+ * Set the interrupt throttling rate. Value is calculated
+ * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
+ */
+ E1000_WRITE_REG(hw, E1000_ITR, DEFAULT_ITR);
/*
** When using MSIX interrupts we need to throttle
** using the EITR register (82574 only)
*/
- if (adapter->msix)
+ if (hw->mac.type == e1000_82574)
for (int i = 0; i < 4; i++)
- E1000_WRITE_REG(&adapter->hw,
- E1000_EITR_82574(i), DEFAULT_ITR);
+ E1000_WRITE_REG(hw, E1000_EITR_82574(i),
+ DEFAULT_ITR);
/* Disable accelerated acknowledge */
if (adapter->hw.mac.type == e1000_82574)
- E1000_WRITE_REG(&adapter->hw,
- E1000_RFCTL, E1000_RFCTL_ACK_DIS);
-
- /* Setup the Base and Length of the Rx Descriptor Ring */
- bus_addr = adapter->rxdma.dma_paddr;
- E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
- adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
- E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
- (u32)(bus_addr >> 32));
- E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
- (u32)bus_addr);
-
- /* Setup the Receive Control Register */
- rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
- rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
- E1000_RCTL_RDMTS_HALF |
- (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
-
- /* Make sure VLAN Filters are off */
- rctl &= ~E1000_RCTL_VFE;
-
- if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
- rctl |= E1000_RCTL_SBP;
- else
- rctl &= ~E1000_RCTL_SBP;
-
- switch (adapter->rx_buffer_len) {
- default:
- case 2048:
- rctl |= E1000_RCTL_SZ_2048;
- break;
- case 4096:
- rctl |= E1000_RCTL_SZ_4096 |
- E1000_RCTL_BSEX | E1000_RCTL_LPE;
- break;
- case 8192:
- rctl |= E1000_RCTL_SZ_8192 |
- E1000_RCTL_BSEX | E1000_RCTL_LPE;
- break;
- case 16384:
- rctl |= E1000_RCTL_SZ_16384 |
- E1000_RCTL_BSEX | E1000_RCTL_LPE;
- break;
- }
-
- if (ifp->if_mtu > ETHERMTU)
- rctl |= E1000_RCTL_LPE;
- else
- rctl &= ~E1000_RCTL_LPE;
+ E1000_WRITE_REG(hw, E1000_RFCTL, E1000_RFCTL_ACK_DIS);
- /* Enable 82543 Receive Checksum Offload for TCP and UDP */
- if ((adapter->hw.mac.type >= e1000_82543) &&
- (ifp->if_capenable & IFCAP_RXCSUM)) {
- rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
+ if (ifp->if_capenable & IFCAP_RXCSUM) {
+ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
- E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
+ E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
}
/*
@@ -4285,73 +4037,46 @@ em_initialize_receive_unit(struct adapter *adapter)
** values in RDTR is a known source of problems on other
** platforms another solution is being sought.
*/
- if (adapter->hw.mac.type == e1000_82573)
- E1000_WRITE_REG(&adapter->hw, E1000_RDTR, 0x20);
+ if (hw->mac.type == e1000_82573)
+ E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
- /* Enable Receives */
- E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
-
- /*
- * Setup the HW Rx Head and
- * Tail Descriptor Pointers
- */
- E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
- E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
-
- return;
-}
-
-/*********************************************************************
- *
- * Free receive related data structures.
- *
- **********************************************************************/
-static void
-em_free_receive_structures(struct adapter *adapter)
-{
- struct em_buffer *rx_buffer;
- int i;
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+ /* Setup the Base and Length of the Rx Descriptor Ring */
+ bus_addr = rxr->rxdma.dma_paddr;
+ E1000_WRITE_REG(hw, E1000_RDLEN(i),
+ adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
+ E1000_WRITE_REG(hw, E1000_RDBAH(i), (u32)(bus_addr >> 32));
+ E1000_WRITE_REG(hw, E1000_RDBAL(i), (u32)bus_addr);
+ /* Setup the Head and Tail Descriptor Pointers */
+ E1000_WRITE_REG(hw, E1000_RDH(i), 0);
+ E1000_WRITE_REG(hw, E1000_RDT(i), adapter->num_rx_desc - 1);
+ }
- INIT_DEBUGOUT("free_receive_structures: begin");
+ /* Setup the Receive Control Register */
+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
- if (adapter->rx_sparemap) {
- bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
- adapter->rx_sparemap = NULL;
- }
+ /* Strip the CRC */
+ rctl |= E1000_RCTL_SECRC;
- /* Cleanup any existing buffers */
- if (adapter->rx_buffer_area != NULL) {
- rx_buffer = adapter->rx_buffer_area;
- for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
- if (rx_buffer->m_head != NULL) {
- bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(adapter->rxtag,
- rx_buffer->map);
- m_freem(rx_buffer->m_head);
- rx_buffer->m_head = NULL;
- } else if (rx_buffer->map != NULL)
- bus_dmamap_unload(adapter->rxtag,
- rx_buffer->map);
- if (rx_buffer->map != NULL) {
- bus_dmamap_destroy(adapter->rxtag,
- rx_buffer->map);
- rx_buffer->map = NULL;
- }
- }
- }
+ /* Make sure VLAN Filters are off */
+ rctl &= ~E1000_RCTL_VFE;
+ rctl &= ~E1000_RCTL_SBP;
+ rctl |= E1000_RCTL_SZ_2048;
+ if (ifp->if_mtu > ETHERMTU)
+ rctl |= E1000_RCTL_LPE;
+ else
+ rctl &= ~E1000_RCTL_LPE;
- if (adapter->rx_buffer_area != NULL) {
- free(adapter->rx_buffer_area, M_DEVBUF);
- adapter->rx_buffer_area = NULL;
- }
+ /* Write out the settings */
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- if (adapter->rxtag != NULL) {
- bus_dma_tag_destroy(adapter->rxtag);
- adapter->rxtag = NULL;
- }
+ return;
}
+
/*********************************************************************
*
* This routine executes in interrupt context. It replenishes
@@ -4364,184 +4089,134 @@ em_free_receive_structures(struct adapter *adapter)
* For polling we also now return the number of cleaned packets
*********************************************************************/
static int
-em_rxeof(struct adapter *adapter, int count)
+em_rxeof(struct rx_ring *rxr, int count)
{
- struct ifnet *ifp = adapter->ifp;;
- struct mbuf *mp;
- u8 status, accept_frame = 0, eop = 0;
- u16 len, desc_len, prev_len_adj;
- int i, rx_sent = 0;
- struct e1000_rx_desc *current_desc;
-
- EM_RX_LOCK(adapter);
- i = adapter->next_rx_desc_to_check;
- current_desc = &adapter->rx_desc_base[i];
- bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
- BUS_DMASYNC_POSTREAD);
-
- if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
- EM_RX_UNLOCK(adapter);
- return (rx_sent);
- }
+ struct adapter *adapter = rxr->adapter;
+ struct ifnet *ifp = adapter->ifp;
+ struct mbuf *mp, *sendmp;
+ u8 status = 0;
+ u16 len;
+ int i, processed, rxdone = 0;
+ bool eop;
+ struct e1000_rx_desc *cur;
- while ((current_desc->status & E1000_RXD_STAT_DD) &&
- (count != 0) &&
- (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- struct mbuf *m = NULL;
+ EM_RX_LOCK_ASSERT(rxr);
- mp = adapter->rx_buffer_area[i].m_head;
- /*
- * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
- * needs to access the last received byte in the mbuf.
- */
- bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
- BUS_DMASYNC_POSTREAD);
-
- accept_frame = 1;
- prev_len_adj = 0;
- desc_len = le16toh(current_desc->length);
- status = current_desc->status;
- if (status & E1000_RXD_STAT_EOP) {
- count--;
- eop = 1;
- if (desc_len < ETHER_CRC_LEN) {
- len = 0;
- prev_len_adj = ETHER_CRC_LEN - desc_len;
- } else
- len = desc_len - ETHER_CRC_LEN;
- } else {
- eop = 0;
- len = desc_len;
- }
+ for (i = rxr->next_to_check, processed = 0; count != 0;) {
- if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
- u8 last_byte;
- u32 pkt_len = desc_len;
-
- if (adapter->fmp != NULL)
- pkt_len += adapter->fmp->m_pkthdr.len;
-
- last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
- if (TBI_ACCEPT(&adapter->hw, status,
- current_desc->errors, pkt_len, last_byte,
- adapter->min_frame_size, adapter->max_frame_size)) {
- e1000_tbi_adjust_stats_82543(&adapter->hw,
- &adapter->stats, pkt_len,
- adapter->hw.mac.addr,
- adapter->max_frame_size);
- if (len > 0)
- len--;
- } else
- accept_frame = 0;
- }
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ break;
- if (accept_frame) {
- if (em_get_buf(adapter, i) != 0) {
- ifp->if_iqdrops++;
- goto discard;
- }
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ cur = &rxr->rx_base[i];
+ status = cur->status;
+ mp = sendmp = NULL;
+
+ if ((status & E1000_RXD_STAT_DD) == 0)
+ break;
+
+ len = le16toh(cur->length);
+ eop = (status & E1000_RXD_STAT_EOP) != 0;
+ count--;
+
+ if ((cur->errors & E1000_RXD_ERR_FRAME_ERR_MASK) == 0) {
/* Assign correct length to the current fragment */
+ mp = rxr->rx_buffers[i].m_head;
mp->m_len = len;
- if (adapter->fmp == NULL) {
+ if (rxr->fmp == NULL) {
mp->m_pkthdr.len = len;
- adapter->fmp = mp; /* Store the first mbuf */
- adapter->lmp = mp;
+ rxr->fmp = mp; /* Store the first mbuf */
+ rxr->lmp = mp;
} else {
/* Chain mbuf's together */
mp->m_flags &= ~M_PKTHDR;
- /*
- * Adjust length of previous mbuf in chain if
- * we received less than 4 bytes in the last
- * descriptor.
- */
- if (prev_len_adj > 0) {
- adapter->lmp->m_len -= prev_len_adj;
- adapter->fmp->m_pkthdr.len -=
- prev_len_adj;
- }
- adapter->lmp->m_next = mp;
- adapter->lmp = adapter->lmp->m_next;
- adapter->fmp->m_pkthdr.len += len;
+ rxr->lmp->m_next = mp;
+ rxr->lmp = rxr->lmp->m_next;
+ rxr->fmp->m_pkthdr.len += len;
}
if (eop) {
- adapter->fmp->m_pkthdr.rcvif = ifp;
+ rxr->fmp->m_pkthdr.rcvif = ifp;
ifp->if_ipackets++;
- em_receive_checksum(adapter, current_desc,
- adapter->fmp);
+ em_receive_checksum(cur, rxr->fmp);
#ifndef __NO_STRICT_ALIGNMENT
if (adapter->max_frame_size >
(MCLBYTES - ETHER_ALIGN) &&
- em_fixup_rx(adapter) != 0)
+ em_fixup_rx(rxr) != 0)
goto skip;
#endif
if (status & E1000_RXD_STAT_VP) {
-#if __FreeBSD_version < 700000
- VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
- (le16toh(current_desc->special) &
- E1000_RXD_SPC_VLAN_MASK));
-#else
- adapter->fmp->m_pkthdr.ether_vtag =
- (le16toh(current_desc->special) &
+ rxr->fmp->m_pkthdr.ether_vtag =
+ (le16toh(cur->special) &
E1000_RXD_SPC_VLAN_MASK);
- adapter->fmp->m_flags |= M_VLANTAG;
-#endif
+ rxr->fmp->m_flags |= M_VLANTAG;
}
+#ifdef EM_MULTIQUEUE
+ rxr->fmp->m_pkthdr.flowid = curcpu;
+ rxr->fmp->m_flags |= M_FLOWID;
+#endif
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
- m = adapter->fmp;
- adapter->fmp = NULL;
- adapter->lmp = NULL;
+ sendmp = rxr->fmp;
+ rxr->fmp = NULL;
+ rxr->lmp = NULL;
}
} else {
ifp->if_ierrors++;
-discard:
/* Reuse loaded DMA map and just update mbuf chain */
- mp = adapter->rx_buffer_area[i].m_head;
+ mp = rxr->rx_buffers[i].m_head;
mp->m_len = mp->m_pkthdr.len = MCLBYTES;
mp->m_data = mp->m_ext.ext_buf;
mp->m_next = NULL;
if (adapter->max_frame_size <=
(MCLBYTES - ETHER_ALIGN))
m_adj(mp, ETHER_ALIGN);
- if (adapter->fmp != NULL) {
- m_freem(adapter->fmp);
- adapter->fmp = NULL;
- adapter->lmp = NULL;
+ if (rxr->fmp != NULL) {
+ m_freem(rxr->fmp);
+ rxr->fmp = NULL;
+ rxr->lmp = NULL;
}
- m = NULL;
+ sendmp = NULL;
}
/* Zero out the receive descriptors status. */
- current_desc->status = 0;
- bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ cur->status = 0;
+ ++rxdone; /* cumulative for POLL */
+ ++processed;
/* Advance our pointers to the next descriptor. */
if (++i == adapter->num_rx_desc)
i = 0;
- /* Call into the stack */
- if (m != NULL) {
- adapter->next_rx_desc_to_check = i;
- EM_RX_UNLOCK(adapter);
- (*ifp->if_input)(ifp, m);
- EM_RX_LOCK(adapter);
- rx_sent++;
- i = adapter->next_rx_desc_to_check;
+
+ /* Send to the stack */
+ if (sendmp != NULL)
+ (*ifp->if_input)(ifp, sendmp);
+
+ /* Only refresh mbufs every 8 descriptors */
+ if (processed == 8) {
+ em_refresh_mbufs(rxr, i);
+ processed = 0;
}
- current_desc = &adapter->rx_desc_base[i];
}
- adapter->next_rx_desc_to_check = i;
-
- /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
- if (--i < 0)
- i = adapter->num_rx_desc - 1;
- E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
- EM_RX_UNLOCK(adapter);
- return (rx_sent);
+
+ /* Catch any remaining refresh work */
+ if (processed != 0) {
+ em_refresh_mbufs(rxr, i);
+ processed = 0;
+ }
+
+ rxr->next_to_check = i;
+
+#ifdef DEVICE_POLLING
+ return (rxdone);
+#else
+ return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
+#endif
}
#ifndef __NO_STRICT_ALIGNMENT
@@ -4560,13 +4235,14 @@ discard:
* not used at all on architectures with strict alignment.
*/
static int
-em_fixup_rx(struct adapter *adapter)
+em_fixup_rx(struct rx_ring *rxr)
{
+ struct adapter *adapter = rxr->adapter;
struct mbuf *m, *n;
int error;
error = 0;
- m = adapter->fmp;
+ m = rxr->fmp;
if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
m->m_data += ETHER_HDR_LEN;
@@ -4579,11 +4255,11 @@ em_fixup_rx(struct adapter *adapter)
n->m_len = ETHER_HDR_LEN;
M_MOVE_PKTHDR(n, m);
n->m_next = m;
- adapter->fmp = n;
+ rxr->fmp = n;
} else {
adapter->dropped_pkts++;
- m_freem(adapter->fmp);
- adapter->fmp = NULL;
+ m_freem(rxr->fmp);
+ rxr->fmp = NULL;
error = ENOMEM;
}
}
@@ -4600,13 +4276,10 @@ em_fixup_rx(struct adapter *adapter)
*
*********************************************************************/
static void
-em_receive_checksum(struct adapter *adapter,
- struct e1000_rx_desc *rx_desc, struct mbuf *mp)
+em_receive_checksum(struct e1000_rx_desc *rx_desc, struct mbuf *mp)
{
- /* 82543 or newer only */
- if ((adapter->hw.mac.type < e1000_82543) ||
- /* Ignore Checksum bit is set */
- (rx_desc->status & E1000_RXD_STAT_IXSM)) {
+ /* Ignore Checksum bit is set */
+ if (rx_desc->status & E1000_RXD_STAT_IXSM) {
mp->m_pkthdr.csum_flags = 0;
return;
}
@@ -4633,7 +4306,6 @@ em_receive_checksum(struct adapter *adapter,
}
}
-#if __FreeBSD_version >= 700029
/*
* This routine is run via an vlan
* config EVENT
@@ -4720,7 +4392,6 @@ em_setup_vlan_hw_support(struct adapter *adapter)
E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
adapter->max_frame_size + VLAN_TAG_SIZE);
}
-#endif
static void
em_enable_intr(struct adapter *adapter)
@@ -4728,7 +4399,7 @@ em_enable_intr(struct adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 ims_mask = IMS_ENABLE_MASK;
- if (adapter->msix) {
+ if (hw->mac.type == e1000_82574) {
E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
ims_mask |= EM_MSIX_MASK;
}
@@ -4740,7 +4411,7 @@ em_disable_intr(struct adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- if (adapter->msix)
+ if (hw->mac.type == e1000_82574)
E1000_WRITE_REG(hw, EM_EIAC, 0);
E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
}
@@ -4763,15 +4434,12 @@ em_init_manageability(struct adapter *adapter)
manc &= ~(E1000_MANC_ARP_EN);
/* enable receiving management packets to the host */
- if (adapter->hw.mac.type >= e1000_82571) {
- manc |= E1000_MANC_EN_MNG2HOST;
+ manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
- manc2h |= E1000_MNG2HOST_PORT_623;
- manc2h |= E1000_MNG2HOST_PORT_664;
- E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
- }
-
+ manc2h |= E1000_MNG2HOST_PORT_623;
+ manc2h |= E1000_MNG2HOST_PORT_664;
+ E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
}
}
@@ -4788,9 +4456,7 @@ em_release_manageability(struct adapter *adapter)
/* re-enable hardware interception of ARP */
manc |= E1000_MANC_ARP_EN;
-
- if (adapter->hw.mac.type >= e1000_82571)
- manc &= ~E1000_MANC_EN_MNG2HOST;
+ manc &= ~E1000_MANC_EN_MNG2HOST;
E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
}
@@ -4874,20 +4540,10 @@ em_get_wakeup(device_t dev)
apme_mask = EM_EEPROM_APME;
switch (adapter->hw.mac.type) {
- case e1000_82542:
- case e1000_82543:
- break;
- case e1000_82544:
- e1000_read_nvm(&adapter->hw,
- NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
- apme_mask = EM_82544_APME;
- break;
case e1000_82573:
case e1000_82583:
adapter->has_amt = TRUE;
/* Falls thru */
- case e1000_82546:
- case e1000_82546_rev_3:
case e1000_82571:
case e1000_82572:
case e1000_80003es2lan:
@@ -4921,11 +4577,6 @@ em_get_wakeup(device_t dev)
*/
device_id = pci_get_device(dev);
switch (device_id) {
- case E1000_DEV_ID_82546GB_PCIE:
- adapter->wol = 0;
- break;
- case E1000_DEV_ID_82546EB_FIBER:
- case E1000_DEV_ID_82546GB_FIBER:
case E1000_DEV_ID_82571EB_FIBER:
/* Wake events only supported on port A for dual fiber
* regardless of eeprom setting */
@@ -4933,7 +4584,6 @@ em_get_wakeup(device_t dev)
E1000_STATUS_FUNC_1)
adapter->wol = 0;
break;
- case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
case E1000_DEV_ID_82571EB_QUAD_COPPER:
case E1000_DEV_ID_82571EB_QUAD_FIBER:
case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
@@ -4952,7 +4602,7 @@ em_get_wakeup(device_t dev)
/*
* Enable PCI Wake On Lan capability
*/
-void
+static void
em_enable_wakeup(device_t dev)
{
struct adapter *adapter = device_get_softc(dev);
@@ -4969,7 +4619,6 @@ em_enable_wakeup(device_t dev)
E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
- /* ICH workaround code */
if ((adapter->hw.mac.type == e1000_ich8lan) ||
(adapter->hw.mac.type == e1000_pchlan) ||
(adapter->hw.mac.type == e1000_ich9lan) ||
@@ -5105,60 +4754,20 @@ out:
return ret;
}
-
-/*********************************************************************
-* 82544 Coexistence issue workaround.
-* There are 2 issues.
-* 1. Transmit Hang issue.
-* To detect this issue, following equation can be used...
-* SIZE[3:0] + ADDR[2:0] = SUM[3:0].
-* If SUM[3:0] is in between 1 to 4, we will have this issue.
-*
-* 2. DAC issue.
-* To detect this issue, following equation can be used...
-* SIZE[3:0] + ADDR[2:0] = SUM[3:0].
-* If SUM[3:0] is in between 9 to c, we will have this issue.
-*
-*
-* WORKAROUND:
-* Make sure we do not have ending address
-* as 1,2,3,4(Hang) or 9,a,b,c (DAC)
-*
-*************************************************************************/
-static u32
-em_fill_descriptors (bus_addr_t address, u32 length,
- PDESC_ARRAY desc_array)
+static void
+em_led_func(void *arg, int onoff)
{
- u32 safe_terminator;
-
- /* Since issue is sensitive to length and address.*/
- /* Let us first check the address...*/
- if (length <= 4) {
- desc_array->descriptor[0].address = address;
- desc_array->descriptor[0].length = length;
- desc_array->elements = 1;
- return (desc_array->elements);
- }
- safe_terminator = (u32)((((u32)address & 0x7) +
- (length & 0xF)) & 0xF);
- /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
- if (safe_terminator == 0 ||
- (safe_terminator > 4 &&
- safe_terminator < 9) ||
- (safe_terminator > 0xC &&
- safe_terminator <= 0xF)) {
- desc_array->descriptor[0].address = address;
- desc_array->descriptor[0].length = length;
- desc_array->elements = 1;
- return (desc_array->elements);
+ struct adapter *adapter = arg;
+
+ EM_CORE_LOCK(adapter);
+ if (onoff) {
+ e1000_setup_led(&adapter->hw);
+ e1000_led_on(&adapter->hw);
+ } else {
+ e1000_led_off(&adapter->hw);
+ e1000_cleanup_led(&adapter->hw);
}
-
- desc_array->descriptor[0].address = address;
- desc_array->descriptor[0].length = length - 4;
- desc_array->descriptor[1].address = address + (length - 4);
- desc_array->descriptor[1].length = 4;
- desc_array->elements = 2;
- return (desc_array->elements);
+ EM_CORE_UNLOCK(adapter);
}
/**********************************************************************
@@ -5270,6 +4879,8 @@ em_print_debug_info(struct adapter *adapter)
{
device_t dev = adapter->dev;
u8 *hw_addr = adapter->hw.hw_addr;
+ struct rx_ring *rxr = adapter->rx_rings;
+ struct tx_ring *txr = adapter->tx_rings;
device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
@@ -5287,29 +4898,33 @@ em_print_debug_info(struct adapter *adapter)
device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
E1000_READ_REG(&adapter->hw, E1000_RDTR),
E1000_READ_REG(&adapter->hw, E1000_RADV));
- device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
- (long long)adapter->tx_fifo_wrk_cnt,
- (long long)adapter->tx_fifo_reset_cnt);
- device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
- E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
- E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
- device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
- E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
- E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
- device_printf(dev, "Num Tx descriptors avail = %d\n",
- adapter->num_tx_desc_avail);
- device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
- adapter->no_tx_desc_avail1);
- device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
- adapter->no_tx_desc_avail2);
+
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ device_printf(dev, "Queue(%d) tdh = %d, tdt = %d\n", i,
+ E1000_READ_REG(&adapter->hw, E1000_TDH(i)),
+ E1000_READ_REG(&adapter->hw, E1000_TDT(i)));
+ device_printf(dev, "TX(%d) no descriptors avail event = %ld\n",
+ txr->me, txr->no_desc_avail);
+ device_printf(dev, "TX(%d) MSIX IRQ Handled = %ld\n",
+ txr->me, txr->tx_irq);
+ device_printf(dev, "Num Tx descriptors avail = %d\n",
+ txr->tx_avail);
+ device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
+ txr->no_desc_avail);
+ }
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+ device_printf(dev, "RX(%d) MSIX IRQ Handled = %ld\n",
+ rxr->me, rxr->rx_irq);
+ device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
+ E1000_READ_REG(&adapter->hw, E1000_RDH(i)),
+ E1000_READ_REG(&adapter->hw, E1000_RDT(i)));
+ }
device_printf(dev, "Std mbuf failed = %ld\n",
adapter->mbuf_alloc_failed);
device_printf(dev, "Std mbuf cluster failed = %ld\n",
adapter->mbuf_cluster_failed);
device_printf(dev, "Driver dropped packets = %ld\n",
adapter->dropped_pkts);
- device_printf(dev, "Driver tx dma failure in encap = %ld\n",
- adapter->no_tx_dma_setup);
}
static void
@@ -5342,12 +4957,8 @@ em_print_hw_stats(struct adapter *adapter)
(long long)adapter->stats.algnerrc);
device_printf(dev, "Collision/Carrier extension errors = %lld\n",
(long long)adapter->stats.cexterr);
- device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
device_printf(dev, "watchdog timeouts = %ld\n",
adapter->watchdog_events);
- device_printf(dev, "RX MSIX IRQ = %ld TX MSIX IRQ = %ld"
- " LINK MSIX IRQ = %ld\n", adapter->rx_irq,
- adapter->tx_irq , adapter->link_irq);
device_printf(dev, "XON Rcvd = %lld\n",
(long long)adapter->stats.xonrxc);
device_printf(dev, "XON Xmtd = %lld\n",
@@ -5451,9 +5062,7 @@ em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
struct em_int_delay_info *info;
struct adapter *adapter;
u32 regval;
- int error;
- int usecs;
- int ticks;
+ int error, usecs, ticks;
info = (struct em_int_delay_info *)arg1;
usecs = info->value;
@@ -5502,7 +5111,6 @@ em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
info, 0, em_sysctl_int_delay, "I", description);
}
-#ifndef EM_LEGACY_IRQ
static void
em_add_rx_process_limit(struct adapter *adapter, const char *name,
const char *description, int *limit, int value)
@@ -5512,6 +5120,5 @@ em_add_rx_process_limit(struct adapter *adapter, const char *name,
SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}
-#endif