Diffstat (limited to 'sys/dev/e1000/if_em.c')
 sys/dev/e1000/if_em.c | 127 +++++++++++++++----------
 1 file changed, 72 insertions(+), 55 deletions(-)
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index 247cf9d7fed3..bc841ba87ceb 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -1582,7 +1582,7 @@ em_if_init(if_ctx_t ctx)
E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
/* Clear bad data from Rx FIFOs */
- if (sc->hw.mac.type >= igb_mac_min)
+ if (sc->hw.mac.type >= igb_mac_min && !sc->vf_ifp)
e1000_rx_fifo_flush_base(&sc->hw);
/* Configure for OS presence */
@@ -1602,7 +1602,9 @@ em_if_init(if_ctx_t ctx)
/* Don't lose promiscuous settings */
em_if_set_promisc(ctx, if_getflags(ifp));
- e1000_clear_hw_cntrs_base_generic(&sc->hw);
+
+ if (sc->hw.mac.ops.clear_hw_cntrs != NULL)
+ sc->hw.mac.ops.clear_hw_cntrs(&sc->hw);
/* MSI-X configuration for 82574 */
if (sc->hw.mac.type == e1000_82574) {
@@ -2349,7 +2351,7 @@ em_if_stop(if_ctx_t ctx)
em_flush_desc_rings(sc);
e1000_reset_hw(&sc->hw);
- if (sc->hw.mac.type >= e1000_82544)
+ if (sc->hw.mac.type >= e1000_82544 && !sc->vf_ifp)
E1000_WRITE_REG(&sc->hw, E1000_WUFC, 0);
e1000_led_off(&sc->hw);
@@ -2408,6 +2410,9 @@ em_allocate_pci_resources(if_ctx_t ctx)
}
sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);
+#ifdef INVARIANTS
+ sc->osdep.mem_bus_space_size = rman_get_size(sc->memory);
+#endif
sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
/* Only older adapters use IO mapping */
@@ -3075,9 +3080,13 @@ em_reset(if_ctx_t ctx)
case e1000_82573:
pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
break;
+ /* 82574/82583: Total Packet Buffer is 40K */
case e1000_82574:
case e1000_82583:
- pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
+ if (hw->mac.max_frame_size > 8192)
+ pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
+ else
+ pba = E1000_PBA_32K; /* 32K for RX, 8K for Tx */
break;
case e1000_ich8lan:
pba = E1000_PBA_8K;
@@ -3259,11 +3268,13 @@ em_reset(if_ctx_t ctx)
/* Issue a global reset */
e1000_reset_hw(hw);
- if (hw->mac.type >= igb_mac_min) {
- E1000_WRITE_REG(hw, E1000_WUC, 0);
- } else {
- E1000_WRITE_REG(hw, E1000_WUFC, 0);
- em_disable_aspm(sc);
+ if (!sc->vf_ifp) {
+ if (hw->mac.type >= igb_mac_min) {
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
+ } else {
+ E1000_WRITE_REG(hw, E1000_WUFC, 0);
+ em_disable_aspm(sc);
+ }
}
if (sc->flags & IGB_MEDIA_RESET) {
e1000_setup_init_funcs(hw, true);
@@ -3404,12 +3415,8 @@ igb_initialize_rss_mapping(struct e1000_softc *sc)
*/
mrqc = E1000_MRQC_ENABLE_RSS_MQ;
-#ifdef RSS
/* XXX ew typecasting */
rss_getkey((uint8_t *) &rss_key);
-#else
- arc4rand(&rss_key, sizeof(rss_key), 0);
-#endif
for (i = 0; i < 10; i++)
E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key[i]);
@@ -3813,7 +3820,7 @@ em_initialize_receive_unit(if_ctx_t ctx)
sc->rx_int_delay.value);
}
- if (hw->mac.type >= em_mac_min) {
+ if (hw->mac.type >= em_mac_min && !sc->vf_ifp) {
uint32_t rfctl;
/* Use extended rx descriptor formats */
rfctl = E1000_READ_REG(hw, E1000_RFCTL);
@@ -3833,33 +3840,38 @@ em_initialize_receive_unit(if_ctx_t ctx)
E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
}
- /* Set up L3 and L4 csum Rx descriptor offloads */
- rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
- if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
- rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPOFL;
- if (hw->mac.type > e1000_82575)
- rxcsum |= E1000_RXCSUM_CRCOFL;
- else if (hw->mac.type < em_mac_min &&
- if_getcapenable(ifp) & IFCAP_HWCSUM_IPV6)
- rxcsum |= E1000_RXCSUM_IPV6OFL;
- } else {
- rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
- if (hw->mac.type > e1000_82575)
- rxcsum &= ~E1000_RXCSUM_CRCOFL;
- else if (hw->mac.type < em_mac_min)
- rxcsum &= ~E1000_RXCSUM_IPV6OFL;
- }
+ /*
+ * Set up L3 and L4 csum Rx descriptor offloads only on Physical
+ * Functions. Virtual Functions have no access to this register.
+ */
+ if (!sc->vf_ifp) {
+ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+ if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
+ rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPOFL;
+ if (hw->mac.type > e1000_82575)
+ rxcsum |= E1000_RXCSUM_CRCOFL;
+ else if (hw->mac.type < em_mac_min &&
+ if_getcapenable(ifp) & IFCAP_HWCSUM_IPV6)
+ rxcsum |= E1000_RXCSUM_IPV6OFL;
+ } else {
+ rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
+ if (hw->mac.type > e1000_82575)
+ rxcsum &= ~E1000_RXCSUM_CRCOFL;
+ else if (hw->mac.type < em_mac_min)
+ rxcsum &= ~E1000_RXCSUM_IPV6OFL;
+ }
- if (sc->rx_num_queues > 1) {
- /* RSS hash needed in the Rx descriptor */
- rxcsum |= E1000_RXCSUM_PCSD;
+ if (sc->rx_num_queues > 1) {
+ /* RSS hash needed in the Rx descriptor */
+ rxcsum |= E1000_RXCSUM_PCSD;
- if (hw->mac.type >= igb_mac_min)
- igb_initialize_rss_mapping(sc);
- else
- em_initialize_rss_mapping(sc);
+ if (hw->mac.type >= igb_mac_min)
+ igb_initialize_rss_mapping(sc);
+ else
+ em_initialize_rss_mapping(sc);
+ }
+ E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
}
- E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
for (i = 0, que = sc->rx_queues; i < sc->rx_num_queues; i++, que++) {
struct rx_ring *rxr = &que->rxr;
@@ -4011,7 +4023,15 @@ em_if_vlan_register(if_ctx_t ctx, u16 vtag)
bit = vtag & 0x1F;
sc->shadow_vfta[index] |= (1 << bit);
++sc->num_vlans;
- em_if_vlan_filter_write(sc);
+ if (!sc->vf_ifp)
+ em_if_vlan_filter_write(sc);
+ else
+ /*
+		 * The physical function may reject registering a VLAN
+ * but we have no way to inform the stack
+ * about that.
+ */
+ e1000_vfta_set_vf(&sc->hw, vtag, true);
}
static void
@@ -4024,7 +4044,10 @@ em_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
bit = vtag & 0x1F;
sc->shadow_vfta[index] &= ~(1 << bit);
--sc->num_vlans;
- em_if_vlan_filter_write(sc);
+ if (!sc->vf_ifp)
+ em_if_vlan_filter_write(sc);
+ else
+ e1000_vfta_set_vf(&sc->hw, vtag, false);
}
static bool
@@ -4082,22 +4105,15 @@ em_if_vlan_filter_write(struct e1000_softc *sc)
{
struct e1000_hw *hw = &sc->hw;
- if (sc->vf_ifp)
- return;
+ KASSERT(!sc->vf_ifp, ("VLAN filter write on VF\n"));
/* Disable interrupts for lem(4) devices during the filter change */
if (hw->mac.type < em_mac_min)
em_if_intr_disable(sc->ctx);
for (int i = 0; i < EM_VFTA_SIZE; i++)
- if (sc->shadow_vfta[i] != 0) {
- /* XXXKB: incomplete VF support, we returned above */
- if (sc->vf_ifp)
- e1000_vfta_set_vf(hw, sc->shadow_vfta[i],
- true);
- else
- e1000_write_vfta(hw, i, sc->shadow_vfta[i]);
- }
+ if (sc->shadow_vfta[i] != 0)
+ e1000_write_vfta(hw, i, sc->shadow_vfta[i]);
/* Re-enable interrupts for lem-class devices */
if (hw->mac.type < em_mac_min)
@@ -4112,8 +4128,10 @@ em_setup_vlan_hw_support(if_ctx_t ctx)
if_t ifp = iflib_get_ifp(ctx);
u32 reg;
- /* XXXKB: Return early if we are a VF until VF decap and filter
- * management is ready and tested.
+ /*
+ * Only PFs have control over VLAN HW filtering
+ * configuration. VFs have to act as if it's always
+ * enabled.
*/
if (sc->vf_ifp)
return;
@@ -4367,6 +4385,8 @@ em_get_wakeup(if_ctx_t ctx)
switch (sc->hw.mac.type) {
case e1000_82542:
case e1000_82543:
+ case e1000_vfadapt:
+ case e1000_vfadapt_i350:
break;
case e1000_82544:
e1000_read_nvm(&sc->hw,
@@ -4412,8 +4432,6 @@ em_get_wakeup(if_ctx_t ctx)
case e1000_i354:
case e1000_i210:
case e1000_i211:
- case e1000_vfadapt:
- case e1000_vfadapt_i350:
apme_mask = E1000_WUC_APME;
sc->has_amt = true;
eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC);
@@ -4469,7 +4487,6 @@ em_get_wakeup(if_ctx_t ctx)
global_quad_port_a = 0;
break;
}
- return;
}
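
The recurring idea in the hunks above is that a Virtual Function must never touch PF-only registers (WUC/WUFC, RXCSUM, the VFTA array) and that MAC-specific work such as clearing the statistics counters should go through the hw.mac.ops table rather than a hard-coded base routine. The standalone sketch below only models that guard pattern with simplified mock types; none of these names come from the driver, and it illustrates the shape of the checks, not the real e1000 API.

	#include <stdbool.h>
	#include <stdio.h>

	struct mock_hw {
		/* Optional per-MAC operation; a VF may install its own or none. */
		void (*clear_hw_cntrs)(struct mock_hw *hw);
	};

	struct mock_softc {
		struct mock_hw	hw;
		bool		vf_ifp;	/* true when attached as a Virtual Function */
	};

	/* Stand-in for a wake-up register write a VF is not allowed to issue. */
	static void
	mock_write_wakeup_reg(struct mock_softc *sc, unsigned int val)
	{
		(void)sc;
		printf("PF-only register write: 0x%x\n", val);
	}

	static void
	mock_clear_base_cntrs(struct mock_hw *hw)
	{
		(void)hw;
		printf("statistics counters cleared\n");
	}

	static void
	mock_stop(struct mock_softc *sc)
	{
		/* Skip registers a VF is not allowed to touch. */
		if (!sc->vf_ifp)
			mock_write_wakeup_reg(sc, 0);

		/* Dispatch through the per-MAC op instead of a hard-coded base call. */
		if (sc->hw.clear_hw_cntrs != NULL)
			sc->hw.clear_hw_cntrs(&sc->hw);
	}

	int
	main(void)
	{
		struct mock_softc pf = { .hw = { mock_clear_base_cntrs }, .vf_ifp = false };
		struct mock_softc vf = { .hw = { NULL }, .vf_ifp = true };

		mock_stop(&pf);		/* writes the register and clears counters */
		mock_stop(&vf);		/* skips both PF-only operations */
		return (0);
	}

Routing counter clearing through a function pointer is what lets a VF MAC setup supply its own routine (or none at all), which is why the commit drops the direct e1000_clear_hw_cntrs_base_generic() call in em_if_init() in favor of the NULL-checked mac.ops.clear_hw_cntrs dispatch.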