Diffstat (limited to 'sys/dev/ixl/if_ixlv.c')
-rw-r--r--  sys/dev/ixl/if_ixlv.c  608
1 file changed, 374 insertions(+), 234 deletions(-)
diff --git a/sys/dev/ixl/if_ixlv.c b/sys/dev/ixl/if_ixlv.c
index 390ef51e5ce8..5c86b93e7ff2 100644
--- a/sys/dev/ixl/if_ixlv.c
+++ b/sys/dev/ixl/if_ixlv.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -38,7 +38,13 @@
/*********************************************************************
* Driver version
*********************************************************************/
-char ixlv_driver_version[] = "1.4.12-k";
+#define IXLV_DRIVER_VERSION_MAJOR 1
+#define IXLV_DRIVER_VERSION_MINOR 5
+#define IXLV_DRIVER_VERSION_BUILD 4
+
+char ixlv_driver_version[] = __XSTRING(IXLV_DRIVER_VERSION_MAJOR) "."
+ __XSTRING(IXLV_DRIVER_VERSION_MINOR) "."
+ __XSTRING(IXLV_DRIVER_VERSION_BUILD) "-k";
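The two-stage expansion matters here: __XSTRING() (from sys/cdefs.h) expands its macro argument before stringifying it, so the numeric defines above collapse into the single literal "1.5.4-k" at compile time. A minimal sketch of the same idiom, using hypothetical DEMO_* names rather than the driver's:

/*
 * Two-stage stringification sketch; the DEMO_* names are illustrative
 * only and not part of the driver.
 */
#define DEMO_STR(x)  #x            /* stringify the token as-is */
#define DEMO_XSTR(x) DEMO_STR(x)   /* expand x first, then stringify */

#define DEMO_MAJOR 1
#define DEMO_MINOR 5
#define DEMO_BUILD 4

/* Adjacent string literals concatenate: this yields "1.5.4-k". */
char demo_version[] = DEMO_XSTR(DEMO_MAJOR) "."
    DEMO_XSTR(DEMO_MINOR) "." DEMO_XSTR(DEMO_BUILD) "-k";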
/*********************************************************************
* PCI Device ID Table
@@ -54,7 +60,7 @@ static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -64,7 +70,7 @@ static ixl_vendor_info_t ixlv_vendor_info_array[] =
*********************************************************************/
static char *ixlv_strings[] = {
- "Intel(R) Ethernet Connection XL710/X722 VF Driver"
+ "Intel(R) Ethernet Connection 700 Series VF Driver"
};
@@ -86,6 +92,7 @@ static void ixlv_config_rss(struct ixlv_sc *);
static void ixlv_stop(struct ixlv_sc *);
static void ixlv_add_multi(struct ixl_vsi *);
static void ixlv_del_multi(struct ixl_vsi *);
+static void ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que);
static void ixlv_free_queues(struct ixl_vsi *);
static int ixlv_setup_interface(device_t, struct ixlv_sc *);
static int ixlv_teardown_adminq_msix(struct ixlv_sc *);
@@ -129,6 +136,9 @@ static int ixlv_vf_config(struct ixlv_sc *);
static void ixlv_cap_txcsum_tso(struct ixl_vsi *,
struct ifnet *, int);
+static char *ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed);
+static int ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
+
static void ixlv_add_sysctls(struct ixlv_sc *);
#ifdef IXL_DEBUG
static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
@@ -167,12 +177,17 @@ static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
/*
** Number of descriptors per ring:
-** - TX and RX are the same size
+** - TX and RX sizes are independently configurable
*/
-static int ixlv_ringsz = IXL_DEFAULT_RING;
-TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
-SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
- &ixlv_ringsz, 0, "Descriptor Ring Size");
+static int ixlv_tx_ring_size = IXL_DEFAULT_RING;
+TUNABLE_INT("hw.ixlv.tx_ring_size", &ixlv_tx_ring_size);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN,
+ &ixlv_tx_ring_size, 0, "TX Descriptor Ring Size");
+
+static int ixlv_rx_ring_size = IXL_DEFAULT_RING;
+TUNABLE_INT("hw.ixlv.rx_ring_size", &ixlv_rx_ring_size);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN,
+ &ixlv_rx_ring_size, 0, "RX Descriptor Ring Size");
/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
@@ -192,6 +207,17 @@ SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
&ixlv_txbrsz, 0, "TX Buf Ring Size");
/*
+ * Different method for processing TX descriptor
+ * completion.
+ */
+static int ixlv_enable_head_writeback = 0;
+TUNABLE_INT("hw.ixlv.enable_head_writeback",
+ &ixlv_enable_head_writeback);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
+ &ixlv_enable_head_writeback, 0,
+ "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
+
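For context on this tunable: in head-writeback mode the hardware periodically stores the index of the last completed descriptor into a reserved slot just past the end of the TX ring (the extra sizeof(u32) allocated in ixlv_setup_queue() below), so the cleanup path compares ring indices instead of polling each descriptor for a done status. A rough sketch of the two detection styles, with simplified placeholder helpers rather than the driver's actual cleanup code:

/*
 * Illustrative only; the real logic lives in the shared TX cleanup path.
 */

/* Descriptor write-back: check the DTYPE field of the EOP descriptor. */
static inline bool
demo_desc_done(struct i40e_tx_desc *txd)
{
	return ((txd->cmd_type_offset_bsz & I40E_TXD_QW1_DTYPE_MASK) ==
	    I40E_TX_DESC_DTYPE_DESC_DONE);
}

/* Head writeback: read the index the HW stored just past the ring. */
static inline u32
demo_hw_head(struct i40e_tx_desc *ring_base, int num_tx_desc)
{
	return (*(volatile u32 *)&ring_base[num_tx_desc]);
}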
+/*
** Controls for Interrupt Throttling
** - true/false for dynamic adjustment
** - default values for static ITR
@@ -300,6 +326,9 @@ ixlv_attach(device_t dev)
/* Allocate filter lists */
ixlv_init_filters(sc);
+ /* Save this tunable */
+ vsi->enable_head_writeback = ixlv_enable_head_writeback;
+
/* Core Lock Init */
mtx_init(&sc->mtx, device_get_nameunit(dev),
"IXL SC Lock", MTX_DEF);
@@ -402,7 +431,9 @@ ixlv_attach(device_t dev)
vsi->id = sc->vsi_res->vsi_id;
vsi->back = (void *)sc;
- sc->link_up = TRUE;
+ vsi->flags |= IXL_FLAGS_IS_VF | IXL_FLAGS_USES_MSIX;
+
+ ixl_vsi_setup_rings_size(vsi, ixlv_tx_ring_size, ixlv_rx_ring_size);
/* This allocates the memory and early settings */
if (ixlv_setup_queues(sc) != 0) {
@@ -412,27 +443,32 @@ ixlv_attach(device_t dev)
goto out;
}
- /* Setup the stack interface */
- if (ixlv_setup_interface(dev, sc) != 0) {
- device_printf(dev, "%s: setup interface failed!\n",
+ /* Do queue interrupt setup */
+ if (ixlv_assign_msix(sc) != 0) {
+ device_printf(dev, "%s: allocating queue interrupts failed!\n",
__func__);
- error = EIO;
+ error = ENXIO;
goto out;
}
- INIT_DBG_DEV(dev, "Queue memory and interface setup");
+ INIT_DBG_DEV(dev, "Queue memory and interrupts setup");
- /* Do queue interrupt setup */
- if (ixlv_assign_msix(sc) != 0) {
- device_printf(dev, "%s: allocating queue interrupts failed!\n",
+ /* Setup the stack interface */
+ if (ixlv_setup_interface(dev, sc) != 0) {
+ device_printf(dev, "%s: setup interface failed!\n",
__func__);
- error = ENXIO;
+ error = EIO;
goto out;
}
+ INIT_DBG_DEV(dev, "Interface setup complete");
+
/* Start AdminQ taskqueue */
ixlv_init_taskqueue(sc);
+ /* We expect a link state message, so schedule the AdminQ task now */
+ taskqueue_enqueue(sc->tq, &sc->aq_irq);
+
/* Initialize stats */
bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
ixlv_add_sysctls(sc);
@@ -456,6 +492,7 @@ ixlv_attach(device_t dev)
out:
ixlv_free_queues(vsi);
+ ixlv_teardown_adminq_msix(sc);
err_res_buf:
free(sc->vf_res, M_DEVBUF);
err_aq:
@@ -495,6 +532,9 @@ ixlv_detach(device_t dev)
return (EBUSY);
}
+ /* Remove all the media and link information */
+ ifmedia_removeall(&sc->media);
+
/* Stop driver */
ether_ifdetach(vsi->ifp);
if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
@@ -525,8 +565,8 @@ ixlv_detach(device_t dev)
if_free(vsi->ifp);
free(sc->vf_res, M_DEVBUF);
- ixlv_free_pci_resources(sc);
ixlv_free_queues(vsi);
+ ixlv_free_pci_resources(sc);
ixlv_free_filters(sc);
bus_generic_detach(dev);
@@ -1138,8 +1178,8 @@ retry_config:
retried + 1);
if (!sc->vf_res) {
- bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
- (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
+ bufsz = sizeof(struct virtchnl_vf_resource) +
+ (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
if (!sc->vf_res) {
device_printf(dev,
@@ -1318,29 +1358,10 @@ ixlv_allocate_pci_resources(struct ixlv_sc *sc)
sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
-
sc->hw.back = &sc->osdep;
- /*
- ** Explicitly set the guest PCI BUSMASTER capability
- ** and we must rewrite the ENABLE in the MSIX control
- ** register again at this point to cause the host to
- ** successfully initialize us.
- **
- ** This must be set before accessing any registers.
- */
- {
- u16 pci_cmd_word;
- int msix_ctrl;
- pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
- pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
- pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
- pci_find_cap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
- }
+ ixl_set_busmaster(dev);
+ ixl_set_msix_enable(dev);
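The inline block deleted above survives as the shared ixl_set_busmaster() and ixl_set_msix_enable() helpers. Reconstructed from the removed code (the real bodies live in the common ixl sources, so treat this as a sketch):

/* Explicitly set the guest PCI BUSMASTER capability. */
static void
demo_set_busmaster(device_t dev)
{
	u16 pci_cmd_word;

	pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
}

/* Rewrite the ENABLE bit in the MSIX control register. */
static void
demo_set_msix_enable(device_t dev)
{
	int msix_ctrl, rid;

	pci_find_cap(dev, PCIY_MSIX, &rid);
	rid += PCIR_MSIX_CTRL;
	msix_ctrl = pci_read_config(dev, rid, 2);
	msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(dev, rid, msix_ctrl, 2);
}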
/* Disable adminq interrupts (just in case) */
ixlv_disable_adminq_irq(&sc->hw);
@@ -1348,33 +1369,37 @@ ixlv_allocate_pci_resources(struct ixlv_sc *sc)
return (0);
}
+/*
+ * Free MSI-X related resources for a single queue
+ */
static void
-ixlv_free_pci_resources(struct ixlv_sc *sc)
+ixlv_free_msix_resources(struct ixlv_sc *sc, struct ixl_queue *que)
{
- struct ixl_vsi *vsi = &sc->vsi;
- struct ixl_queue *que = vsi->queues;
device_t dev = sc->dev;
- /* We may get here before stations are setup */
- if (que == NULL)
- goto early;
-
/*
** Release all msix queue resources:
*/
- for (int i = 0; i < vsi->num_queues; i++, que++) {
+ if (que->tag != NULL) {
+ bus_teardown_intr(dev, que->res, que->tag);
+ que->tag = NULL;
+ }
+ if (que->res != NULL) {
int rid = que->msix + 1;
- if (que->tag != NULL) {
- bus_teardown_intr(dev, que->res, que->tag);
- que->tag = NULL;
- }
- if (que->res != NULL) {
- bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
- que->res = NULL;
- }
+ bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
+ que->res = NULL;
}
-
-early:
+ if (que->tq != NULL) {
+ taskqueue_free(que->tq);
+ que->tq = NULL;
+ }
+}
+
+static void
+ixlv_free_pci_resources(struct ixlv_sc *sc)
+{
+ device_t dev = sc->dev;
+
pci_release_msi(dev);
if (sc->msix_mem != NULL)
@@ -1437,7 +1462,7 @@ ixlv_assign_msix(struct ixlv_sc *sc)
INTR_TYPE_NET | INTR_MPSAFE, NULL,
ixlv_msix_que, que, &que->tag);
if (error) {
- que->res = NULL;
+ que->tag = NULL;
device_printf(dev, "Failed to register que handler");
return (error);
}
@@ -1518,8 +1543,8 @@ ixlv_reset_complete(struct i40e_hw *hw)
reg = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if ((reg == I40E_VFR_VFACTIVE) ||
- (reg == I40E_VFR_COMPLETED))
+ if ((reg == VIRTCHNL_VFR_VFACTIVE) ||
+ (reg == VIRTCHNL_VFR_COMPLETED))
return (0);
i40e_msec_pause(100);
}
@@ -1552,7 +1577,11 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_mtu = ETHERMTU;
+#if __FreeBSD_version >= 1100000
ifp->if_baudrate = IF_Gbps(40);
+#else
+ if_initbaudrate(ifp, IF_Gbps(40));
+#endif
ifp->if_init = ixlv_init;
ifp->if_softc = vsi;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
@@ -1565,7 +1594,7 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
ifp->if_transmit = ixl_mq_start;
ifp->if_qflush = ixl_qflush;
- ifp->if_snd.ifq_maxlen = que->num_desc - 2;
+ ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2;
ether_ifattach(ifp, sc->hw.mac.addr);
@@ -1573,6 +1602,10 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ ETHER_VLAN_ENCAP_LEN;
+ ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
+ ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
+ ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE;
+
/*
* Tell the upper layer(s) we support long frames.
*/
@@ -1607,7 +1640,12 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
ixlv_media_status);
- // JFV Add media types later?
+ /* Media types based on reported link speed over AdminQ */
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
@@ -1617,6 +1655,116 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
}
/*
+** Allocate and setup a single queue
+*/
+static int
+ixlv_setup_queue(struct ixlv_sc *sc, struct ixl_queue *que)
+{
+ device_t dev = sc->dev;
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+ int rsize, tsize;
+ int error = I40E_SUCCESS;
+
+ txr = &que->txr;
+ txr->que = que;
+ txr->tail = I40E_QTX_TAIL1(que->me);
+ /* Initialize the TX lock */
+ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
+ device_get_nameunit(dev), que->me);
+ mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
+ /*
+ * Create the TX descriptor ring
+ *
+ * In Head Writeback mode, the descriptor ring is one bigger
+ * than the number of descriptors for space for the HW to
+ * write back index of last completed descriptor.
+ */
+ if (sc->vsi.enable_head_writeback) {
+ tsize = roundup2((que->num_tx_desc *
+ sizeof(struct i40e_tx_desc)) +
+ sizeof(u32), DBA_ALIGN);
+ } else {
+ tsize = roundup2((que->num_tx_desc *
+ sizeof(struct i40e_tx_desc)), DBA_ALIGN);
+ }
+ if (i40e_allocate_dma_mem(&sc->hw,
+ &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
+ device_printf(dev,
+ "Unable to allocate TX Descriptor memory\n");
+ error = ENOMEM;
+ goto err_destroy_tx_mtx;
+ }
+ txr->base = (struct i40e_tx_desc *)txr->dma.va;
+ bzero((void *)txr->base, tsize);
+ /* Now allocate transmit soft structs for the ring */
+ if (ixl_allocate_tx_data(que)) {
+ device_printf(dev,
+ "Critical Failure setting up TX structures\n");
+ error = ENOMEM;
+ goto err_free_tx_dma;
+ }
+ /* Allocate a buf ring */
+ txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
+ M_WAITOK, &txr->mtx);
+ if (txr->br == NULL) {
+ device_printf(dev,
+ "Critical Failure setting up TX buf ring\n");
+ error = ENOMEM;
+ goto err_free_tx_data;
+ }
+
+ /*
+ * Next the RX queues...
+ */
+ rsize = roundup2(que->num_rx_desc *
+ sizeof(union i40e_rx_desc), DBA_ALIGN);
+ rxr = &que->rxr;
+ rxr->que = que;
+ rxr->tail = I40E_QRX_TAIL1(que->me);
+
+ /* Initialize the RX side lock */
+ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
+ device_get_nameunit(dev), que->me);
+ mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
+
+ if (i40e_allocate_dma_mem(&sc->hw,
+ &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
+ device_printf(dev,
+ "Unable to allocate RX Descriptor memory\n");
+ error = ENOMEM;
+ goto err_destroy_rx_mtx;
+ }
+ rxr->base = (union i40e_rx_desc *)rxr->dma.va;
+ bzero((void *)rxr->base, rsize);
+
+ /* Allocate receive soft structs for the ring */
+ if (ixl_allocate_rx_data(que)) {
+ device_printf(dev,
+ "Critical Failure setting up receive structs\n");
+ error = ENOMEM;
+ goto err_free_rx_dma;
+ }
+
+ return (0);
+
+err_free_rx_dma:
+ i40e_free_dma_mem(&sc->hw, &rxr->dma);
+err_destroy_rx_mtx:
+ mtx_destroy(&rxr->mtx);
+ /* err_free_tx_buf_ring */
+ buf_ring_free(txr->br, M_DEVBUF);
+err_free_tx_data:
+ ixl_free_que_tx(que);
+err_free_tx_dma:
+ i40e_free_dma_mem(&sc->hw, &txr->dma);
+err_destroy_tx_mtx:
+ mtx_destroy(&txr->mtx);
+
+ return (error);
+}
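As a concrete check of the TX sizing above, assuming the stock constants (IXL_DEFAULT_RING of 1024 descriptors, the 16-byte struct i40e_tx_desc, and DBA_ALIGN of 128; all defined in ixl.h, so verify there):

/* Head writeback costs one extra alignment unit per TX ring: */
tsize = roundup2(1024 * 16, 128);                /* plain:   16384 bytes */
tsize = roundup2(1024 * 16 + sizeof(u32), 128);  /* head WB: 16512 bytes */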
+
+/*
** Allocate and setup the interface queues
*/
static int
@@ -1625,9 +1773,7 @@ ixlv_setup_queues(struct ixlv_sc *sc)
device_t dev = sc->dev;
struct ixl_vsi *vsi;
struct ixl_queue *que;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- int rsize, tsize;
+ int i;
int error = I40E_SUCCESS;
vsi = &sc->vsi;
@@ -1640,104 +1786,30 @@ ixlv_setup_queues(struct ixlv_sc *sc)
(struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate queue memory\n");
- error = ENOMEM;
- goto early;
+ return ENOMEM;
}
- for (int i = 0; i < vsi->num_queues; i++) {
+ for (i = 0; i < vsi->num_queues; i++) {
que = &vsi->queues[i];
- que->num_desc = ixlv_ringsz;
+ que->num_tx_desc = vsi->num_tx_desc;
+ que->num_rx_desc = vsi->num_rx_desc;
que->me = i;
que->vsi = vsi;
- txr = &que->txr;
- txr->que = que;
- txr->tail = I40E_QTX_TAIL1(que->me);
- /* Initialize the TX lock */
- snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
- device_get_nameunit(dev), que->me);
- mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
- /*
- ** Create the TX descriptor ring, the extra int is
- ** added as the location for HEAD WB.
- */
- tsize = roundup2((que->num_desc *
- sizeof(struct i40e_tx_desc)) +
- sizeof(u32), DBA_ALIGN);
- if (i40e_allocate_dma_mem(&sc->hw,
- &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
- device_printf(dev,
- "Unable to allocate TX Descriptor memory\n");
- error = ENOMEM;
- goto fail;
- }
- txr->base = (struct i40e_tx_desc *)txr->dma.va;
- bzero((void *)txr->base, tsize);
- /* Now allocate transmit soft structs for the ring */
- if (ixl_allocate_tx_data(que)) {
- device_printf(dev,
- "Critical Failure setting up TX structures\n");
- error = ENOMEM;
- goto fail;
- }
- /* Allocate a buf ring */
- txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
- M_WAITOK, &txr->mtx);
- if (txr->br == NULL) {
- device_printf(dev,
- "Critical Failure setting up TX buf ring\n");
- error = ENOMEM;
- goto fail;
- }
-
- /*
- * Next the RX queues...
- */
- rsize = roundup2(que->num_desc *
- sizeof(union i40e_rx_desc), DBA_ALIGN);
- rxr = &que->rxr;
- rxr->que = que;
- rxr->tail = I40E_QRX_TAIL1(que->me);
-
- /* Initialize the RX side lock */
- snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
- device_get_nameunit(dev), que->me);
- mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
-
- if (i40e_allocate_dma_mem(&sc->hw,
- &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
- device_printf(dev,
- "Unable to allocate RX Descriptor memory\n");
- error = ENOMEM;
- goto fail;
- }
- rxr->base = (union i40e_rx_desc *)rxr->dma.va;
- bzero((void *)rxr->base, rsize);
-
- /* Allocate receive soft structs for the ring */
- if (ixl_allocate_rx_data(que)) {
- device_printf(dev,
- "Critical Failure setting up receive structs\n");
+ if (ixlv_setup_queue(sc, que)) {
error = ENOMEM;
- goto fail;
+ goto err_free_queues;
}
}
return (0);
-fail:
- for (int i = 0; i < vsi->num_queues; i++) {
- que = &vsi->queues[i];
- rxr = &que->rxr;
- txr = &que->txr;
- if (rxr->base)
- i40e_free_dma_mem(&sc->hw, &rxr->dma);
- if (txr->base)
- i40e_free_dma_mem(&sc->hw, &txr->dma);
- }
+err_free_queues:
+ while (i--)
+ ixlv_free_queue(sc, &vsi->queues[i]);
+
free(vsi->queues, M_DEVBUF);
-early:
return (error);
}
@@ -2255,6 +2327,34 @@ ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
ifmr->ifm_status |= IFM_ACTIVE;
/* Hardware is always full-duplex */
ifmr->ifm_active |= IFM_FDX;
+
+ /* Based on the link speed reported by the PF over the AdminQ, choose a
+ * PHY type to report. This isn't 100% correct since we don't really
+ * know the underlying PHY type of the PF, but at least we can report
+ * a valid link speed...
+ */
+ switch (sc->link_speed) {
+ case VIRTCHNL_LINK_SPEED_100MB:
+ ifmr->ifm_active |= IFM_100_TX;
+ break;
+ case VIRTCHNL_LINK_SPEED_1GB:
+ ifmr->ifm_active |= IFM_1000_T;
+ break;
+ case VIRTCHNL_LINK_SPEED_10GB:
+ ifmr->ifm_active |= IFM_10G_SR;
+ break;
+ case VIRTCHNL_LINK_SPEED_20GB:
+ case VIRTCHNL_LINK_SPEED_25GB:
+ ifmr->ifm_active |= IFM_25G_SR;
+ break;
+ case VIRTCHNL_LINK_SPEED_40GB:
+ ifmr->ifm_active |= IFM_40G_SR4;
+ break;
+ default:
+ ifmr->ifm_active |= IFM_UNKNOWN;
+ break;
+ }
+
mtx_unlock(&sc->mtx);
INIT_DBG_IF(ifp, "end");
return;
@@ -2279,8 +2379,10 @@ ixlv_media_change(struct ifnet * ifp)
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
return (EINVAL);
+ if_printf(ifp, "Changing speed is not supported\n");
+
INIT_DBG_IF(ifp, "end");
- return (0);
+ return (ENODEV);
}
@@ -2342,7 +2444,7 @@ ixlv_add_multi(struct ixl_vsi *vsi)
if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
/* delete all multicast filters */
ixlv_init_multi(vsi);
- sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
+ sc->promiscuous_flags |= FLAG_VF_MULTICAST_PROMISC;
ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
sc);
@@ -2435,15 +2537,10 @@ ixlv_del_multi(struct ixl_vsi *vsi)
static void
ixlv_local_timer(void *arg)
{
- struct ixlv_sc *sc = arg;
+ struct ixlv_sc *sc = arg;
struct i40e_hw *hw = &sc->hw;
struct ixl_vsi *vsi = &sc->vsi;
- struct ixl_queue *que = vsi->queues;
- device_t dev = sc->dev;
- struct tx_ring *txr;
- int hung = 0;
- u32 mask, val;
- s32 timer, new_timer;
+ u32 val;
IXLV_CORE_LOCK_ASSERT(sc);
@@ -2455,9 +2552,9 @@ ixlv_local_timer(void *arg)
val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (val != I40E_VFR_VFACTIVE
- && val != I40E_VFR_COMPLETED) {
- DDPRINTF(dev, "reset in progress! (%d)", val);
+ if (val != VIRTCHNL_VFR_VFACTIVE
+ && val != VIRTCHNL_VFR_COMPLETED) {
+ DDPRINTF(sc->dev, "reset in progress! (%d)", val);
return;
}
@@ -2466,48 +2563,11 @@ ixlv_local_timer(void *arg)
/* clean and process any events */
taskqueue_enqueue(sc->tq, &sc->aq_irq);
- /*
- ** Check status on the queues for a hang
- */
- mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- txr = &que->txr;
- timer = atomic_load_acq_32(&txr->watchdog_timer);
- if (timer > 0) {
- new_timer = timer - hz;
- if (new_timer <= 0) {
- atomic_store_rel_32(&txr->watchdog_timer, -1);
- device_printf(dev, "WARNING: queue %d "
- "appears to be hung!\n", que->me);
- ++hung;
- } else {
- /*
- * If this fails, that means something in the TX path has updated
- * the watchdog, so it means the TX path is still working and
- * the watchdog doesn't need to countdown.
- */
- atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
- /* Any queues with outstanding work get a sw irq */
- wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
- }
- }
- }
- /* Reset when a queue shows hung */
- if (hung)
- goto hung;
+ /* Increment stat when a queue shows hung */
+ if (ixl_queue_hang_check(vsi))
+ sc->watchdog_events++;
callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
- return;
-
-hung:
- device_printf(dev, "WARNING: Resetting!\n");
- sc->init_state = IXLV_RESET_REQUIRED;
- sc->watchdog_events++;
- ixlv_stop(sc);
- ixlv_init_locked(sc);
}
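The per-queue watchdog countdown removed here moved into the shared ixl_queue_hang_check(). Judging purely from the deleted code, its shape is roughly the following; this is a reconstruction, not the shared function's actual body:

static int
demo_queue_hang_check(struct ixl_vsi *vsi)
{
	struct ixl_queue *que = vsi->queues;
	s32 timer, new_timer;
	int hung = 0;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		timer = atomic_load_acq_32(&que->txr.watchdog_timer);
		if (timer <= 0)
			continue;
		new_timer = timer - hz;
		if (new_timer <= 0) {
			atomic_store_rel_32(&que->txr.watchdog_timer, -1);
			++hung;
		} else {
			/*
			 * A lost CAS means the TX path refreshed the
			 * watchdog, so no countdown is needed.
			 */
			atomic_cmpset_rel_32(&que->txr.watchdog_timer,
			    timer, new_timer);
		}
	}
	return (hung);
}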
/*
@@ -2524,8 +2584,8 @@ ixlv_update_link_status(struct ixlv_sc *sc)
if (sc->link_up){
if (vsi->link_active == FALSE) {
if (bootverbose)
- if_printf(ifp,"Link is Up, %d Gbps\n",
- (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
+ if_printf(ifp,"Link is Up, %s\n",
+ ixlv_vc_speed_to_string(sc->link_speed));
vsi->link_active = TRUE;
if_link_state_change(ifp, LINK_STATE_UP);
}
@@ -2573,6 +2633,33 @@ ixlv_stop(struct ixlv_sc *sc)
INIT_DBG_IF(ifp, "end");
}
+/* Free a single queue struct */
+static void
+ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que)
+{
+ struct tx_ring *txr = &que->txr;
+ struct rx_ring *rxr = &que->rxr;
+
+ if (!mtx_initialized(&txr->mtx)) /* uninitialized */
+ return;
+ IXL_TX_LOCK(txr);
+ if (txr->br)
+ buf_ring_free(txr->br, M_DEVBUF);
+ ixl_free_que_tx(que);
+ if (txr->base)
+ i40e_free_dma_mem(&sc->hw, &txr->dma);
+ IXL_TX_UNLOCK(txr);
+ IXL_TX_LOCK_DESTROY(txr);
+
+ if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
+ return;
+ IXL_RX_LOCK(rxr);
+ ixl_free_que_rx(que);
+ if (rxr->base)
+ i40e_free_dma_mem(&sc->hw, &rxr->dma);
+ IXL_RX_UNLOCK(rxr);
+ IXL_RX_LOCK_DESTROY(rxr);
+}
/*********************************************************************
*
@@ -2586,28 +2673,12 @@ ixlv_free_queues(struct ixl_vsi *vsi)
struct ixl_queue *que = vsi->queues;
for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct tx_ring *txr = &que->txr;
- struct rx_ring *rxr = &que->rxr;
-
- if (!mtx_initialized(&txr->mtx)) /* uninitialized */
- continue;
- IXL_TX_LOCK(txr);
- ixl_free_que_tx(que);
- if (txr->base)
- i40e_free_dma_mem(&sc->hw, &txr->dma);
- IXL_TX_UNLOCK(txr);
- IXL_TX_LOCK_DESTROY(txr);
-
- if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
- continue;
- IXL_RX_LOCK(rxr);
- ixl_free_que_rx(que);
- if (rxr->base)
- i40e_free_dma_mem(&sc->hw, &rxr->dma);
- IXL_RX_UNLOCK(rxr);
- IXL_RX_LOCK_DESTROY(rxr);
-
+ /* First, free the MSI-X resources */
+ ixlv_free_msix_resources(sc, que);
+ /* Then free other queue data */
+ ixlv_free_queue(sc, que);
}
+
free(vsi->queues, M_DEVBUF);
}
@@ -2716,10 +2787,10 @@ ixlv_config_rss_pf(struct ixlv_sc *sc)
static void
ixlv_config_rss(struct ixlv_sc *sc)
{
- if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG) {
+ if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) {
DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
ixlv_config_rss_reg(sc);
- } else if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
ixlv_config_rss_pf(sc);
} else
@@ -2823,7 +2894,7 @@ ixlv_do_adminq_locked(struct ixlv_sc *sc)
{
struct i40e_hw *hw = &sc->hw;
struct i40e_arq_event_info event;
- struct i40e_virtchnl_msg *v_msg;
+ struct virtchnl_msg *v_msg;
device_t dev = sc->dev;
u16 result = 0;
u32 reg, oldreg;
@@ -2834,7 +2905,7 @@ ixlv_do_adminq_locked(struct ixlv_sc *sc)
event.buf_len = IXL_AQ_BUF_SZ;
event.msg_buf = sc->aq_buffer;
- v_msg = (struct i40e_virtchnl_msg *)&event.desc;
+ v_msg = (struct virtchnl_msg *)&event.desc;
do {
ret = i40e_clean_arq_element(hw, &event, &result);
@@ -2917,13 +2988,25 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
struct rx_ring *rxr;
/* Driver statistics sysctls */
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
CTLFLAG_RD, &sc->watchdog_events,
"Watchdog timeouts");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
CTLFLAG_RD, &sc->admin_irq,
"Admin Queue IRQ Handled");
+ SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_size",
+ CTLFLAG_RD, &vsi->num_tx_desc, 0,
+ "TX ring size");
+ SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_size",
+ CTLFLAG_RD, &vsi->num_rx_desc, 0,
+ "RX ring size");
+
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "current_speed",
+ CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ixlv_sysctl_current_speed,
+ "A", "Current Port Speed");
+
/* VSI statistics sysctls */
vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
CTLFLAG_RD, NULL, "VSI-specific statistics");
@@ -3048,14 +3131,71 @@ ixlv_free_filters(struct ixlv_sc *sc)
SLIST_REMOVE_HEAD(sc->mac_filters, next);
free(f, M_DEVBUF);
}
+ free(sc->mac_filters, M_DEVBUF);
while (!SLIST_EMPTY(sc->vlan_filters)) {
v = SLIST_FIRST(sc->vlan_filters);
SLIST_REMOVE_HEAD(sc->vlan_filters, next);
free(v, M_DEVBUF);
}
+ free(sc->vlan_filters, M_DEVBUF);
return;
}
+static char *
+ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed)
+{
+ int index;
+
+ char *speeds[] = {
+ "Unknown",
+ "100 Mbps",
+ "1 Gbps",
+ "10 Gbps",
+ "40 Gbps",
+ "20 Gbps",
+ "25 Gbps",
+ };
+
+ switch (link_speed) {
+ case VIRTCHNL_LINK_SPEED_100MB:
+ index = 1;
+ break;
+ case VIRTCHNL_LINK_SPEED_1GB:
+ index = 2;
+ break;
+ case VIRTCHNL_LINK_SPEED_10GB:
+ index = 3;
+ break;
+ case VIRTCHNL_LINK_SPEED_40GB:
+ index = 4;
+ break;
+ case VIRTCHNL_LINK_SPEED_20GB:
+ index = 5;
+ break;
+ case VIRTCHNL_LINK_SPEED_25GB:
+ index = 6;
+ break;
+ case VIRTCHNL_LINK_SPEED_UNKNOWN:
+ default:
+ index = 0;
+ break;
+ }
+
+ return speeds[index];
+}
+
+static int
+ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
+{
+ struct ixlv_sc *sc = (struct ixlv_sc *)arg1;
+ int error = 0;
+
+ error = sysctl_handle_string(oidp,
+ ixlv_vc_speed_to_string(sc->link_speed),
+ 8, req);
+ return (error);
+}
+
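Once the driver is loaded, the new OID should surface under the device's sysctl node (assuming the usual device_get_sysctl_ctx() layout, which this diff does not show), e.g.:

    sysctl dev.ixlv.0.current_speed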
#ifdef IXL_DEBUG
/**
* ixlv_sysctl_qtx_tail_handler