aboutsummaryrefslogtreecommitdiff
path: root/sys/dev/ixl
diff options
context:
space:
mode:
Diffstat (limited to 'sys/dev/ixl')
-rwxr-xr-xsys/dev/ixl/i40e_osdep.c1
-rwxr-xr-xsys/dev/ixl/i40e_osdep.h3
-rwxr-xr-xsys/dev/ixl/if_ixl.c348
-rw-r--r--sys/dev/ixl/if_ixlv.c838
-rw-r--r--sys/dev/ixl/ixl.h13
-rwxr-xr-xsys/dev/ixl/ixl_txrx.c11
-rw-r--r--sys/dev/ixl/ixlv.h50
-rw-r--r--sys/dev/ixl/ixlvc.c398
8 files changed, 1069 insertions, 593 deletions
diff --git a/sys/dev/ixl/i40e_osdep.c b/sys/dev/ixl/i40e_osdep.c
index 214dbfc925dc..b29db6492822 100755
--- a/sys/dev/ixl/i40e_osdep.c
+++ b/sys/dev/ixl/i40e_osdep.c
@@ -107,6 +107,7 @@ i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
"error %u\n", err);
goto fail_2;
}
+ mem->nseg = 1;
mem->size = size;
bus_dmamap_sync(mem->tag, mem->map,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
diff --git a/sys/dev/ixl/i40e_osdep.h b/sys/dev/ixl/i40e_osdep.h
index 895bf83cd686..83e89229c400 100755
--- a/sys/dev/ixl/i40e_osdep.h
+++ b/sys/dev/ixl/i40e_osdep.h
@@ -147,8 +147,7 @@ void prefetch(void *x)
#define prefetch(x)
#endif
-struct i40e_osdep
-{
+struct i40e_osdep {
bus_space_tag_t mem_bus_space_tag;
bus_space_handle_t mem_bus_space_handle;
bus_size_t mem_bus_space_size;
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
index a1c14781c7e6..152425dfac95 100755
--- a/sys/dev/ixl/if_ixl.c
+++ b/sys/dev/ixl/if_ixl.c
@@ -40,7 +40,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
-char ixl_driver_version[] = "1.2.2";
+char ixl_driver_version[] = "1.2.8";
/*********************************************************************
* PCI Device ID Table
@@ -109,6 +109,7 @@ static bool ixl_config_link(struct i40e_hw *);
static void ixl_config_rss(struct ixl_vsi *);
static void ixl_set_queue_rx_itr(struct ixl_queue *);
static void ixl_set_queue_tx_itr(struct ixl_queue *);
+static int ixl_set_advertised_speeds(struct ixl_pf *, int);
static void ixl_enable_rings(struct ixl_vsi *);
static void ixl_disable_rings(struct ixl_vsi *);
@@ -155,6 +156,7 @@ static void ixl_do_adminq(void *, int);
static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
/* Statistics */
static void ixl_add_hw_stats(struct ixl_pf *);
@@ -176,7 +178,8 @@ static void ixl_stat_update32(struct i40e_hw *, u32, bool,
static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
#endif
@@ -276,6 +279,7 @@ int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif
+
static char *ixl_fc_string[6] = {
"None",
"Rx",
@@ -398,6 +402,11 @@ ixl_attach(device_t dev)
OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_current_speed, "A", "Current Port Speed");
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
+
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "rx_itr", CTLFLAG_RW,
@@ -436,8 +445,13 @@ ixl_attach(device_t dev)
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "hw_res_info", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_sysctl_hw_res_info, "A", "HW Resource Allocation");
+ OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
@@ -445,7 +459,7 @@ ixl_attach(device_t dev)
pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
#endif
- /* Save off the information about this board */
+ /* Save off the PCI information */
hw->vendor_id = pci_get_vendor(dev);
hw->device_id = pci_get_device(dev);
hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
@@ -593,6 +607,7 @@ ixl_attach(device_t dev)
bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
i40e_get_port_mac_addr(hw, hw->mac.port_addr);
+ /* Set up VSI and queues */
if (ixl_setup_stations(pf) != 0) {
device_printf(dev, "setup stations failed!\n");
error = ENOMEM;
@@ -630,8 +645,11 @@ ixl_attach(device_t dev)
"an unqualified module was detected\n");
/* Setup OS specific network interface */
- if (ixl_setup_interface(dev, vsi) != 0)
+ if (ixl_setup_interface(dev, vsi) != 0) {
+ device_printf(dev, "interface setup failed!\n");
+ error = EIO;
goto err_late;
+ }
/* Get the bus configuration and set the shared code */
bus = ixl_get_bus_info(hw, dev);
@@ -642,25 +660,32 @@ ixl_attach(device_t dev)
ixl_update_stats_counters(pf);
ixl_add_hw_stats(pf);
+ /* Reset port's advertised speeds */
+ if (!i40e_is_40G_device(hw->device_id)) {
+ pf->advertised_speed = 0x7;
+ ixl_set_advertised_speeds(pf, 0x7);
+ }
+
/* Register for VLAN events */
vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
+
INIT_DEBUGOUT("ixl_attach: end");
return (0);
err_late:
- ixl_free_vsi(vsi);
+ if (vsi->ifp != NULL)
+ if_free(vsi->ifp);
err_mac_hmc:
i40e_shutdown_lan_hmc(hw);
err_get_cap:
i40e_shutdown_adminq(hw);
err_out:
- if (vsi->ifp != NULL)
- if_free(vsi->ifp);
ixl_free_pci_resources(pf);
+ ixl_free_vsi(vsi);
IXL_PF_LOCK_DESTROY(pf);
return (error);
}
@@ -725,6 +750,7 @@ ixl_detach(device_t dev)
ether_ifdetach(vsi->ifp);
callout_drain(&pf->timer);
+
ixl_free_pci_resources(pf);
bus_generic_detach(dev);
if_free(vsi->ifp);
@@ -2246,6 +2272,34 @@ early:
return;
}
+static void
+ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
+{
+ /* Display supported media types */
+ if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
+ phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
+ phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
+}
/*********************************************************************
*
@@ -2276,7 +2330,7 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = ixl_ioctl;
-#if __FreeBSD_version >= 1100000
+#if __FreeBSD_version >= 1100036
if_setgetcounterfn(ifp, ixl_get_counter);
#endif
@@ -2286,8 +2340,6 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
ifp->if_snd.ifq_maxlen = que->num_desc - 2;
- ether_ifattach(ifp, hw->mac.addr);
-
vsi->max_frame_size =
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ ETHER_VLAN_ENCAP_LEN;
@@ -2328,40 +2380,26 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
ixl_media_status);
aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
- if (aq_error) {
- printf("Error getting supported media types, AQ error %d\n", aq_error);
- return (EPERM);
- }
-
- /* Display supported media types */
- if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
-
- if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
-
- if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
- abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
- if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
- if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
- if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
-
- if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
- abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
- if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
- if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
+ if (aq_error == I40E_ERR_UNKNOWN_PHY) {
+ /* Need delay to detect fiber correctly */
+ i40e_msec_delay(200);
+ aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
+ if (aq_error == I40E_ERR_UNKNOWN_PHY)
+ device_printf(dev, "Unknown PHY type detected!\n");
+ else
+ ixl_add_ifmedia(vsi, abilities_resp.phy_type);
+ } else if (aq_error) {
+ device_printf(dev, "Error getting supported media types, err %d,"
+ " AQ error %d\n", aq_error, hw->aq.asq_last_status);
+ } else
+ ixl_add_ifmedia(vsi, abilities_resp.phy_type);
/* Use autoselect media by default */
ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
+ ether_ifattach(ifp, hw->mac.addr);
+
return (0);
}
@@ -3728,10 +3766,6 @@ ixl_update_stats_counters(struct ixl_pf *pf)
pf->stat_offsets_loaded,
&osd->eth.rx_discards,
&nsd->eth.rx_discards);
- ixl_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_discards,
- &nsd->eth.tx_discards);
ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
I40E_GLPRT_UPRCL(hw->port),
pf->stat_offsets_loaded,
@@ -3915,8 +3949,8 @@ ixl_do_adminq(void *context, int pending)
u32 reg, loop = 0;
u16 opcode, result;
- event.msg_len = IXL_AQ_BUF_SZ;
- event.msg_buf = malloc(event.msg_len,
+ event.buf_len = IXL_AQ_BUF_SZ;
+ event.msg_buf = malloc(event.buf_len,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (!event.msg_buf) {
printf("Unable to allocate adminq memory\n");
@@ -4300,6 +4334,52 @@ ixl_current_speed(SYSCTL_HANDLER_ARGS)
return (error);
}
+static int
+ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_aq_set_phy_config config;
+ enum i40e_status_code aq_error = 0;
+
+ /* Get current capability information */
+ aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
+ if (aq_error) {
+ device_printf(dev, "%s: Error getting phy capabilities %d,"
+ " aq error: %d\n", __func__, aq_error,
+ hw->aq.asq_last_status);
+ return (EAGAIN);
+ }
+
+ /* Prepare new config */
+ bzero(&config, sizeof(config));
+ config.phy_type = abilities.phy_type;
+ config.abilities = abilities.abilities
+ | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ config.eee_capability = abilities.eee_capability;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ /* Translate into aq cmd link_speed */
+ if (speeds & 0x4)
+ config.link_speed |= I40E_LINK_SPEED_10GB;
+ if (speeds & 0x2)
+ config.link_speed |= I40E_LINK_SPEED_1GB;
+ if (speeds & 0x1)
+ config.link_speed |= I40E_LINK_SPEED_100MB;
+
+ /* Do aq command & restart link */
+ aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
+ if (aq_error) {
+ device_printf(dev, "%s: Error setting new phy config %d,"
+ " aq error: %d\n", __func__, aq_error,
+ hw->aq.asq_last_status);
+ return (EAGAIN);
+ }
+
+ return (0);
+}
+
/*
** Control link advertise speed:
** Flags:
@@ -4315,10 +4395,7 @@ ixl_set_advertise(SYSCTL_HANDLER_ARGS)
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
- struct i40e_aq_get_phy_abilities_resp abilities;
- struct i40e_aq_set_phy_config config;
int requested_ls = 0;
- enum i40e_status_code aq_error = 0;
int error = 0;
/*
@@ -4343,39 +4420,9 @@ ixl_set_advertise(SYSCTL_HANDLER_ARGS)
if (pf->advertised_speed == requested_ls)
return (0);
- /* Get current capability information */
- aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
- if (aq_error) {
- device_printf(dev, "%s: Error getting phy capabilities %d,"
- " aq error: %d\n", __func__, aq_error,
- hw->aq.asq_last_status);
- return (EAGAIN);
- }
-
- /* Prepare new config */
- bzero(&config, sizeof(config));
- config.phy_type = abilities.phy_type;
- config.abilities = abilities.abilities
- | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- config.eee_capability = abilities.eee_capability;
- config.eeer = abilities.eeer_val;
- config.low_power_ctrl = abilities.d3_lpan;
- /* Translate into aq cmd link_speed */
- if (requested_ls & 0x4)
- config.link_speed |= I40E_LINK_SPEED_10GB;
- if (requested_ls & 0x2)
- config.link_speed |= I40E_LINK_SPEED_1GB;
- if (requested_ls & 0x1)
- config.link_speed |= I40E_LINK_SPEED_100MB;
-
- /* Do aq command & restart link */
- aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
- if (aq_error) {
- device_printf(dev, "%s: Error setting new phy config %d,"
- " aq error: %d\n", __func__, aq_error,
- hw->aq.asq_last_status);
- return (EAGAIN);
- }
+ error = ixl_set_advertised_speeds(pf, requested_ls);
+ if (error)
+ return (error);
pf->advertised_speed = requested_ls;
ixl_update_link_status(pf);
@@ -4454,6 +4501,26 @@ ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
return (link);
}
+static int
+ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ char buf[32];
+
+ snprintf(buf, sizeof(buf),
+ "f%d.%d a%d.%d n%02x.%02x e%08x",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
+ IXL_NVM_VERSION_HI_SHIFT,
+ (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
+ IXL_NVM_VERSION_LO_SHIFT,
+ hw->nvm.eetrack);
+ return (sysctl_handle_string(oidp, buf, strlen(buf), req));
+}
+
+
#ifdef IXL_DEBUG
static int
ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
@@ -4563,7 +4630,7 @@ ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
#define IXL_SW_RES_SIZE 0x14
static int
-ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS)
+ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
@@ -4620,7 +4687,120 @@ ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS)
device_printf(dev, "sysctl error: %d\n", error);
sbuf_delete(buf);
return error;
+}
+
+/*
+** Caller must init and delete sbuf; this function will clear and
+** finish it for caller.
+*/
+static char *
+ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
+{
+ sbuf_clear(s);
+
+ if (seid == 0 && uplink)
+ sbuf_cat(s, "Network");
+ else if (seid == 0)
+ sbuf_cat(s, "Host");
+ else if (seid == 1)
+ sbuf_cat(s, "EMP");
+ else if (seid <= 5)
+ sbuf_printf(s, "MAC %d", seid - 2);
+ else if (seid <= 15)
+ sbuf_cat(s, "Reserved");
+ else if (seid <= 31)
+ sbuf_printf(s, "PF %d", seid - 16);
+ else if (seid <= 159)
+ sbuf_printf(s, "VF %d", seid - 32);
+ else if (seid <= 287)
+ sbuf_cat(s, "Reserved");
+ else if (seid <= 511)
+ sbuf_cat(s, "Other"); // for other structures
+ else if (seid <= 895)
+ sbuf_printf(s, "VSI %d", seid - 512);
+ else if (seid <= 1023)
+ sbuf_printf(s, "Reserved");
+ else
+ sbuf_cat(s, "Invalid");
+
+ sbuf_finish(s);
+ return sbuf_data(s);
+}
+static int
+ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct sbuf *buf;
+ struct sbuf *nmbuf;
+ int error = 0;
+ u8 aq_buf[I40E_AQ_LARGE_BUF];
+
+ u16 next = 0;
+ struct i40e_aqc_get_switch_config_resp *sw_config;
+ sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
+ return (ENOMEM);
+ }
+
+ error = i40e_aq_get_switch_config(hw, sw_config,
+ sizeof(aq_buf), &next, NULL);
+ if (error) {
+ device_printf(dev, "%s: aq_get_switch_config() error %d, aq error %d\n",
+ __func__, error, hw->aq.asq_last_status);
+ sbuf_delete(buf);
+ return error;
+ }
+
+ nmbuf = sbuf_new_auto();
+ if (!nmbuf) {
+ device_printf(dev, "Could not allocate sbuf for name output.\n");
+ sbuf_delete(buf); return (ENOMEM);
+ }
+
+ sbuf_cat(buf, "\n");
+ // Assuming <= 255 elements in switch
+ sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
+ /* Exclude:
+ ** Revision -- all elements are revision 1 for now
+ */
+ sbuf_printf(buf,
+ "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
+ " | | | (uplink)\n");
+ for (int i = 0; i < sw_config->header.num_reported; i++) {
+ // "%4d (%8s) | %8s %8s %#8x",
+ sbuf_printf(buf, "%4d", sw_config->element[i].seid);
+ sbuf_cat(buf, " ");
+ sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, sw_config->element[i].seid, false));
+ sbuf_cat(buf, " | ");
+ sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].uplink_seid, true));
+ sbuf_cat(buf, " ");
+ sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].downlink_seid, false));
+ sbuf_cat(buf, " ");
+ sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
+ if (i < sw_config->header.num_reported - 1)
+ sbuf_cat(buf, "\n");
+ }
+ sbuf_delete(nmbuf);
+
+ error = sbuf_finish(buf);
+ if (error) {
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+ sbuf_delete(buf);
+ return error;
+ }
+
+ error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
+ if (error)
+ device_printf(dev, "sysctl error: %d\n", error);
+ sbuf_delete(buf);
+
+ return (error);
}
/*
diff --git a/sys/dev/ixl/if_ixlv.c b/sys/dev/ixl/if_ixlv.c
index bd3c202dd814..be3aaad62e90 100644
--- a/sys/dev/ixl/if_ixlv.c
+++ b/sys/dev/ixl/if_ixlv.c
@@ -40,7 +40,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
-char ixlv_driver_version[] = "1.1.4";
+char ixlv_driver_version[] = "1.1.18";
/*********************************************************************
* PCI Device ID Table
@@ -87,7 +87,6 @@ static void ixlv_config_rss(struct ixlv_sc *);
static void ixlv_stop(struct ixlv_sc *);
static void ixlv_add_multi(struct ixl_vsi *);
static void ixlv_del_multi(struct ixl_vsi *);
-static void ixlv_update_link_status(struct ixlv_sc *);
static void ixlv_free_queues(struct ixl_vsi *);
static int ixlv_setup_interface(device_t, struct ixlv_sc *);
@@ -97,18 +96,21 @@ static void ixlv_media_status(struct ifnet *, struct ifmediareq *);
static void ixlv_local_timer(void *);
static int ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
+static int ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void ixlv_init_filters(struct ixlv_sc *);
static void ixlv_free_filters(struct ixlv_sc *);
static void ixlv_msix_que(void *);
static void ixlv_msix_adminq(void *);
static void ixlv_do_adminq(void *, int);
-static void ixlv_sched_aq(void *);
+static void ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void ixlv_handle_que(void *, int);
static int ixlv_reset(struct ixlv_sc *);
static int ixlv_reset_complete(struct i40e_hw *);
static void ixlv_set_queue_rx_itr(struct ixl_queue *);
static void ixlv_set_queue_tx_itr(struct ixl_queue *);
+static void ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
+ enum i40e_status_code);
static void ixlv_enable_adminq_irq(struct i40e_hw *);
static void ixlv_disable_adminq_irq(struct i40e_hw *);
@@ -119,10 +121,16 @@ static void ixlv_setup_vlan_filters(struct ixlv_sc *);
static void ixlv_register_vlan(void *, struct ifnet *, u16);
static void ixlv_unregister_vlan(void *, struct ifnet *, u16);
+static void ixlv_init_hw(struct ixlv_sc *);
+static int ixlv_setup_vc(struct ixlv_sc *);
+static int ixlv_vf_config(struct ixlv_sc *);
+
static void ixlv_cap_txcsum_tso(struct ixl_vsi *,
struct ifnet *, int);
-static void ixlv_add_stats_sysctls(struct ixlv_sc *);
+static void ixlv_add_sysctls(struct ixlv_sc *);
+static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
+static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
/*********************************************************************
* FreeBSD Device Interface Entry Points
@@ -271,7 +279,7 @@ ixlv_attach(device_t dev)
struct ixlv_sc *sc;
struct i40e_hw *hw;
struct ixl_vsi *vsi;
- int bufsz, error = 0, retries = 0;
+ int error = 0;
INIT_DBG_DEV(dev, "begin");
@@ -282,30 +290,18 @@ ixlv_attach(device_t dev)
vsi = &sc->vsi;
vsi->dev = dev;
+ /* Initialize hw struct */
+ ixlv_init_hw(sc);
+
/* Allocate filter lists */
ixlv_init_filters(sc);
/* Core Lock Init*/
mtx_init(&sc->mtx, device_get_nameunit(dev),
"IXL SC Lock", MTX_DEF);
- mtx_init(&sc->aq_task_mtx, device_get_nameunit(dev),
- "IXL AQ Task Lock", MTX_DEF);
- /* Set up the timer & aq watchdog callouts */
+ /* Set up the timer callout */
callout_init_mtx(&sc->timer, &sc->mtx, 0);
- callout_init_mtx(&sc->aq_task, &sc->aq_task_mtx, 0);
-
- /* Save off the information about this board */
- hw->vendor_id = pci_get_vendor(dev);
- hw->device_id = pci_get_device(dev);
- hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
- hw->subsystem_vendor_id =
- pci_read_config(dev, PCIR_SUBVEND_0, 2);
- hw->subsystem_device_id =
- pci_read_config(dev, PCIR_SUBDEV_0, 2);
-
- hw->bus.device = pci_get_slot(dev);
- hw->bus.func = pci_get_function(dev);
/* Do PCI setup - map BAR0, etc */
if (ixlv_allocate_pci_resources(sc)) {
@@ -333,50 +329,16 @@ ixlv_attach(device_t dev)
INIT_DBG_DEV(dev, "VF Device is ready for configuration");
- hw->aq.num_arq_entries = IXL_AQ_LEN;
- hw->aq.num_asq_entries = IXL_AQ_LEN;
- hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
- hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
-
- error = i40e_init_adminq(hw);
+ error = ixlv_setup_vc(sc);
if (error) {
- device_printf(dev, "%s: init_adminq failed: %d\n",
+ device_printf(dev, "%s: Error setting up PF comms, %d\n",
__func__, error);
goto err_pci_res;
}
- INIT_DBG_DEV(dev, "Initialized Admin Queue");
-
- error = ixlv_send_api_ver(sc);
- if (error) {
- device_printf(dev, "%s: unable to send to PF (%d)\n",
- __func__, error);
- goto err_aq;
- }
-
- while (!i40e_asq_done(hw)) {
- if (++retries > IXLV_AQ_MAX_ERR) {
- device_printf(dev, "%s: Admin Queue timeout "
- "(waiting for send_api_ver)\n", __func__);
- error = ENXIO;
- goto err_aq;
- }
- i40e_msec_delay(10);
- }
-
- INIT_DBG_DEV(dev, "Sent API version message to PF");
-
- /* Wait for API version msg to arrive */
- error = ixlv_verify_api_ver(sc);
- if (error) {
- device_printf(dev,
- "%s: Unable to verify API version, error %d\n",
- __func__, error);
- goto err_aq;
- }
-
INIT_DBG_DEV(dev, "PF API version verified");
+ /* TODO: Figure out why MDD events occur when this reset is removed. */
/* Need API version before sending reset message */
error = ixlv_reset(sc);
if (error) {
@@ -387,49 +349,14 @@ ixlv_attach(device_t dev)
INIT_DBG_DEV(dev, "VF reset complete");
/* Ask for VF config from PF */
- error = ixlv_send_vf_config_msg(sc);
+ error = ixlv_vf_config(sc);
if (error) {
- device_printf(dev,
- "%s: Unable to send VF config request, error %d\n",
- __func__, error);
- goto err_aq;
- }
-
- retries = 0;
- while (!i40e_asq_done(hw)) {
- if (++retries > IXLV_AQ_MAX_ERR) {
- device_printf(dev, "%s: Admin Queue timeout "
- "(waiting for send_vf_config_msg)\n", __func__);
- error = ENXIO;
- goto err_aq;
- }
- i40e_msec_delay(10);
- }
-
- INIT_DBG_DEV(dev, "Sent VF config message to PF");
-
- bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
- (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
- sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
- if (!sc->vf_res) {
- device_printf(dev,
- "%s: Unable to allocate memory for VF configuration"
- " message from PF\n", __func__);
- error = ENOMEM;
+ device_printf(dev, "Error getting configuration from PF: %d\n",
+ error);
goto err_aq;
}
- /* Check for VF config response */
- error = ixlv_get_vf_config(sc);
- if (error) {
- device_printf(dev,
- "%s: Unable to get VF configuration from PF\n",
- __func__);
- error = EBUSY;
- goto err_res_buf;
- }
-
- INIT_DBG_DEV(dev, "Received valid VF config from PF");
+ INIT_DBG_DEV(dev, "VF config from PF:");
INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
sc->vf_res->num_vsis,
sc->vf_res->num_queue_pairs,
@@ -438,6 +365,7 @@ ixlv_attach(device_t dev)
INIT_DBG_DEV(dev, "Offload flags: %#010x",
sc->vf_res->vf_offload_flags);
+ // TODO: Move this into ixlv_vf_config?
/* got VF config message back from PF, now we can parse it */
for (int i = 0; i < sc->vf_res->num_vsis; i++) {
if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
@@ -445,6 +373,7 @@ ixlv_attach(device_t dev)
}
if (!sc->vsi_res) {
device_printf(dev, "%s: no LAN VSI found\n", __func__);
+ error = EIO;
goto err_res_buf;
}
@@ -461,14 +390,13 @@ ixlv_attach(device_t dev)
vsi->id = sc->vsi_res->vsi_id;
vsi->back = (void *)sc;
-
- /* Link in this virtual environment is always 'up' */
vsi->link_up = TRUE;
/* This allocates the memory and early settings */
if (ixlv_setup_queues(sc) != 0) {
device_printf(dev, "%s: setup queues failed!\n",
__func__);
+ error = EIO;
goto out;
}
@@ -476,6 +404,7 @@ ixlv_attach(device_t dev)
if (ixlv_setup_interface(dev, sc) != 0) {
device_printf(dev, "%s: setup interface failed!\n",
__func__);
+ error = EIO;
goto out;
}
@@ -487,12 +416,9 @@ ixlv_attach(device_t dev)
/* Start AdminQ taskqueue */
ixlv_init_taskqueue(sc);
- /* Start the admin queue scheduler timer */
- callout_reset(&sc->aq_task, 2 * hz, ixlv_sched_aq, sc);
-
/* Initialize stats */
bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
- ixlv_add_stats_sysctls(sc);
+ ixlv_add_sysctls(sc);
/* Register for VLAN events */
vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
@@ -504,10 +430,10 @@ ixlv_attach(device_t dev)
ixlv_enable_adminq_irq(hw);
/* Set things up to run init */
- sc->aq_pending = 0;
- sc->aq_required = 0;
sc->init_state = IXLV_INIT_READY;
+ ixl_vc_init_mgr(sc, &sc->vc_mgr);
+
INIT_DBG_DEV(dev, "end");
return (error);
@@ -521,7 +447,6 @@ err_pci_res:
ixlv_free_pci_resources(sc);
err_early:
mtx_destroy(&sc->mtx);
- mtx_destroy(&sc->aq_task_mtx);
ixlv_free_filters(sc);
INIT_DBG_DEV(dev, "end: error %d", error);
return (error);
@@ -542,7 +467,6 @@ ixlv_detach(device_t dev)
{
struct ixlv_sc *sc = device_get_softc(dev);
struct ixl_vsi *vsi = &sc->vsi;
- int retries = 0;
INIT_DBG_DEV(dev, "begin");
@@ -554,23 +478,11 @@ ixlv_detach(device_t dev)
}
/* Stop driver */
+ ether_ifdetach(vsi->ifp);
if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
mtx_lock(&sc->mtx);
ixlv_stop(sc);
mtx_unlock(&sc->mtx);
-
- /*
- ** Ensure queues are disabled before examining
- ** admin queue state later in detach.
- */
- while (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING
- && ++retries < IXLV_AQ_MAX_ERR) {
- i40e_msec_delay(10);
- }
-#ifdef IXL_DEBUG
- if (retries >= IXLV_AQ_MAX_ERR)
- device_printf(dev, "Issue disabling queues for detach\n");
-#endif
}
/* Unregister VLAN events */
@@ -579,37 +491,16 @@ ixlv_detach(device_t dev)
if (vsi->vlan_detach != NULL)
EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
- /* Stop AQ callout */
- callout_drain(&sc->aq_task);
- callout_stop(&sc->aq_task);
-
-#ifdef IXL_DEBUG
- /* Report on possible AQ failures */
- if (sc->aq_required || sc->aq_pending) {
- device_printf(dev, "AQ status on detach:\n");
- device_printf(dev, "required : 0x%4b\n", sc->aq_required,
- IXLV_FLAGS);
- device_printf(dev, "pending : 0x%4b\n", sc->aq_pending,
- IXLV_FLAGS);
- device_printf(dev, "current_op: %d\n", sc->current_op);
- }
-#endif
+ /* Drain VC mgr */
+ callout_drain(&sc->vc_mgr.callout);
i40e_shutdown_adminq(&sc->hw);
- while (taskqueue_cancel(sc->tq, &sc->aq_irq, NULL) != 0)
- taskqueue_drain(sc->tq, &sc->aq_irq);
taskqueue_free(sc->tq);
-
- /* force the state down */
- vsi->ifp->if_flags &= ~IFF_UP;
- ether_ifdetach(vsi->ifp);
if_free(vsi->ifp);
-
free(sc->vf_res, M_DEVBUF);
ixlv_free_pci_resources(sc);
ixlv_free_queues(vsi);
mtx_destroy(&sc->mtx);
- mtx_destroy(&sc->aq_task_mtx);
ixlv_free_filters(sc);
bus_generic_detach(dev);
@@ -754,7 +645,7 @@ ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
if (avoid_reset) {
ifp->if_flags |= IFF_UP;
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
- ixlv_init(sc);
+ ixlv_init(vsi);
#ifdef INET
if (!(ifp->if_flags & IFF_NOARP))
arp_ifinit(ifp, ifa);
@@ -773,11 +664,10 @@ ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
} else {
IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
// ERJ: Interestingly enough, these types don't match
- ifp->if_mtu = ifr->ifr_mtu;
+ ifp->if_mtu = (u_long)ifr->ifr_mtu;
vsi->max_frame_size =
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ ETHER_VLAN_ENCAP_LEN;
-
ixlv_init_locked(sc);
}
mtx_unlock(&sc->mtx);
@@ -839,7 +729,7 @@ ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
if (mask & IFCAP_VLAN_HWTSO)
ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- ixlv_init(sc);
+ ixlv_init(vsi);
}
VLAN_CAPABILITIES(ifp);
@@ -869,6 +759,7 @@ ixlv_reinit_locked(struct ixlv_sc *sc)
struct i40e_hw *hw = &sc->hw;
struct ixl_vsi *vsi = &sc->vsi;
struct ifnet *ifp = vsi->ifp;
+ struct ixlv_mac_filter *mf, *mf_temp;
struct ixlv_vlan_filter *vf;
int error = 0;
@@ -877,13 +768,25 @@ ixlv_reinit_locked(struct ixlv_sc *sc)
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
ixlv_stop(sc);
- if ((sc->init_state == IXLV_RESET_REQUIRED) ||
- (sc->init_state == IXLV_RESET_PENDING))
- error = ixlv_reset(sc);
+ error = ixlv_reset(sc);
+
+ INIT_DBG_IF(ifp, "VF was reset");
/* set the state in case we went thru RESET */
sc->init_state = IXLV_RUNNING;
+ /*
+ ** Resetting the VF drops all filters from hardware;
+ ** we need to mark them to be re-added in init.
+ */
+ SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
+ if (mf->flags & IXL_FILTER_DEL) {
+ SLIST_REMOVE(sc->mac_filters, mf,
+ ixlv_mac_filter, next);
+ free(mf, M_DEVBUF);
+ } else
+ mf->flags |= IXL_FILTER_ADD;
+ }
if (vsi->num_vlans != 0)
SLIST_FOREACH(vf, sc->vlan_filters, next)
vf->flags = IXL_FILTER_ADD;
@@ -896,13 +799,31 @@ ixlv_reinit_locked(struct ixlv_sc *sc)
}
ixlv_enable_adminq_irq(hw);
- sc->aq_pending = 0;
- sc->aq_required = 0;
+ ixl_vc_flush(&sc->vc_mgr);
INIT_DBG_IF(ifp, "end");
return (error);
}
+static void
+ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
+ enum i40e_status_code code)
+{
+ struct ixlv_sc *sc;
+
+ sc = arg;
+
+ /*
+ * Ignore "Adapter Stopped" message as that happens if an ifconfig down
+ * happens while a command is in progress, so we don't print an error
+ * in that case.
+ */
+ if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
+ if_printf(sc->vsi.ifp,
+ "Error %d waiting for PF to complete operation %d\n",
+ code, cmd->request);
+ }
+}
static void
ixlv_init_locked(struct ixlv_sc *sc)
@@ -915,12 +836,7 @@ ixlv_init_locked(struct ixlv_sc *sc)
INIT_DBG_IF(ifp, "begin");
- /* Verify we have the core lock */
- if (!mtx_owned(&sc->mtx)) {
- if_printf(ifp, "%s: sc mutex not owned; acquire"
- "before calling this function!\n", __func__);
- goto init_done;
- }
+ IXLV_CORE_LOCK_ASSERT(sc);
/* Do a reinit first if an init has already been done */
if ((sc->init_state == IXLV_RUNNING) ||
@@ -931,6 +847,15 @@ ixlv_init_locked(struct ixlv_sc *sc)
if (error)
goto init_done;
+ /* Remove existing MAC filter if new MAC addr is set */
+ if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
+ error = ixlv_del_mac_filter(sc, hw->mac.addr);
+ if (error == 0)
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
+ IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
+ sc);
+ }
+
/* Check for an LAA mac address... */
bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);
@@ -943,27 +868,23 @@ ixlv_init_locked(struct ixlv_sc *sc)
ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;
/* Add mac filter for this VF to PF */
- error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
-
- // send message, then enqueue another task
- if (!error || error == EEXIST) {
- sc->aq_required |= IXLV_FLAG_AQ_ADD_MAC_FILTER;
- callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
- ixlv_sched_aq, sc);
+ if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
+ error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
+ if (!error || error == EEXIST)
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
+ IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
+ sc);
}
/* Setup vlan's if needed */
ixlv_setup_vlan_filters(sc);
- /*
- ** Prepare the queues for operation
- */
+ /* Prepare the queues for operation */
for (int i = 0; i < vsi->num_queues; i++, que++) {
struct rx_ring *rxr = &que->rxr;
ixl_init_tx_ring(que);
- /* Need to set mbuf size now */
if (vsi->max_frame_size <= 2048)
rxr->mbuf_sz = MCLBYTES;
else
@@ -972,22 +893,19 @@ ixlv_init_locked(struct ixlv_sc *sc)
}
/* Configure queues */
- sc->aq_required |= IXLV_FLAG_AQ_CONFIGURE_QUEUES;
- callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
- ixlv_sched_aq, sc);
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
+ IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);
/* Set up RSS */
ixlv_config_rss(sc);
/* Map vectors */
- sc->aq_required |= IXLV_FLAG_AQ_MAP_VECTORS;
- callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
- ixlv_sched_aq, sc);
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
+ IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);
/* Enable queues */
- sc->aq_required |= IXLV_FLAG_AQ_ENABLE_QUEUES;
- callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
- ixlv_sched_aq, sc);
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
+ IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);
/* Start the local timer */
callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
@@ -1005,12 +923,209 @@ init_done:
void
ixlv_init(void *arg)
{
- struct ixlv_sc *sc = arg;
+ struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
+ struct ixlv_sc *sc = vsi->back;
+ int retries = 0;
mtx_lock(&sc->mtx);
ixlv_init_locked(sc);
mtx_unlock(&sc->mtx);
- return;
+
+ /* Wait for init_locked to finish */
+ while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
+ && ++retries < 100) {
+ i40e_msec_delay(10);
+ }
+ if (retries >= IXLV_AQ_MAX_ERR)
+ if_printf(vsi->ifp,
+ "Init failed to complete in alloted time!\n");
+}
+
+/*
+ * ixlv_attach() helper function; gathers information about
+ * the (virtual) hardware for use elsewhere in the driver.
+ */
+static void
+ixlv_init_hw(struct ixlv_sc *sc)
+{
+ struct i40e_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+
+ /* Save off the information about this board */
+ hw->vendor_id = pci_get_vendor(dev);
+ hw->device_id = pci_get_device(dev);
+ hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
+ hw->subsystem_vendor_id =
+ pci_read_config(dev, PCIR_SUBVEND_0, 2);
+ hw->subsystem_device_id =
+ pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+ hw->bus.device = pci_get_slot(dev);
+ hw->bus.func = pci_get_function(dev);
+}
+
+/*
+ * ixlv_attach() helper function; initializes the admin queue
+ * and attempts to establish contact with the PF by
+ * retrying the initial "API version" message several times
+ * or until the PF responds.
+ */
+static int
+ixlv_setup_vc(struct ixlv_sc *sc)
+{
+ struct i40e_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ int error = 0, ret_error = 0, asq_retries = 0;
+ bool send_api_ver_retried = 0;
+
+ /* Need to set these AQ parameters before initializing AQ */
+ hw->aq.num_arq_entries = IXL_AQ_LEN;
+ hw->aq.num_asq_entries = IXL_AQ_LEN;
+ hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
+ hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
+
+ for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
+ /* Initialize admin queue */
+ error = i40e_init_adminq(hw);
+ if (error) {
+ device_printf(dev, "%s: init_adminq failed: %d\n",
+ __func__, error);
+ ret_error = 1;
+ continue;
+ }
+
+ INIT_DBG_DEV(dev, "Initialized Admin Queue, attempt %d", i+1);
+
+retry_send:
+ /* Send VF's API version */
+ error = ixlv_send_api_ver(sc);
+ if (error) {
+ i40e_shutdown_adminq(hw);
+ ret_error = 2;
+ device_printf(dev, "%s: unable to send api"
+ " version to PF on attempt %d, error %d\n",
+ __func__, i+1, error);
+ }
+
+ asq_retries = 0;
+ while (!i40e_asq_done(hw)) {
+ if (++asq_retries > IXLV_AQ_MAX_ERR) {
+ i40e_shutdown_adminq(hw);
+ DDPRINTF(dev, "Admin Queue timeout "
+ "(waiting for send_api_ver), %d more retries...",
+ IXLV_AQ_MAX_ERR - (i + 1));
+ ret_error = 3;
+ break;
+ }
+ i40e_msec_delay(10);
+ }
+ if (asq_retries > IXLV_AQ_MAX_ERR)
+ continue;
+
+ INIT_DBG_DEV(dev, "Sent API version message to PF");
+
+ /* Verify that the VF accepts the PF's API version */
+ error = ixlv_verify_api_ver(sc);
+ if (error == ETIMEDOUT) {
+ if (!send_api_ver_retried) {
+ /* Resend message, one more time */
+ send_api_ver_retried++;
+ device_printf(dev,
+ "%s: Timeout while verifying API version on first"
+ " try!\n", __func__);
+ goto retry_send;
+ } else {
+ device_printf(dev,
+ "%s: Timeout while verifying API version on second"
+ " try!\n", __func__);
+ ret_error = 4;
+ break;
+ }
+ }
+ if (error) {
+ device_printf(dev,
+ "%s: Unable to verify API version,"
+ " error %d\n", __func__, error);
+ ret_error = 5;
+ }
+ break;
+ }
+
+ if (ret_error >= 4)
+ i40e_shutdown_adminq(hw);
+ return (ret_error);
+}
+
+/*
+ * ixlv_attach() helper function; asks the PF for this VF's
+ * configuration, and saves the information if it receives it.
+ */
+static int
+ixlv_vf_config(struct ixlv_sc *sc)
+{
+ struct i40e_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ int bufsz, error = 0, ret_error = 0;
+ int asq_retries, retried = 0;
+
+retry_config:
+ error = ixlv_send_vf_config_msg(sc);
+ if (error) {
+ device_printf(dev,
+ "%s: Unable to send VF config request, attempt %d,"
+ " error %d\n", __func__, retried + 1, error);
+ ret_error = 2;
+ }
+
+ asq_retries = 0;
+ while (!i40e_asq_done(hw)) {
+ if (++asq_retries > IXLV_AQ_MAX_ERR) {
+ device_printf(dev, "%s: Admin Queue timeout "
+ "(waiting for send_vf_config_msg), attempt %d\n",
+ __func__, retried + 1);
+ ret_error = 3;
+ goto fail;
+ }
+ i40e_msec_delay(10);
+ }
+
+ INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
+ retried + 1);
+
+ if (!sc->vf_res) {
+ bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
+ (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
+ sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
+ if (!sc->vf_res) {
+ device_printf(dev,
+ "%s: Unable to allocate memory for VF configuration"
+ " message from PF on attempt %d\n", __func__, retried + 1);
+ ret_error = 1;
+ goto fail;
+ }
+ }
+
+ /* Check for VF config response */
+ error = ixlv_get_vf_config(sc);
+ if (error == ETIMEDOUT) {
+ /* The 1st time we timeout, send the configuration message again */
+ if (!retried) {
+ retried++;
+ goto retry_config;
+ }
+ }
+ if (error) {
+ device_printf(dev,
+ "%s: Unable to get VF configuration from PF after %d tries!\n",
+ __func__, retried + 1);
+ ret_error = 4;
+ }
+ goto done;
+
+fail:
+ free(sc->vf_res, M_DEVBUF);
+done:
+ return (ret_error);
}
/*
@@ -1142,7 +1257,7 @@ ixlv_allocate_pci_resources(struct ixlv_sc *sc)
sc->hw.back = &sc->osdep;
- /* May need to pre-emptively disable adminq interrupts */
+ /* Disable adminq interrupts */
ixlv_disable_adminq_irq(&sc->hw);
/*
@@ -1204,12 +1319,14 @@ early:
return;
}
+/*
+ * Create taskqueue and tasklet for Admin Queue interrupts.
+ */
static int
ixlv_init_taskqueue(struct ixlv_sc *sc)
{
int error = 0;
- /* Tasklet for AQ Interrupts */
TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
@@ -1270,7 +1387,9 @@ ixlv_assign_msix(struct ixlv_sc *sc)
}
/*
-** XXX: Assumes the vf's admin queue has been initialized.
+** Requests a VF reset from the PF.
+**
+** Requires the VF's Admin Queue to be initialized.
*/
static int
ixlv_reset(struct ixlv_sc *sc)
@@ -1320,7 +1439,7 @@ ixlv_reset_complete(struct i40e_hw *hw)
if ((reg == I40E_VFR_VFACTIVE) ||
(reg == I40E_VFR_COMPLETED))
return (0);
- i40e_usec_delay(20);
+ i40e_msec_delay(100);
}
return (EBUSY);
@@ -1343,7 +1462,8 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
ifp = vsi->ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
- device_printf(dev, "can not allocate ifnet structure\n");
+ device_printf(dev, "%s: could not allocate ifnet"
+ " structure!\n", __func__);
return (-1);
}
@@ -1574,7 +1694,8 @@ ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
v->vlan = vtag;
v->flags = IXL_FILTER_ADD;
- sc->aq_required |= IXLV_FLAG_AQ_ADD_VLAN_FILTER;
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
+ IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
mtx_unlock(&sc->mtx);
return;
}
@@ -1607,7 +1728,8 @@ ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
}
}
if (i)
- sc->aq_required |= IXLV_FLAG_AQ_DEL_VLAN_FILTER;
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
+ IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
mtx_unlock(&sc->mtx);
return;
}
@@ -1620,8 +1742,10 @@ ixlv_get_mac_filter(struct ixlv_sc *sc)
{
struct ixlv_mac_filter *f;
- f = malloc(sizeof(struct ixlv_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
- SLIST_INSERT_HEAD(sc->mac_filters, f, next);
+ f = malloc(sizeof(struct ixlv_mac_filter),
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (f)
+ SLIST_INSERT_HEAD(sc->mac_filters, f, next);
return (f);
}
@@ -1654,8 +1778,9 @@ static void
ixlv_msix_adminq(void *arg)
{
struct ixlv_sc *sc = arg;
- struct i40e_hw *hw = &sc->hw;
- u32 reg, mask;
+ struct i40e_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ u32 reg, mask, oldreg;
reg = rd32(hw, I40E_VFINT_ICR01);
mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
@@ -1664,6 +1789,39 @@ ixlv_msix_adminq(void *arg)
reg |= I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTL01, reg);
+ /* check for Admin queue errors */
+ oldreg = reg = rd32(hw, hw->aq.arq.len);
+ if (reg & I40E_VF_ARQLEN_ARQVFE_MASK) {
+ device_printf(dev, "ARQ VF Error detected\n");
+ reg &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
+ }
+ if (reg & I40E_VF_ARQLEN_ARQOVFL_MASK) {
+ device_printf(dev, "ARQ Overflow Error detected\n");
+ reg &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
+ }
+ if (reg & I40E_VF_ARQLEN_ARQCRIT_MASK) {
+ device_printf(dev, "ARQ Critical Error detected\n");
+ reg &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
+ }
+ if (oldreg != reg)
+ wr32(hw, hw->aq.arq.len, reg);
+
+ oldreg = reg = rd32(hw, hw->aq.asq.len);
+ if (reg & I40E_VF_ATQLEN_ATQVFE_MASK) {
+ device_printf(dev, "ASQ VF Error detected\n");
+ reg &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
+ }
+ if (reg & I40E_VF_ATQLEN_ATQOVFL_MASK) {
+ device_printf(dev, "ASQ Overflow Error detected\n");
+ reg &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
+ }
+ if (reg & I40E_VF_ATQLEN_ATQCRIT_MASK) {
+ device_printf(dev, "ASQ Critical Error detected\n");
+ reg &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
+ }
+ if (oldreg != reg)
+ wr32(hw, hw->aq.asq.len, reg);
+
/* re-enable interrupt causes */
wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
@@ -2050,7 +2208,9 @@ ixlv_init_multi(struct ixl_vsi *vsi)
}
}
if (mcnt > 0)
- sc->aq_required |= IXLV_FLAG_AQ_DEL_MAC_FILTER;
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
+ IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
+ sc);
IOCTL_DBG_IF(vsi->ifp, "end");
}
@@ -2077,11 +2237,14 @@ ixlv_add_multi(struct ixl_vsi *vsi)
}
if_maddr_runlock(ifp);
+ // TODO: Remove -- cannot set promiscuous mode in a VF
if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
/* delete all multicast filters */
ixlv_init_multi(vsi);
sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
- sc->aq_required |= IXLV_FLAG_AQ_CONFIGURE_PROMISC;
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
+ IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
+ sc);
IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
return;
}
@@ -2102,7 +2265,9 @@ ixlv_add_multi(struct ixl_vsi *vsi)
** added to hw list
*/
if (mcnt > 0)
- sc->aq_required |= IXLV_FLAG_AQ_ADD_MAC_FILTER;
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
+ IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
+ sc);
IOCTL_DBG_IF(ifp, "end");
}
@@ -2151,7 +2316,9 @@ ixlv_del_multi(struct ixl_vsi *vsi)
if_maddr_runlock(ifp);
if (mcnt > 0)
- sc->aq_required |= IXLV_FLAG_AQ_DEL_MAC_FILTER;
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
+ IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
+ sc);
IOCTL_DBG_IF(ifp, "end");
}
@@ -2173,9 +2340,9 @@ ixlv_local_timer(void *arg)
struct ixl_queue *que = vsi->queues;
device_t dev = sc->dev;
int hung = 0;
- u32 mask, val, oldval;
+ u32 mask, val;
- mtx_assert(&sc->mtx, MA_OWNED);
+ IXLV_CORE_LOCK_ASSERT(sc);
/* If Reset is in progress just bail */
if (sc->init_state == IXLV_RESET_PENDING)
@@ -2187,47 +2354,11 @@ ixlv_local_timer(void *arg)
if (val != I40E_VFR_VFACTIVE
&& val != I40E_VFR_COMPLETED) {
-#ifdef IXL_DEBUG
- device_printf(dev, "%s: reset in progress! (%d)\n",
- __func__, val);
-#endif
+ DDPRINTF(dev, "reset in progress! (%d)", val);
return;
}
- /* check for Admin queue errors */
- val = rd32(hw, hw->aq.arq.len);
- oldval = val;
- if (val & I40E_VF_ARQLEN_ARQVFE_MASK) {
- device_printf(dev, "ARQ VF Error detected\n");
- val &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
- }
- if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) {
- device_printf(dev, "ARQ Overflow Error detected\n");
- val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
- }
- if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) {
- device_printf(dev, "ARQ Critical Error detected\n");
- val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
- }
- if (oldval != val)
- wr32(hw, hw->aq.arq.len, val);
-
- val = rd32(hw, hw->aq.asq.len);
- oldval = val;
- if (val & I40E_VF_ATQLEN_ATQVFE_MASK) {
- device_printf(dev, "ASQ VF Error detected\n");
- val &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
- }
- if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) {
- device_printf(dev, "ASQ Overflow Error detected\n");
- val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
- }
- if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) {
- device_printf(dev, "ASQ Critical Error detected\n");
- val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
- }
- if (oldval != val)
- wr32(hw, hw->aq.asq.len, val);
+ ixlv_request_stats(sc);
/* clean and process any events */
taskqueue_enqueue(sc->tq, &sc->aq_irq);
@@ -2281,7 +2412,7 @@ hung:
** the real check of the hardware only happens with
** a link interrupt.
*/
-static void
+void
ixlv_update_link_status(struct ixlv_sc *sc)
{
struct ixl_vsi *vsi = &sc->vsi;
@@ -2318,18 +2449,26 @@ ixlv_update_link_status(struct ixlv_sc *sc)
static void
ixlv_stop(struct ixlv_sc *sc)
{
- mtx_assert(&sc->mtx, MA_OWNED);
+ struct ifnet *ifp;
+ int start;
- INIT_DBG_IF(&sc->vsi->ifp, "begin");
+ ifp = sc->vsi.ifp;
+ INIT_DBG_IF(ifp, "begin");
+
+ IXLV_CORE_LOCK_ASSERT(sc);
+
+ ixl_vc_flush(&sc->vc_mgr);
+ ixlv_disable_queues(sc);
- sc->aq_required |= IXLV_FLAG_AQ_DISABLE_QUEUES;
- callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
- ixlv_sched_aq, sc);
+ start = ticks;
+ while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
+ ((ticks - start) < hz/10))
+ ixlv_do_adminq_locked(sc);
/* Stop the local timer */
callout_stop(&sc->timer);
- INIT_DBG_IF(&sc->vsi->ifp, "end");
+ INIT_DBG_IF(ifp, "end");
}
@@ -2373,6 +2512,8 @@ ixlv_free_queues(struct ixl_vsi *vsi)
/*
** ixlv_config_rss - setup RSS
+**
+** RSS keys and table are cleared on VF reset.
*/
static void
ixlv_config_rss(struct ixlv_sc *sc)
@@ -2390,6 +2531,14 @@ ixlv_config_rss(struct ixlv_sc *sc)
0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e,
0x4954b126 };
+ /* Don't set up RSS if using a single queue */
+ if (vsi->num_queues == 1) {
+ wr32(hw, I40E_VFQF_HENA(0), 0);
+ wr32(hw, I40E_VFQF_HENA(1), 0);
+ ixl_flush(hw);
+ return;
+ }
+
/* Fill out hash function seed */
for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
wr32(hw, I40E_VFQF_HKEY(i), seed[i]);
@@ -2415,15 +2564,17 @@ ixlv_config_rss(struct ixlv_sc *sc)
wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
/* Populate the LUT with max no. of queues in round robin fashion */
- for (i = j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+ for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; j++) {
if (j == vsi->num_queues)
j = 0;
/* lut = 4-byte sliding window of 4 lut entries */
- lut = (lut << 8) | (j &
- ((0x1 << hw->func_caps.rss_table_entry_width) - 1));
+ lut = (lut << 8) | (j & 0xF);
/* On i = 3, we have 4 entries in lut; write to the register */
- if ((i & 3) == 3)
- wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
+ if ((j & 3) == 3) {
+ wr32(hw, I40E_VFQF_HLUT(i), lut);
+ DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
+ i++;
+ }
}
ixl_flush(hw);
}
@@ -2449,18 +2600,16 @@ ixlv_setup_vlan_filters(struct ixlv_sc *sc)
SLIST_FOREACH(f, sc->vlan_filters, next)
if (f->flags & IXL_FILTER_ADD)
cnt++;
- if (cnt == 0)
- return;
-
- sc->aq_required |= IXLV_FLAG_AQ_ADD_VLAN_FILTER;
- return;
+ if (cnt > 0)
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
+ IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
}
/*
** This routine adds new MAC filters to the sc's list;
-** these are later added in hardware by the periodic
-** aq task.
+** these are later added in hardware by sending a virtual
+** channel message.
*/
static int
ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
@@ -2494,6 +2643,22 @@ ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
}
/*
+** Marks a MAC filter for deletion.
+*/
+static int
+ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
+{
+ struct ixlv_mac_filter *f;
+
+ f = ixlv_find_mac_filter(sc, macaddr);
+ if (f == NULL)
+ return (ENOENT);
+
+ f->flags |= IXL_FILTER_DEL;
+ return (0);
+}
+
+/*
** Tasklet handler for MSIX Adminq interrupts
** - done outside interrupt context since it might sleep
*/
@@ -2501,24 +2666,28 @@ static void
ixlv_do_adminq(void *context, int pending)
{
struct ixlv_sc *sc = context;
+
+ mtx_lock(&sc->mtx);
+ ixlv_do_adminq_locked(sc);
+ mtx_unlock(&sc->mtx);
+ return;
+}
+
+static void
+ixlv_do_adminq_locked(struct ixlv_sc *sc)
+{
struct i40e_hw *hw = &sc->hw;
struct i40e_arq_event_info event;
struct i40e_virtchnl_msg *v_msg;
i40e_status ret;
u16 result = 0;
+ IXLV_CORE_LOCK_ASSERT(sc);
event.buf_len = IXL_AQ_BUF_SZ;
- event.msg_buf = malloc(event.buf_len,
- M_DEVBUF, M_NOWAIT | M_ZERO);
- if (!event.msg_buf) {
- printf("Unable to allocate adminq memory\n");
- return;
- }
+ event.msg_buf = sc->aq_buffer;
v_msg = (struct i40e_virtchnl_msg *)&event.desc;
- mtx_lock(&sc->mtx);
- /* clean and process any events */
do {
ret = i40e_clean_arq_element(hw, &event, &result);
if (ret)
@@ -2530,88 +2699,10 @@ ixlv_do_adminq(void *context, int pending)
} while (result);
ixlv_enable_adminq_irq(hw);
- free(event.msg_buf, M_DEVBUF);
- mtx_unlock(&sc->mtx);
- return;
-}
-
-/*
-** ixlv_sched_aq - Periodic scheduling tasklet
-**
-*/
-static void
-ixlv_sched_aq(void *context)
-{
- struct ixlv_sc *sc = context;
- struct ixl_vsi *vsi = &sc->vsi;
-
- /* This is driven by a callout, don't spin */
- if (!mtx_trylock(&sc->mtx))
- goto done_nolock;
-
- if (sc->init_state == IXLV_RESET_PENDING)
- goto done;
-
- /* Process requested admin queue tasks */
- if (sc->aq_pending)
- goto done;
-
- if (sc->aq_required & IXLV_FLAG_AQ_MAP_VECTORS) {
- ixlv_map_queues(sc);
- goto done;
- }
-
- if (sc->aq_required & IXLV_FLAG_AQ_ADD_MAC_FILTER) {
- ixlv_add_ether_filters(sc);
- goto done;
- }
-
- if (sc->aq_required & IXLV_FLAG_AQ_ADD_VLAN_FILTER) {
- ixlv_add_vlans(sc);
- goto done;
- }
-
- if (sc->aq_required & IXLV_FLAG_AQ_DEL_MAC_FILTER) {
- ixlv_del_ether_filters(sc);
- goto done;
- }
-
- if (sc->aq_required & IXLV_FLAG_AQ_DEL_VLAN_FILTER) {
- ixlv_del_vlans(sc);
- goto done;
- }
-
- if (sc->aq_required & IXLV_FLAG_AQ_CONFIGURE_QUEUES) {
- ixlv_configure_queues(sc);
- goto done;
- }
-
- if (sc->aq_required & IXLV_FLAG_AQ_DISABLE_QUEUES) {
- ixlv_disable_queues(sc);
- goto done;
- }
-
- if (sc->aq_required & IXLV_FLAG_AQ_ENABLE_QUEUES) {
- ixlv_enable_queues(sc);
- goto done;
- }
-
- /* Do stats request only if no other AQ operations requested */
- if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixlv_request_stats(sc);
-
-done:
- mtx_unlock(&sc->mtx);
-done_nolock:
- if (sc->aq_required) /* Reschedule */
- callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
- ixlv_sched_aq, sc);
- else
- callout_reset(&sc->aq_task, 2 * hz, ixlv_sched_aq, sc);
}
static void
-ixlv_add_stats_sysctls(struct ixlv_sc *sc)
+ixlv_add_sysctls(struct ixlv_sc *sc)
{
device_t dev = sc->dev;
struct ixl_vsi *vsi = &sc->vsi;
@@ -2631,7 +2722,7 @@ ixlv_add_stats_sysctls(struct ixlv_sc *sc)
struct tx_ring *txr;
struct rx_ring *rxr;
- /* Driver statistics */
+ /* Driver statistics sysctls */
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
CTLFLAG_RD, &sc->watchdog_events,
"Watchdog timeouts");
@@ -2639,7 +2730,7 @@ ixlv_add_stats_sysctls(struct ixlv_sc *sc)
CTLFLAG_RD, &sc->admin_irq,
"Admin Queue IRQ Handled");
- /* VSI statistics */
+ /* VSI statistics sysctls */
vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
CTLFLAG_RD, NULL, "VSI-specific statistics");
vsi_list = SYSCTL_CHILDREN(vsi_node);
@@ -2654,13 +2745,14 @@ ixlv_add_stats_sysctls(struct ixlv_sc *sc)
{&es->rx_broadcast, "bcast_pkts_rcvd",
"Broadcast Packets Received"},
{&es->rx_discards, "rx_discards", "Discarded RX packets"},
+ {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
{&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
{&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
{&es->tx_multicast, "mcast_pkts_txd",
"Multicast Packets Transmitted"},
{&es->tx_broadcast, "bcast_pkts_txd",
"Broadcast Packets Transmitted"},
- {&es->tx_discards, "tx_discards", "Discarded TX packets"},
+ {&es->tx_errors, "tx_errors", "TX packet errors"},
// end
{0,0,0}
};
@@ -2673,7 +2765,7 @@ ixlv_add_stats_sysctls(struct ixlv_sc *sc)
entry++;
}
- /* Queue statistics */
+ /* Queue sysctls */
for (int q = 0; q < vsi->num_queues; q++) {
snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
@@ -2713,6 +2805,18 @@ ixlv_add_stats_sysctls(struct ixlv_sc *sc)
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
CTLFLAG_RD, &(rxr->rx_bytes),
"Queue Bytes Received");
+
+ /* Examine queue state */
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head",
+ CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
+ sizeof(struct ixl_queue),
+ ixlv_sysctl_qtx_tail_handler, "IU",
+ "Queue Transmit Descriptor Tail");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head",
+ CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
+ sizeof(struct ixl_queue),
+ ixlv_sysctl_qrx_tail_handler, "IU",
+ "Queue Receive Descriptor Tail");
}
}
@@ -2747,3 +2851,47 @@ ixlv_free_filters(struct ixlv_sc *sc)
return;
}
+/**
+ * ixlv_sysctl_qtx_tail_handler
+ * Retrieves I40E_QTX_TAIL1 value from hardware
+ * for a sysctl.
+ */
+static int
+ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_queue *que;
+ int error;
+ u32 val;
+
+ que = ((struct ixl_queue *)oidp->oid_arg1);
+ if (!que) return 0;
+
+ val = rd32(que->vsi->hw, que->txr.tail);
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ return error;
+ return (0);
+}
+
+/**
+ * ixlv_sysctl_qrx_tail_handler
+ * Retrieves I40E_QRX_TAIL1 value from hardware
+ * for a sysctl.
+ */
+static int
+ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_queue *que;
+ int error;
+ u32 val;
+
+ que = ((struct ixl_queue *)oidp->oid_arg1);
+ if (!que) return 0;
+
+ val = rd32(que->vsi->hw, que->rxr.tail);
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ return error;
+ return (0);
+}
+
diff --git a/sys/dev/ixl/ixl.h b/sys/dev/ixl/ixl.h
index c240b026e355..2b640ce08081 100644
--- a/sys/dev/ixl/ixl.h
+++ b/sys/dev/ixl/ixl.h
@@ -162,7 +162,9 @@
/*
** Default number of entries in Tx queue buf_ring.
*/
-#define DEFAULT_TXBRSZ (4096 * 4096)
+#define SMALL_TXBRSZ 4096
+/* This may require mbuf cluster tuning */
+#define DEFAULT_TXBRSZ (SMALL_TXBRSZ * SMALL_TXBRSZ)
/* Alignment for rings */
#define DBA_ALIGN 128
@@ -194,7 +196,7 @@
#define MAX_MULTICAST_ADDR 128
-#define IXL_BAR 3
+#define IXL_BAR 3
#define IXL_ADM_LIMIT 2
#define IXL_TSO_SIZE 65535
#define IXL_TX_BUF_SZ ((u32) 1514)
@@ -208,7 +210,7 @@
#define IXL_ITR_NONE 3
#define IXL_QUEUE_EOL 0x7FF
#define IXL_MAX_FRAME 0x2600
-#define IXL_MAX_TX_SEGS 8
+#define IXL_MAX_TX_SEGS 8
#define IXL_MAX_TSO_SEGS 66
#define IXL_SPARSE_CHAIN 6
#define IXL_QUEUE_HUNG 0x80000000
@@ -292,7 +294,6 @@
#define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count)
#endif
-
/*
*****************************************************************************
* vendor_info_array
@@ -476,6 +477,7 @@ struct ixl_vsi {
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
bool stat_offsets_loaded;
+ /* VSI stat counters */
u64 ipackets;
u64 ierrors;
u64 opackets;
@@ -523,7 +525,8 @@ ixl_get_filter(struct ixl_vsi *vsi)
/* create a new empty filter */
f = malloc(sizeof(struct ixl_mac_filter),
M_DEVBUF, M_NOWAIT | M_ZERO);
- SLIST_INSERT_HEAD(&vsi->ftl, f, next);
+ if (f)
+ SLIST_INSERT_HEAD(&vsi->ftl, f, next);
return (f);
}
diff --git a/sys/dev/ixl/ixl_txrx.c b/sys/dev/ixl/ixl_txrx.c
index b804c76f9840..ea43af85ae59 100755
--- a/sys/dev/ixl/ixl_txrx.c
+++ b/sys/dev/ixl/ixl_txrx.c
@@ -238,6 +238,11 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
maxsegs = IXL_MAX_TSO_SEGS;
if (ixl_tso_detect_sparse(m_head)) {
m = m_defrag(m_head, M_NOWAIT);
+ if (m == NULL) {
+ m_freem(*m_headp);
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
*m_headp = m;
}
}
@@ -791,6 +796,7 @@ ixl_txeof(struct ixl_queue *que)
mtx_assert(&txr->mtx, MA_OWNED);
+
/* These are not the descriptors you seek, move along :) */
if (txr->avail == que->num_desc) {
que->busy = 0;
@@ -1186,6 +1192,9 @@ skip_head:
rxr->bytes = 0;
rxr->discard = FALSE;
+ wr32(vsi->hw, rxr->tail, que->num_desc - 1);
+ ixl_flush(vsi->hw);
+
#if defined(INET6) || defined(INET)
/*
** Now set up the LRO interface:
@@ -1365,6 +1374,7 @@ ixl_rxeof(struct ixl_queue *que, int count)
IXL_RX_LOCK(rxr);
+
for (i = rxr->next_check; count != 0;) {
struct mbuf *sendmp, *mh, *mp;
u32 rsc, status, error;
@@ -1660,3 +1670,4 @@ ixl_get_counter(if_t ifp, ift_counter cnt)
}
}
#endif
+
diff --git a/sys/dev/ixl/ixlv.h b/sys/dev/ixl/ixlv.h
index a5bfe13fbe66..77a02fa7e879 100644
--- a/sys/dev/ixl/ixlv.h
+++ b/sys/dev/ixl/ixlv.h
@@ -36,11 +36,13 @@
#ifndef _IXLV_H_
#define _IXLV_H_
-#define IXLV_AQ_MAX_ERR 100
+#include "ixlv_vc_mgr.h"
+
+#define IXLV_AQ_MAX_ERR 1000
#define IXLV_MAX_FILTERS 128
-#define IXLV_MAX_QUEUES 16
-#define IXLV_AQ_TIMEOUT (1 * hz)
-#define IXLV_CALLOUT_TIMO (hz / 50) // 20 msec
+#define IXLV_MAX_QUEUES 16
+#define IXLV_AQ_TIMEOUT (1 * hz)
+#define IXLV_CALLOUT_TIMO (hz / 50) /* 20 msec */
#define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1)
#define IXLV_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
@@ -51,8 +53,8 @@
#define IXLV_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
#define IXLV_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
#define IXLV_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
-#define IXLV_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9)
-#define IXLV_FLAG_AQ_GET_STATS (u32)(1 << 10)
+#define IXLV_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9)
+#define IXLV_FLAG_AQ_GET_STATS (u32)(1 << 10)
/* printf %b arg */
#define IXLV_FLAGS \
@@ -61,6 +63,9 @@
"\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \
"\12CONFIGURE_PROMISC\13GET_STATS"
+/* Hack for compatibility with 1.0.x linux pf driver */
+#define I40E_VIRTCHNL_OP_EVENT 17
+
/* Driver state */
enum ixlv_state_t {
IXLV_START,
@@ -111,12 +116,10 @@ struct ixlv_sc {
struct ifmedia media;
struct callout timer;
- struct callout aq_task;
int msix;
int if_flags;
struct mtx mtx;
- struct mtx aq_task_mtx;
u32 qbase;
u32 admvec;
@@ -127,10 +130,8 @@ struct ixlv_sc {
struct ixl_vsi vsi;
- /* Mac Filter List */
+ /* Filter lists */
struct mac_list *mac_filters;
-
- /* Vlan Filter List */
struct vlan_list *vlan_filters;
/* Promiscuous mode */
@@ -138,11 +139,19 @@ struct ixlv_sc {
/* Admin queue task flags */
u32 aq_wait_count;
- u32 aq_required;
- u32 aq_pending;
+
+ struct ixl_vc_mgr vc_mgr;
+ struct ixl_vc_cmd add_mac_cmd;
+ struct ixl_vc_cmd del_mac_cmd;
+ struct ixl_vc_cmd config_queues_cmd;
+ struct ixl_vc_cmd map_vectors_cmd;
+ struct ixl_vc_cmd enable_queues_cmd;
+ struct ixl_vc_cmd add_vlan_cmd;
+ struct ixl_vc_cmd del_vlan_cmd;
+ struct ixl_vc_cmd add_multi_cmd;
+ struct ixl_vc_cmd del_multi_cmd;
/* Virtual comm channel */
- enum i40e_virtchnl_ops current_op;
struct i40e_virtchnl_vf_resource *vf_res;
struct i40e_virtchnl_vsi_resource *vsi_res;
@@ -150,16 +159,10 @@ struct ixlv_sc {
u64 watchdog_events;
u64 admin_irq;
- /* Signaling channels */
- u8 init_done;
- u8 config_queues_done;
- u8 map_vectors_done;
- u8 enable_queues_done;
- u8 disable_queues_done;
- u8 add_ether_done;
- u8 del_ether_done;
+ u8 aq_buffer[IXL_AQ_BUF_SZ];
};
+#define IXLV_CORE_LOCK_ASSERT(sc) mtx_assert(&(sc)->mtx, MA_OWNED)
/*
** This checks for a zero mac addr, something that will be likely
** unless the Admin on the Host has created one.
@@ -174,7 +177,7 @@ ixlv_check_ether_addr(u8 *addr)
status = FALSE;
return (status);
}
-
+
/*
** VF Common function prototypes
*/
@@ -201,5 +204,6 @@ void ixlv_add_vlans(struct ixlv_sc *);
void ixlv_del_vlans(struct ixlv_sc *);
void ixlv_update_stats_counters(struct ixlv_sc *,
struct i40e_eth_stats *);
+void ixlv_update_link_status(struct ixlv_sc *);
#endif /* _IXLV_H_ */
diff --git a/sys/dev/ixl/ixlvc.c b/sys/dev/ixl/ixlvc.c
index ef69a82f610b..aa81bc13890a 100644
--- a/sys/dev/ixl/ixlvc.c
+++ b/sys/dev/ixl/ixlvc.c
@@ -47,6 +47,13 @@
#define IXLV_BUSY_WAIT_DELAY 10
#define IXLV_BUSY_WAIT_COUNT 50
+static void ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t,
+ enum i40e_status_code);
+static void ixl_vc_process_next(struct ixl_vc_mgr *mgr);
+static void ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr);
+static void ixl_vc_send_current(struct ixl_vc_mgr *mgr);
+
+#ifdef IXL_DEBUG
/*
** Validate VF messages
*/
@@ -140,6 +147,7 @@ static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
else
return 0;
}
+#endif
/*
** ixlv_send_pf_msg
@@ -153,16 +161,17 @@ ixlv_send_pf_msg(struct ixlv_sc *sc,
struct i40e_hw *hw = &sc->hw;
device_t dev = sc->dev;
i40e_status err;
- int val_err;
+#ifdef IXL_DEBUG
/*
- ** Pre-validating messages to the PF, this might be
- ** removed for performance later?
+ ** Pre-validating messages to the PF
*/
+ int val_err;
val_err = ixl_vc_validate_vf_msg(sc, op, msg, len);
if (val_err)
device_printf(dev, "Error validating msg to PF for op %d,"
" msglen %d: error %d\n", op, len, val_err);
+#endif
err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
if (err)
@@ -198,7 +207,8 @@ ixlv_send_api_ver(struct ixlv_sc *sc)
** initialized. Returns 0 if API versions match, EIO if
** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
*/
-int ixlv_verify_api_ver(struct ixlv_sc *sc)
+int
+ixlv_verify_api_ver(struct ixlv_sc *sc)
{
struct i40e_virtchnl_version_info *pf_vvi;
struct i40e_hw *hw = &sc->hw;
@@ -232,6 +242,8 @@ int ixlv_verify_api_ver(struct ixlv_sc *sc)
if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
I40E_VIRTCHNL_OP_VERSION) {
+ DDPRINTF(sc->dev, "Received unexpected op response: %d\n",
+ le32toh(event.desc.cookie_high));
err = EIO;
goto out_alloc;
}
@@ -289,15 +301,15 @@ ixlv_get_vf_config(struct ixlv_sc *sc)
goto out;
}
- do {
+ for (;;) {
err = i40e_clean_arq_element(hw, &event, NULL);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
if (++retries <= IXLV_AQ_MAX_ERR)
- i40e_msec_delay(100);
+ i40e_msec_delay(10);
} else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
- device_printf(dev, "%s: Received a response from PF,"
- " opcode %d, error %d\n", __func__,
+ DDPRINTF(dev, "Received a response from PF,"
+ " opcode %d, error %d",
le32toh(event.desc.cookie_high),
le32toh(event.desc.cookie_low));
retries++;
@@ -312,16 +324,17 @@ ixlv_get_vf_config(struct ixlv_sc *sc)
err = EIO;
goto out_alloc;
}
+ /* We retrieved the config message, with no errors */
break;
}
if (retries > IXLV_AQ_MAX_ERR) {
INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
retries);
+ err = ETIMEDOUT;
goto out_alloc;
}
-
- } while (err);
+ }
memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
i40e_vf_parse_hw_config(hw, sc->vf_res);
@@ -345,28 +358,18 @@ ixlv_configure_queues(struct ixlv_sc *sc)
struct ixl_queue *que = vsi->queues;
struct tx_ring *txr;
struct rx_ring *rxr;
- int len, pairs;;
+ int len, pairs;
struct i40e_virtchnl_vsi_queue_config_info *vqci;
struct i40e_virtchnl_queue_pair_info *vqpi;
-
- if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
- /* bail because we already have a command pending */
-#ifdef IXL_DEBUG
- device_printf(dev, "%s: command %d pending\n",
- __func__, sc->current_op);
-#endif
- return;
- }
-
pairs = vsi->num_queues;
- sc->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
(sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (!vqci) {
device_printf(dev, "%s: unable to allocate memory\n", __func__);
+ ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
vqci->vsi_id = sc->vsi_res->vsi_id;
@@ -375,7 +378,7 @@ ixlv_configure_queues(struct ixlv_sc *sc)
/* Size check is not needed here - HW max is 16 queue pairs, and we
* can fit info for 31 of them into the AQ buffer before it overflows.
*/
- for (int i = 0; i < pairs; i++, que++) {
+ for (int i = 0; i < pairs; i++, que++, vqpi++) {
txr = &que->txr;
rxr = &que->rxr;
vqpi->txq.vsi_id = vqci->vsi_id;
@@ -393,14 +396,12 @@ ixlv_configure_queues(struct ixlv_sc *sc)
vqpi->rxq.dma_ring_addr = rxr->dma.pa;
vqpi->rxq.max_pkt_size = vsi->max_frame_size;
vqpi->rxq.databuffer_size = rxr->mbuf_sz;
- vqpi++;
+ vqpi->rxq.splithdr_enabled = 0;
}
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
(u8 *)vqci, len);
free(vqci, M_DEVBUF);
- sc->aq_pending |= IXLV_FLAG_AQ_CONFIGURE_QUEUES;
- sc->aq_required &= ~IXLV_FLAG_AQ_CONFIGURE_QUEUES;
}
/*
@@ -413,22 +414,11 @@ ixlv_enable_queues(struct ixlv_sc *sc)
{
struct i40e_virtchnl_queue_select vqs;
- if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
- /* we already have a command pending */
-#ifdef IXL_DEBUG
- device_printf(sc->dev, "%s: command %d pending\n",
- __func__, sc->current_op);
-#endif
- return;
- }
- sc->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
vqs.vsi_id = sc->vsi_res->vsi_id;
vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
vqs.rx_queues = vqs.tx_queues;
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
- sc->aq_pending |= IXLV_FLAG_AQ_ENABLE_QUEUES;
- sc->aq_required &= ~IXLV_FLAG_AQ_ENABLE_QUEUES;
}
/*
@@ -441,22 +431,11 @@ ixlv_disable_queues(struct ixlv_sc *sc)
{
struct i40e_virtchnl_queue_select vqs;
- if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
- /* we already have a command pending */
-#ifdef IXL_DEBUG
- device_printf(sc->dev, "%s: command %d pending\n",
- __func__, sc->current_op);
-#endif
- return;
- }
- sc->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
vqs.vsi_id = sc->vsi_res->vsi_id;
vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
vqs.rx_queues = vqs.tx_queues;
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
- sc->aq_pending |= IXLV_FLAG_AQ_DISABLE_QUEUES;
- sc->aq_required &= ~IXLV_FLAG_AQ_DISABLE_QUEUES;
}
/*
@@ -473,16 +452,6 @@ ixlv_map_queues(struct ixlv_sc *sc)
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
- if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
- /* we already have a command pending */
-#ifdef IXL_DEBUG
- device_printf(sc->dev, "%s: command %d pending\n",
- __func__, sc->current_op);
-#endif
- return;
- }
- sc->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
-
/* How many queue vectors, adminq uses one */
q = sc->msix - 1;
@@ -491,6 +460,7 @@ ixlv_map_queues(struct ixlv_sc *sc)
vm = malloc(len, M_DEVBUF, M_NOWAIT);
if (!vm) {
printf("%s: unable to allocate memory\n", __func__);
+ ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
@@ -501,6 +471,8 @@ ixlv_map_queues(struct ixlv_sc *sc)
vm->vecmap[i].vector_id = i + 1; /* first is adminq */
vm->vecmap[i].txq_map = (1 << que->me);
vm->vecmap[i].rxq_map = (1 << que->me);
+ vm->vecmap[i].rxitr_idx = 0;
+ vm->vecmap[i].txitr_idx = 0;
}
/* Misc vector last - this is only for AdminQ messages */
@@ -508,12 +480,12 @@ ixlv_map_queues(struct ixlv_sc *sc)
vm->vecmap[i].vector_id = 0;
vm->vecmap[i].txq_map = 0;
vm->vecmap[i].rxq_map = 0;
+ vm->vecmap[i].rxitr_idx = 0;
+ vm->vecmap[i].txitr_idx = 0;
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
(u8 *)vm, len);
free(vm, M_DEVBUF);
- sc->aq_pending |= IXLV_FLAG_AQ_MAP_VECTORS;
- sc->aq_required &= ~IXLV_FLAG_AQ_MAP_VECTORS;
}
/*
@@ -529,11 +501,6 @@ ixlv_add_vlans(struct ixlv_sc *sc)
device_t dev = sc->dev;
int len, i = 0, cnt = 0;
- if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
- return;
-
- sc->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;
-
/* Get count of VLAN filters to add */
SLIST_FOREACH(f, sc->vlan_filters, next) {
if (f->flags & IXL_FILTER_ADD)
@@ -541,8 +508,8 @@ ixlv_add_vlans(struct ixlv_sc *sc)
}
if (!cnt) { /* no work... */
- sc->aq_required &= ~IXLV_FLAG_AQ_ADD_VLAN_FILTER;
- sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
+ I40E_SUCCESS);
return;
}
@@ -552,6 +519,7 @@ ixlv_add_vlans(struct ixlv_sc *sc)
if (len > IXL_AQ_BUF_SZ) {
device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
__func__);
+ ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
@@ -559,6 +527,7 @@ ixlv_add_vlans(struct ixlv_sc *sc)
if (!v) {
device_printf(dev, "%s: unable to allocate memory\n",
__func__);
+ ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
@@ -575,16 +544,17 @@ ixlv_add_vlans(struct ixlv_sc *sc)
if (i == cnt)
break;
}
- if (i == 0) { /* Should not happen... */
- device_printf(dev, "%s: i == 0?\n", __func__);
- return;
- }
+ // ERJ: Should this be taken out?
+ if (i == 0) { /* Should not happen... */
+ device_printf(dev, "%s: i == 0?\n", __func__);
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
+ I40E_SUCCESS);
+ return;
+ }
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
free(v, M_DEVBUF);
/* add stats? */
- sc->aq_pending |= IXLV_FLAG_AQ_ADD_VLAN_FILTER;
- sc->aq_required &= ~IXLV_FLAG_AQ_ADD_VLAN_FILTER;
}
/*
@@ -600,11 +570,6 @@ ixlv_del_vlans(struct ixlv_sc *sc)
struct ixlv_vlan_filter *f, *ftmp;
int len, i = 0, cnt = 0;
- if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
- return;
-
- sc->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;
-
/* Get count of VLAN filters to delete */
SLIST_FOREACH(f, sc->vlan_filters, next) {
if (f->flags & IXL_FILTER_DEL)
@@ -612,8 +577,8 @@ ixlv_del_vlans(struct ixlv_sc *sc)
}
if (!cnt) { /* no work... */
- sc->aq_required &= ~IXLV_FLAG_AQ_DEL_VLAN_FILTER;
- sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
+ I40E_SUCCESS);
return;
}
@@ -623,6 +588,7 @@ ixlv_del_vlans(struct ixlv_sc *sc)
if (len > IXL_AQ_BUF_SZ) {
device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
__func__);
+ ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
@@ -630,6 +596,7 @@ ixlv_del_vlans(struct ixlv_sc *sc)
if (!v) {
device_printf(dev, "%s: unable to allocate memory\n",
__func__);
+ ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
@@ -647,16 +614,17 @@ ixlv_del_vlans(struct ixlv_sc *sc)
if (i == cnt)
break;
}
- if (i == 0) { /* Should not happen... */
- device_printf(dev, "%s: i == 0?\n", __func__);
- return;
- }
+ // ERJ: Take this out?
+ if (i == 0) { /* Should not happen... */
+ device_printf(dev, "%s: i == 0?\n", __func__);
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
+ I40E_SUCCESS);
+ return;
+ }
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
free(v, M_DEVBUF);
/* add stats? */
- sc->aq_pending |= IXLV_FLAG_AQ_DEL_VLAN_FILTER;
- sc->aq_required &= ~IXLV_FLAG_AQ_DEL_VLAN_FILTER;
}
@@ -673,11 +641,6 @@ ixlv_add_ether_filters(struct ixlv_sc *sc)
device_t dev = sc->dev;
int len, j = 0, cnt = 0;
- if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
- return;
-
- sc->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
-
/* Get count of MAC addresses to add */
SLIST_FOREACH(f, sc->mac_filters, next) {
if (f->flags & IXL_FILTER_ADD)
@@ -685,9 +648,8 @@ ixlv_add_ether_filters(struct ixlv_sc *sc)
}
if (cnt == 0) { /* Should not happen... */
DDPRINTF(dev, "cnt == 0, exiting...");
- sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
- sc->aq_required &= ~IXLV_FLAG_AQ_ADD_MAC_FILTER;
- wakeup(&sc->add_ether_done);
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
+ I40E_SUCCESS);
return;
}
@@ -698,6 +660,7 @@ ixlv_add_ether_filters(struct ixlv_sc *sc)
if (a == NULL) {
device_printf(dev, "%s: Failed to get memory for "
"virtchnl_ether_addr_list\n", __func__);
+ ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
a->vsi_id = sc->vsi.id;
@@ -722,8 +685,6 @@ ixlv_add_ether_filters(struct ixlv_sc *sc)
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
/* add stats? */
free(a, M_DEVBUF);
- sc->aq_pending |= IXLV_FLAG_AQ_ADD_MAC_FILTER;
- sc->aq_required &= ~IXLV_FLAG_AQ_ADD_MAC_FILTER;
return;
}
@@ -740,11 +701,6 @@ ixlv_del_ether_filters(struct ixlv_sc *sc)
struct ixlv_mac_filter *f, *f_temp;
int len, j = 0, cnt = 0;
- if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
- return;
-
- sc->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
-
/* Get count of MAC addresses to delete */
SLIST_FOREACH(f, sc->mac_filters, next) {
if (f->flags & IXL_FILTER_DEL)
@@ -752,9 +708,8 @@ ixlv_del_ether_filters(struct ixlv_sc *sc)
}
if (cnt == 0) {
DDPRINTF(dev, "cnt == 0, exiting...");
- sc->aq_required &= ~IXLV_FLAG_AQ_DEL_MAC_FILTER;
- sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
- wakeup(&sc->del_ether_done);
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
+ I40E_SUCCESS);
return;
}
@@ -765,6 +720,7 @@ ixlv_del_ether_filters(struct ixlv_sc *sc)
if (d == NULL) {
device_printf(dev, "%s: Failed to get memory for "
"virtchnl_ether_addr_list\n", __func__);
+ ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
d->vsi_id = sc->vsi.id;
@@ -787,8 +743,6 @@ ixlv_del_ether_filters(struct ixlv_sc *sc)
I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
/* add stats? */
free(d, M_DEVBUF);
- sc->aq_pending |= IXLV_FLAG_AQ_DEL_MAC_FILTER;
- sc->aq_required &= ~IXLV_FLAG_AQ_DEL_MAC_FILTER;
return;
}
@@ -806,7 +760,6 @@ ixlv_request_reset(struct ixlv_sc *sc)
*/
wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
- sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
/*
@@ -817,18 +770,11 @@ void
ixlv_request_stats(struct ixlv_sc *sc)
{
struct i40e_virtchnl_queue_select vqs;
- int error = 0;
-
- if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
- return;
- sc->current_op = I40E_VIRTCHNL_OP_GET_STATS;
vqs.vsi_id = sc->vsi_res->vsi_id;
- error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
+ /* Low priority, we don't need to error check */
+ ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
(u8 *)&vqs, sizeof(vqs));
- /* Low priority, ok if it fails */
- if (error)
- sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
/*
@@ -889,10 +835,16 @@ ixlv_vc_completion(struct ixlv_sc *sc,
switch (vpe->event) {
case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+#ifdef IXL_DEBUG
+ device_printf(dev, "Link change: status %d, speed %d\n",
+ vpe->event_data.link_event.link_status,
+ vpe->event_data.link_event.link_speed);
+#endif
vsi->link_up =
vpe->event_data.link_event.link_status;
vsi->link_speed =
vpe->event_data.link_event.link_speed;
+ ixlv_update_link_status(sc);
break;
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
device_printf(dev, "PF initiated reset!\n");
@@ -908,14 +860,6 @@ ixlv_vc_completion(struct ixlv_sc *sc,
return;
}
- if (v_opcode != sc->current_op
- && sc->current_op != I40E_VIRTCHNL_OP_GET_STATS) {
- device_printf(dev, "%s: Pending op is %d, received %d.\n",
- __func__, sc->current_op, v_opcode);
- sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
- return;
- }
-
/* Catch-all error response */
if (v_retval) {
device_printf(dev,
@@ -933,27 +877,35 @@ ixlv_vc_completion(struct ixlv_sc *sc,
ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
break;
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
- sc->aq_pending &= ~(IXLV_FLAG_AQ_ADD_MAC_FILTER);
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
+ v_retval);
if (v_retval) {
device_printf(dev, "WARNING: Error adding VF mac filter!\n");
device_printf(dev, "WARNING: Device may not receive traffic!\n");
}
break;
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
- sc->aq_pending &= ~(IXLV_FLAG_AQ_DEL_MAC_FILTER);
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
+ v_retval);
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- sc->aq_pending &= ~(IXLV_FLAG_AQ_CONFIGURE_PROMISC);
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC,
+ v_retval);
break;
case I40E_VIRTCHNL_OP_ADD_VLAN:
- sc->aq_pending &= ~(IXLV_FLAG_AQ_ADD_VLAN_FILTER);
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
+ v_retval);
break;
case I40E_VIRTCHNL_OP_DEL_VLAN:
- sc->aq_pending &= ~(IXLV_FLAG_AQ_DEL_VLAN_FILTER);
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
+ v_retval);
break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
- sc->aq_pending &= ~(IXLV_FLAG_AQ_ENABLE_QUEUES);
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES,
+ v_retval);
if (v_retval == 0) {
+ /* Update link status */
+ ixlv_update_link_status(sc);
/* Turn on all interrupts */
ixlv_enable_intr(vsi);
/* And inform the stack we're ready */
@@ -962,7 +914,8 @@ ixlv_vc_completion(struct ixlv_sc *sc,
}
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
- sc->aq_pending &= ~(IXLV_FLAG_AQ_DISABLE_QUEUES);
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES,
+ v_retval);
if (v_retval == 0) {
/* Turn off all interrupts */
ixlv_disable_intr(vsi);
@@ -971,10 +924,12 @@ ixlv_vc_completion(struct ixlv_sc *sc,
}
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- sc->aq_pending &= ~(IXLV_FLAG_AQ_CONFIGURE_QUEUES);
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES,
+ v_retval);
break;
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
- sc->aq_pending &= ~(IXLV_FLAG_AQ_MAP_VECTORS);
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
+ v_retval);
break;
default:
device_printf(dev,
@@ -982,6 +937,181 @@ ixlv_vc_completion(struct ixlv_sc *sc,
__func__, v_opcode);
break;
}
- sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
return;
}
+
+static void
+ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request)
+{
+
+ switch (request) {
+ case IXLV_FLAG_AQ_MAP_VECTORS:
+ ixlv_map_queues(sc);
+ break;
+
+ case IXLV_FLAG_AQ_ADD_MAC_FILTER:
+ ixlv_add_ether_filters(sc);
+ break;
+
+ case IXLV_FLAG_AQ_ADD_VLAN_FILTER:
+ ixlv_add_vlans(sc);
+ break;
+
+ case IXLV_FLAG_AQ_DEL_MAC_FILTER:
+ ixlv_del_ether_filters(sc);
+ break;
+
+ case IXLV_FLAG_AQ_DEL_VLAN_FILTER:
+ ixlv_del_vlans(sc);
+ break;
+
+ case IXLV_FLAG_AQ_CONFIGURE_QUEUES:
+ ixlv_configure_queues(sc);
+ break;
+
+ case IXLV_FLAG_AQ_DISABLE_QUEUES:
+ ixlv_disable_queues(sc);
+ break;
+
+ case IXLV_FLAG_AQ_ENABLE_QUEUES:
+ ixlv_enable_queues(sc);
+ break;
+ }
+}
+
+void
+ixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr)
+{
+ mgr->sc = sc;
+ mgr->current = NULL;
+ TAILQ_INIT(&mgr->pending);
+ callout_init_mtx(&mgr->callout, &sc->mtx, 0);
+}
+
+static void
+ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err)
+{
+ struct ixl_vc_cmd *cmd;
+
+ cmd = mgr->current;
+ mgr->current = NULL;
+ cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
+
+ cmd->callback(cmd, cmd->arg, err);
+ ixl_vc_process_next(mgr);
+}
+
+static void
+ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request,
+ enum i40e_status_code err)
+{
+ struct ixl_vc_cmd *cmd;
+
+ cmd = mgr->current;
+ if (cmd == NULL || cmd->request != request)
+ return;
+
+ callout_stop(&mgr->callout);
+ ixl_vc_process_completion(mgr, err);
+}
+
+static void
+ixl_vc_cmd_timeout(void *arg)
+{
+ struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
+
+ IXLV_CORE_LOCK_ASSERT(mgr->sc);
+ ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT);
+}
+
+static void
+ixl_vc_cmd_retry(void *arg)
+{
+ struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
+
+ IXLV_CORE_LOCK_ASSERT(mgr->sc);
+ ixl_vc_send_current(mgr);
+}
+
+static void
+ixl_vc_send_current(struct ixl_vc_mgr *mgr)
+{
+ struct ixl_vc_cmd *cmd;
+
+ cmd = mgr->current;
+ ixl_vc_send_cmd(mgr->sc, cmd->request);
+ callout_reset(&mgr->callout, IXLV_VC_TIMEOUT, ixl_vc_cmd_timeout, mgr);
+}
+
+static void
+ixl_vc_process_next(struct ixl_vc_mgr *mgr)
+{
+ struct ixl_vc_cmd *cmd;
+
+ if (mgr->current != NULL)
+ return;
+
+ if (TAILQ_EMPTY(&mgr->pending))
+ return;
+
+ cmd = TAILQ_FIRST(&mgr->pending);
+ TAILQ_REMOVE(&mgr->pending, cmd, next);
+
+ mgr->current = cmd;
+ ixl_vc_send_current(mgr);
+}
+
+static void
+ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr)
+{
+
+ callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr);
+}
+
+void
+ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd,
+ uint32_t req, ixl_vc_callback_t *callback, void *arg)
+{
+ IXLV_CORE_LOCK_ASSERT(mgr->sc);
+
+ if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) {
+ if (mgr->current == cmd)
+ mgr->current = NULL;
+ else
+ TAILQ_REMOVE(&mgr->pending, cmd, next);
+ }
+
+ cmd->request = req;
+ cmd->callback = callback;
+ cmd->arg = arg;
+ cmd->flags |= IXLV_VC_CMD_FLAG_BUSY;
+ TAILQ_INSERT_TAIL(&mgr->pending, cmd, next);
+
+ ixl_vc_process_next(mgr);
+}
+
+void
+ixl_vc_flush(struct ixl_vc_mgr *mgr)
+{
+ struct ixl_vc_cmd *cmd;
+
+ IXLV_CORE_LOCK_ASSERT(mgr->sc);
+ KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL,
+ ("ixlv: pending commands waiting but no command in progress"));
+
+ cmd = mgr->current;
+ if (cmd != NULL) {
+ mgr->current = NULL;
+ cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
+ cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
+ }
+
+ while ((cmd = TAILQ_FIRST(&mgr->pending)) != NULL) {
+ TAILQ_REMOVE(&mgr->pending, cmd, next);
+ cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
+ cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
+ }
+
+ callout_stop(&mgr->callout);
+}
+