aboutsummaryrefslogtreecommitdiff
path: root/sys/dev/ixl/ixl_pf_main.c
diff options
context:
space:
mode:
authorEric Joyner <erj@FreeBSD.org>2018-05-01 18:50:12 +0000
committerEric Joyner <erj@FreeBSD.org>2018-05-01 18:50:12 +0000
commitceebc2f348c028b21bf9bcc99f7a3c4b0cb7d926 (patch)
tree6b8d325ba633c156d1794daed40069c3f4ec914a /sys/dev/ixl/ixl_pf_main.c
parent7631477269f3f92c6517594de2a152b5792eff36 (diff)
downloadsrc-ceebc2f348c028b21bf9bcc99f7a3c4b0cb7d926.tar.gz
src-ceebc2f348c028b21bf9bcc99f7a3c4b0cb7d926.zip
ixl(4): Update to 1.9.9-k
Refresh upstream driver before impending conversion to iflib.

Major changes:
- Support for descriptor writeback mode (required by ixlv(4) for AVF support)
- Ability to disable firmware LLDP agent by user (PR 221530)
- Fix for TX queue hang when using TSO (PR 221919)
- Separate descriptor ring sizes for TX and RX rings

PR:		221530, 221919
Submitted by:	Krzysztof Galazka <krzysztof.galazka@intel.com>
Reviewed by:	#IntelNetworking
MFC after:	1 day
Relnotes:	Yes
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D14985
Notes
Notes: svn path=/head/; revision=333149
Diffstat (limited to 'sys/dev/ixl/ixl_pf_main.c')
-rw-r--r--sys/dev/ixl/ixl_pf_main.c1117
1 files changed, 789 insertions, 328 deletions
diff --git a/sys/dev/ixl/ixl_pf_main.c b/sys/dev/ixl/ixl_pf_main.c
index 1befa463154e..0351d5b0852c 100644
--- a/sys/dev/ixl/ixl_pf_main.c
+++ b/sys/dev/ixl/ixl_pf_main.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -50,14 +50,16 @@
#include <dev/netmap/netmap_kern.h>
#endif /* DEV_NETMAP */
-static int ixl_setup_queue(struct ixl_queue *, struct ixl_pf *, int);
+static int ixl_vsi_setup_queue(struct ixl_vsi *, struct ixl_queue *, int);
static u64 ixl_max_aq_speed_to_value(u8);
static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
+static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
/* Sysctls */
-static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
-static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
-static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
@@ -80,6 +82,8 @@ static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
@@ -87,6 +91,7 @@ static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#ifdef IXL_IW
extern int ixl_enable_iwarp;
+extern int ixl_limit_iwarp_msix;
#endif
const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
@@ -101,6 +106,12 @@ const char * const ixl_fc_string[6] = {
"Default"
};
+static char *ixl_fec_string[3] = {
+ "CL108 RS-FEC",
+ "CL74 FC-FEC/BASE-R",
+ "None"
+};
+
MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
void
@@ -242,12 +253,13 @@ ixl_init_locked(struct ixl_pf *pf)
/* Get the latest mac address... User might use a LAA */
bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
- I40E_ETH_LENGTH_OF_ADDRESS);
+ ETH_ALEN);
if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
(i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
+ device_printf(dev, "ixl_init_locked: reconfigure MAC addr\n");
ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
bcopy(tmpaddr, hw->mac.addr,
- I40E_ETH_LENGTH_OF_ADDRESS);
+ ETH_ALEN);
ret = i40e_aq_mac_address_write(hw,
I40E_AQC_WRITE_TYPE_LAA_ONLY,
hw->mac.addr, NULL);
@@ -328,7 +340,6 @@ ixl_init_locked(struct ixl_pf *pf)
"initialize iwarp failed, code %d\n", ret);
}
#endif
-
}
@@ -387,19 +398,20 @@ retry:
hw->func_caps.num_rx_qp,
hw->func_caps.base_queue);
#endif
+ struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back;
+ osdep->i2c_intfc_num = ixl_find_i2c_interface(pf);
+ if (osdep->i2c_intfc_num != -1)
+ pf->has_i2c = true;
+
/* Print a subset of the capability information. */
device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
(hw->func_caps.mdio_port_mode == 2) ? "I2C" :
+ (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
(hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
"MDIO shared");
- struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back;
- osdep->i2c_intfc_num = ixl_find_i2c_interface(pf);
- if (osdep->i2c_intfc_num != -1)
- pf->has_i2c = true;
-
return (error);
}
@@ -477,25 +489,25 @@ ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
/* For the set_advertise sysctl */
void
-ixl_get_initial_advertised_speeds(struct ixl_pf *pf)
+ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
- enum i40e_status_code status;
- struct i40e_aq_get_phy_abilities_resp abilities;
+ int err;
- /* Set initial sysctl values */
- status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities,
- NULL);
- if (status) {
+ /* Make sure to initialize the device to the complete list of
+ * supported speeds on driver load, to ensure unloading and
+ * reloading the driver will restore this value.
+ */
+ err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
+ if (err) {
/* Non-fatal error */
- device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n",
- __func__, status);
+ device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
+ __func__, err);
return;
}
pf->advertised_speed =
- ixl_convert_sysctl_aq_link_speed(abilities.link_speed, false);
+ ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}
int
@@ -515,10 +527,8 @@ ixl_teardown_hw_structs(struct ixl_pf *pf)
}
}
- // XXX: This gets called when we know the adminq is inactive;
- // so we already know it's setup when we get here.
-
/* Shutdown admin queue */
+ ixl_disable_intr0(hw);
status = i40e_shutdown_adminq(hw);
if (status)
device_printf(dev,
@@ -540,7 +550,7 @@ ixl_reset(struct ixl_pf *pf)
i40e_clear_hw(hw);
error = i40e_pf_reset(hw);
if (error) {
- device_printf(dev, "init: PF reset failure");
+ device_printf(dev, "init: PF reset failure\n");
error = EIO;
goto err_out;
}
@@ -548,7 +558,7 @@ ixl_reset(struct ixl_pf *pf)
error = i40e_init_adminq(hw);
if (error) {
device_printf(dev, "init: Admin queue init failure;"
- " status code %d", error);
+ " status code %d\n", error);
error = EIO;
goto err_out;
}
@@ -618,6 +628,12 @@ ixl_reset(struct ixl_pf *pf)
}
+ /* Re-enable admin queue interrupt */
+ if (pf->msix > 1) {
+ ixl_configure_intr0_msix(pf);
+ ixl_enable_intr0(hw);
+ }
+
err_out:
return (error);
}
@@ -630,6 +646,7 @@ ixl_handle_que(void *context, int pending)
{
struct ixl_queue *que = context;
struct ixl_vsi *vsi = que->vsi;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct i40e_hw *hw = vsi->hw;
struct tx_ring *txr = &que->txr;
struct ifnet *ifp = vsi->ifp;
@@ -648,9 +665,11 @@ ixl_handle_que(void *context, int pending)
}
}
- /* Reenable this interrupt - hmmm */
- ixl_enable_queue(hw, que->me);
- return;
+ /* Re-enable queue interrupt */
+ if (pf->msix > 1)
+ ixl_enable_queue(hw, que->me);
+ else
+ ixl_enable_intr0(hw);
}
@@ -669,13 +688,15 @@ ixl_intr(void *arg)
struct ifnet *ifp = vsi->ifp;
struct tx_ring *txr = &que->txr;
u32 icr0;
- bool more_tx, more_rx;
+ bool more;
pf->admin_irq++;
- /* Protect against spurious interrupts */
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
+ /* Clear PBA at start of ISR if using legacy interrupts */
+ if (pf->msix == 0)
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
icr0 = rd32(hw, I40E_PFINT_ICR0);
@@ -685,20 +706,22 @@ ixl_intr(void *arg)
taskqueue_enqueue(pf->tq, &pf->vflr_task);
#endif
- if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
taskqueue_enqueue(pf->tq, &pf->adminq);
- }
- if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
++que->irqs;
- more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
+ more = ixl_rxeof(que, IXL_RX_LIMIT);
IXL_TX_LOCK(txr);
- more_tx = ixl_txeof(que);
+ ixl_txeof(que);
if (!drbr_empty(vsi->ifp, txr->br))
- more_tx = 1;
+ ixl_mq_start_locked(ifp, txr);
IXL_TX_UNLOCK(txr);
+
+ if (more)
+ taskqueue_enqueue(que->tq, &que->task);
}
ixl_enable_intr0(hw);
@@ -713,7 +736,7 @@ ixl_intr(void *arg)
void
ixl_msix_que(void *arg)
{
- struct ixl_queue *que = arg;
+ struct ixl_queue *que = arg;
struct ixl_vsi *vsi = que->vsi;
struct i40e_hw *hw = vsi->hw;
struct tx_ring *txr = &que->txr;
@@ -846,26 +869,25 @@ ixl_set_promisc(struct ixl_vsi *vsi)
int err, mcnt = 0;
bool uni = FALSE, multi = FALSE;
- if (ifp->if_flags & IFF_ALLMULTI)
- multi = TRUE;
+ if (ifp->if_flags & IFF_PROMISC)
+ uni = multi = TRUE;
+ else if (ifp->if_flags & IFF_ALLMULTI)
+ multi = TRUE;
else { /* Need to count the multicast addresses */
struct ifmultiaddr *ifma;
if_maddr_rlock(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- if (mcnt == MAX_MULTICAST_ADDR)
- break;
- mcnt++;
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ if (mcnt == MAX_MULTICAST_ADDR) {
+ multi = TRUE;
+ break;
+ }
+ mcnt++;
}
if_maddr_runlock(ifp);
}
- if (mcnt >= MAX_MULTICAST_ADDR)
- multi = TRUE;
- if (ifp->if_flags & IFF_PROMISC)
- uni = TRUE;
-
err = i40e_aq_set_vsi_unicast_promiscuous(hw,
vsi->seid, uni, NULL, TRUE);
err = i40e_aq_set_vsi_multicast_promiscuous(hw,
@@ -965,11 +987,10 @@ ixl_del_multi(struct ixl_vsi *vsi)
ixl_del_hw_filters(vsi, mcnt);
}
-
/*********************************************************************
* Timer routine
*
- * This routine checks for link status,updates statistics,
+ * This routine checks for link status, updates statistics,
* and runs the watchdog check.
*
* Only runs when the driver is configured UP and RUNNING.
@@ -980,14 +1001,6 @@ void
ixl_local_timer(void *arg)
{
struct ixl_pf *pf = arg;
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- device_t dev = pf->dev;
- struct tx_ring *txr;
- int hung = 0;
- u32 mask;
- s32 timer, new_timer;
IXL_PF_LOCK_ASSERT(pf);
@@ -997,44 +1010,11 @@ ixl_local_timer(void *arg)
/* Update stats */
ixl_update_stats_counters(pf);
- /* Check status of the queues */
- mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
- I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- txr = &que->txr;
- timer = atomic_load_acq_32(&txr->watchdog_timer);
- if (timer > 0) {
- new_timer = timer - hz;
- if (new_timer <= 0) {
- atomic_store_rel_32(&txr->watchdog_timer, -1);
- device_printf(dev, "WARNING: queue %d "
- "appears to be hung!\n", que->me);
- ++hung;
- } else {
- /*
- * If this fails, that means something in the TX path has updated
- * the watchdog, so it means the TX path is still working and
- * the watchdog doesn't need to countdown.
- */
- atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
- /* Any queues with outstanding work get a sw irq */
- wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
- }
- }
- }
- /* Reset when a queue shows hung */
- if (hung)
- goto hung;
+ /* Increment stat when a queue shows hung */
+ if (ixl_queue_hang_check(&pf->vsi))
+ pf->watchdog_events++;
callout_reset(&pf->timer, hz, ixl_local_timer, pf);
- return;
-
-hung:
- device_printf(dev, "WARNING: Resetting!\n");
- pf->watchdog_events++;
- ixl_init_locked(pf);
}
void
@@ -1042,13 +1022,29 @@ ixl_link_up_msg(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
struct ifnet *ifp = pf->vsi.ifp;
+ char *req_fec_string, *neg_fec_string;
+ u8 fec_abilities;
+
+ fec_abilities = hw->phy.link_info.req_fec_info;
+ /* If both RS and KR are requested, only show RS */
+ if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
+ req_fec_string = ixl_fec_string[0];
+ else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
+ req_fec_string = ixl_fec_string[1];
+ else
+ req_fec_string = ixl_fec_string[2];
- log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, FEC: %s, Autoneg: %s, Flow Control: %s\n",
+ if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
+ neg_fec_string = ixl_fec_string[0];
+ else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
+ neg_fec_string = ixl_fec_string[1];
+ else
+ neg_fec_string = ixl_fec_string[2];
+
+ log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
ifp->if_xname,
ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
- (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) ?
- "Clause 74 BASE-R FEC" : (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) ?
- "Clause 108 RS-FEC" : "None",
+ req_fec_string, neg_fec_string,
(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
(hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
@@ -1072,9 +1068,16 @@ ixl_update_link_status(struct ixl_pf *pf)
if (pf->link_up) {
if (vsi->link_active == FALSE) {
vsi->link_active = TRUE;
+#if __FreeBSD_version >= 1100000
ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed);
+#else
+ if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->link_speed));
+#endif
if_link_state_change(ifp, LINK_STATE_UP);
ixl_link_up_msg(pf);
+#ifdef PCI_IOV
+ ixl_broadcast_link_state(pf);
+#endif
}
} else { /* Link down */
if (vsi->link_active == TRUE) {
@@ -1082,10 +1085,11 @@ ixl_update_link_status(struct ixl_pf *pf)
device_printf(dev, "Link is Down\n");
if_link_state_change(ifp, LINK_STATE_DOWN);
vsi->link_active = FALSE;
+#ifdef PCI_IOV
+ ixl_broadcast_link_state(pf);
+#endif
}
}
-
- return;
}
/*********************************************************************
@@ -1348,37 +1352,6 @@ ixl_setup_queue_msix(struct ixl_vsi *vsi)
}
/*
- * When used in a virtualized environment PCI BUSMASTER capability may not be set
- * so explicity set it here and rewrite the ENABLE in the MSIX control register
- * at this point to cause the host to successfully initialize us.
- */
-void
-ixl_set_busmaster(device_t dev)
-{
- u16 pci_cmd_word;
-
- pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
- pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
- pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
-}
-
-/*
- * rewrite the ENABLE in the MSIX control register
- * to cause the host to successfully initialize us.
- */
-void
-ixl_set_msix_enable(device_t dev)
-{
- int msix_ctrl, rid;
-
- pci_find_cap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
-}
-
-/*
* Allocate MSI/X vectors from the OS.
* Returns 0 for legacy, 1 for MSI, >1 for MSIX.
*/
@@ -1387,10 +1360,15 @@ ixl_init_msix(struct ixl_pf *pf)
{
device_t dev = pf->dev;
struct i40e_hw *hw = &pf->hw;
+#ifdef IXL_IW
+#if __FreeBSD_version >= 1100000
+ cpuset_t cpu_set;
+#endif
+#endif
int auto_max_queues;
int rid, want, vectors, queues, available;
#ifdef IXL_IW
- int iw_want, iw_vectors;
+ int iw_want=0, iw_vectors;
pf->iw_msix = 0;
#endif
@@ -1399,9 +1377,6 @@ ixl_init_msix(struct ixl_pf *pf)
if (!pf->enable_msix)
goto no_msix;
- /* Ensure proper operation in virtualized environment */
- ixl_set_busmaster(dev);
-
/* First try MSI/X */
rid = PCIR_BAR(IXL_MSIX_BAR);
pf->msix_mem = bus_alloc_resource_any(dev,
@@ -1416,6 +1391,7 @@ ixl_init_msix(struct ixl_pf *pf)
available = pci_msix_count(dev);
if (available < 2) {
/* system has msix disabled (0), or only one vector (1) */
+ device_printf(pf->dev, "Less than two MSI-X vectors available\n");
bus_release_resource(dev, SYS_RES_MEMORY,
rid, pf->msix_mem);
pf->msix_mem = NULL;
@@ -1470,9 +1446,20 @@ ixl_init_msix(struct ixl_pf *pf)
}
#ifdef IXL_IW
- if (ixl_enable_iwarp) {
- /* iWARP wants additional vector for CQP */
- iw_want = mp_ncpus + 1;
+ if (ixl_enable_iwarp && hw->func_caps.iwarp) {
+#if __FreeBSD_version >= 1100000
+ if(bus_get_cpus(dev, INTR_CPUS, sizeof(cpu_set), &cpu_set) == 0)
+ {
+ iw_want = min(CPU_COUNT(&cpu_set), IXL_IW_MAX_MSIX);
+ }
+#endif
+ if(!iw_want)
+ iw_want = min(mp_ncpus, IXL_IW_MAX_MSIX);
+ if(ixl_limit_iwarp_msix > 0)
+ iw_want = min(iw_want, ixl_limit_iwarp_msix);
+ else
+ iw_want = min(iw_want, 1);
+
available -= vectors;
if (available > 0) {
iw_vectors = (available >= iw_want) ?
@@ -1489,8 +1476,13 @@ ixl_init_msix(struct ixl_pf *pf)
"Using MSIX interrupts with %d vectors\n", vectors);
pf->msix = vectors;
#ifdef IXL_IW
- if (ixl_enable_iwarp)
+ if (ixl_enable_iwarp && hw->func_caps.iwarp)
+ {
pf->iw_msix = iw_vectors;
+ device_printf(pf->dev,
+ "Reserving %d MSIX interrupts for iWARP CEQ and AEQ\n",
+ iw_vectors);
+ }
#endif
pf->vsi.num_queues = queues;
@@ -1547,6 +1539,7 @@ ixl_configure_intr0_msix(struct ixl_pf *pf)
I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
I40E_PFINT_ICR0_ENA_VFLR_MASK |
+ I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
@@ -1627,8 +1620,6 @@ ixl_configure_legacy(struct ixl_pf *pf)
| I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
| I40E_PFINT_ICR0_ENA_GRST_MASK
| I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
- | I40E_PFINT_ICR0_ENA_GPIO_MASK
- | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
| I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
| I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
| I40E_PFINT_ICR0_ENA_VFLR_MASK
@@ -1646,7 +1637,7 @@ ixl_configure_legacy(struct ixl_pf *pf)
/* Associate the queue pair to the vector and enable the q int */
reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
| (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
- | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+ | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
wr32(hw, I40E_QINT_RQCTL(0), reg);
reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
@@ -1671,6 +1662,8 @@ ixl_allocate_pci_resources(struct ixl_pf *pf)
device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
return (ENXIO);
}
+ /* Ensure proper PCI device operation */
+ ixl_set_busmaster(dev);
/* Save off the PCI information */
hw->vendor_id = pci_get_vendor(dev);
@@ -1844,7 +1837,7 @@ ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
|| phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
@@ -1867,7 +1860,11 @@ ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_UNKNOWN, 0, NULL);
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
+ if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
+ if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}
/*********************************************************************
@@ -1907,7 +1904,7 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
ifp->if_qflush = ixl_qflush;
- ifp->if_snd.ifq_maxlen = que->num_desc - 2;
+ ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2;
vsi->max_frame_size =
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
@@ -1916,7 +1913,7 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
/* Set TSO limits */
ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
- ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
+ ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE;
/*
* Tell the upper layer(s) we support long frames.
@@ -1968,12 +1965,16 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
device_printf(dev,
"Error getting supported media types, err %d,"
" AQ error %d\n", aq_error, hw->aq.asq_last_status);
- return (0);
- }
- pf->supported_speeds = abilities.link_speed;
- ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);
+ } else {
+ pf->supported_speeds = abilities.link_speed;
+#if __FreeBSD_version >= 1100000
+ ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);
+#else
+ if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
+#endif
- ixl_add_ifmedia(vsi, hw->phy.phy_types);
+ ixl_add_ifmedia(vsi, hw->phy.phy_types);
+ }
/* Use autoselect media by default */
ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
@@ -2001,6 +2002,7 @@ ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
/* Print out message if an unqualified module is found */
if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ (pf->advertised_speed) &&
(!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
(!(status->link_info & I40E_AQ_LINK_UP)))
device_printf(dev, "Link failed because "
@@ -2157,17 +2159,27 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
u16 size;
/* Setup the HMC TX Context */
- size = que->num_desc * sizeof(struct i40e_tx_desc);
- memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
+ size = que->num_tx_desc * sizeof(struct i40e_tx_desc);
+ bzero(&tctx, sizeof(tctx));
tctx.new_context = 1;
tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
- tctx.qlen = que->num_desc;
- tctx.fc_ena = 0;
- tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
- /* Enable HEAD writeback */
- tctx.head_wb_ena = 1;
- tctx.head_wb_addr = txr->dma.pa +
- (que->num_desc * sizeof(struct i40e_tx_desc));
+ tctx.qlen = que->num_tx_desc;
+ tctx.fc_ena = 0; /* Disable FCoE */
+ /*
+ * This value needs to pulled from the VSI that this queue
+ * is assigned to. Index into array is traffic class.
+ */
+ tctx.rdylist = vsi->info.qs_handle[0];
+ /*
+ * Set these to enable Head Writeback
+ * - Address is last entry in TX ring (reserved for HWB index)
+ * Leave these as 0 for Descriptor Writeback
+ */
+ if (vsi->enable_head_writeback) {
+ tctx.head_wb_ena = 1;
+ tctx.head_wb_addr = txr->dma.pa +
+ (que->num_tx_desc * sizeof(struct i40e_tx_desc));
+ }
tctx.rdylist_act = 0;
err = i40e_clear_lan_tx_queue_context(hw, i);
if (err) {
@@ -2205,20 +2217,20 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
vsi->max_frame_size : max_rxmax;
rctx.dtype = 0;
- rctx.dsize = 1; /* do 32byte descriptors */
- rctx.hsplit_0 = 0; /* no HDR split initially */
+ rctx.dsize = 1; /* do 32byte descriptors */
+ rctx.hsplit_0 = 0; /* no header split */
rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
- rctx.qlen = que->num_desc;
+ rctx.qlen = que->num_rx_desc;
rctx.tphrdesc_ena = 1;
rctx.tphwdesc_ena = 1;
- rctx.tphdata_ena = 0;
- rctx.tphhead_ena = 0;
- rctx.lrxqthresh = 2;
+ rctx.tphdata_ena = 0; /* Header Split related */
+ rctx.tphhead_ena = 0; /* Header Split related */
+ rctx.lrxqthresh = 2; /* Interrupt at <128 desc avail */
rctx.crcstrip = 1;
rctx.l2tsel = 1;
- rctx.showiv = 1;
- rctx.fc_ena = 0;
- rctx.prefena = 1;
+ rctx.showiv = 1; /* Strip inner VLAN header */
+ rctx.fc_ena = 0; /* Disable FCoE */
+ rctx.prefena = 1; /* Prefetch descriptors */
err = i40e_clear_lan_rx_queue_context(hw, i);
if (err) {
@@ -2245,26 +2257,22 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
} else
#endif /* DEV_NETMAP */
- wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
+ wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_rx_desc - 1);
}
return (err);
}
-/*********************************************************************
- *
- * Free all VSI structs.
- *
- **********************************************************************/
+
+
void
-ixl_free_vsi(struct ixl_vsi *vsi)
+ixl_vsi_free_queues(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct ixl_queue *que = vsi->queues;
- /* Free station queues */
- if (!vsi->queues)
- goto free_filters;
+ if (NULL == vsi->queues)
+ return;
for (int i = 0; i < vsi->num_queues; i++, que++) {
struct tx_ring *txr = &que->txr;
@@ -2273,6 +2281,8 @@ ixl_free_vsi(struct ixl_vsi *vsi)
if (!mtx_initialized(&txr->mtx)) /* uninitialized */
continue;
IXL_TX_LOCK(txr);
+ if (txr->br)
+ buf_ring_free(txr->br, M_DEVBUF);
ixl_free_que_tx(que);
if (txr->base)
i40e_free_dma_mem(&pf->hw, &txr->dma);
@@ -2288,9 +2298,23 @@ ixl_free_vsi(struct ixl_vsi *vsi)
IXL_RX_UNLOCK(rxr);
IXL_RX_LOCK_DESTROY(rxr);
}
- free(vsi->queues, M_DEVBUF);
+}
+
+
+/*********************************************************************
+ *
+ * Free all VSI structs.
+ *
+ **********************************************************************/
+void
+ixl_free_vsi(struct ixl_vsi *vsi)
+{
+
+ /* Free station queues */
+ ixl_vsi_free_queues(vsi);
+ if (vsi->queues)
+ free(vsi->queues, M_DEVBUF);
-free_filters:
/* Free VSI filter list */
ixl_free_mac_filters(vsi);
}
@@ -2311,17 +2335,18 @@ ixl_free_mac_filters(struct ixl_vsi *vsi)
* Fill out fields in queue struct and setup tx/rx memory and structs
*/
static int
-ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
+ixl_vsi_setup_queue(struct ixl_vsi *vsi, struct ixl_queue *que, int index)
{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
device_t dev = pf->dev;
struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
struct tx_ring *txr = &que->txr;
struct rx_ring *rxr = &que->rxr;
int error = 0;
int rsize, tsize;
- que->num_desc = pf->ringsz;
+ que->num_tx_desc = vsi->num_tx_desc;
+ que->num_rx_desc = vsi->num_rx_desc;
que->me = index;
que->vsi = vsi;
@@ -2332,16 +2357,27 @@ ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
device_get_nameunit(dev), que->me);
mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
- /* Create the TX descriptor ring */
- tsize = roundup2((que->num_desc *
- sizeof(struct i40e_tx_desc)) +
- sizeof(u32), DBA_ALIGN);
+ /*
+ * Create the TX descriptor ring
+ *
+ * In Head Writeback mode, the descriptor ring is one bigger
+ * than the number of descriptors for space for the HW to
+ * write back index of last completed descriptor.
+ */
+ if (vsi->enable_head_writeback) {
+ tsize = roundup2((que->num_tx_desc *
+ sizeof(struct i40e_tx_desc)) +
+ sizeof(u32), DBA_ALIGN);
+ } else {
+ tsize = roundup2((que->num_tx_desc *
+ sizeof(struct i40e_tx_desc)), DBA_ALIGN);
+ }
if (i40e_allocate_dma_mem(hw,
&txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
device_printf(dev,
"Unable to allocate TX Descriptor memory\n");
error = ENOMEM;
- goto fail;
+ goto err_destroy_tx_mtx;
}
txr->base = (struct i40e_tx_desc *)txr->dma.va;
bzero((void *)txr->base, tsize);
@@ -2350,7 +2386,7 @@ ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
device_printf(dev,
"Critical Failure setting up TX structures\n");
error = ENOMEM;
- goto fail;
+ goto err_free_tx_dma;
}
/* Allocate a buf ring */
txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
@@ -2359,10 +2395,10 @@ ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
device_printf(dev,
"Critical Failure setting up TX buf ring\n");
error = ENOMEM;
- goto fail;
+ goto err_free_tx_data;
}
- rsize = roundup2(que->num_desc *
+ rsize = roundup2(que->num_rx_desc *
sizeof(union i40e_rx_desc), DBA_ALIGN);
rxr->que = que;
rxr->tail = I40E_QRX_TAIL(que->me);
@@ -2377,7 +2413,7 @@ ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
device_printf(dev,
"Unable to allocate RX Descriptor memory\n");
error = ENOMEM;
- goto fail;
+ goto err_destroy_rx_mtx;
}
rxr->base = (union i40e_rx_desc *)rxr->dma.va;
bzero((void *)rxr->base, rsize);
@@ -2386,27 +2422,43 @@ ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
device_printf(dev,
"Critical Failure setting up receive structs\n");
error = ENOMEM;
- goto fail;
+ goto err_free_rx_dma;
}
return (0);
-fail:
- if (rxr->base)
- i40e_free_dma_mem(&pf->hw, &rxr->dma);
- if (mtx_initialized(&rxr->mtx))
- mtx_destroy(&rxr->mtx);
- if (txr->br) {
- buf_ring_free(txr->br, M_DEVBUF);
- txr->br = NULL;
- }
- if (txr->base)
- i40e_free_dma_mem(&pf->hw, &txr->dma);
- if (mtx_initialized(&txr->mtx))
- mtx_destroy(&txr->mtx);
+err_free_rx_dma:
+ i40e_free_dma_mem(&pf->hw, &rxr->dma);
+err_destroy_rx_mtx:
+ mtx_destroy(&rxr->mtx);
+ /* err_free_tx_buf_ring */
+ buf_ring_free(txr->br, M_DEVBUF);
+err_free_tx_data:
+ ixl_free_que_tx(que);
+err_free_tx_dma:
+ i40e_free_dma_mem(&pf->hw, &txr->dma);
+err_destroy_tx_mtx:
+ mtx_destroy(&txr->mtx);
+
+ return (error);
+}
+
+int
+ixl_vsi_setup_queues(struct ixl_vsi *vsi)
+{
+ struct ixl_queue *que;
+ int error = 0;
+
+ for (int i = 0; i < vsi->num_queues; i++) {
+ que = &vsi->queues[i];
+ error = ixl_vsi_setup_queue(vsi, que, i);
+ if (error)
+ break;
+ }
return (error);
}
+
/*********************************************************************
*
* Allocate memory for the VSI (virtual station interface) and their
@@ -2419,7 +2471,6 @@ ixl_setup_stations(struct ixl_pf *pf)
{
device_t dev = pf->dev;
struct ixl_vsi *vsi;
- struct ixl_queue *que;
int error = 0;
vsi = &pf->vsi;
@@ -2429,24 +2480,22 @@ ixl_setup_stations(struct ixl_pf *pf)
vsi->num_vlans = 0;
vsi->back = pf;
+ if (pf->msix > 1)
+ vsi->flags |= IXL_FLAGS_USES_MSIX;
+
/* Get memory for the station queues */
if (!(vsi->queues =
(struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate queue memory\n");
error = ENOMEM;
- return (error);
+ goto ixl_setup_stations_err;
}
/* Then setup each queue */
- for (int i = 0; i < vsi->num_queues; i++) {
- que = &vsi->queues[i];
- error = ixl_setup_queue(que, pf, i);
- if (error)
- return (error);
- }
-
- return (0);
+ error = ixl_vsi_setup_queues(vsi);
+ixl_setup_stations_err:
+ return (error);
}
/*
@@ -2749,10 +2798,10 @@ ixl_add_hw_stats(struct ixl_pf *pf)
char queue_namebuf[QUEUE_NAME_LEN];
/* Driver statistics */
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
CTLFLAG_RD, &pf->watchdog_events,
"Watchdog timeouts");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
CTLFLAG_RD, &pf->admin_irq,
"Admin Queue IRQ Handled");
@@ -2809,6 +2858,15 @@ ixl_add_hw_stats(struct ixl_pf *pf)
CTLFLAG_RD, &(txr->itr), 0,
"Queue Tx ITR Interval");
#ifdef IXL_DEBUG
+ SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "txr_watchdog",
+ CTLFLAG_RD, &(txr->watchdog_timer), 0,
+ "Ticks before watchdog timer causes interface reinit");
+ SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_avail",
+ CTLFLAG_RD, &(txr->next_avail), 0,
+ "Next TX descriptor to be used");
+ SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_to_clean",
+ CTLFLAG_RD, &(txr->next_to_clean), 0,
+ "Next TX descriptor to be cleaned");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
CTLFLAG_RD, &(rxr->not_done),
"Queue Rx Descriptors not Done");
@@ -2818,16 +2876,16 @@ ixl_add_hw_stats(struct ixl_pf *pf)
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
CTLFLAG_RD, &(rxr->next_check), 0,
"Queue Rx Descriptors not Done");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
+ CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
+ sizeof(struct ixl_queue),
+ ixl_sysctl_qrx_tail_handler, "IU",
+ "Queue Receive Descriptor Tail");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
sizeof(struct ixl_queue),
ixl_sysctl_qtx_tail_handler, "IU",
"Queue Transmit Descriptor Tail");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
- CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
- sizeof(struct ixl_queue),
- ixl_sysctl_qrx_tail_handler, "IU",
- "Queue Receive Descriptor Tail");
#endif
}
@@ -2935,29 +2993,25 @@ ixl_set_rss_key(struct ixl_pf *pf)
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
device_t dev = pf->dev;
+ u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
enum i40e_status_code status;
-#ifdef RSS
- u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
-#else
- u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687,
- 0x183cfd8c, 0xce880440, 0x580cbc3c,
- 0x35897377, 0x328b25e1, 0x4fa98922,
- 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
- 0x0, 0x0, 0x0};
-#endif
#ifdef RSS
/* Fetch the configured RSS key */
rss_getkey((uint8_t *) &rss_seed);
+#else
+ ixl_get_default_rss_key(rss_seed);
#endif
/* Fill out hash function seed */
if (hw->mac.type == I40E_MAC_X722) {
struct i40e_aqc_get_set_rss_key_data key_data;
- bcopy(rss_seed, key_data.standard_rss_key, 40);
+ bcopy(rss_seed, &key_data, 52);
status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
if (status)
- device_printf(dev, "i40e_aq_set_rss_key status %s, error %s\n",
- i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ device_printf(dev,
+ "i40e_aq_set_rss_key status %s, error %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
@@ -3016,10 +3070,7 @@ ixl_set_rss_hlut(struct ixl_pf *pf)
u32 lut = 0;
enum i40e_status_code status;
- if (hw->mac.type == I40E_MAC_X722)
- lut_entry_width = 7;
- else
- lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
+ lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
/* Populate the LUT with max no. of queues in round robin fashion */
u8 hlut_buf[512];
@@ -3330,7 +3381,7 @@ ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
** the add bit.
*/
SLIST_FOREACH(f, &vsi->ftl, next) {
- if (f->flags == flags) {
+ if ((f->flags & flags) == flags) {
b = &a[j]; // a pox on fvl long names :)
bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
if (f->vlan == IXL_VLAN_ANY) {
@@ -3391,8 +3442,13 @@ ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
if (f->flags & IXL_FILTER_DEL) {
e = &d[j]; // a pox on fvl long names :)
bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
- e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ if (f->vlan == IXL_VLAN_ANY) {
+ e->vlan_tag = 0;
+ e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ } else {
+ e->vlan_tag = f->vlan;
+ }
/* delete entry from vsi list */
SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
free(f, M_DEVBUF);
@@ -3445,7 +3501,7 @@ ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
break;
- i40e_msec_delay(10);
+ i40e_usec_delay(10);
}
if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
device_printf(pf->dev, "TX queue %d still disabled!\n",
@@ -3479,7 +3535,7 @@ ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
break;
- i40e_msec_delay(10);
+ i40e_usec_delay(10);
}
if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
device_printf(pf->dev, "RX queue %d still disabled!\n",
@@ -3519,6 +3575,9 @@ ixl_enable_rings(struct ixl_vsi *vsi)
return (error);
}
+/*
+ * Returns error on first ring that is detected hung.
+ */
int
ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
@@ -3551,6 +3610,9 @@ ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
return (error);
}
+/*
+ * Returns error on first ring that is detected hung.
+ */
int
ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
@@ -3661,14 +3723,14 @@ ixl_handle_mdd_event(struct ixl_pf *pf)
if (reg & I40E_PF_MDET_TX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
device_printf(dev,
- "MDD TX event is for this function!");
+ "MDD TX event is for this function!\n");
pf_mdd_detected = true;
}
reg = rd32(hw, I40E_PF_MDET_RX);
if (reg & I40E_PF_MDET_RX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
device_printf(dev,
- "MDD RX event is for this function!");
+ "MDD RX event is for this function!\n");
pf_mdd_detected = true;
}
}
@@ -3923,81 +3985,185 @@ ixl_update_stats_counters(struct ixl_pf *pf)
}
int
-ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
+ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
device_t dev = pf->dev;
- bool is_up = false;
int error = 0;
- is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
-
/* Teardown */
if (is_up)
ixl_stop(pf);
+
+ ixl_teardown_queue_msix(vsi);
+
error = i40e_shutdown_lan_hmc(hw);
if (error)
device_printf(dev,
"Shutdown LAN HMC failed with code %d\n", error);
+
ixl_disable_intr0(hw);
ixl_teardown_adminq_msix(pf);
+
error = i40e_shutdown_adminq(hw);
if (error)
device_printf(dev,
"Shutdown Admin queue failed with code %d\n", error);
+ callout_drain(&pf->timer);
+
+ /* Free ring buffers, locks and filters */
+ ixl_vsi_free_queues(vsi);
+
+ /* Free VSI filter list */
+ ixl_free_mac_filters(vsi);
+
+ ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
+
+ return (error);
+}
+
+int
+ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = pf->dev;
+ int error = 0;
+
+ device_printf(dev, "Rebuilding driver state...\n");
+
+ error = i40e_pf_reset(hw);
+ if (error) {
+ device_printf(dev, "PF reset failure %s\n",
+ i40e_stat_str(hw, error));
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+
/* Setup */
error = i40e_init_adminq(hw);
if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
}
- error = ixl_setup_adminq_msix(pf);
+
+ i40e_clear_pxe_mode(hw);
+
+ error = ixl_get_hw_capabilities(pf);
if (error) {
- device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
- error);
+ device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
}
- ixl_configure_intr0_msix(pf);
- ixl_enable_intr0(hw);
+
error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
if (error) {
device_printf(dev, "init_lan_hmc failed: %d\n", error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
}
+
error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
if (error) {
device_printf(dev, "configure_lan_hmc failed: %d\n", error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
}
+
+ /* reserve a contiguous allocation for the PF's VSI */
+ error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag);
+ if (error) {
+ device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
+ error);
+ /* TODO: error handling */
+ }
+
+ device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
+ pf->qtag.num_allocated, pf->qtag.num_active);
+
+ error = ixl_switch_config(pf);
+ if (error) {
+ device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
+ error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+
+ if (ixl_vsi_setup_queues(vsi)) {
+ device_printf(dev, "setup queues failed!\n");
+ error = ENOMEM;
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+
+ if (pf->msix > 1) {
+ error = ixl_setup_adminq_msix(pf);
+ if (error) {
+ device_printf(dev, "ixl_setup_adminq_msix() error: %d\n",
+ error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+
+ ixl_configure_intr0_msix(pf);
+ ixl_enable_intr0(hw);
+
+ error = ixl_setup_queue_msix(vsi);
+ if (error) {
+ device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
+ error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+ } else {
+ error = ixl_setup_legacy(pf);
+ if (error) {
+ device_printf(dev, "ixl_setup_legacy() error: %d\n",
+ error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+ }
+
+ /* Determine link state */
+ if (ixl_attach_get_link_status(pf)) {
+ error = EINVAL;
+ /* TODO: error handling */
+ }
+
+ i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
+ ixl_get_fw_lldp_status(pf);
+
if (is_up)
ixl_init(pf);
+ device_printf(dev, "Rebuilding driver state done.\n");
return (0);
+
+ixl_rebuild_hw_structs_after_reset_err:
+ device_printf(dev, "Reload the driver to recover\n");
+ return (error);
}
void
ixl_handle_empr_reset(struct ixl_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct i40e_hw *hw = &pf->hw;
+ bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
int count = 0;
u32 reg;
+ ixl_prepare_for_reset(pf, is_up);
+
/* Typically finishes within 3-4 seconds */
while (count++ < 100) {
reg = rd32(hw, I40E_GLGEN_RSTAT)
- & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
+ & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
if (reg)
i40e_msec_delay(100);
else
break;
}
ixl_dbg(pf, IXL_DBG_INFO,
- "EMPR reset wait count: %d\n", count);
+ "EMPR reset wait count: %d\n", count);
- device_printf(dev, "Rebuilding driver state...\n");
- ixl_rebuild_hw_structs_after_reset(pf);
- device_printf(dev, "Rebuilding driver state done.\n");
+ ixl_rebuild_hw_structs_after_reset(pf, is_up);
atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
}
@@ -4269,15 +4435,19 @@ ixl_add_device_sysctls(struct ixl_pf *pf)
/* Set up sysctls */
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
- pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
+ pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
- pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
+ pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_current_speed, "A", "Current Port Speed");
+ pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
@@ -4306,38 +4476,45 @@ ixl_add_device_sysctls(struct ixl_pf *pf)
OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
&pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
+ SYSCTL_ADD_INT(ctx, ctx_list,
+ OID_AUTO, "tx_ring_size", CTLFLAG_RD,
+ &pf->vsi.num_tx_desc, 0, "TX ring size");
+
+ SYSCTL_ADD_INT(ctx, ctx_list,
+ OID_AUTO, "rx_ring_size", CTLFLAG_RD,
+ &pf->vsi.num_rx_desc, 0, "RX ring size");
+
/* Add FEC sysctls for 25G adapters */
- /*
- * XXX: These settings can be changed, but that isn't supported,
- * so these are read-only for now.
- */
- if (hw->device_id == I40E_DEV_ID_25G_B
- || hw->device_id == I40E_DEV_ID_25G_SFP28) {
+ if (i40e_is_25G_device(hw->device_id)) {
fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
fec_list = SYSCTL_CHILDREN(fec_node);
SYSCTL_ADD_PROC(ctx, fec_list,
- OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RD,
+ OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
SYSCTL_ADD_PROC(ctx, fec_list,
- OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RD,
+ OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
SYSCTL_ADD_PROC(ctx, fec_list,
- OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RD,
+ OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
SYSCTL_ADD_PROC(ctx, fec_list,
- OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RD,
+ OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
SYSCTL_ADD_PROC(ctx, fec_list,
- OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RD,
+ OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
}
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
+ pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
+
/* Add sysctls meant to print debug information, but don't list them
* in "sysctl -a" output. */
debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
@@ -4388,6 +4565,10 @@ ixl_add_device_sysctls(struct ixl_pf *pf)
OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
+
if (pf->has_i2c) {
SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
@@ -4430,7 +4611,7 @@ ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
** 3 - full
*/
int
-ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
+ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
@@ -4513,7 +4694,7 @@ ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
}
int
-ixl_current_speed(SYSCTL_HANDLER_ARGS)
+ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
@@ -4527,6 +4708,10 @@ ixl_current_speed(SYSCTL_HANDLER_ARGS)
return (error);
}
+/*
+ * Converts 8-bit speeds value to and from sysctl flags and
+ * Admin Queue flags.
+ */
static u8
ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
{
@@ -4551,7 +4736,7 @@ ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
}
int
-ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
+ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
@@ -4572,7 +4757,10 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
/* Prepare new config */
bzero(&config, sizeof(config));
- config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
+ if (from_aq)
+ config.link_speed = speeds;
+ else
+ config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
config.phy_type = abilities.phy_type;
config.phy_type_ext = abilities.phy_type_ext;
config.abilities = abilities.abilities
@@ -4580,6 +4768,7 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
/* Do aq command & restart link */
aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
@@ -4595,6 +4784,25 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
}
/*
+** Supported link speedsL
+** Flags:
+** 0x1 - 100 Mb
+** 0x2 - 1G
+** 0x4 - 10G
+** 0x8 - 20G
+** 0x10 - 25G
+** 0x20 - 40G
+*/
+static int
+ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
+
+ return sysctl_handle_int(oidp, NULL, supported, req);
+}
+
+/*
** Control link advertise speed:
** Flags:
** 0x1 - advertise 100 Mb
@@ -4607,10 +4815,9 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
** Set to 0 to disable link
*/
int
-ixl_set_advertise(SYSCTL_HANDLER_ARGS)
+ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
u8 converted_speeds;
int requested_ls = 0;
@@ -4621,18 +4828,16 @@ ixl_set_advertise(SYSCTL_HANDLER_ARGS)
error = sysctl_handle_int(oidp, &requested_ls, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
- /* Check if changing speeds is supported */
- switch (hw->device_id) {
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- device_printf(dev, "Changing advertised speeds not supported"
- " on this device.\n");
+
+ /* Error out if bits outside of possible flag range are set */
+ if ((requested_ls & ~((u8)0x3F)) != 0) {
+ device_printf(dev, "Input advertised speed out of range; "
+ "valid flags are: 0x%02x\n",
+ ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
return (EINVAL);
}
- if (requested_ls < 0 || requested_ls > 0xff) {
- }
- /* Check for valid value */
+ /* Check if adapter supports input value */
converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
device_printf(dev, "Invalid advertised speed; "
@@ -4641,7 +4846,7 @@ ixl_set_advertise(SYSCTL_HANDLER_ARGS)
return (EINVAL);
}
- error = ixl_set_advertised_speeds(pf, requested_ls);
+ error = ixl_set_advertised_speeds(pf, requested_ls, false);
if (error)
return (error);
@@ -4741,7 +4946,7 @@ ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
sbuf_finish(sbuf);
sbuf_delete(sbuf);
- return 0;
+ return (0);
}
void
@@ -4793,7 +4998,7 @@ ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
ifd->ifd_data == NULL) {
device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
__func__);
- device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n",
+ device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
__func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
device_printf(dev, "%s: data pointer: %p\n", __func__,
ifd->ifd_data);
@@ -4822,7 +5027,8 @@ ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
perrno = -EBUSY;
}
- if (status)
+ /* Let the nvmupdate report errors, show them only when debug is enabled */
+ if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
i40e_stat_str(hw, status), perrno);
@@ -4843,6 +5049,9 @@ ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
* This routine is called whenever the user queries the status of
* the interface using ifconfig.
*
+ * When adding new media types here, make sure to add them to
+ * ixl_add_ifmedia(), too.
+ *
**********************************************************************/
void
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
@@ -4852,9 +5061,13 @@ ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
struct i40e_hw *hw = &pf->hw;
INIT_DEBUGOUT("ixl_media_status: begin");
+
+ /* Don't touch PF during reset */
+ if (atomic_load_acq_int(&pf->state) & IXL_PF_STATE_EMPR_RESETTING)
+ return;
+
IXL_PF_LOCK(pf);
- hw->phy.get_link_info = TRUE;
i40e_get_link_status(hw, &pf->link_up);
ixl_update_link_status(pf);
@@ -4887,7 +5100,7 @@ ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
ifmr->ifm_active |= IFM_1000_LX;
break;
case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
- ifmr->ifm_active |= IFM_OTHER;
+ ifmr->ifm_active |= IFM_1000_T;
break;
/* 10 G */
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
@@ -4904,8 +5117,10 @@ ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
break;
case I40E_PHY_TYPE_XAUI:
case I40E_PHY_TYPE_XFI:
+ ifmr->ifm_active |= IFM_10G_TWINAX;
+ break;
case I40E_PHY_TYPE_10GBASE_AOC:
- ifmr->ifm_active |= IFM_OTHER;
+ ifmr->ifm_active |= IFM_10G_AOC;
break;
/* 25 G */
case I40E_PHY_TYPE_25GBASE_KR:
@@ -4918,7 +5133,13 @@ ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
ifmr->ifm_active |= IFM_25G_SR;
break;
case I40E_PHY_TYPE_25GBASE_LR:
- ifmr->ifm_active |= IFM_UNKNOWN;
+ ifmr->ifm_active |= IFM_25G_LR;
+ break;
+ case I40E_PHY_TYPE_25GBASE_AOC:
+ ifmr->ifm_active |= IFM_25G_AOC;
+ break;
+ case I40E_PHY_TYPE_25GBASE_ACC:
+ ifmr->ifm_active |= IFM_25G_ACC;
break;
/* 40 G */
case I40E_PHY_TYPE_40GBASE_CR4:
@@ -5240,8 +5461,8 @@ ixl_phy_type_string(u32 bit_pos, bool ext)
"XLPPI",
"40GBASE-CR4",
"10GBASE-CR1",
- "Reserved (12)",
- "Reserved (13)",
+ "SFP+ Active DA",
+ "QSFP+ Active DA",
"Reserved (14)",
"Reserved (15)",
"Reserved (16)",
@@ -5261,14 +5482,18 @@ ixl_phy_type_string(u32 bit_pos, bool ext)
"20GBASE-KR2",
"Reserved (31)"
};
- static char * ext_phy_types_str[4] = {
+ static char * ext_phy_types_str[8] = {
"25GBASE-KR",
"25GBASE-CR",
"25GBASE-SR",
- "25GBASE-LR"
+ "25GBASE-LR",
+ "25GBASE-AOC",
+ "25GBASE-ACC",
+ "Reserved (6)",
+ "Reserved (7)"
};
- if (ext && bit_pos > 3) return "Invalid_Ext";
+ if (ext && bit_pos > 7) return "Invalid_Ext";
if (bit_pos > 31) return "Invalid";
return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
@@ -5330,7 +5555,6 @@ ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
return (error);
}
- /* TODO: Add 25G types */
sbuf_printf(buf, "\n"
"PHY Type : 0x%02x<%s>\n"
"Speed : 0x%02x\n"
@@ -5429,8 +5653,8 @@ ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
abilities.phy_id[0], abilities.phy_id[1],
abilities.phy_id[2], abilities.phy_id[3],
abilities.module_type[0], abilities.module_type[1],
- abilities.module_type[2], abilities.phy_type_ext >> 5,
- abilities.phy_type_ext & 0x1F,
+ abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
+ abilities.fec_cfg_curr_mod_ext_info & 0x1F,
abilities.ext_comp_code);
error = sbuf_finish(buf);
@@ -5744,21 +5968,23 @@ ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
return (ENOMEM);
}
+ bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
+
sbuf_cat(buf, "\n");
if (hw->mac.type == I40E_MAC_X722) {
- bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
if (status)
device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
- sbuf_printf(buf, "%40D", (u_char *)key_data.standard_rss_key, "");
} else {
for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
- sbuf_printf(buf, "%4D", (u_char *)&reg, "");
+ bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
}
}
+ ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
+
error = sbuf_finish(buf);
if (error)
device_printf(dev, "Error finishing sbuf: %d\n", error);
@@ -5767,6 +5993,52 @@ ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
return (error);
}
+static void
+ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
+{
+ int i, j, k, width;
+ char c;
+
+ if (length < 1 || buf == NULL) return;
+
+ int byte_stride = 16;
+ int lines = length / byte_stride;
+ int rem = length % byte_stride;
+ if (rem > 0)
+ lines++;
+
+ for (i = 0; i < lines; i++) {
+ width = (rem > 0 && i == lines - 1)
+ ? rem : byte_stride;
+
+ sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
+
+ for (j = 0; j < width; j++)
+ sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
+
+ if (width < byte_stride) {
+ for (k = 0; k < (byte_stride - width); k++)
+ sbuf_printf(sb, " ");
+ }
+
+ if (!text) {
+ sbuf_printf(sb, "\n");
+ continue;
+ }
+
+ for (j = 0; j < width; j++) {
+ c = (char)buf[i * byte_stride + j];
+ if (c < 32 || c > 126)
+ sbuf_printf(sb, ".");
+ else
+ sbuf_printf(sb, "%c", c);
+
+ if (j == width - 1)
+ sbuf_printf(sb, "\n");
+ }
+ }
+}
+
static int
ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
{
@@ -5785,20 +6057,20 @@ ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
return (ENOMEM);
}
+ bzero(hlut, sizeof(hlut));
sbuf_cat(buf, "\n");
if (hw->mac.type == I40E_MAC_X722) {
- bzero(hlut, sizeof(hlut));
status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
if (status)
device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
- sbuf_printf(buf, "%512D", (u_char *)hlut, "");
} else {
for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
reg = rd32(hw, I40E_PFQF_HLUT(i));
- sbuf_printf(buf, "%4D", (u_char *)&reg, "");
+ bcopy(&reg, &hlut[i << 2], 4);
}
}
+ ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
error = sbuf_finish(buf);
if (error)
@@ -5958,7 +6230,7 @@ ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abi
return (EIO);
}
- *is_set = !!(abilities->phy_type_ext & bit_pos);
+ *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
return (0);
}
@@ -5973,10 +6245,10 @@ ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abi
/* Set new PHY config */
memset(&config, 0, sizeof(config));
- config.fec_config = abilities->phy_type_ext & ~(bit_pos);
+ config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
if (set)
config.fec_config |= bit_pos;
- if (config.fec_config != abilities->phy_type_ext) {
+ if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
config.phy_type = abilities->phy_type;
config.phy_type_ext = abilities->phy_type_ext;
@@ -6005,7 +6277,7 @@ ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
int mode, error = 0;
struct i40e_aq_get_phy_abilities_resp abilities;
- error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, &mode);
+ error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
if (error)
return (error);
/* Read in new mode */
@@ -6023,7 +6295,7 @@ ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
int mode, error = 0;
struct i40e_aq_get_phy_abilities_resp abilities;
- error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, &mode);
+ error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
if (error)
return (error);
/* Read in new mode */
@@ -6041,7 +6313,7 @@ ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
int mode, error = 0;
struct i40e_aq_get_phy_abilities_resp abilities;
- error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, &mode);
+ error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
if (error)
return (error);
/* Read in new mode */
@@ -6059,7 +6331,7 @@ ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
int mode, error = 0;
struct i40e_aq_get_phy_abilities_resp abilities;
- error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, &mode);
+ error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
if (error)
return (error);
/* Read in new mode */
@@ -6077,7 +6349,7 @@ ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
int mode, error = 0;
struct i40e_aq_get_phy_abilities_resp abilities;
- error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, &mode);
+ error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
if (error)
return (error);
/* Read in new mode */
@@ -6088,3 +6360,192 @@ ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
}
+/*
+ * Sysctl handler: read the firmware's internal debug dump (cluster 1) via
+ * the AQ debug-dump command and emit it as a hex dump, one section per
+ * internal table, into the sysctl output buffer.
+ *
+ * Returns 0 on success or an errno from sbuf_finish()/allocation failure.
+ * AQ errors are logged but the partially-built output is still returned.
+ */
+static int
+ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
+{
+	struct ixl_pf *pf = (struct ixl_pf *)arg1;
+	struct i40e_hw *hw = &pf->hw;
+	device_t dev = pf->dev;
+	struct sbuf *buf;
+	int error = 0;
+	enum i40e_status_code status;
+
+	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+	if (!buf) {
+		device_printf(dev, "Could not allocate sbuf for output.\n");
+		return (ENOMEM);
+	}
+
+	u8 *final_buff;
+	/* This amount is only necessary if reading the entire cluster into memory */
+#define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
+	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
+	/* M_WAITOK sleeps until memory is available, so this check is only
+	 * defensive; it cannot fire on FreeBSD's malloc(9). */
+	if (final_buff == NULL) {
+		device_printf(dev, "Could not allocate memory for output.\n");
+		goto out;
+	}
+	int final_buff_len = 0;
+
+	u8 cluster_id = 1;
+	bool more = true;
+
+	u8 dump_buf[4096];
+	u16 curr_buff_size = 4096;
+	u8 curr_next_table = 0;
+	u32 curr_next_index = 0;
+
+	u16 ret_buff_size;
+	u8 ret_next_table;
+	u32 ret_next_index;
+
+	sbuf_cat(buf, "\n");
+
+	while (more) {
+		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
+		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
+		if (status) {
+			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
+			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+			goto free_out;
+		}
+
+		/*
+		 * Guard the staging buffer: a table larger than
+		 * IXL_FINAL_BUFF_SIZE would otherwise overflow final_buff.
+		 * Dump whatever was accumulated so far and stop.
+		 */
+		if (ret_buff_size > IXL_FINAL_BUFF_SIZE - final_buff_len) {
+			device_printf(dev, "Debug dump table exceeds %d byte staging buffer; output truncated\n",
+			    IXL_FINAL_BUFF_SIZE);
+			goto free_out;
+		}
+
+		/* copy info out of temp buffer */
+		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
+		final_buff_len += ret_buff_size;
+
+		if (ret_next_table != curr_next_table) {
+			/* We're done with the current table; we can dump out read data. */
+			sbuf_printf(buf, "%d:", curr_next_table);
+			int bytes_printed = 0;
+			/*
+			 * Strictly less-than: with "<=" the loop printed one
+			 * extra 16-byte line of stale data past the end of
+			 * the accumulated table contents.
+			 */
+			while (bytes_printed < final_buff_len) {
+				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
+				bytes_printed += 16;
+			}
+			sbuf_cat(buf, "\n");
+
+			/* The entire cluster has been read; we're finished */
+			if (ret_next_table == 0xFF)
+				break;
+
+			/* Otherwise clear the output buffer and continue reading */
+			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
+			final_buff_len = 0;
+		}
+
+		/* 0xFFFFFFFF marks "start of next table" per the AQ contract */
+		if (ret_next_index == 0xFFFFFFFF)
+			ret_next_index = 0;
+
+		bzero(dump_buf, sizeof(dump_buf));
+		curr_next_table = ret_next_table;
+		curr_next_index = ret_next_index;
+	}
+
+free_out:
+	free(final_buff, M_DEVBUF);
+out:
+	error = sbuf_finish(buf);
+	if (error)
+		device_printf(dev, "Error finishing sbuf: %d\n", error);
+	sbuf_delete(buf);
+
+	return (error);
+}
+
+/*
+ * Sysctl handler: enable (1) or disable (0) the firmware LLDP agent.
+ *
+ * Disabling requires FW API >= 1.7 and is not available on X722 or when
+ * NPAR is enabled. The IXL_PF_STATE_FW_LLDP_DISABLED software flag is kept
+ * in sync with the requested state.
+ */
+static int
+ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
+{
+	struct ixl_pf *pf = (struct ixl_pf *)arg1;
+	struct i40e_hw *hw = &pf->hw;
+	device_t dev = pf->dev;
+	enum i40e_status_code status;
+	int cur_state, requested;
+	int error;
+
+	/* 1 == agent currently enabled, 0 == currently disabled */
+	cur_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
+	requested = cur_state;
+
+	/* Read in new mode */
+	error = sysctl_handle_int(oidp, &requested, 0, req);
+	if ((error) || (req->newptr == NULL))
+		return (error);
+
+	/* Already in requested state */
+	if (requested == cur_state)
+		return (error);
+
+	if (requested != 0) {
+		/* Enable: (re)start the agent; EEXIST just means it's up. */
+		status = i40e_aq_start_lldp(&pf->hw, NULL);
+		if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
+			device_printf(dev, "FW LLDP agent is already running\n");
+		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
+		return (0);
+	}
+
+	/* Disable: reject configurations the firmware cannot support. */
+	if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
+		device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
+		return (EINVAL);
+	}
+	if (pf->hw.aq.api_maj_ver < 1 ||
+	    (pf->hw.aq.api_maj_ver == 1 &&
+	    pf->hw.aq.api_min_ver < 7)) {
+		device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
+		return (EINVAL);
+	}
+
+	/* Best-effort stop; DCB parameters must be restored afterwards. */
+	i40e_aq_stop_lldp(&pf->hw, true, NULL);
+	i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
+	atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
+	return (0);
+}
+
+/*
+ * Get FW LLDP Agent status
+ *
+ * Reads the per-port LLDP AdminStatus nibble from the firmware's LLDP
+ * configuration and mirrors it into the IXL_PF_STATE_FW_LLDP_DISABLED
+ * software state flag.
+ *
+ * Returns 0 on success, or the status code from i40e_read_lldp_cfg() if
+ * the configuration could not be read.
+ */
+int
+ixl_get_fw_lldp_status(struct ixl_pf *pf)
+{
+	enum i40e_status_code ret = I40E_SUCCESS;
+	struct i40e_lldp_variables lldp_cfg;
+	struct i40e_hw *hw = &pf->hw;
+	u8 adminstatus = 0;
+
+	ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+	if (ret)
+		/* style(9): parenthesize return values, as elsewhere in file */
+		return (ret);
+
+	/* Get the LLDP AdminStatus for the current port (4 bits per port) */
+	adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
+	adminstatus &= 0xf;
+
+	/* Check if LLDP agent is disabled */
+	if (!adminstatus) {
+		device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
+		atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
+	} else {
+		/* Brace both branches to match the if side (style(9)) */
+		atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
+	}
+
+	return (0);
+}
+
+/*
+ * Determine the initial link state during attach.
+ *
+ * Firmware older than 4.33 needs an explicit autonegotiation restart
+ * (after a short settle delay) before the link status is trustworthy;
+ * newer firmware does not. The result is stored in pf->link_up.
+ *
+ * Returns 0 on success, or the AQ status code if the AN restart failed.
+ */
+int
+ixl_attach_get_link_status(struct ixl_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	device_t dev = pf->dev;
+	int error = 0;
+
+	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+	    (hw->aq.fw_maj_ver < 4)) {
+		i40e_msec_delay(75);
+		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
+		if (error) {
+			device_printf(dev, "link restart failed, aq_err=%d\n",
+			    pf->hw.aq.asq_last_status);
+			/* style(9): parenthesize return values */
+			return (error);
+		}
+	}
+
+	/* Determine link state */
+	hw->phy.get_link_info = TRUE;
+	/* NOTE(review): return status of i40e_get_link_status is ignored;
+	 * on failure pf->link_up may be stale — confirm intended. */
+	i40e_get_link_status(hw, &pf->link_up);
+	return (0);
+}