author      Eric Joyner <erj@FreeBSD.org>   2017-02-10 01:04:11 +0000
committer   Eric Joyner <erj@FreeBSD.org>   2017-02-10 01:04:11 +0000
commit      cb6b8299fdda0ccd5c9c9b0d29cd9c005f6d780b (patch)
tree        3179310eb492a68ec5315d5970f3ed824adc1dca
parent      e628e1b919cbfce77767eb2e6e91c917250d09bf (diff)
download    src-cb6b8299fdda0ccd5c9c9b0d29cd9c005f6d780b.tar.gz
            src-cb6b8299fdda0ccd5c9c9b0d29cd9c005f6d780b.zip
ixl(4): Update to 1.7.12-k
Refresh upstream driver before impending conversion to iflib.

Major new features:

- Support for Fortville-based 25G adapters
- Support for I2C reads/writes
  (To prevent getting or sending corrupt data, you should set
  dev.ixl.0.debug.disable_fw_link_management=1 when using I2C [this will
  disable link!], then set it to 0 when done. The driver implements the
  SIOCGI2C ioctl, so ifconfig -v works for reading I2C data, but there are
  read_i2c and write_i2c sysctls under the .debug sysctl tree [the latter
  being useful for upper page support in QSFP+]).
- Addition of an iWARP client interface (so the future iWARP driver for
  X722 devices can communicate with the base driver).
- Compiling this option in is enabled by default, with "options IXL_IW"
  in GENERIC.

Differential Revision:	https://reviews.freebsd.org/D9227
Reviewed by:	sbruno
MFC after:	2 weeks
Sponsored by:	Intel Corporation
Notes:      svn path=/head/; revision=313497
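For readers who want to exercise the new I2C path, the procedure described in
the commit message above can be driven from a small C program. This is a
minimal sketch, not code from the commit: the interface name "ixl0", the
sysctl instance number "0", and the bytes read (SFF-8472 device address 0xA0,
offset 0) are illustrative assumptions; only the sysctl name and the SIOCGI2C
ioctl are taken from the commit message.

/*
 * Sketch of the documented I2C access procedure: disable firmware link
 * management, read a few module EEPROM bytes via SIOCGI2C (the same ioctl
 * "ifconfig -v" uses), then re-enable link management.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <err.h>
#include <stdio.h>
#include <string.h>

static void
set_fw_link_mgmt(int disable)
{
	/* Sysctl name as given in the commit message; instance 0 assumed. */
	if (sysctlbyname("dev.ixl.0.debug.disable_fw_link_management",
	    NULL, NULL, &disable, sizeof(disable)) != 0)
		err(1, "sysctlbyname");
}

int
main(void)
{
	struct ifreq ifr;
	struct ifi2creq req;
	int i, s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
		err(1, "socket");

	set_fw_link_mgmt(1);		/* note: this drops link */

	memset(&ifr, 0, sizeof(ifr));
	memset(&req, 0, sizeof(req));
	strlcpy(ifr.ifr_name, "ixl0", sizeof(ifr.ifr_name));
	req.dev_addr = 0xA0;		/* SFF-8472 A0h device address */
	req.offset = 0;			/* start at the identifier byte */
	req.len = 8;			/* ifi2creq carries at most 8 bytes */
	ifr.ifr_data = (caddr_t)&req;

	if (ioctl(s, SIOCGI2C, &ifr) < 0)
		warn("SIOCGI2C");
	else {
		for (i = 0; i < req.len; i++)
			printf("%02x ", req.data[i]);
		printf("\n");
	}

	set_fw_link_mgmt(0);		/* restore firmware link management */
	return (0);
}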
-rw-r--r--  sys/amd64/conf/GENERIC           1
-rw-r--r--  sys/amd64/conf/NOTES             1
-rw-r--r--  sys/conf/files.amd64             4
-rw-r--r--  sys/conf/options.amd64           3
-rw-r--r--  sys/dev/ixl/i40e_adminq.c        4
-rw-r--r--  sys/dev/ixl/i40e_adminq_cmd.h    39
-rw-r--r--  sys/dev/ixl/i40e_common.c        407
-rw-r--r--  sys/dev/ixl/i40e_devids.h        1
-rw-r--r--  sys/dev/ixl/i40e_lan_hmc.c       5
-rw-r--r--  sys/dev/ixl/i40e_nvm.c           35
-rw-r--r--  sys/dev/ixl/i40e_osdep.c         58
-rw-r--r--  sys/dev/ixl/i40e_osdep.h         3
-rw-r--r--  sys/dev/ixl/i40e_prototype.h     24
-rw-r--r--  sys/dev/ixl/i40e_type.h          52
-rw-r--r--  sys/dev/ixl/i40e_virtchnl.h      5
-rw-r--r--  sys/dev/ixl/if_ixl.c             135
-rw-r--r--  sys/dev/ixl/if_ixlv.c            154
-rw-r--r--  sys/dev/ixl/ixl.h                31
-rw-r--r--  sys/dev/ixl/ixl_iw.c             469
-rw-r--r--  sys/dev/ixl/ixl_iw.h             75
-rw-r--r--  sys/dev/ixl/ixl_iw_int.h         71
-rw-r--r--  sys/dev/ixl/ixl_pf.h             57
-rw-r--r--  sys/dev/ixl/ixl_pf_i2c.c         605
-rw-r--r--  sys/dev/ixl/ixl_pf_iov.c         68
-rw-r--r--  sys/dev/ixl/ixl_pf_iov.h         3
-rw-r--r--  sys/dev/ixl/ixl_pf_main.c        1254
-rw-r--r--  sys/dev/ixl/ixl_txrx.c           73
-rw-r--r--  sys/dev/ixl/ixlv.h               3
-rw-r--r--  sys/dev/ixl/ixlvc.c              17
-rw-r--r--  sys/modules/ixl/Makefile         6
-rw-r--r--  sys/modules/ixlv/Makefile        4
31 files changed, 2987 insertions, 680 deletions
diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC
index ba5bfe688fc0..784e1cdf1115 100644
--- a/sys/amd64/conf/GENERIC
+++ b/sys/amd64/conf/GENERIC
@@ -233,6 +233,7 @@ device em # Intel PRO/1000 Gigabit Ethernet Family
device ix # Intel PRO/10GbE PCIE PF Ethernet
device ixv # Intel PRO/10GbE PCIE VF Ethernet
device ixl # Intel XL710 40Gbe PCIE Ethernet
+options IXL_IW # Enable iWARP Client Interface in ixl(4)
device ixlv # Intel XL710 40Gbe VF PCIE Ethernet
device le # AMD Am7900 LANCE and Am79C9xx PCnet
device ti # Alteon Networks Tigon I/II gigabit Ethernet
diff --git a/sys/amd64/conf/NOTES b/sys/amd64/conf/NOTES
index dcda19c1b9c6..da8c2cab36a7 100644
--- a/sys/amd64/conf/NOTES
+++ b/sys/amd64/conf/NOTES
@@ -335,6 +335,7 @@ device ipw # Intel 2100 wireless NICs.
device iwi # Intel 2200BG/2225BG/2915ABG wireless NICs.
device iwn # Intel 4965/1000/5000/6000 wireless NICs.
device ixl # Intel XL710 40Gbe PCIE Ethernet
+options IXL_IW # Enable iWARP Client Interface in ixl(4)
device ixlv # Intel XL710 40Gbe VF PCIE Ethernet
device mlx4 # Shared code module between IB and Ethernet
device mlx4ib # Mellanox ConnectX HCA InfiniBand
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index c4ec21c4f0c1..49f26c78ae6f 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -256,6 +256,10 @@ dev/ixl/ixl_pf_qmgr.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_iov.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
+dev/ixl/ixl_pf_i2c.c optional ixl pci \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
+dev/ixl/ixl_iw.c optional ixl pci \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/if_ixlv.c optional ixlv pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixlvc.c optional ixlv pci \
diff --git a/sys/conf/options.amd64 b/sys/conf/options.amd64
index 42349ebd6f32..892a37817d0b 100644
--- a/sys/conf/options.amd64
+++ b/sys/conf/options.amd64
@@ -48,6 +48,9 @@ AGP_DEBUG opt_agp.h
ATKBD_DFLT_KEYMAP opt_atkbd.h
+# iWARP client interface support in ixl
+IXL_IW opt_ixl.h
+
# -------------------------------
# EOF
# -------------------------------
diff --git a/sys/dev/ixl/i40e_adminq.c b/sys/dev/ixl/i40e_adminq.c
index cfffc1f01909..6e922f5bdb23 100644
--- a/sys/dev/ixl/i40e_adminq.c
+++ b/sys/dev/ixl/i40e_adminq.c
@@ -1020,11 +1020,11 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
flags = LE16_TO_CPU(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
diff --git a/sys/dev/ixl/i40e_adminq_cmd.h b/sys/dev/ixl/i40e_adminq_cmd.h
index 764ce11fb772..88da079bdf59 100644
--- a/sys/dev/ixl/i40e_adminq_cmd.h
+++ b/sys/dev/ixl/i40e_adminq_cmd.h
@@ -154,6 +154,7 @@ enum i40e_admin_queue_opc {
/* WoL commands */
i40e_aqc_opc_set_wol_filter = 0x0120,
i40e_aqc_opc_get_wake_reason = 0x0121,
+ i40e_aqc_opc_clear_all_wol_filters = 0x025E,
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
@@ -535,7 +536,8 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
#define I40E_AQC_MC_MAG_EN_VALID 0x100
-#define I40E_AQC_ADDR_VALID_MASK 0x1F0
+#define I40E_AQC_WOL_PRESERVE_STATUS 0x200
+#define I40E_AQC_ADDR_VALID_MASK 0x3F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -556,6 +558,7 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
struct i40e_aqc_mac_address_write {
__le16 command_flags;
#define I40E_AQC_MC_MAG_EN 0x0100
+#define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
@@ -594,6 +597,7 @@ struct i40e_aqc_set_wol_filter {
__le16 cmd_flags;
#define I40E_AQC_SET_WOL_FILTER 0x8000
#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
__le16 valid_flags;
@@ -1757,6 +1761,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_LINK_ENABLED 0x08
#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
+#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
#define I40E_AQ_EEE_1000BASE_T 0x0004
@@ -1768,11 +1774,20 @@ struct i40e_aq_get_phy_abilities_resp {
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
u8 phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
- u8 mod_type_ext;
+ u8 fec_cfg_curr_mod_ext_info;
+#define I40E_AQ_ENABLE_FEC_KR 0x01
+#define I40E_AQ_ENABLE_FEC_RS 0x02
+#define I40E_AQ_REQUEST_FEC_KR 0x04
+#define I40E_AQ_REQUEST_FEC_RS 0x08
+#define I40E_AQ_ENABLE_FEC_AUTO 0x10
+#define I40E_AQ_FEC
+#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
+#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
+
u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
@@ -1796,11 +1811,15 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le32 eeer;
u8 low_power_ctrl;
u8 phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
-#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
-#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
- u8 reserved[2];
+ u8 fec_config;
+#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
+#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
+#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2)
+#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3)
+#define I40E_AQ_SET_FEC_AUTO BIT(4)
+#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
+#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
+ u8 reserved;
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
@@ -1890,6 +1909,8 @@ struct i40e_aqc_get_link_status {
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
+#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
+#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
u8 power_desc;
diff --git a/sys/dev/ixl/i40e_common.c b/sys/dev/ixl/i40e_common.c
index 79229752ca29..d405cddf4ccd 100644
--- a/sys/dev/ixl/i40e_common.c
+++ b/sys/dev/ixl/i40e_common.c
@@ -78,7 +78,6 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
hw->mac.type = I40E_MAC_X722;
break;
case I40E_DEV_ID_X722_VF:
- case I40E_DEV_ID_X722_VF_HV:
case I40E_DEV_ID_X722_A0_VF:
hw->mac.type = I40E_MAC_X722_VF;
break;
@@ -1088,7 +1087,8 @@ enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
if (flags & I40E_AQC_LAN_ADDR_VALID)
- memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+ i40e_memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac),
+ I40E_NONDMA_TO_NONDMA);
return status;
}
@@ -1111,7 +1111,8 @@ enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
return status;
if (flags & I40E_AQC_PORT_ADDR_VALID)
- memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+ i40e_memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac),
+ I40E_NONDMA_TO_NONDMA);
else
status = I40E_ERR_INVALID_MAC_ADDR;
@@ -1224,6 +1225,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_1000BASE_LX:
case I40E_PHY_TYPE_40GBASE_SR4:
case I40E_PHY_TYPE_40GBASE_LR4:
+ case I40E_PHY_TYPE_25GBASE_LR:
+ case I40E_PHY_TYPE_25GBASE_SR:
media = I40E_MEDIA_TYPE_FIBER;
break;
case I40E_PHY_TYPE_100BASE_TX:
@@ -1238,6 +1241,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
case I40E_PHY_TYPE_40GBASE_AOC:
case I40E_PHY_TYPE_10GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_CR:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
@@ -1245,6 +1249,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_40GBASE_KR4:
case I40E_PHY_TYPE_20GBASE_KR2:
+ case I40E_PHY_TYPE_25GBASE_KR:
media = I40E_MEDIA_TYPE_BACKPLANE;
break;
case I40E_PHY_TYPE_SGMII:
@@ -1725,10 +1730,13 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
/* Copy over all the old settings */
config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
config.link_speed = abilities.link_speed;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status)
@@ -1888,6 +1896,8 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
hw_link_info->link_info = resp->link_info;
hw_link_info->an_info = resp->an_info;
+ hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
+ I40E_AQ_CONFIG_FEC_RS_ENA);
hw_link_info->ext_info = resp->ext_info;
hw_link_info->loopback = resp->loopback;
hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
@@ -1910,12 +1920,13 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
else
hw_link_info->crc_enable = FALSE;
- if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_ENABLE))
+ if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_IS_ENABLED))
hw_link_info->lse_enable = TRUE;
else
hw_link_info->lse_enable = FALSE;
- if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
@@ -2280,6 +2291,43 @@ enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
}
/**
+* i40e_aq_set_vsi_full_promiscuous
+* @hw: pointer to the hw struct
+* @seid: VSI number
+* @set: set promiscuous enable/disable
+* @cmd_details: pointer to command details structure or NULL
+**/
+enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags = I40E_AQC_SET_VSI_PROMISC_UNICAST |
+ I40E_AQC_SET_VSI_PROMISC_MULTICAST |
+ I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST |
+ I40E_AQC_SET_VSI_PROMISC_MULTICAST |
+ I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_mc_promisc_on_vlan
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2348,6 +2396,40 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_vsi_bc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set broadcast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_broadcast
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2680,14 +2762,17 @@ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
- if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
+ /* extra checking needed to ensure link info to user is timely */
+ if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
+ !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
status = i40e_aq_get_phy_capabilities(hw, FALSE, false,
&abilities, NULL);
if (status)
return status;
- memcpy(hw->phy.link_info.module_type, &abilities.module_type,
- sizeof(hw->phy.link_info.module_type));
+ i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+ sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
}
return status;
}
@@ -3537,6 +3622,14 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
break;
case I40E_AQ_CAP_ID_MNG_MODE:
p->management_mode = number;
+ if (major_rev > 1) {
+ p->mng_protocols_over_mctp = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Protocols over MCTP = %d\n",
+ p->mng_protocols_over_mctp);
+ } else {
+ p->mng_protocols_over_mctp = 0;
+ }
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Management Mode = %d\n",
p->management_mode);
@@ -3765,7 +3858,6 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
else
p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0;
- p->proxy_support = p->proxy_support;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: WOL proxy filters = %d\n",
hw->num_wol_proxy_filters);
@@ -3806,8 +3898,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
/* partition id is 1-based, and functions are evenly spread
* across the ports as partitions
*/
- hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
- hw->num_partitions = num_functions / hw->num_ports;
+ if (hw->num_ports != 0) {
+ hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+ hw->num_partitions = num_functions / hw->num_ports;
+ }
/* additional HW specific goodies that might
* someday be HW version specific
@@ -4292,11 +4386,15 @@ enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
/**
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
- * @udp_port: the UDP port to add
+ * @udp_port: the UDP port to add in Host byte order
* @header_len: length of the tunneling header length in DWords
* @protocol_index: protocol index type
* @filter_index: pointer to filter index
* @cmd_details: pointer to command details structure or NULL
+ *
+ * Note: Firmware expects the udp_port value to be in Little Endian format,
+ * and this function will call CPU_TO_LE16 to convert from Host byte order to
+ * Little Endian order.
**/
enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u16 udp_port, u8 protocol_index,
@@ -5905,9 +6003,6 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
- if (bwd_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
-
desc.datalen = CPU_TO_LE16(bwd_size);
status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details);
@@ -5916,7 +6011,92 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
}
/**
- * i40e_read_phy_register
+ * i40e_read_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address in the page
+ * @phy_adr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ } else {
+ command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
+ *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
+ I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_write_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address in the page
+ * @phy_adr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes specified PHY register value
+ **/
+enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
+ wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register_clause45
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
@@ -5925,9 +6105,8 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr,
- u16 *value)
+enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
{
enum i40e_status_code status = I40E_ERR_TIMEOUT;
u32 command = 0;
@@ -5937,8 +6116,8 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_ADDRESS) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
@@ -5960,8 +6139,8 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_READ) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
@@ -5991,7 +6170,7 @@ phy_read_end:
}
/**
- * i40e_write_phy_register
+ * i40e_write_phy_register_clause45
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
@@ -6000,9 +6179,8 @@ phy_read_end:
*
* Writes value to specified PHY register
**/
-enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr,
- u16 value)
+enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
{
enum i40e_status_code status = I40E_ERR_TIMEOUT;
u32 command = 0;
@@ -6012,8 +6190,8 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_ADDRESS) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
@@ -6037,8 +6215,8 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_WRITE) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
@@ -6059,6 +6237,78 @@ phy_write_end:
}
/**
+ * i40e_write_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_adr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes value to specified PHY register
+ **/
+enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
+{
+ enum i40e_status_code status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_write_phy_register_clause22(hw,
+ reg, phy_addr, value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_write_phy_register_clause45(hw,
+ page, reg, phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_adr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
+{
+ enum i40e_status_code status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
+ value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_read_phy_register_clause45(hw, page, reg,
+ phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
+/**
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
@@ -6100,14 +6350,16 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
led_addr++) {
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ &led_reg);
if (status)
goto phy_blinking_end;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register(hw,
+ status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
led_reg);
@@ -6119,20 +6371,18 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
if (time > 0 && interval > 0) {
for (i = 0; i < time * 1000; i += interval) {
- status = i40e_read_phy_register(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- &led_reg);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (led_reg & I40E_PHY_LED_MANUAL_ON)
led_reg = 0;
else
led_reg = I40E_PHY_LED_MANUAL_ON;
- status = i40e_write_phy_register(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
i40e_msec_delay(interval);
@@ -6140,8 +6390,9 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
}
restore_config:
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, led_ctl);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
phy_blinking_end:
return status;
@@ -6172,8 +6423,10 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
temp_addr++) {
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- temp_addr, phy_addr, &reg_val);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ temp_addr, phy_addr,
+ &reg_val);
if (status)
return status;
*val = reg_val;
@@ -6206,41 +6459,42 @@ enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
phy_addr = i40e_get_phy_address(hw, port_num);
-
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ led_reg);
if (status)
return status;
}
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+ status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
- status = i40e_write_phy_register(hw,
+ status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_ctl);
}
return status;
restore_config:
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, led_ctl);
+ status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
return status;
}
@@ -6485,10 +6739,13 @@ enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
desc.params.external.addr_high =
CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config));
desc.params.external.addr_low =
CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config));
+ desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_arp_proxy_data));
status = i40e_asq_send_command(hw, &desc, proxy_config,
sizeof(struct i40e_aqc_arp_proxy_data),
@@ -6519,10 +6776,13 @@ enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_ns_proxy_table_entry);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
desc.params.external.addr_high =
CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry));
desc.params.external.addr_low =
CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry));
+ desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_ns_proxy_data));
status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
sizeof(struct i40e_aqc_ns_proxy_data),
@@ -6569,9 +6829,11 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
if (set_filter) {
if (!filter)
return I40E_ERR_PARAM;
+
cmd_flags |= I40E_AQC_SET_WOL_FILTER;
- buff_len = sizeof(*filter);
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR;
}
+
if (no_wol_tco)
cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
@@ -6582,6 +6844,12 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
cmd->valid_flags = CPU_TO_LE16(valid_flags);
+ buff_len = sizeof(*filter);
+ desc.datalen = CPU_TO_LE16(buff_len);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter));
cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter));
@@ -6618,3 +6886,24 @@ enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
return status;
}
+/**
+* i40e_aq_clear_all_wol_filters
+* @hw: pointer to the hw struct
+* @cmd_details: pointer to command details structure or NULL
+*
+* Get information for the reason of a Wake Up event
+**/
+enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_clear_all_wol_filters);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
diff --git a/sys/dev/ixl/i40e_devids.h b/sys/dev/ixl/i40e_devids.h
index 5725cb96754c..12ba99f83823 100644
--- a/sys/dev/ixl/i40e_devids.h
+++ b/sys/dev/ixl/i40e_devids.h
@@ -63,7 +63,6 @@
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
#define I40E_DEV_ID_X722_VF 0x37CD
-#define I40E_DEV_ID_X722_VF_HV 0x37D9
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
diff --git a/sys/dev/ixl/i40e_lan_hmc.c b/sys/dev/ixl/i40e_lan_hmc.c
index f7bee6a94b57..a6716a913ce0 100644
--- a/sys/dev/ixl/i40e_lan_hmc.c
+++ b/sys/dev/ixl/i40e_lan_hmc.c
@@ -1240,11 +1240,6 @@ enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
u64 obj_offset_in_fpm;
u32 sd_idx, sd_lmt;
- if (NULL == hmc_info) {
- ret_code = I40E_ERR_BAD_PTR;
- DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
- goto exit;
- }
if (NULL == hmc_info->hmc_obj) {
ret_code = I40E_ERR_BAD_PTR;
DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
diff --git a/sys/dev/ixl/i40e_nvm.c b/sys/dev/ixl/i40e_nvm.c
index 151691ec97e2..3d36c6433a9e 100644
--- a/sys/dev/ixl/i40e_nvm.c
+++ b/sys/dev/ixl/i40e_nvm.c
@@ -220,14 +220,14 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- i40e_release_nvm(hw);
+ } else {
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
}
- } else {
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ i40e_release_nvm(hw);
}
return ret_code;
}
@@ -886,9 +886,20 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
}
+ /* Clear error status on read */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
return I40E_SUCCESS;
}
+ /* Clear status even it is not read and log */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ }
+
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@@ -1247,6 +1258,11 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
}
hw->nvm_wait_opcode = 0;
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
+
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
@@ -1409,7 +1425,8 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
if (hw->nvm_buff.va) {
buff = hw->nvm_buff.va;
- memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
+ I40E_NONDMA_TO_NONDMA);
}
}
@@ -1482,7 +1499,7 @@ static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
__func__, cmd->offset, cmd->offset + len);
buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
- memcpy(bytes, buff, len);
+ i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
bytes += len;
remainder -= len;
@@ -1496,7 +1513,7 @@ static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
__func__, start_byte, start_byte + remainder);
- memcpy(bytes, buff, remainder);
+ i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
}
return I40E_SUCCESS;
diff --git a/sys/dev/ixl/i40e_osdep.c b/sys/dev/ixl/i40e_osdep.c
index 2a771515a9ac..a2b25ccf1437 100644
--- a/sys/dev/ixl/i40e_osdep.c
+++ b/sys/dev/ixl/i40e_osdep.c
@@ -189,15 +189,71 @@ void
i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask, char *fmt, ...)
{
va_list args;
+ device_t dev;
if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
return;
+ dev = ((struct i40e_osdep *)hw->back)->dev;
+
+ /* Re-implement device_printf() */
+ device_print_prettyname(dev);
va_start(args, fmt);
- device_printf(((struct i40e_osdep *)hw->back)->dev, fmt, args);
+ vprintf(fmt, args);
va_end(args);
}
+const char *
+ixl_vc_opcode_str(uint16_t op)
+{
+ switch (op) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ return ("VERSION");
+ case I40E_VIRTCHNL_OP_RESET_VF:
+ return ("RESET_VF");
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ return ("GET_VF_RESOURCES");
+ case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ return ("CONFIG_TX_QUEUE");
+ case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ return ("CONFIG_RX_QUEUE");
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ return ("CONFIG_VSI_QUEUES");
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ return ("CONFIG_IRQ_MAP");
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ return ("ENABLE_QUEUES");
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ return ("DISABLE_QUEUES");
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ return ("ADD_ETHER_ADDRESS");
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ return ("DEL_ETHER_ADDRESS");
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ return ("ADD_VLAN");
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ return ("DEL_VLAN");
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ return ("CONFIG_PROMISCUOUS_MODE");
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ return ("GET_STATS");
+ case I40E_VIRTCHNL_OP_FCOE:
+ return ("FCOE");
+ case I40E_VIRTCHNL_OP_EVENT:
+ return ("EVENT");
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ return ("CONFIG_RSS_KEY");
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ return ("CONFIG_RSS_LUT");
+ case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ return ("GET_RSS_HENA_CAPS");
+ case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ return ("SET_RSS_HENA");
+ default:
+ return ("UNKNOWN");
+ }
+}
+
u16
i40e_read_pci_cfg(struct i40e_hw *hw, u32 reg)
{
diff --git a/sys/dev/ixl/i40e_osdep.h b/sys/dev/ixl/i40e_osdep.h
index 7f9873d94aba..5467745ba857 100644
--- a/sys/dev/ixl/i40e_osdep.h
+++ b/sys/dev/ixl/i40e_osdep.h
@@ -151,6 +151,7 @@ struct i40e_osdep {
bus_space_handle_t mem_bus_space_handle;
bus_size_t mem_bus_space_size;
uint32_t flush_reg;
+ int i2c_intfc_num;
device_t dev;
};
@@ -185,6 +186,8 @@ extern void i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask,
/* Non-busy-wait that uses kern_yield() */
void i40e_msec_pause(int);
+const char * ixl_vc_opcode_str(uint16_t op);
+
/*
** This hardware supports either 16 or 32 byte rx descriptors;
** the driver only uses the 32 byte kind.
diff --git a/sys/dev/ixl/i40e_prototype.h b/sys/dev/ixl/i40e_prototype.h
index 01d11d6335b2..11dc5aae2f8c 100644
--- a/sys/dev/ixl/i40e_prototype.h
+++ b/sys/dev/ixl/i40e_prototype.h
@@ -166,12 +166,18 @@ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
bool rx_only_promisc);
enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
u16 seid, bool enable,
struct i40e_asq_cmd_details *cmd_details);
@@ -517,10 +523,20 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
u16 *wake_reason,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 *value);
-enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
diff --git a/sys/dev/ixl/i40e_type.h b/sys/dev/ixl/i40e_type.h
index fa8c7192e99f..73af9653b5d4 100644
--- a/sys/dev/ixl/i40e_type.h
+++ b/sys/dev/ixl/i40e_type.h
@@ -146,15 +146,22 @@ enum i40e_debug_mask {
#define I40E_PCI_LINK_SPEED_5000 0x2
#define I40E_PCI_LINK_SPEED_8000 0x3
-#define I40E_MDIO_STCODE I40E_MASK(0, \
+#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_STCODE_SHIFT)
-#define I40E_MDIO_OPCODE_ADDRESS I40E_MASK(0, \
+#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \
+#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
+
+#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_READ I40E_MASK(3, \
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_PHY_COM_REG_PAGE 0x1E
@@ -192,7 +199,6 @@ enum i40e_memcpy_type {
*/
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
- I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
I40E_MAC_X722,
@@ -251,6 +257,7 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
+ u8 fec_info;
u8 ext_info;
u8 loopback;
/* is Link Status Event notification to SW enabled */
@@ -317,10 +324,22 @@ struct i40e_phy_info {
#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
-#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_KR + 32)
-#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_CR + 32)
-#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_SR + 32)
-#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_LR + 32)
+/*
+ * Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some
+ * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit
+ * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So,
+ * a shift is needed to adjust for this with values larger than 31. The
+ * only affected values are I40E_PHY_TYPE_25GBASE_*.
+ */
+#define I40E_PHY_TYPE_OFFSET 1
+#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
+ I40E_PHY_TYPE_OFFSET)
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
@@ -330,9 +349,9 @@ enum i40e_acpi_programming_method {
I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
};
-#define I40E_WOL_SUPPORT_MASK 1
-#define I40E_ACPI_PROGRAMMING_METHOD_MASK (1 << 1)
-#define I40E_PROXY_SUPPORT_MASK (1 << 2)
+#define I40E_WOL_SUPPORT_MASK 0x1
+#define I40E_ACPI_PROGRAMMING_METHOD_MASK 0x2
+#define I40E_PROXY_SUPPORT_MASK 0x4
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
@@ -342,6 +361,10 @@ struct i40e_hw_capabilities {
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
u32 management_mode;
+ u32 mng_protocols_over_mctp;
+#define I40E_MNG_PROTOCOL_PLDM 0x2
+#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4
+#define I40E_MNG_PROTOCOL_NCSI 0x8
u32 npar_enable;
u32 os2bmc;
u32 valid_functions;
@@ -457,6 +480,7 @@ enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_WRITING,
I40E_NVMUPD_STATE_INIT_WAIT,
I40E_NVMUPD_STATE_WRITE_WAIT,
+ I40E_NVMUPD_STATE_ERROR
};
/* nvm_access definition and its masks/shifts need to be accessible to
@@ -535,6 +559,7 @@ struct i40e_bus_info {
u16 func;
u16 device;
u16 lan_id;
+ u16 bus_id;
};
/* Flow control (FC) parameters */
@@ -1432,6 +1457,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
+#define I40E_SR_PHY_ACTIVITY_LIST_PTR 0x3D
#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
#define I40E_SR_SW_CHECKSUM_WORD 0x3F
#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
diff --git a/sys/dev/ixl/i40e_virtchnl.h b/sys/dev/ixl/i40e_virtchnl.h
index 4ebe578d1972..32af9c8e28c3 100644
--- a/sys/dev/ixl/i40e_virtchnl.h
+++ b/sys/dev/ixl/i40e_virtchnl.h
@@ -168,6 +168,11 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
+
+#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
index a9221d323a26..f814855c187c 100644
--- a/sys/dev/ixl/if_ixl.c
+++ b/sys/dev/ixl/if_ixl.c
@@ -35,6 +35,11 @@
#include "ixl.h"
#include "ixl_pf.h"
+#ifdef IXL_IW
+#include "ixl_iw.h"
+#include "ixl_iw_int.h"
+#endif
+
#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif
@@ -42,7 +47,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
-char ixl_driver_version[] = "1.6.6-k";
+char ixl_driver_version[] = "1.7.12-k";
/*********************************************************************
* PCI Device ID Table
@@ -70,6 +75,8 @@ static ixl_vendor_info_t ixl_vendor_info_array[] =
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -119,9 +126,11 @@ static driver_t ixl_driver = {
devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
+MODULE_VERSION(ixl, 1);
+
MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
-#ifdef DEV_NETMAP
+#if defined(DEV_NETMAP) && __FreeBSD_version >= 1100000
MODULE_DEPEND(ixl, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
@@ -145,7 +154,7 @@ SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
** Number of descriptors per ring:
** - TX and RX are the same size
*/
-static int ixl_ring_size = DEFAULT_RING;
+static int ixl_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixl.ring_size", &ixl_ring_size);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
&ixl_ring_size, 0, "Descriptor Ring Size");
@@ -206,6 +215,11 @@ TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
&ixl_tx_itr, 0, "TX Interrupt Rate");
+#ifdef IXL_IW
+int ixl_enable_iwarp = 0;
+TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
+#endif
+
#ifdef DEV_NETMAP
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
#include <dev/netmap/if_ixl_netmap.h>
@@ -296,12 +310,9 @@ ixl_save_pf_tunables(struct ixl_pf *pf)
/* Save tunable information */
pf->enable_msix = ixl_enable_msix;
pf->max_queues = ixl_max_queues;
- pf->ringsz = ixl_ring_size;
pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
- pf->tx_itr = ixl_tx_itr;
- pf->rx_itr = ixl_rx_itr;
pf->dbg_mask = ixl_core_debug_mask;
pf->hw.debug_mask = ixl_shared_debug_mask;
@@ -313,8 +324,35 @@ ixl_save_pf_tunables(struct ixl_pf *pf)
device_printf(dev, "ring_size must be between %d and %d, "
"inclusive, and must be a multiple of %d\n",
IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT);
- return (EINVAL);
- }
+ device_printf(dev, "Using default value of %d instead\n",
+ IXL_DEFAULT_RING);
+ pf->ringsz = IXL_DEFAULT_RING;
+ } else
+ pf->ringsz = ixl_ring_size;
+
+ if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
+ device_printf(dev, "Invalid tx_itr value of %d set!\n",
+ ixl_tx_itr);
+ device_printf(dev, "tx_itr must be between %d and %d, "
+ "inclusive\n",
+ 0, IXL_MAX_ITR);
+ device_printf(dev, "Using default value of %d instead\n",
+ IXL_ITR_4K);
+ pf->tx_itr = IXL_ITR_4K;
+ } else
+ pf->tx_itr = ixl_tx_itr;
+
+ if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
+ device_printf(dev, "Invalid rx_itr value of %d set!\n",
+ ixl_rx_itr);
+ device_printf(dev, "rx_itr must be between %d and %d, "
+ "inclusive\n",
+ 0, IXL_MAX_ITR);
+ device_printf(dev, "Using default value of %d instead\n",
+ IXL_ITR_8K);
+ pf->rx_itr = IXL_ITR_8K;
+ } else
+ pf->rx_itr = ixl_rx_itr;
return (0);
}
@@ -529,7 +567,7 @@ ixl_attach(device_t dev)
}
/* Get the bus configuration and set the shared code's config */
- ixl_get_bus_info(hw, dev);
+ ixl_get_bus_info(pf);
/*
* In MSI-X mode, initialize the Admin Queue interrupt,
@@ -539,20 +577,50 @@ ixl_attach(device_t dev)
if (pf->msix > 1) {
error = ixl_setup_adminq_msix(pf);
if (error) {
- device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
+ device_printf(dev, "ixl_setup_adminq_msix() error: %d\n",
error);
goto err_late;
}
error = ixl_setup_adminq_tq(pf);
if (error) {
- device_printf(dev, "ixl_setup_adminq_tq error: %d\n",
+ device_printf(dev, "ixl_setup_adminq_tq() error: %d\n",
error);
goto err_late;
}
ixl_configure_intr0_msix(pf);
- ixl_enable_adminq(hw);
+ ixl_enable_intr0(hw);
+
+ error = ixl_setup_queue_msix(vsi);
+ if (error)
+ device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
+ error);
+ error = ixl_setup_queue_tqs(vsi);
+ if (error)
+ device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
+ error);
+ } else {
+ error = ixl_setup_legacy(pf);
+
+ error = ixl_setup_adminq_tq(pf);
+ if (error) {
+ device_printf(dev, "ixl_setup_adminq_tq() error: %d\n",
+ error);
+ goto err_late;
+ }
+
+ error = ixl_setup_queue_tqs(vsi);
+ if (error)
+ device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
+ error);
}
+ if (error) {
+ device_printf(dev, "interrupt setup error: %d\n", error);
+ }
+
+ /* Set initial advertised speed sysctl value */
+ ixl_get_initial_advertised_speeds(pf);
+
/* Initialize statistics & add sysctls */
ixl_add_device_sysctls(pf);
@@ -573,6 +641,27 @@ ixl_attach(device_t dev)
#ifdef DEV_NETMAP
ixl_netmap_attach(vsi);
#endif /* DEV_NETMAP */
+
+#ifdef IXL_IW
+ if (hw->func_caps.iwarp && ixl_enable_iwarp) {
+ pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
+ if (pf->iw_enabled) {
+ error = ixl_iw_pf_attach(pf);
+ if (error) {
+ device_printf(dev,
+ "interfacing to iwarp driver failed: %d\n",
+ error);
+ goto err_late;
+ }
+ } else
+ device_printf(dev,
+ "iwarp disabled on this device (no msix vectors)\n");
+ } else {
+ pf->iw_enabled = false;
+ device_printf(dev, "The device is not iWARP enabled\n");
+ }
+#endif
+
INIT_DEBUGOUT("ixl_attach: end");
return (0);
@@ -609,7 +698,7 @@ ixl_detach(device_t dev)
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
enum i40e_status_code status;
-#ifdef PCI_IOV
+#if defined(PCI_IOV) || defined(IXL_IW)
int error;
#endif
@@ -633,18 +722,19 @@ ixl_detach(device_t dev)
if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
ixl_stop(pf);
- ixl_free_queue_tqs(vsi);
-
/* Shutdown LAN HMC */
status = i40e_shutdown_lan_hmc(hw);
if (status)
device_printf(dev,
"Shutdown LAN HMC failed with code %d\n", status);
+ /* Teardown LAN queue resources */
+ ixl_teardown_queue_msix(vsi);
+ ixl_free_queue_tqs(vsi);
/* Shutdown admin queue */
- ixl_disable_adminq(hw);
- ixl_free_adminq_tq(pf);
+ ixl_disable_intr0(hw);
ixl_teardown_adminq_msix(pf);
+ ixl_free_adminq_tq(pf);
status = i40e_shutdown_adminq(hw);
if (status)
device_printf(dev,
@@ -657,6 +747,17 @@ ixl_detach(device_t dev)
EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
callout_drain(&pf->timer);
+
+#ifdef IXL_IW
+ if (ixl_enable_iwarp && pf->iw_enabled) {
+ error = ixl_iw_pf_detach(pf);
+ if (error == EBUSY) {
+ device_printf(dev, "iwarp in use; stop it first.\n");
+ return (error);
+ }
+ }
+#endif
+
#ifdef DEV_NETMAP
netmap_detach(vsi->ifp);
#endif /* DEV_NETMAP */
diff --git a/sys/dev/ixl/if_ixlv.c b/sys/dev/ixl/if_ixlv.c
index c447c34689ee..d6a81b1d8542 100644
--- a/sys/dev/ixl/if_ixlv.c
+++ b/sys/dev/ixl/if_ixlv.c
@@ -38,7 +38,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
-char ixlv_driver_version[] = "1.4.6-k";
+char ixlv_driver_version[] = "1.4.12-k";
/*********************************************************************
* PCI Device ID Table
@@ -53,10 +53,8 @@ char ixlv_driver_version[] = "1.4.6-k";
static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF_HV, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -90,6 +88,7 @@ static void ixlv_add_multi(struct ixl_vsi *);
static void ixlv_del_multi(struct ixl_vsi *);
static void ixlv_free_queues(struct ixl_vsi *);
static int ixlv_setup_interface(device_t, struct ixlv_sc *);
+static int ixlv_teardown_adminq_msix(struct ixlv_sc *);
static int ixlv_media_change(struct ifnet *);
static void ixlv_media_status(struct ifnet *, struct ifmediareq *);
@@ -170,7 +169,7 @@ static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
** Number of descriptors per ring:
** - TX and RX are the same size
*/
-static int ixlv_ringsz = DEFAULT_RING;
+static int ixlv_ringsz = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
&ixlv_ringsz, 0, "Descriptor Ring Size");
@@ -485,13 +484,14 @@ ixlv_detach(device_t dev)
{
struct ixlv_sc *sc = device_get_softc(dev);
struct ixl_vsi *vsi = &sc->vsi;
+ struct i40e_hw *hw = &sc->hw;
+ enum i40e_status_code status;
INIT_DBG_DEV(dev, "begin");
/* Make sure VLANS are not using driver */
if (vsi->ifp->if_vlantrunk != NULL) {
if_printf(vsi->ifp, "Vlan in use, detach first\n");
- INIT_DBG_DEV(dev, "end");
return (EBUSY);
}
@@ -512,16 +512,25 @@ ixlv_detach(device_t dev)
/* Drain VC mgr */
callout_drain(&sc->vc_mgr.callout);
- i40e_shutdown_adminq(&sc->hw);
+ ixlv_disable_adminq_irq(hw);
+ ixlv_teardown_adminq_msix(sc);
+ /* Drain admin queue taskqueue */
taskqueue_free(sc->tq);
+ status = i40e_shutdown_adminq(&sc->hw);
+ if (status != I40E_SUCCESS) {
+ device_printf(dev,
+ "i40e_shutdown_adminq() failed with status %s\n",
+ i40e_stat_str(hw, status));
+ }
+
if_free(vsi->ifp);
free(sc->vf_res, M_DEVBUF);
ixlv_free_pci_resources(sc);
ixlv_free_queues(vsi);
- mtx_destroy(&sc->mtx);
ixlv_free_filters(sc);
bus_generic_detach(dev);
+ mtx_destroy(&sc->mtx);
INIT_DBG_DEV(dev, "end");
return (0);
}
@@ -963,10 +972,10 @@ ixlv_init(void *arg)
/* Wait for init_locked to finish */
while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
- && ++retries < IXLV_AQ_MAX_ERR) {
+ && ++retries < IXLV_MAX_INIT_WAIT) {
i40e_msec_pause(25);
}
- if (retries >= IXLV_AQ_MAX_ERR) {
+ if (retries >= IXLV_MAX_INIT_WAIT) {
if_printf(vsi->ifp,
"Init failed to complete in allotted time!\n");
}
@@ -1177,7 +1186,7 @@ ixlv_init_msix(struct ixlv_sc *sc)
int rid, want, vectors, queues, available;
int auto_max_queues;
- rid = PCIR_BAR(IXL_BAR);
+ rid = PCIR_BAR(IXL_MSIX_BAR);
sc->msix_mem = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
if (!sc->msix_mem) {
@@ -1263,11 +1272,11 @@ ixlv_init_msix(struct ixlv_sc *sc)
}
/* Next we need to setup the vector for the Admin Queue */
- rid = 1; // zero vector + 1
+ rid = 1; /* zero vector + 1 */
sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
&rid, RF_SHAREABLE | RF_ACTIVE);
if (sc->res == NULL) {
- device_printf(dev,"Unable to allocate"
+ device_printf(dev, "Unable to allocate"
" bus resource: AQ interrupt \n");
goto fail;
}
@@ -1366,21 +1375,11 @@ ixlv_free_pci_resources(struct ixlv_sc *sc)
}
early:
- /* Clean the AdminQ interrupt */
- if (sc->tag != NULL) {
- bus_teardown_intr(dev, sc->res, sc->tag);
- sc->tag = NULL;
- }
- if (sc->res != NULL) {
- bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
- sc->res = NULL;
- }
-
pci_release_msi(dev);
if (sc->msix_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
- PCIR_BAR(IXL_BAR), sc->msix_mem);
+ PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem);
if (sc->pci_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
@@ -1650,8 +1649,6 @@ ixlv_setup_queues(struct ixlv_sc *sc)
que->num_desc = ixlv_ringsz;
que->me = i;
que->vsi = vsi;
- /* mark the queue as active */
- vsi->active_queues |= (u64)1 << que->me;
txr = &que->txr;
txr->que = que;
@@ -1854,6 +1851,35 @@ ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
return (f);
}
+static int
+ixlv_teardown_adminq_msix(struct ixlv_sc *sc)
+{
+ device_t dev = sc->dev;
+ int error = 0;
+
+ if (sc->tag != NULL) {
+ error = bus_teardown_intr(dev, sc->res, sc->tag);
+ if (error) {
+ device_printf(dev, "bus_teardown_intr() for"
+ " interrupt 0 failed\n");
+ }
+ sc->tag = NULL;
+ }
+ if (sc->res != NULL) {
+ error = bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
+ if (error) {
+ device_printf(dev, "bus_release_resource() for"
+ " interrupt 0 failed\n");
+ }
+ sc->res = NULL;
+ }
+
+ return (0);
+}
+
/*
** Admin Queue interrupt handler
*/
@@ -2024,7 +2050,7 @@ ixlv_set_queue_rx_itr(struct ixl_queue *que)
/* do an exponential smoothing */
rx_itr = (10 * rx_itr * rxr->itr) /
((9 * rx_itr) + rxr->itr);
- rxr->itr = rx_itr & IXL_MAX_ITR;
+ rxr->itr = min(rx_itr, IXL_MAX_ITR);
wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
que->me), rxr->itr);
}
@@ -2097,7 +2123,7 @@ ixlv_set_queue_tx_itr(struct ixl_queue *que)
/* do an exponential smoothing */
tx_itr = (10 * tx_itr * txr->itr) /
((9 * tx_itr) + txr->itr);
- txr->itr = tx_itr & IXL_MAX_ITR;
+ txr->itr = min(tx_itr, IXL_MAX_ITR);
wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
que->me), txr->itr);
}
@@ -2414,8 +2440,10 @@ ixlv_local_timer(void *arg)
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
device_t dev = sc->dev;
+ struct tx_ring *txr;
int hung = 0;
u32 mask, val;
+ s32 timer, new_timer;
IXLV_CORE_LOCK_ASSERT(sc);
@@ -2445,41 +2473,40 @@ ixlv_local_timer(void *arg)
I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
- for (int i = 0; i < vsi->num_queues; i++,que++) {
- /* Any queues with outstanding work get a sw irq */
- if (que->busy)
- wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
- /*
- ** Each time txeof runs without cleaning, but there
- ** are uncleaned descriptors it increments busy. If
- ** we get to 5 we declare it hung.
- */
- if (que->busy == IXL_QUEUE_HUNG) {
- ++hung;
- /* Mark the queue as inactive */
- vsi->active_queues &= ~((u64)1 << que->me);
- continue;
- } else {
- /* Check if we've come back from hung */
- if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
- vsi->active_queues |= ((u64)1 << que->me);
- }
- if (que->busy >= IXL_MAX_TX_BUSY) {
- device_printf(dev,"Warning queue %d "
- "appears to be hung!\n", i);
- que->busy = IXL_QUEUE_HUNG;
- ++hung;
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ txr = &que->txr;
+ timer = atomic_load_acq_32(&txr->watchdog_timer);
+ if (timer > 0) {
+ new_timer = timer - hz;
+ if (new_timer <= 0) {
+ atomic_store_rel_32(&txr->watchdog_timer, -1);
+ device_printf(dev, "WARNING: queue %d "
+ "appears to be hung!\n", que->me);
+ ++hung;
+ } else {
+ /*
+ * If the cmpset fails, something in the TX path has updated
+ * the watchdog since we read it, so the queue is still making
+ * progress and the timer does not need to count down.
+ */
+ atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
+ /* Any queues with outstanding work get a sw irq */
+ wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
+ }
}
}
- /* Only reset when all queues show hung */
- if (hung == vsi->num_queues)
+ /* Reset if any queue appears hung */
+ if (hung)
goto hung;
+
callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
return;
hung:
- device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
+ device_printf(dev, "WARNING: Resetting!\n");
sc->init_state = IXLV_RESET_REQUIRED;
+ sc->watchdog_events++;
+ ixlv_stop(sc);
ixlv_init_locked(sc);
}
@@ -2634,7 +2661,7 @@ ixlv_config_rss_reg(struct ixlv_sc *sc)
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
- set_hena = IXL_DEFAULT_RSS_HENA;
+ set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
@@ -2801,6 +2828,7 @@ ixlv_do_adminq_locked(struct ixlv_sc *sc)
u16 result = 0;
u32 reg, oldreg;
i40e_status ret;
+ bool aq_error = false;
IXLV_CORE_LOCK_ASSERT(sc);
@@ -2823,14 +2851,17 @@ ixlv_do_adminq_locked(struct ixlv_sc *sc)
if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
device_printf(dev, "ARQ VF Error detected\n");
reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
+ aq_error = true;
}
if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
device_printf(dev, "ARQ Overflow Error detected\n");
reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
+ aq_error = true;
}
if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
device_printf(dev, "ARQ Critical Error detected\n");
reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
+ aq_error = true;
}
if (oldreg != reg)
wr32(hw, hw->aq.arq.len, reg);
@@ -2839,18 +2870,28 @@ ixlv_do_adminq_locked(struct ixlv_sc *sc)
if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
device_printf(dev, "ASQ VF Error detected\n");
reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
+ aq_error = true;
}
if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
device_printf(dev, "ASQ Overflow Error detected\n");
reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
+ aq_error = true;
}
if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
device_printf(dev, "ASQ Critical Error detected\n");
reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
+ aq_error = true;
}
if (oldreg != reg)
wr32(hw, hw->aq.asq.len, reg);
+ if (aq_error) {
+ /* Need to reset adapter */
+ device_printf(dev, "WARNING: Resetting!\n");
+ sc->init_state = IXLV_RESET_REQUIRED;
+ ixlv_stop(sc);
+ ixlv_init_locked(sc);
+ }
ixlv_enable_adminq_irq(hw);
}
@@ -2977,6 +3018,9 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
sizeof(struct ixl_queue),
ixlv_sysctl_qrx_tail_handler, "IU",
"Queue Receive Descriptor Tail");
+ SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "watchdog_timer",
+ CTLFLAG_RD, &(txr.watchdog_timer), 0,
+ "Ticks before watchdog event is triggered");
#endif
}
}
diff --git a/sys/dev/ixl/ixl.h b/sys/dev/ixl/ixl.h
index 1a92edbc15e7..28340ef201c9 100644
--- a/sys/dev/ixl/ixl.h
+++ b/sys/dev/ixl/ixl.h
@@ -39,6 +39,7 @@
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
+#include "opt_ixl.h"
#include <sys/param.h>
#include <sys/systm.h>
@@ -51,6 +52,7 @@
#include <sys/module.h>
#include <sys/sockio.h>
#include <sys/eventhandler.h>
+#include <sys/syslog.h>
#include <net/if.h>
#include <net/if_var.h>
@@ -170,6 +172,7 @@ enum ixl_dbg_mask {
IXL_DBG_IOV_VC = 0x00002000,
IXL_DBG_SWITCH_INFO = 0x00010000,
+ IXL_DBG_I2C = 0x00020000,
IXL_DBG_ALL = 0xFFFFFFFF
};
@@ -184,7 +187,7 @@ enum ixl_dbg_mask {
* Tx descriptors are always 16 bytes, but Rx descriptors can be 32 bytes.
* The driver currently always uses 32 byte Rx descriptors.
*/
-#define DEFAULT_RING 1024
+#define IXL_DEFAULT_RING 1024
#define IXL_MAX_RING 8160
#define IXL_MIN_RING 32
#define IXL_RING_INCREMENT 32
@@ -216,7 +219,7 @@ enum ixl_dbg_mask {
#define MAX_MULTICAST_ADDR 128
-#define IXL_BAR 3
+#define IXL_MSIX_BAR 3
#define IXL_ADM_LIMIT 2
#define IXL_TSO_SIZE 65535
#define IXL_AQ_BUF_SZ ((u32) 4096)
@@ -231,6 +234,7 @@ enum ixl_dbg_mask {
#define IXL_MAX_TSO_SEGS 128
#define IXL_SPARSE_CHAIN 6
#define IXL_QUEUE_HUNG 0x80000000
+#define IXL_MIN_TSO_MSS 64
#define IXL_RSS_KEY_SIZE_REG 13
#define IXL_RSS_KEY_SIZE (IXL_RSS_KEY_SIZE_REG * 4)
@@ -252,13 +256,15 @@ enum ixl_dbg_mask {
#define IXL_NVM_VERSION_HI_MASK (0xf << IXL_NVM_VERSION_HI_SHIFT)
/*
- * Interrupt Moderation parameters
+ * Interrupt Moderation parameters
+ * Multiply ITR values by 2 for real ITR value
*/
-#define IXL_MAX_ITR 0x07FF
+#define IXL_MAX_ITR 0x0FF0
#define IXL_ITR_100K 0x0005
#define IXL_ITR_20K 0x0019
#define IXL_ITR_8K 0x003E
#define IXL_ITR_4K 0x007A
+#define IXL_ITR_1K 0x01F4
#define IXL_ITR_DYNAMIC 0x8000
#define IXL_LOW_LATENCY 0
#define IXL_AVE_LATENCY 1
@@ -311,7 +317,7 @@ enum ixl_dbg_mask {
#define IXL_END_OF_INTR_LNKLST 0x7FF
-#define IXL_DEFAULT_RSS_HENA (\
+#define IXL_DEFAULT_RSS_HENA_BASE (\
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
@@ -324,6 +330,17 @@ enum ixl_dbg_mask {
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+#define IXL_DEFAULT_RSS_HENA_XL710 IXL_DEFAULT_RSS_HENA_BASE
+
+#define IXL_DEFAULT_RSS_HENA_X722 (\
+ IXL_DEFAULT_RSS_HENA_BASE | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
+
#define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
#define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
@@ -429,6 +446,7 @@ struct tx_ring {
bus_dma_tag_t tso_tag;
char mtx_name[16];
struct buf_ring *br;
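+	/* Armed by the TX path and counted down by the local timer; -1 once the queue is declared hung */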
+ s32 watchdog_timer;
/* Used for Dynamic ITR calculation */
u32 packets;
@@ -488,7 +506,6 @@ struct ixl_queue {
struct resource *res;
void *tag;
int num_desc; /* both tx and rx */
- int busy;
struct tx_ring txr;
struct rx_ring rxr;
struct task task;
@@ -503,6 +520,7 @@ struct ixl_queue {
u64 mbuf_pkt_failed;
u64 tx_dmamap_failed;
u64 dropped_pkts;
+ u64 mss_too_small;
};
/*
@@ -563,7 +581,6 @@ struct ixl_vsi {
u64 hw_filters_add;
/* Misc. */
- u64 active_queues;
u64 flags;
struct sysctl_oid *vsi_node;
};
diff --git a/sys/dev/ixl/ixl_iw.c b/sys/dev/ixl/ixl_iw.c
new file mode 100644
index 000000000000..e1b99e48eb48
--- /dev/null
+++ b/sys/dev/ixl/ixl_iw.c
@@ -0,0 +1,469 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "ixl.h"
+#include "ixl_pf.h"
+#include "ixl_iw.h"
+#include "ixl_iw_int.h"
+
+#ifdef IXL_IW
+
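+/* The iWARP client uses the trailing iw_msix vectors of the PF's MSI-X range: [msix - iw_msix, msix). */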
+#define IXL_IW_VEC_BASE(pf) ((pf)->msix - (pf)->iw_msix)
+#define IXL_IW_VEC_COUNT(pf) ((pf)->iw_msix)
+#define IXL_IW_VEC_LIMIT(pf) ((pf)->msix)
+
+extern int ixl_enable_iwarp;
+
+static struct ixl_iw_state ixl_iw;
+static int ixl_iw_ref_cnt;
+
+static void
+ixl_iw_pf_msix_reset(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+ int vec;
+
+ for (vec = IXL_IW_VEC_BASE(pf); vec < IXL_IW_VEC_LIMIT(pf); vec++) {
+ reg = I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK;
+ wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);
+ }
+
+ return;
+}
+
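+/*
+ * Taskqueue handler: compare the scheduled iWARP state with the current one
+ * under the list mutex, then call the client's init or stop callback outside
+ * the lock and record the resulting state.
+ */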
+static void
+ixl_iw_invoke_op(void *context, int pending)
+{
+ struct ixl_iw_pf_entry *pf_entry = (struct ixl_iw_pf_entry *)context;
+ struct ixl_iw_pf info;
+ bool initialize;
+ int err;
+
+ INIT_DEBUGOUT("begin");
+
+ mtx_lock(&ixl_iw.mtx);
+ if ((pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) &&
+ (pf_entry->state.iw_current == IXL_IW_PF_STATE_OFF))
+ initialize = true;
+ else if ((pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_OFF) &&
+ (pf_entry->state.iw_current == IXL_IW_PF_STATE_ON))
+ initialize = false;
+ else {
+ /* nothing to be done, so finish here */
+ mtx_unlock(&ixl_iw.mtx);
+ return;
+ }
+ info = pf_entry->pf_info;
+ mtx_unlock(&ixl_iw.mtx);
+
+ if (initialize) {
+ err = ixl_iw.ops->init(&info);
+ if (err)
+ device_printf(pf_entry->pf->dev,
+ "%s: failed to initialize iwarp (err %d)\n",
+ __func__, err);
+ else
+ pf_entry->state.iw_current = IXL_IW_PF_STATE_ON;
+ } else {
+ err = ixl_iw.ops->stop(&info);
+ if (err)
+ device_printf(pf_entry->pf->dev,
+ "%s: failed to stop iwarp (err %d)\n",
+ __func__, err);
+ else {
+ ixl_iw_pf_msix_reset(pf_entry->pf);
+ pf_entry->state.iw_current = IXL_IW_PF_STATE_OFF;
+ }
+ }
+ return;
+}
+
+static void
+ixl_iw_uninit(void)
+{
+ INIT_DEBUGOUT("begin");
+
+ mtx_destroy(&ixl_iw.mtx);
+
+ return;
+}
+
+static void
+ixl_iw_init(void)
+{
+ INIT_DEBUGOUT("begin");
+
+ LIST_INIT(&ixl_iw.pfs);
+ mtx_init(&ixl_iw.mtx, "ixl_iw_pfs", NULL, MTX_DEF);
+ ixl_iw.registered = false;
+
+ return;
+}
+
+/******************************************************************************
+ * if_ixl internal API
+ *****************************************************************************/
+
+int
+ixl_iw_pf_init(struct ixl_pf *pf)
+{
+ struct ixl_iw_pf_entry *pf_entry;
+ struct ixl_iw_pf *pf_info;
+ int err = 0;
+
+ INIT_DEBUGOUT("begin");
+
+ mtx_lock(&ixl_iw.mtx);
+
+ LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
+ if (pf_entry->pf == pf)
+ break;
+ if (pf_entry == NULL) {
+ /* attempt to initialize a PF that has not been attached - something is wrong */
+ device_printf(pf->dev, "%s: PF not found\n", __func__);
+ err = ENOENT;
+ goto out;
+ }
+
+ pf_info = &pf_entry->pf_info;
+
+ pf_info->handle = (void *)pf;
+
+ pf_info->ifp = pf->vsi.ifp;
+ pf_info->dev = pf->dev;
+ pf_info->pci_mem = pf->pci_mem;
+ pf_info->pf_id = pf->hw.pf_id;
+ pf_info->mtu = pf->vsi.ifp->if_mtu;
+
+ pf_info->iw_msix.count = IXL_IW_VEC_COUNT(pf);
+ pf_info->iw_msix.base = IXL_IW_VEC_BASE(pf);
+
+ for (int i = 0; i < IXL_IW_MAX_USER_PRIORITY; i++)
+ pf_info->qs_handle[i] = le16_to_cpu(pf->vsi.info.qs_handle[0]);
+
+ pf_entry->state.pf = IXL_IW_PF_STATE_ON;
+ if (ixl_iw.registered) {
+ pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_ON;
+ taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
+ }
+
+out:
+ mtx_unlock(&ixl_iw.mtx);
+
+ return (err);
+}
+
+void
+ixl_iw_pf_stop(struct ixl_pf *pf)
+{
+ struct ixl_iw_pf_entry *pf_entry;
+
+ INIT_DEBUGOUT("begin");
+
+ mtx_lock(&ixl_iw.mtx);
+
+ LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
+ if (pf_entry->pf == pf)
+ break;
+ if (pf_entry == NULL) {
+ /* attempt to stop a PF that has not been attached - something is wrong */
+ device_printf(pf->dev, "%s: PF not found\n", __func__);
+ goto out;
+ }
+
+ pf_entry->state.pf = IXL_IW_PF_STATE_OFF;
+ if (pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) {
+ pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF;
+ if (ixl_iw.registered)
+ taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
+ }
+
+out:
+ mtx_unlock(&ixl_iw.mtx);
+
+ return;
+}
+
+int
+ixl_iw_pf_attach(struct ixl_pf *pf)
+{
+ struct ixl_iw_pf_entry *pf_entry;
+ int err = 0;
+
+ INIT_DEBUGOUT("begin");
+
+ if (ixl_iw_ref_cnt == 0)
+ ixl_iw_init();
+
+ mtx_lock(&ixl_iw.mtx);
+
+ LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
+ if (pf_entry->pf == pf) {
+ device_printf(pf->dev, "%s: PF already exists\n",
+ __func__);
+ err = EEXIST;
+ goto out;
+ }
+
+ pf_entry = malloc(sizeof(struct ixl_iw_pf_entry),
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (pf_entry == NULL) {
+ device_printf(pf->dev,
+ "%s: failed to allocate memory to attach new PF\n",
+ __func__);
+ err = ENOMEM;
+ goto out;
+ }
+ pf_entry->pf = pf;
+ pf_entry->state.pf = IXL_IW_PF_STATE_OFF;
+ pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF;
+ pf_entry->state.iw_current = IXL_IW_PF_STATE_OFF;
+
+ LIST_INSERT_HEAD(&ixl_iw.pfs, pf_entry, node);
+ ixl_iw_ref_cnt++;
+
+ TASK_INIT(&pf_entry->iw_task, 0, ixl_iw_invoke_op, pf_entry);
+out:
+ mtx_unlock(&ixl_iw.mtx);
+
+ return (err);
+}
+
+int
+ixl_iw_pf_detach(struct ixl_pf *pf)
+{
+ struct ixl_iw_pf_entry *pf_entry;
+ int err = 0;
+
+ INIT_DEBUGOUT("begin");
+
+ mtx_lock(&ixl_iw.mtx);
+
+ LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
+ if (pf_entry->pf == pf)
+ break;
+ if (pf_entry == NULL) {
+ /* attempt to detach a PF that has not been attached - something is wrong */
+ device_printf(pf->dev, "%s: PF not found\n", __func__);
+ err = ENOENT;
+ goto out;
+ }
+
+ if (pf_entry->state.pf != IXL_IW_PF_STATE_OFF) {
+ /* attempt to detach a PF that has not yet been stopped - something is wrong */
+ device_printf(pf->dev, "%s: failed - PF is still active\n",
+ __func__);
+ err = EBUSY;
+ goto out;
+ }
+ LIST_REMOVE(pf_entry, node);
+ free(pf_entry, M_DEVBUF);
+ ixl_iw_ref_cnt--;
+
+out:
+ mtx_unlock(&ixl_iw.mtx);
+
+ if (ixl_iw_ref_cnt == 0)
+ ixl_iw_uninit();
+
+ return (err);
+}
+
+
+/******************************************************************************
+ * API exposed to iw_ixl module
+ *****************************************************************************/
+
+int
+ixl_iw_pf_reset(void *pf_handle)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)pf_handle;
+
+ INIT_DEBUGOUT("begin");
+
+ IXL_PF_LOCK(pf);
+ ixl_init_locked(pf);
+ IXL_PF_UNLOCK(pf);
+
+ return (0);
+}
+
+int
+ixl_iw_pf_msix_init(void *pf_handle,
+ struct ixl_iw_msix_mapping *msix_info)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)pf_handle;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+ int vec, i;
+
+ INIT_DEBUGOUT("begin");
+
+ if ((msix_info->aeq_vector < IXL_IW_VEC_BASE(pf)) ||
+ (msix_info->aeq_vector >= IXL_IW_VEC_LIMIT(pf))) {
+ printf("%s: invalid MSIX vector (%i) for AEQ\n",
+ __func__, msix_info->aeq_vector);
+ return (EINVAL);
+ }
+ reg = I40E_PFINT_AEQCTL_CAUSE_ENA_MASK |
+ (msix_info->aeq_vector << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) |
+ (msix_info->itr_indx << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_AEQCTL, reg);
+
+ for (vec = IXL_IW_VEC_BASE(pf); vec < IXL_IW_VEC_LIMIT(pf); vec++) {
+ for (i = 0; i < msix_info->ceq_cnt; i++)
+ if (msix_info->ceq_vector[i] == vec)
+ break;
+ if (i == msix_info->ceq_cnt) {
+ /* this vector has no CEQ mapped */
+ reg = I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK;
+ wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);
+ } else {
+ reg = (i & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
+ (I40E_QUEUE_TYPE_PE_CEQ <<
+ I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+ wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);
+
+ reg = I40E_PFINT_CEQCTL_CAUSE_ENA_MASK |
+ (vec << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) |
+ (msix_info->itr_indx <<
+ I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) |
+ (IXL_QUEUE_EOL <<
+ I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_CEQCTL(i), reg);
+ }
+ }
+
+ return (0);
+}
+
+int
+ixl_iw_register(struct ixl_iw_ops *ops)
+{
+ struct ixl_iw_pf_entry *pf_entry;
+ int err = 0;
+
+ INIT_DEBUGOUT("begin");
+
+ if (ixl_enable_iwarp == 0) {
+ printf("%s: enable_iwarp is off, registering dropped\n",
+ __func__);
+ return (EACCES);
+ }
+
+ if ((ops->init == NULL) || (ops->stop == NULL)) {
+ printf("%s: invalid iwarp driver ops\n", __func__);
+ return (EINVAL);
+ }
+
+ mtx_lock(&ixl_iw.mtx);
+
+ if (ixl_iw.registered) {
+ printf("%s: iwarp driver already registered\n", __func__);
+ err = EBUSY;
+ goto out;
+ }
+
+ ixl_iw.tq = taskqueue_create("ixl_iw", M_NOWAIT,
+ taskqueue_thread_enqueue, &ixl_iw.tq);
+ if (ixl_iw.tq == NULL) {
+ printf("%s: failed to create queue\n", __func__);
+ err = ENOMEM;
+ goto out;
+ }
+ taskqueue_start_threads(&ixl_iw.tq, 1, PI_NET, "ixl iw");
+
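+ /* Keep a private copy of the client's ops so the caller's structure need not remain valid after registration */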
+ ixl_iw.ops = malloc(sizeof(struct ixl_iw_ops),
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (ixl_iw.ops == NULL) {
+ printf("%s: failed to allocate memory\n", __func__);
+ taskqueue_free(ixl_iw.tq);
+ err = ENOMEM;
+ goto out;
+ }
+
+ ixl_iw.ops->init = ops->init;
+ ixl_iw.ops->stop = ops->stop;
+ ixl_iw.registered = true;
+
+ LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
+ if (pf_entry->state.pf == IXL_IW_PF_STATE_ON) {
+ pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_ON;
+ taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
+ }
+
+out:
+ mtx_unlock(&ixl_iw.mtx);
+
+ return (err);
+}
+
+int
+ixl_iw_unregister(void)
+{
+ struct ixl_iw_pf_entry *pf_entry;
+
+ INIT_DEBUGOUT("begin");
+
+ mtx_lock(&ixl_iw.mtx);
+
+ if (!ixl_iw.registered) {
+ printf("%s: failed - iwarp driver has not been registered\n",
+ __func__);
+ mtx_unlock(&ixl_iw.mtx);
+ return (ENOENT);
+ }
+
+ LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
+ if (pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) {
+ pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF;
+ taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
+ }
+
+ ixl_iw.registered = false;
+
+ mtx_unlock(&ixl_iw.mtx);
+
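+ /* Drain with the lock dropped; the tasks themselves acquire ixl_iw.mtx */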
+ LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
+ taskqueue_drain(ixl_iw.tq, &pf_entry->iw_task);
+ taskqueue_free(ixl_iw.tq);
+ ixl_iw.tq = NULL;
+ free(ixl_iw.ops, M_DEVBUF);
+ ixl_iw.ops = NULL;
+
+ return (0);
+}
+
+#endif /* IXL_IW */
diff --git a/sys/dev/ixl/ixl_iw.h b/sys/dev/ixl/ixl_iw.h
new file mode 100644
index 000000000000..7f4de0aebe76
--- /dev/null
+++ b/sys/dev/ixl/ixl_iw.h
@@ -0,0 +1,75 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IXL_IW_H_
+#define _IXL_IW_H_
+
+#define IXL_IW_MAX_USER_PRIORITY 8
+
+
+struct ixl_iw_msix_mapping {
+ u8 itr_indx;
+ int aeq_vector;
+ int ceq_cnt;
+ int *ceq_vector;
+};
+
+struct ixl_iw_msix {
+ int base;
+ int count;
+};
+
+struct ixl_iw_pf {
+ void *handle;
+ struct ifnet *ifp;
+ device_t dev;
+ struct resource *pci_mem;
+ u8 pf_id;
+ u16 mtu;
+ struct ixl_iw_msix iw_msix;
+ u16 qs_handle[IXL_IW_MAX_USER_PRIORITY];
+};
+
+struct ixl_iw_ops {
+ int (*init)(struct ixl_iw_pf *pf_info);
+ int (*stop)(struct ixl_iw_pf *pf_info);
+};
+
+int ixl_iw_pf_reset(void *pf_handle);
+int ixl_iw_pf_msix_init(void *pf_handle,
+ struct ixl_iw_msix_mapping *msix_info);
+int ixl_iw_register(struct ixl_iw_ops *iw_ops);
+int ixl_iw_unregister(void);
+
+#endif /* _IXL_IW_H_ */
diff --git a/sys/dev/ixl/ixl_iw_int.h b/sys/dev/ixl/ixl_iw_int.h
new file mode 100644
index 000000000000..12fb6ded1c09
--- /dev/null
+++ b/sys/dev/ixl/ixl_iw_int.h
@@ -0,0 +1,71 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IXL_IW_INT_H_
+#define _IXL_IW_INT_H_
+
+enum ixl_iw_pf_state {
+ IXL_IW_PF_STATE_OFF,
+ IXL_IW_PF_STATE_ON
+};
+
+struct ixl_iw_pf_entry_state {
+ enum ixl_iw_pf_state pf;
+ enum ixl_iw_pf_state iw_scheduled;
+ enum ixl_iw_pf_state iw_current;
+};
+
+struct ixl_iw_pf_entry {
+ LIST_ENTRY(ixl_iw_pf_entry) node;
+ struct ixl_pf *pf;
+ struct ixl_iw_pf_entry_state state;
+ struct ixl_iw_pf pf_info;
+ struct task iw_task;
+};
+
+LIST_HEAD(ixl_iw_pfs_head, ixl_iw_pf_entry);
+struct ixl_iw_state {
+ struct ixl_iw_ops *ops;
+ bool registered;
+ struct ixl_iw_pfs_head pfs;
+ struct mtx mtx;
+ struct taskqueue *tq;
+};
+
+int ixl_iw_pf_init(struct ixl_pf *pf);
+void ixl_iw_pf_stop(struct ixl_pf *pf);
+int ixl_iw_pf_attach(struct ixl_pf *pf);
+int ixl_iw_pf_detach(struct ixl_pf *pf);
+
+#endif /* _IXL_IW_INT_H_ */
diff --git a/sys/dev/ixl/ixl_pf.h b/sys/dev/ixl/ixl_pf.h
index 107e30fded5d..4006fc5cb317 100644
--- a/sys/dev/ixl/ixl_pf.h
+++ b/sys/dev/ixl/ixl_pf.h
@@ -79,8 +79,14 @@ struct ixl_pf {
struct callout timer;
int msix;
+#ifdef IXL_IW
+ int iw_msix;
+ bool iw_enabled;
+#endif
int if_flags;
int state;
+ bool init_in_progress;
+ u8 supported_speeds;
struct ixl_pf_qmgr qmgr;
struct ixl_pf_qtag qtag;
@@ -107,6 +113,7 @@ struct ixl_pf {
int advertised_speed;
int fc; /* link flow ctrl setting */
enum ixl_dbg_mask dbg_mask;
+ bool has_i2c;
/* Misc stats maintained by the driver */
u64 watchdog_events;
@@ -145,8 +152,10 @@ struct ixl_pf {
"\t 0x2 - advertise 1G\n" \
"\t 0x4 - advertise 10G\n" \
"\t 0x8 - advertise 20G\n" \
-"\t0x10 - advertise 40G\n\n" \
-"Set to 0 to disable link."
+"\t0x10 - advertise 25G\n" \
+"\t0x20 - advertise 40G\n\n" \
+"Set to 0 to disable link.\n" \
+"Use \"sysctl -x\" to view flags properly."
#define IXL_SYSCTL_HELP_FC \
"\nSet flow control mode using the values below.\n" \
@@ -171,10 +180,11 @@ static char *ixl_fc_string[6] = {
static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
/*** Functions / Macros ***/
-#define I40E_VC_DEBUG(pf, level, ...) \
- do { \
- if ((pf)->vc_debug_lvl >= (level)) \
- device_printf((pf)->dev, __VA_ARGS__); \
+/* Adjust the level here to 10 or over to print stats messages */
+#define I40E_VC_DEBUG(p, level, ...) \
+ do { \
+ if (level < 10) \
+ ixl_dbg(p, IXL_DBG_IOV_VC, ##__VA_ARGS__); \
} while (0)
#define i40e_send_vf_nack(pf, vf, op, st) \
@@ -187,16 +197,25 @@ static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
#define IXL_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx)
#define IXL_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED)
+/* Debug printing */
+#define ixl_dbg(p, m, s, ...) ixl_debug_core(p, m, s, ##__VA_ARGS__)
+void ixl_debug_core(struct ixl_pf *, enum ixl_dbg_mask, char *, ...);
+
/* For stats sysctl naming */
#define QUEUE_NAME_LEN 32
+/* For netmap(4) compatibility */
+#define ixl_disable_intr(vsi) ixl_disable_rings_intr(vsi)
+
/*
* PF-only function declarations
*/
void ixl_set_busmaster(device_t);
+void ixl_set_msix_enable(device_t);
int ixl_setup_interface(device_t, struct ixl_vsi *);
void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
+char * ixl_aq_speed_to_str(enum i40e_aq_link_speed);
void ixl_handle_que(void *context, int pending);
@@ -223,13 +242,10 @@ void ixl_media_status(struct ifnet *, struct ifmediareq *);
int ixl_media_change(struct ifnet *);
int ixl_ioctl(struct ifnet *, u_long, caddr_t);
-void ixl_enable_adminq(struct i40e_hw *);
-void ixl_get_bus_info(struct i40e_hw *, device_t);
-void ixl_disable_adminq(struct i40e_hw *);
void ixl_enable_queue(struct i40e_hw *, int);
void ixl_disable_queue(struct i40e_hw *, int);
-void ixl_enable_legacy(struct i40e_hw *);
-void ixl_disable_legacy(struct i40e_hw *);
+void ixl_enable_intr0(struct i40e_hw *);
+void ixl_disable_intr0(struct i40e_hw *);
void ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf);
void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
u64 *, u64 *);
@@ -239,6 +255,7 @@ void ixl_stat_update32(struct i40e_hw *, u32, bool,
void ixl_stop(struct ixl_pf *);
void ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, struct sysctl_ctx_list *ctx, const char *sysctl_name);
int ixl_get_hw_capabilities(struct ixl_pf *);
+void ixl_link_up_msg(struct ixl_pf *);
void ixl_update_link_status(struct ixl_pf *);
int ixl_allocate_pci_resources(struct ixl_pf *);
int ixl_setup_stations(struct ixl_pf *);
@@ -256,7 +273,7 @@ int ixl_teardown_adminq_msix(struct ixl_pf *);
void ixl_configure_intr0_msix(struct ixl_pf *);
void ixl_configure_queue_intr_msix(struct ixl_pf *);
void ixl_free_adminq_tq(struct ixl_pf *);
-int ixl_assign_vsi_legacy(struct ixl_pf *);
+int ixl_setup_legacy(struct ixl_pf *);
int ixl_init_msix(struct ixl_pf *);
void ixl_configure_itr(struct ixl_pf *);
void ixl_configure_legacy(struct ixl_pf *);
@@ -271,7 +288,9 @@ void ixl_handle_mdd_event(struct ixl_pf *);
void ixl_add_hw_stats(struct ixl_pf *);
void ixl_update_stats_counters(struct ixl_pf *);
void ixl_pf_reset_stats(struct ixl_pf *);
-void ixl_dbg(struct ixl_pf *, enum ixl_dbg_mask, char *, ...);
+void ixl_get_bus_info(struct ixl_pf *pf);
+int ixl_aq_get_link_status(struct ixl_pf *,
+ struct i40e_aqc_get_link_status *);
int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
void ixl_handle_empr_reset(struct ixl_pf *);
@@ -295,10 +314,9 @@ int ixl_enable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16);
int ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16);
void ixl_update_eth_stats(struct ixl_vsi *);
-void ixl_disable_intr(struct ixl_vsi *);
void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
int ixl_initialize_vsi(struct ixl_vsi *);
-void ixl_add_ifmedia(struct ixl_vsi *, u32);
+void ixl_add_ifmedia(struct ixl_vsi *, u64);
int ixl_setup_queue_msix(struct ixl_vsi *);
int ixl_setup_queue_tqs(struct ixl_vsi *);
int ixl_teardown_queue_msix(struct ixl_vsi *);
@@ -319,4 +337,13 @@ void ixl_free_mac_filters(struct ixl_vsi *vsi);
void ixl_update_vsi_stats(struct ixl_vsi *);
void ixl_vsi_reset_stats(struct ixl_vsi *);
+/*
+ * I2C Function prototypes
+ */
+int ixl_find_i2c_interface(struct ixl_pf *);
+s32 ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
+ u8 dev_addr, u8 data);
+
#endif /* _IXL_PF_H_ */
diff --git a/sys/dev/ixl/ixl_pf_i2c.c b/sys/dev/ixl/ixl_pf_i2c.c
new file mode 100644
index 000000000000..23531274dbcc
--- /dev/null
+++ b/sys/dev/ixl/ixl_pf_i2c.c
@@ -0,0 +1,605 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "ixl_pf.h"
+
+#define IXL_I2C_T_RISE 1
+#define IXL_I2C_T_FALL 1
+#define IXL_I2C_T_SU_DATA 1
+#define IXL_I2C_T_SU_STA 5
+#define IXL_I2C_T_SU_STO 4
+#define IXL_I2C_T_HD_STA 4
+#define IXL_I2C_T_LOW 5
+#define IXL_I2C_T_HIGH 4
+#define IXL_I2C_T_BUF 5
+#define IXL_I2C_CLOCK_STRETCHING_TIMEOUT 500
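+/* The IXL_I2C_T_* values are microsecond delays used while bit-banging; the clock-stretching timeout is a poll count used in ixl_raise_i2c_clk() */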
+
+#define IXL_I2C_REG(_hw) \
+ I40E_GLGEN_I2CPARAMS(((struct i40e_osdep *)(_hw)->back)->i2c_intfc_num)
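+/* Selects the GLGEN_I2CPARAMS register for the I2C interface number discovered at attach time */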
+
+
+static s32 ixl_set_i2c_data(struct ixl_pf *pf, u32 *i2cctl, bool data);
+static bool ixl_get_i2c_data(struct ixl_pf *pf, u32 *i2cctl);
+static void ixl_raise_i2c_clk(struct ixl_pf *pf, u32 *i2cctl);
+static void ixl_lower_i2c_clk(struct ixl_pf *pf, u32 *i2cctl);
+static s32 ixl_clock_out_i2c_bit(struct ixl_pf *pf, bool data);
+static s32 ixl_get_i2c_ack(struct ixl_pf *pf);
+static s32 ixl_clock_out_i2c_byte(struct ixl_pf *pf, u8 data);
+static s32 ixl_clock_in_i2c_bit(struct ixl_pf *pf, bool *data);
+static s32 ixl_clock_in_i2c_byte(struct ixl_pf *pf, u8 *data);
+static void ixl_i2c_bus_clear(struct ixl_pf *pf);
+static void ixl_i2c_start(struct ixl_pf *pf);
+static void ixl_i2c_stop(struct ixl_pf *pf);
+
+/**
+ * ixl_i2c_bus_clear - Clears the I2C bus
+ * @hw: pointer to hardware structure
+ *
+ * Clears the I2C bus by sending nine clock pulses.
+ * Used when data line is stuck low.
+ **/
+static void
+ixl_i2c_bus_clear(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 i2cctl = rd32(hw, IXL_I2C_REG(hw));
+ u32 i;
+
+ DEBUGFUNC("ixl_i2c_bus_clear");
+
+ ixl_i2c_start(pf);
+
+ ixl_set_i2c_data(pf, &i2cctl, 1);
+
+ for (i = 0; i < 9; i++) {
+ ixl_raise_i2c_clk(pf, &i2cctl);
+
+ /* Min high period of clock is 4us */
+ i40e_usec_delay(IXL_I2C_T_HIGH);
+
+ ixl_lower_i2c_clk(pf, &i2cctl);
+
+ /* Min low period of clock is 4.7us */
+ i40e_usec_delay(IXL_I2C_T_LOW);
+ }
+
+ ixl_i2c_start(pf);
+
+ /* Put the i2c bus back to default state */
+ ixl_i2c_stop(pf);
+}
+
+/**
+ * ixl_i2c_stop - Sets I2C stop condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ **/
+static void
+ixl_i2c_stop(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 i2cctl = rd32(hw, IXL_I2C_REG(hw));
+
+ DEBUGFUNC("ixl_i2c_stop");
+
+ /* Stop condition must begin with data low and clock high */
+ ixl_set_i2c_data(pf, &i2cctl, 0);
+ ixl_raise_i2c_clk(pf, &i2cctl);
+
+ /* Setup time for stop condition (4us) */
+ i40e_usec_delay(IXL_I2C_T_SU_STO);
+
+ ixl_set_i2c_data(pf, &i2cctl, 1);
+
+ /* bus free time between stop and start (4.7us) */
+ i40e_usec_delay(IXL_I2C_T_BUF);
+}
+
+/**
+ * ixl_clock_in_i2c_byte - Clocks in one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: pointer to the byte read from the bus
+ *
+ * Clocks in one byte data via I2C data/clock
+ **/
+static s32
+ixl_clock_in_i2c_byte(struct ixl_pf *pf, u8 *data)
+{
+ s32 i;
+ bool bit = 0;
+
+ DEBUGFUNC("ixl_clock_in_i2c_byte");
+
+ for (i = 7; i >= 0; i--) {
+ ixl_clock_in_i2c_bit(pf, &bit);
+ *data |= bit << i;
+ }
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * ixl_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: read data value
+ *
+ * Clocks in one bit via I2C data/clock
+ **/
+static s32
+ixl_clock_in_i2c_bit(struct ixl_pf *pf, bool *data)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 i2cctl = rd32(hw, IXL_I2C_REG(hw));
+
+ DEBUGFUNC("ixl_clock_in_i2c_bit");
+
+ ixl_raise_i2c_clk(pf, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ i40e_usec_delay(IXL_I2C_T_HIGH);
+
+ i2cctl = rd32(hw, IXL_I2C_REG(hw));
+ i2cctl |= I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK;
+ wr32(hw, IXL_I2C_REG(hw), i2cctl);
+ ixl_flush(hw);
+
+ i2cctl = rd32(hw, IXL_I2C_REG(hw));
+ *data = ixl_get_i2c_data(pf, &i2cctl);
+
+ ixl_lower_i2c_clk(pf, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
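+/* X722 devices support additional packet classifier types, so their default hash-enable set is a superset of the base list */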
+ i40e_usec_delay(IXL_I2C_T_LOW);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * ixl_get_i2c_ack - Polls for I2C ACK
+ * @hw: pointer to hardware structure
+ *
+ * Raises the clock and polls the data line for the slave's ACK (pulled low)
+ **/
+static s32
+ixl_get_i2c_ack(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ s32 status = I40E_SUCCESS;
+ u32 i = 0;
+ u32 i2cctl = rd32(hw, IXL_I2C_REG(hw));
+ u32 timeout = 10;
+ bool ack = 1;
+
+ ixl_raise_i2c_clk(pf, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ i40e_usec_delay(IXL_I2C_T_HIGH);
+
+ i2cctl = rd32(hw, IXL_I2C_REG(hw));
+ i2cctl |= I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK;
+ wr32(hw, IXL_I2C_REG(hw), i2cctl);
+ ixl_flush(hw);
+
+ /* Poll for ACK. Note that ACK in I2C spec is
+ * transition from 1 to 0 */
+ for (i = 0; i < timeout; i++) {
+ i2cctl = rd32(hw, IXL_I2C_REG(hw));
+ ack = ixl_get_i2c_data(pf, &i2cctl);
+
+ i40e_usec_delay(1);
+ if (!ack)
+ break;
+ }
+
+ if (ack) {
+ ixl_dbg(pf, IXL_DBG_I2C, "I2C ack was not received.\n");
+ status = I40E_ERR_PHY;
+ }
+
+ ixl_lower_i2c_clk(pf, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ i40e_usec_delay(IXL_I2C_T_LOW);
+
+ return status;
+}
+
+/**
+ * ixl_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: data value to write
+ *
+ * Clocks out one bit via I2C data/clock
+ **/
+static s32
+ixl_clock_out_i2c_bit(struct ixl_pf *pf, bool data)
+{
+ struct i40e_hw *hw = &pf->hw;
+ s32 status;
+ u32 i2cctl = rd32(hw, IXL_I2C_REG(hw));
+
+ status = ixl_set_i2c_data(pf, &i2cctl, data);
+ if (status == I40E_SUCCESS) {
+ ixl_raise_i2c_clk(pf, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ i40e_usec_delay(IXL_I2C_T_HIGH);
+
+ ixl_lower_i2c_clk(pf, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us.
+ * This also takes care of the data hold time.
+ */
+ i40e_usec_delay(IXL_I2C_T_LOW);
+ } else {
+ status = I40E_ERR_PHY;
+ ixl_dbg(pf, IXL_DBG_I2C, "I2C data was not set to %#x\n", data);
+ }
+
+ return status;
+}
+
+/**
+ * ixl_clock_out_i2c_byte - Clocks out one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: data byte clocked out
+ *
+ * Clocks out one byte data via I2C data/clock
+ **/
+static s32
+ixl_clock_out_i2c_byte(struct ixl_pf *pf, u8 data)
+{
+ struct i40e_hw *hw = &pf->hw;
+ s32 status = I40E_SUCCESS;
+ s32 i;
+ u32 i2cctl;
+ bool bit;
+
+ DEBUGFUNC("ixl_clock_out_i2c_byte");
+
+ for (i = 7; i >= 0; i--) {
+ bit = (data >> i) & 0x1;
+ status = ixl_clock_out_i2c_bit(pf, bit);
+
+ if (status != I40E_SUCCESS)
+ break;
+ }
+
+ /* Release SDA line (set high) */
+ i2cctl = rd32(hw, IXL_I2C_REG(hw));
+ i2cctl |= I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK;
+ i2cctl &= ~(I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK);
+ wr32(hw, IXL_I2C_REG(hw), i2cctl);
+ ixl_flush(hw);
+
+ return status;
+}
+
+/**
+ * ixl_lower_i2c_clk - Lowers the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Lowers the I2C clock line '1'->'0'
+ **/
+static void
+ixl_lower_i2c_clk(struct ixl_pf *pf, u32 *i2cctl)
+{
+ struct i40e_hw *hw = &pf->hw;
+
+ *i2cctl &= ~(I40E_GLGEN_I2CPARAMS_CLK_MASK);
+ *i2cctl &= ~(I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK);
+
+ wr32(hw, IXL_I2C_REG(hw), *i2cctl);
+ ixl_flush(hw);
+
+ /* SCL fall time (300ns) */
+ i40e_usec_delay(IXL_I2C_T_FALL);
+}
+
+/**
+ * ixl_raise_i2c_clk - Raises the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Raises the I2C clock line '0'->'1'
+ **/
+static void
+ixl_raise_i2c_clk(struct ixl_pf *pf, u32 *i2cctl)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 i = 0;
+ u32 timeout = IXL_I2C_CLOCK_STRETCHING_TIMEOUT;
+ u32 i2cctl_r = 0;
+
+ for (i = 0; i < timeout; i++) {
+ *i2cctl |= I40E_GLGEN_I2CPARAMS_CLK_MASK;
+ *i2cctl &= ~(I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK);
+
+ wr32(hw, IXL_I2C_REG(hw), *i2cctl);
+ ixl_flush(hw);
+ /* SCL rise time (1000ns) */
+ i40e_usec_delay(IXL_I2C_T_RISE);
+
+ i2cctl_r = rd32(hw, IXL_I2C_REG(hw));
+ if (i2cctl_r & I40E_GLGEN_I2CPARAMS_CLK_IN_MASK)
+ break;
+ }
+}
+
+/**
+ * ixl_get_i2c_data - Reads the I2C SDA data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Returns the I2C data bit value
+ **/
+static bool
+ixl_get_i2c_data(struct ixl_pf *pf, u32 *i2cctl)
+{
+ bool data;
+
+ if (*i2cctl & I40E_GLGEN_I2CPARAMS_DATA_IN_MASK)
+ data = 1;
+ else
+ data = 0;
+
+ return data;
+}
+
+/**
+ * ixl_set_i2c_data - Sets the I2C data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ * @data: I2C data value (0 or 1) to set
+ *
+ * Sets the I2C data bit
+ **/
+static s32
+ixl_set_i2c_data(struct ixl_pf *pf, u32 *i2cctl, bool data)
+{
+ struct i40e_hw *hw = &pf->hw;
+ s32 status = I40E_SUCCESS;
+
+ DEBUGFUNC("ixl_set_i2c_data");
+
+ if (data)
+ *i2cctl |= I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK;
+ else
+ *i2cctl &= ~(I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK);
+ *i2cctl &= ~(I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK);
+
+ wr32(hw, IXL_I2C_REG(hw), *i2cctl);
+ ixl_flush(hw);
+
+ /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
+ i40e_usec_delay(IXL_I2C_T_RISE + IXL_I2C_T_FALL + IXL_I2C_T_SU_DATA);
+
+ /* Verify data was set correctly */
+ *i2cctl = rd32(hw, IXL_I2C_REG(hw));
+ if (data != ixl_get_i2c_data(pf, i2cctl)) {
+ status = I40E_ERR_PHY;
+ ixl_dbg(pf, IXL_DBG_I2C, "Error - I2C data was not set to %X.\n", data);
+ }
+
+ return status;
+}
+
+/**
+ * ixl_i2c_start - Sets I2C start condition
+ * Sets I2C start condition (High -> Low on SDA while SCL is High)
+ **/
+static void
+ixl_i2c_start(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 i2cctl = rd32(hw, IXL_I2C_REG(hw));
+
+ DEBUGFUNC("ixl_i2c_start");
+
+ /* Start condition must begin with data and clock high */
+ ixl_set_i2c_data(pf, &i2cctl, 1);
+ ixl_raise_i2c_clk(pf, &i2cctl);
+
+ /* Setup time for start condition (4.7us) */
+ i40e_usec_delay(IXL_I2C_T_SU_STA);
+
+ ixl_set_i2c_data(pf, &i2cctl, 0);
+
+ /* Hold time for start condition (4us) */
+ i40e_usec_delay(IXL_I2C_T_HD_STA);
+
+ ixl_lower_i2c_clk(pf, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ i40e_usec_delay(IXL_I2C_T_LOW);
+
+}
+
+/**
+ * ixl_read_i2c_byte - Reads an 8-bit value over I2C
+ **/
+s32
+ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 max_retry = 10;
+ u32 retry = 0;
+ bool nack = 1;
+ s32 status;
+ *data = 0;
+
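+ /* Take bit-bang control of the I2C interface for this transfer; it is released again at the 'done' label */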
+ u32 i2cctl = rd32(hw, IXL_I2C_REG(hw));
+ i2cctl |= I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK;
+ wr32(hw, IXL_I2C_REG(hw), i2cctl);
+ ixl_flush(hw);
+
+ do {
+ ixl_i2c_start(pf);
+
+ /* Device Address and write indication */
+ status = ixl_clock_out_i2c_byte(pf, dev_addr);
+ if (status != I40E_SUCCESS) {
+ ixl_dbg(pf, IXL_DBG_I2C, "dev_addr clock out error\n");
+ goto fail;
+ }
+
+ status = ixl_get_i2c_ack(pf);
+ if (status != I40E_SUCCESS) {
+ ixl_dbg(pf, IXL_DBG_I2C, "dev_addr i2c ack error\n");
+ goto fail;
+ }
+
+ status = ixl_clock_out_i2c_byte(pf, byte_offset);
+ if (status != I40E_SUCCESS) {
+ ixl_dbg(pf, IXL_DBG_I2C, "byte_offset clock out error\n");
+ goto fail;
+ }
+
+ status = ixl_get_i2c_ack(pf);
+ if (status != I40E_SUCCESS) {
+ ixl_dbg(pf, IXL_DBG_I2C, "byte_offset i2c ack error\n");
+ goto fail;
+ }
+
+ ixl_i2c_start(pf);
+
+ /* Device Address and read indication */
+ status = ixl_clock_out_i2c_byte(pf, (dev_addr | 0x1));
+ if (status != I40E_SUCCESS)
+ goto fail;
+
+ status = ixl_get_i2c_ack(pf);
+ if (status != I40E_SUCCESS)
+ goto fail;
+
+ status = ixl_clock_in_i2c_byte(pf, data);
+ if (status != I40E_SUCCESS)
+ goto fail;
+
+ status = ixl_clock_out_i2c_bit(pf, nack);
+ if (status != I40E_SUCCESS)
+ goto fail;
+
+ ixl_i2c_stop(pf);
+ status = I40E_SUCCESS;
+ goto done;
+
+fail:
+ ixl_i2c_bus_clear(pf);
+ i40e_msec_delay(100);
+ retry++;
+ if (retry < max_retry)
+ ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error - Retrying.\n");
+ else
+ ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error.\n");
+
+ } while (retry < max_retry);
+done:
+ i2cctl = rd32(hw, IXL_I2C_REG(hw));
+ i2cctl &= ~I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK;
+ wr32(hw, IXL_I2C_REG(hw), i2cctl);
+ ixl_flush(hw);
+
+ return status;
+}
+
+/**
+ * ixl_write_i2c_byte - Writes an 8-bit value over I2C
+ **/
+s32
+ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ struct i40e_hw *hw = &pf->hw;
+ s32 status = I40E_SUCCESS;
+ u32 max_retry = 1;
+ u32 retry = 0;
+
+ u32 i2cctl = rd32(hw, IXL_I2C_REG(hw));
+ i2cctl |= I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK;
+ wr32(hw, IXL_I2C_REG(hw), i2cctl);
+ ixl_flush(hw);
+
+ do {
+ ixl_i2c_start(pf);
+
+ status = ixl_clock_out_i2c_byte(pf, dev_addr);
+ if (status != I40E_SUCCESS)
+ goto fail;
+
+ status = ixl_get_i2c_ack(pf);
+ if (status != I40E_SUCCESS)
+ goto fail;
+
+ status = ixl_clock_out_i2c_byte(pf, byte_offset);
+ if (status != I40E_SUCCESS)
+ goto fail;
+
+ status = ixl_get_i2c_ack(pf);
+ if (status != I40E_SUCCESS)
+ goto fail;
+
+ status = ixl_clock_out_i2c_byte(pf, data);
+ if (status != I40E_SUCCESS)
+ goto fail;
+
+ status = ixl_get_i2c_ack(pf);
+ if (status != I40E_SUCCESS)
+ goto fail;
+
+ ixl_i2c_stop(pf);
+ goto write_byte_out;
+
+fail:
+ ixl_i2c_bus_clear(pf);
+ i40e_msec_delay(100);
+ retry++;
+ if (retry < max_retry)
+ ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error - Retrying.\n");
+ else
+ ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error.\n");
+ } while (retry < max_retry);
+
+write_byte_out:
+ i2cctl = rd32(hw, IXL_I2C_REG(hw));
+ i2cctl &= ~I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK;
+ wr32(hw, IXL_I2C_REG(hw), i2cctl);
+ ixl_flush(hw);
+
+ return status;
+}
+
diff --git a/sys/dev/ixl/ixl_pf_iov.c b/sys/dev/ixl/ixl_pf_iov.c
index a8c8b29cc605..2662d0df65a2 100644
--- a/sys/dev/ixl/ixl_pf_iov.c
+++ b/sys/dev/ixl/ixl_pf_iov.c
@@ -42,7 +42,6 @@ static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
static bool ixl_zero_mac(const uint8_t *addr);
static bool ixl_bcast_mac(const uint8_t *addr);
-static const char * ixl_vc_opcode_str(uint16_t op);
static int ixl_vc_opcode_level(uint16_t opcode);
static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
@@ -421,58 +420,6 @@ ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
ixl_flush(hw);
}
-static const char *
-ixl_vc_opcode_str(uint16_t op)
-{
-
- switch (op) {
- case I40E_VIRTCHNL_OP_VERSION:
- return ("VERSION");
- case I40E_VIRTCHNL_OP_RESET_VF:
- return ("RESET_VF");
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
- return ("GET_VF_RESOURCES");
- case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
- return ("CONFIG_TX_QUEUE");
- case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
- return ("CONFIG_RX_QUEUE");
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- return ("CONFIG_VSI_QUEUES");
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
- return ("CONFIG_IRQ_MAP");
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
- return ("ENABLE_QUEUES");
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
- return ("DISABLE_QUEUES");
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
- return ("ADD_ETHER_ADDRESS");
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
- return ("DEL_ETHER_ADDRESS");
- case I40E_VIRTCHNL_OP_ADD_VLAN:
- return ("ADD_VLAN");
- case I40E_VIRTCHNL_OP_DEL_VLAN:
- return ("DEL_VLAN");
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- return ("CONFIG_PROMISCUOUS_MODE");
- case I40E_VIRTCHNL_OP_GET_STATS:
- return ("GET_STATS");
- case I40E_VIRTCHNL_OP_FCOE:
- return ("FCOE");
- case I40E_VIRTCHNL_OP_EVENT:
- return ("EVENT");
- case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
- return ("CONFIG_RSS_KEY");
- case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
- return ("CONFIG_RSS_LUT");
- case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
- return ("GET_RSS_HENA_CAPS");
- case I40E_VIRTCHNL_OP_SET_RSS_HENA:
- return ("SET_RSS_HENA");
- default:
- return ("UNKNOWN");
- }
-}
-
static int
ixl_vc_opcode_level(uint16_t opcode)
{
@@ -1459,7 +1406,7 @@ ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
}
} else {
for (int i = 0; i < (key->key_len / 4); i++)
- i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, IXL_GLOBAL_VF_NUM(hw, vf)), ((u32 *)key->key)[i]);
}
DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
@@ -1514,7 +1461,7 @@ ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
}
} else {
for (int i = 0; i < (lut->lut_entries / 4); i++)
- i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, IXL_GLOBAL_VF_NUM(hw, vf)), ((u32 *)lut->lut)[i]);
}
DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
@@ -1541,8 +1488,8 @@ ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
hena = msg;
/* Set HENA */
- i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
- i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, IXL_GLOBAL_VF_NUM(hw, vf)), (u32)hena->hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, IXL_GLOBAL_VF_NUM(hw, vf)), (u32)(hena->hena >> 32));
DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
vf->vf_num, hena->hena);
@@ -1768,8 +1715,6 @@ ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
goto fail;
}
- ixl_enable_adminq(hw);
-
pf->num_vfs = num_vfs;
IXL_PF_UNLOCK(pf);
return (0);
@@ -1811,11 +1756,6 @@ ixl_iov_uninit(device_t dev)
pf->veb_seid = 0;
}
- if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
- ixl_disable_intr(vsi);
- ixl_flush(hw);
- }
-
vfs = pf->vfs;
num_vfs = pf->num_vfs;
diff --git a/sys/dev/ixl/ixl_pf_iov.h b/sys/dev/ixl/ixl_pf_iov.h
index ae8abc208d4b..569226825294 100644
--- a/sys/dev/ixl/ixl_pf_iov.h
+++ b/sys/dev/ixl/ixl_pf_iov.h
@@ -42,6 +42,9 @@
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
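+/* Convert a PF-relative VF index into the device-global VF number used to index the per-VF RSS registers */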
+#define IXL_GLOBAL_VF_NUM(hw, vf) \
+ (vf->vf_num + hw->func_caps.vf_base_id)
+
/* Public functions */
/*
diff --git a/sys/dev/ixl/ixl_pf_main.c b/sys/dev/ixl/ixl_pf_main.c
index 23fc8271b23c..0694482f7a4e 100644
--- a/sys/dev/ixl/ixl_pf_main.c
+++ b/sys/dev/ixl/ixl_pf_main.c
@@ -39,6 +39,11 @@
#include "ixl_pf_iov.h"
#endif
+#ifdef IXL_IW
+#include "ixl_iw.h"
+#include "ixl_iw_int.h"
+#endif
+
#ifdef DEV_NETMAP
#include <net/netmap.h>
#include <sys/selinfo.h>
@@ -46,6 +51,8 @@
#endif /* DEV_NETMAP */
static int ixl_setup_queue(struct ixl_queue *, struct ixl_pf *, int);
+static u64 ixl_max_aq_speed_to_value(u8);
+static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
/* Sysctls */
static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
@@ -63,18 +70,37 @@ static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
+#ifdef IXL_DEBUG
+static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
+#endif
+
+#ifdef IXL_IW
+extern int ixl_enable_iwarp;
+#endif
void
-ixl_dbg(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
+ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
{
va_list args;
if (!(mask & pf->dbg_mask))
return;
+ /* Re-implement device_printf() */
+ device_print_prettyname(pf->dev);
va_start(args, fmt);
- device_printf(pf->dev, fmt, args);
+ vprintf(fmt, args);
va_end(args);
}
@@ -184,11 +210,22 @@ ixl_init_locked(struct ixl_pf *pf)
u8 tmpaddr[ETHER_ADDR_LEN];
int ret;
- mtx_assert(&pf->pf_mtx, MA_OWNED);
INIT_DEBUGOUT("ixl_init_locked: begin");
+ IXL_PF_LOCK_ASSERT(pf);
ixl_stop_locked(pf);
+ /*
+ * If the aq is dead here, it probably means something outside of the driver
+ * did something to the adapter, like a PF reset.
+ * So rebuild the driver's state here if that occurs.
+ */
+ if (!i40e_check_asq_alive(&pf->hw)) {
+ device_printf(dev, "Admin Queue is down; resetting...\n");
+ ixl_teardown_hw_structs(pf);
+ ixl_reset(pf);
+ }
+
/* Get the latest mac address... User might use a LAA */
bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
I40E_ETH_LENGTH_OF_ADDRESS);
@@ -243,7 +280,7 @@ ixl_init_locked(struct ixl_pf *pf)
ixl_setup_vlan_filters(vsi);
/* Set up MSI/X routing and the ITR settings */
- if (pf->enable_msix) {
+ if (pf->msix > 1) {
ixl_configure_queue_intr_msix(pf);
ixl_configure_itr(pf);
} else
@@ -263,14 +300,21 @@ ixl_init_locked(struct ixl_pf *pf)
i40e_get_link_status(hw, &pf->link_up);
ixl_update_link_status(pf);
- /* Set initial advertised speed sysctl value */
- ixl_get_initial_advertised_speeds(pf);
-
/* Start the local timer */
callout_reset(&pf->timer, hz, ixl_local_timer, pf);
/* Now inform the stack we're ready */
ifp->if_drv_flags |= IFF_DRV_RUNNING;
+
+#ifdef IXL_IW
+ if (ixl_enable_iwarp && pf->iw_enabled) {
+ ret = ixl_iw_pf_init(pf);
+ if (ret)
+ device_printf(dev,
+ "initialize iwarp failed, code %d\n", ret);
+ }
+#endif
+
}
@@ -337,6 +381,11 @@ retry:
(hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
"MDIO shared");
+ struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back;
+ osdep->i2c_intfc_num = ixl_find_i2c_interface(pf);
+ if (osdep->i2c_intfc_num != -1)
+ pf->has_i2c = true;
+
return (error);
}
@@ -431,16 +480,8 @@ ixl_get_initial_advertised_speeds(struct ixl_pf *pf)
return;
}
- if (abilities.link_speed & I40E_LINK_SPEED_40GB)
- pf->advertised_speed |= 0x10;
- if (abilities.link_speed & I40E_LINK_SPEED_20GB)
- pf->advertised_speed |= 0x8;
- if (abilities.link_speed & I40E_LINK_SPEED_10GB)
- pf->advertised_speed |= 0x4;
- if (abilities.link_speed & I40E_LINK_SPEED_1GB)
- pf->advertised_speed |= 0x2;
- if (abilities.link_speed & I40E_LINK_SPEED_100MB)
- pf->advertised_speed |= 0x1;
+ pf->advertised_speed =
+ ixl_convert_sysctl_aq_link_speed(abilities.link_speed, false);
}
int
@@ -613,10 +654,10 @@ ixl_intr(void *arg)
struct ixl_queue *que = vsi->queues;
struct ifnet *ifp = vsi->ifp;
struct tx_ring *txr = &que->txr;
- u32 reg, icr0, mask;
+ u32 icr0;
bool more_tx, more_rx;
- ++que->irqs;
+ pf->admin_irq++;
/* Protect against spurious interrupts */
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
@@ -624,11 +665,6 @@ ixl_intr(void *arg)
icr0 = rd32(hw, I40E_PFINT_ICR0);
- reg = rd32(hw, I40E_PFINT_DYN_CTL0);
- reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
- wr32(hw, I40E_PFINT_DYN_CTL0, reg);
-
- mask = rd32(hw, I40E_PFINT_ICR0_ENA);
#ifdef PCI_IOV
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
@@ -637,33 +673,21 @@ ixl_intr(void *arg)
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
taskqueue_enqueue(pf->tq, &pf->adminq);
- return;
}
- more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
-
- IXL_TX_LOCK(txr);
- more_tx = ixl_txeof(que);
- if (!drbr_empty(vsi->ifp, txr->br))
- more_tx = 1;
- IXL_TX_UNLOCK(txr);
-
- /* re-enable other interrupt causes */
- wr32(hw, I40E_PFINT_ICR0_ENA, mask);
-
- /* And now the queues */
- reg = rd32(hw, I40E_QINT_RQCTL(0));
- reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
- wr32(hw, I40E_QINT_RQCTL(0), reg);
+ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
+ ++que->irqs;
- reg = rd32(hw, I40E_QINT_TQCTL(0));
- reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
- reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
- wr32(hw, I40E_QINT_TQCTL(0), reg);
+ more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
- ixl_enable_legacy(hw);
+ IXL_TX_LOCK(txr);
+ more_tx = ixl_txeof(que);
+ if (!drbr_empty(vsi->ifp, txr->br))
+ more_tx = 1;
+ IXL_TX_UNLOCK(txr);
+ }
- return;
+ ixl_enable_intr0(hw);
}
@@ -797,7 +821,7 @@ ixl_msix_adminq(void *arg)
if (do_task)
taskqueue_enqueue(pf->tq, &pf->adminq);
else
- ixl_enable_adminq(hw);
+ ixl_enable_intr0(hw);
}
void
@@ -946,10 +970,12 @@ ixl_local_timer(void *arg)
struct ixl_vsi *vsi = &pf->vsi;
struct ixl_queue *que = vsi->queues;
device_t dev = pf->dev;
+ struct tx_ring *txr;
int hung = 0;
u32 mask;
+ s32 timer, new_timer;
- mtx_assert(&pf->pf_mtx, MA_OWNED);
+ IXL_PF_LOCK_ASSERT(pf);
/* Fire off the adminq task */
taskqueue_enqueue(pf->tq, &pf->adminq);
@@ -959,42 +985,64 @@ ixl_local_timer(void *arg)
/* Check status of the queues */
mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
for (int i = 0; i < vsi->num_queues; i++, que++) {
- /* Any queues with outstanding work get a sw irq */
- if (que->busy)
- wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
- /*
- ** Each time txeof runs without cleaning, but there
- ** are uncleaned descriptors it increments busy. If
- ** we get to 5 we declare it hung.
- */
- if (que->busy == IXL_QUEUE_HUNG) {
- ++hung;
- continue;
- }
- if (que->busy >= IXL_MAX_TX_BUSY) {
-#ifdef IXL_DEBUG
- device_printf(dev, "Warning queue %d "
- "appears to be hung!\n", i);
-#endif
- que->busy = IXL_QUEUE_HUNG;
- ++hung;
+ txr = &que->txr;
+ timer = atomic_load_acq_32(&txr->watchdog_timer);
+ if (timer > 0) {
+ new_timer = timer - hz;
+ if (new_timer <= 0) {
+ atomic_store_rel_32(&txr->watchdog_timer, -1);
+ device_printf(dev, "WARNING: queue %d "
+ "appears to be hung!\n", que->me);
+ ++hung;
+ } else {
+ /*
+ * If this fails, something in the TX path has updated the
+ * watchdog, which means the TX path is still working and the
+ * watchdog does not need to count down.
+ */
+ atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
+ /* Any queues with outstanding work get a sw irq */
+ wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
+ }
}
}
- /* Only reinit if all queues show hung */
- if (hung == vsi->num_queues)
+ /* Reset when a queue shows hung */
+ if (hung)
goto hung;
callout_reset(&pf->timer, hz, ixl_local_timer, pf);
return;
hung:
- device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
+ device_printf(dev, "WARNING: Resetting!\n");
+ pf->watchdog_events++;
ixl_init_locked(pf);
}
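
The per-ring watchdog used here is armed by the transmit path, cleared by ixl_txeof() once the ring drains, and decremented by hz on every timer tick until it reaches zero. A minimal userland sketch of that pattern, using C11 atomics in place of the kernel atomic_*() routines (HZ, WATCHDOG, and the helper names below are illustrative stand-ins, not driver symbols; WATCHDOG only approximates IXL_WATCHDOG):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define HZ       1000            /* stand-in for the kernel's hz */
    #define WATCHDOG (10 * HZ)       /* illustrative arming value */

    static atomic_int watchdog_timer;

    /* Transmit path: (re)arm the watchdog whenever descriptors are queued. */
    static void tx_arm(void) { atomic_store(&watchdog_timer, WATCHDOG); }

    /* Clean path: clear it once the ring is fully drained. */
    static void tx_drained(void) { atomic_store(&watchdog_timer, 0); }

    /* Timer path: count down once per tick; returns true when hung. */
    static bool timer_tick(void)
    {
            int timer = atomic_load(&watchdog_timer);
            int new_timer;

            if (timer <= 0)
                    return (false);          /* idle, or already reported */
            new_timer = timer - HZ;
            if (new_timer <= 0) {
                    atomic_store(&watchdog_timer, -1);
                    return (true);           /* hung: caller reinitializes */
            }
            /* If this CAS fails, the TX path just rearmed the timer. */
            atomic_compare_exchange_strong(&watchdog_timer, &timer, new_timer);
            return (false);
    }

    int main(void)
    {
            tx_arm();
            for (int tick = 1; tick <= 12; tick++)
                    if (timer_tick())
                            printf("queue declared hung at tick %d\n", tick);
            tx_drained();
            return (0);
    }
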
+void
+ixl_link_up_msg(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ifnet *ifp = pf->vsi.ifp;
+
+ log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, FEC: %s, Autoneg: %s, Flow Control: %s\n",
+ ifp->if_xname,
+ ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
+ (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) ?
+ "Clause 74 BASE-R FEC" : (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) ?
+ "Clause 108 RS-FEC" : "None",
+ (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
+ (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
+ hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
+ ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
+ ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
+ ixl_fc_string[1] : ixl_fc_string[0]);
+}
+
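
With the format above, a 25G link that negotiated RS-FEC would be logged roughly as follows (illustrative output; the flow-control string assumes ixl_fc_string[0] is "None"):

    ixl0: Link is up, 25 Gbps Full Duplex, FEC: Clause 108 RS-FEC, Autoneg: True, Flow Control: None
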
/*
** Note: this routine updates the OS on the link state
** the real check of the hardware only happens with
@@ -1004,22 +1052,15 @@ void
ixl_update_link_status(struct ixl_pf *pf)
{
struct ixl_vsi *vsi = &pf->vsi;
- struct i40e_hw *hw = &pf->hw;
struct ifnet *ifp = vsi->ifp;
device_t dev = pf->dev;
if (pf->link_up) {
if (vsi->link_active == FALSE) {
- pf->fc = hw->fc.current_mode;
- if (bootverbose) {
- device_printf(dev, "Link is up %d Gbps %s,"
- " Flow Control: %s\n",
- ((pf->link_speed ==
- I40E_LINK_SPEED_40GB)? 40:10),
- "Full Duplex", ixl_fc_string[pf->fc]);
- }
vsi->link_active = TRUE;
+ ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed);
if_link_state_change(ifp, LINK_STATE_UP);
+ ixl_link_up_msg(pf);
}
} else { /* Link down */
if (vsi->link_active == TRUE) {
@@ -1050,6 +1091,12 @@ ixl_stop_locked(struct ixl_pf *pf)
IXL_PF_LOCK_ASSERT(pf);
+#ifdef IXL_IW
+ /* Stop iWARP device */
+ if (ixl_enable_iwarp && pf->iw_enabled)
+ ixl_iw_pf_stop(pf);
+#endif
+
/* Stop the local timer */
callout_stop(&pf->timer);
@@ -1066,9 +1113,6 @@ ixl_stop(struct ixl_pf *pf)
IXL_PF_LOCK(pf);
ixl_stop_locked(pf);
IXL_PF_UNLOCK(pf);
-
- ixl_teardown_queue_msix(&pf->vsi);
- ixl_free_queue_tqs(&pf->vsi);
}
/*********************************************************************
@@ -1077,11 +1121,9 @@ ixl_stop(struct ixl_pf *pf)
*
**********************************************************************/
int
-ixl_assign_vsi_legacy(struct ixl_pf *pf)
+ixl_setup_legacy(struct ixl_pf *pf)
{
device_t dev = pf->dev;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
int error, rid = 0;
if (pf->msix == 1)
@@ -1089,8 +1131,8 @@ ixl_assign_vsi_legacy(struct ixl_pf *pf)
pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
&rid, RF_SHAREABLE | RF_ACTIVE);
if (pf->res == NULL) {
- device_printf(dev, "Unable to allocate"
- " bus resource: vsi legacy/msi interrupt\n");
+ device_printf(dev, "bus_alloc_resource_any() for"
+ " legacy/msi interrupt\n");
return (ENXIO);
}
@@ -1100,22 +1142,16 @@ ixl_assign_vsi_legacy(struct ixl_pf *pf)
ixl_intr, pf, &pf->tag);
if (error) {
pf->res = NULL;
- device_printf(dev, "Failed to register legacy/msi handler\n");
- return (error);
+ device_printf(dev, "bus_setup_intr() for legacy/msi"
+ " interrupt handler failed, error %d\n", error);
+ return (ENXIO);
+ }
+ error = bus_describe_intr(dev, pf->res, pf->tag, "irq");
+ if (error) {
+ /* non-fatal */
+ device_printf(dev, "bus_describe_intr() for Admin Queue"
+ " interrupt name failed, error %d\n", error);
}
- bus_describe_intr(dev, pf->res, pf->tag, "irq0");
- TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
- TASK_INIT(&que->task, 0, ixl_handle_que, que);
- que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
- taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
- device_get_nameunit(dev));
- TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
-
- pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
- taskqueue_thread_enqueue, &pf->tq);
- taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
- device_get_nameunit(dev));
return (0);
}
@@ -1230,7 +1266,7 @@ ixl_setup_adminq_msix(struct ixl_pf *pf)
}
error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
if (error) {
- /* Probably non-fatal? */
+ /* non-fatal */
device_printf(dev, "bus_describe_intr() for Admin Queue"
" interrupt name failed, error %d\n", error);
}
@@ -1272,6 +1308,7 @@ ixl_setup_queue_msix(struct ixl_vsi *vsi)
device_printf(dev, "bus_setup_intr() for Queue %d"
" interrupt handler failed, error %d\n",
que->me, error);
+ bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
return (error);
}
error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
@@ -1305,11 +1342,20 @@ void
ixl_set_busmaster(device_t dev)
{
u16 pci_cmd_word;
- int msix_ctrl, rid;
pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
+}
+
+/*
+ * Rewrite the ENABLE bit in the MSIX control register
+ * so that the host can successfully initialize us.
+ */
+void
+ixl_set_msix_enable(device_t dev)
+{
+ int msix_ctrl, rid;
pci_find_cap(dev, PCIY_MSIX, &rid);
rid += PCIR_MSIX_CTRL;
@@ -1329,6 +1375,11 @@ ixl_init_msix(struct ixl_pf *pf)
struct i40e_hw *hw = &pf->hw;
int auto_max_queues;
int rid, want, vectors, queues, available;
+#ifdef IXL_IW
+ int iw_want, iw_vectors;
+
+ pf->iw_msix = 0;
+#endif
/* Override by tuneable */
if (!pf->enable_msix)
@@ -1338,7 +1389,7 @@ ixl_init_msix(struct ixl_pf *pf)
ixl_set_busmaster(dev);
/* First try MSI/X */
- rid = PCIR_BAR(IXL_BAR);
+ rid = PCIR_BAR(IXL_MSIX_BAR);
pf->msix_mem = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
if (!pf->msix_mem) {
@@ -1400,13 +1451,34 @@ ixl_init_msix(struct ixl_pf *pf)
"MSIX Configuration Problem, "
"%d vectors available but %d wanted!\n",
available, want);
- return (0); /* Will go to Legacy setup */
+ pf->msix_mem = NULL;
+ goto no_msix; /* Will go to Legacy setup */
+ }
+
+#ifdef IXL_IW
+ if (ixl_enable_iwarp) {
+ /* iWARP wants additional vector for CQP */
+ iw_want = mp_ncpus + 1;
+ available -= vectors;
+ if (available > 0) {
+ iw_vectors = (available >= iw_want) ?
+ iw_want : available;
+ vectors += iw_vectors;
+ } else
+ iw_vectors = 0;
}
+#endif
+ ixl_set_msix_enable(dev);
if (pci_alloc_msix(dev, &vectors) == 0) {
device_printf(pf->dev,
"Using MSIX interrupts with %d vectors\n", vectors);
pf->msix = vectors;
+#ifdef IXL_IW
+ if (ixl_enable_iwarp)
+ pf->iw_msix = iw_vectors;
+#endif
+
pf->vsi.num_queues = queues;
#ifdef RSS
/*
@@ -1433,7 +1505,6 @@ no_msix:
vectors = pci_msi_count(dev);
pf->vsi.num_queues = 1;
pf->max_queues = 1;
- pf->enable_msix = 0;
if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
device_printf(pf->dev, "Using an MSI interrupt\n");
else {
@@ -1520,10 +1591,22 @@ void
ixl_configure_legacy(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- u32 reg;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+ struct rx_ring *rxr = &que->rxr;
+ struct tx_ring *txr = &que->txr;
+ u32 reg;
+
+ /* Configure ITR */
+ vsi->tx_itr_setting = pf->tx_itr;
+ wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
+ vsi->tx_itr_setting);
+ txr->itr = vsi->tx_itr_setting;
- wr32(hw, I40E_PFINT_ITR0(0), 0);
- wr32(hw, I40E_PFINT_ITR0(1), 0);
+ vsi->rx_itr_setting = pf->rx_itr;
+ wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
+ vsi->rx_itr_setting);
+ rxr->itr = vsi->rx_itr_setting;
/* Setup "other" causes */
reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
@@ -1539,12 +1622,9 @@ ixl_configure_legacy(struct ixl_pf *pf)
;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
- /* SW_ITR_IDX = 0, but don't change INTENA */
- wr32(hw, I40E_PFINT_DYN_CTL0,
- I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
- I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
- /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
- wr32(hw, I40E_PFINT_STAT_CTL0, 0);
+ /* No ITR for non-queue interrupts */
+ wr32(hw, I40E_PFINT_STAT_CTL0,
+ IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
wr32(hw, I40E_PFINT_LNKLST0, 0);
@@ -1612,7 +1692,7 @@ int
ixl_teardown_adminq_msix(struct ixl_pf *pf)
{
device_t dev = pf->dev;
- int rid;
+ int rid, error = 0;
if (pf->admvec) /* we are doing MSIX */
rid = pf->admvec + 1;
@@ -1621,10 +1701,20 @@ ixl_teardown_adminq_msix(struct ixl_pf *pf)
if (pf->tag != NULL) {
bus_teardown_intr(dev, pf->res, pf->tag);
+ if (error) {
+ device_printf(dev, "bus_teardown_intr() for"
+ " interrupt 0 failed\n");
+ // return (ENXIO);
+ }
pf->tag = NULL;
}
if (pf->res != NULL) {
bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
+ if (error) {
+ device_printf(dev, "bus_release_resource() for"
+ " interrupt 0 failed [rid=%d]\n", rid);
+ // return (ENXIO);
+ }
pf->res = NULL;
}
@@ -1640,7 +1730,7 @@ ixl_teardown_queue_msix(struct ixl_vsi *vsi)
int rid, error = 0;
/* We may get here before stations are setup */
- if ((!pf->enable_msix) || (que == NULL))
+ if ((pf->msix < 2) || (que == NULL))
return (0);
/* Release all MSIX queue resources */
@@ -1680,10 +1770,10 @@ ixl_free_pci_resources(struct ixl_pf *pf)
ixl_teardown_queue_msix(&pf->vsi);
ixl_teardown_adminq_msix(pf);
- if (pf->msix)
+ if (pf->msix > 0)
pci_release_msi(dev);
- memrid = PCIR_BAR(IXL_BAR);
+ memrid = PCIR_BAR(IXL_MSIX_BAR);
if (pf->msix_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
@@ -1697,64 +1787,73 @@ ixl_free_pci_resources(struct ixl_pf *pf)
}
void
-ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
+ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
{
/* Display supported media types */
- if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
+ if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
+ if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
+ if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
+ if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
- phy_type & (1 << I40E_PHY_TYPE_XFI) ||
- phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
+ if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
+ phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
+ phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
+ if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
+ if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
+ if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
- phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
- phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
- phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
- phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
+ if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
+ phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
+ phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
+ phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
+ phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
+ if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
+ if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
+ if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
- || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
+ if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
+ || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
+ if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_SFI))
+ if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
+ if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
+ if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
+ if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
+ if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
+ if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
+
+ if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
+ if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
+ if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
+ if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_UNKNOWN, 0, NULL);
}
/*********************************************************************
@@ -1765,6 +1864,7 @@ ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
int
ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct ifnet *ifp;
struct i40e_hw *hw = vsi->hw;
struct ixl_queue *que = vsi->queues;
@@ -1780,7 +1880,6 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
}
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_mtu = ETHERMTU;
- ifp->if_baudrate = IF_Gbps(40);
ifp->if_init = ixl_init;
ifp->if_softc = vsi;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
@@ -1857,8 +1956,10 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
" AQ error %d\n", aq_error, hw->aq.asq_last_status);
return (0);
}
+ pf->supported_speeds = abilities.link_speed;
+ ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);
- ixl_add_ifmedia(vsi, abilities.phy_type);
+ ixl_add_ifmedia(vsi, hw->phy.phy_types);
/* Use autoselect media by default */
ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
@@ -1928,7 +2029,7 @@ ixl_switch_config(struct ixl_pf *pf)
sw_config->header.num_reported, sw_config->header.num_total);
for (int i = 0; i < sw_config->header.num_reported; i++) {
device_printf(dev,
- "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
+ "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
sw_config->element[i].element_type,
sw_config->element[i].seid,
sw_config->element[i].uplink_seid,
@@ -2007,6 +2108,14 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
else
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+#ifdef IXL_IW
+ /* Set TCP Enable for iWARP capable VSI */
+ if (ixl_enable_iwarp && pf->iw_enabled) {
+ ctxt.info.valid_sections |=
+ htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+ ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ }
+#endif
/* Save VSI number and info for use later */
vsi->vsi_num = ctxt.vsi_number;
bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
@@ -2198,7 +2307,6 @@ ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
int error = 0;
int rsize, tsize;
- /* ERJ: A lot of references to external objects... */
que->num_desc = pf->ringsz;
que->me = index;
que->vsi = vsi;
@@ -2316,6 +2424,7 @@ ixl_setup_stations(struct ixl_pf *pf)
return (error);
}
+ /* Then set up each queue */
for (int i = 0; i < vsi->num_queues; i++) {
que = &vsi->queues[i];
error = ixl_setup_queue(que, pf, i);
@@ -2380,7 +2489,7 @@ ixl_set_queue_rx_itr(struct ixl_queue *que)
/* do an exponential smoothing */
rx_itr = (10 * rx_itr * rxr->itr) /
((9 * rx_itr) + rxr->itr);
- rxr->itr = rx_itr & IXL_MAX_ITR;
+ rxr->itr = min(rx_itr, IXL_MAX_ITR);
wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
que->me), rxr->itr);
}
@@ -2454,7 +2563,7 @@ ixl_set_queue_tx_itr(struct ixl_queue *que)
/* do an exponential smoothing */
tx_itr = (10 * tx_itr * txr->itr) /
((9 * tx_itr) + txr->itr);
- txr->itr = tx_itr & IXL_MAX_ITR;
+ txr->itr = min(tx_itr, IXL_MAX_ITR);
wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
que->me), txr->itr);
}
@@ -2497,7 +2606,7 @@ ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
* Retrieves I40E_QTX_TAIL value from hardware
* for a sysctl.
*/
-int
+static int
ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
struct ixl_queue *que;
@@ -2519,7 +2628,7 @@ ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
* Retrieves I40E_QRX_TAIL value from hardware
* for a sysctl.
*/
-int
+static int
ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
struct ixl_queue *que;
@@ -2658,6 +2767,9 @@ ixl_add_hw_stats(struct ixl_pf *pf)
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
"Driver tx dma failure in xmit");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
+ CTLFLAG_RD, &(queues[q].mss_too_small),
+ "TSO sends with an MSS less than 64");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
CTLFLAG_RD, &(txr->no_desc),
"Queue No Descriptor Available");
@@ -2866,7 +2978,10 @@ ixl_set_rss_pctypes(struct ixl_pf *pf)
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
- set_hena = IXL_DEFAULT_RSS_HENA;
+ if (hw->mac.type == I40E_MAC_X722)
+ set_hena = IXL_DEFAULT_RSS_HENA_X722;
+ else
+ set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
@@ -3558,11 +3673,11 @@ ixl_enable_intr(struct ixl_vsi *vsi)
struct i40e_hw *hw = vsi->hw;
struct ixl_queue *que = vsi->queues;
- if (pf->enable_msix) {
+ if (pf->msix > 1) {
for (int i = 0; i < vsi->num_queues; i++, que++)
ixl_enable_queue(hw, que->me);
} else
- ixl_enable_legacy(hw);
+ ixl_enable_intr0(hw);
}
void
@@ -3576,31 +3691,19 @@ ixl_disable_rings_intr(struct ixl_vsi *vsi)
}
void
-ixl_disable_intr(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
- struct i40e_hw *hw = vsi->hw;
-
- if (pf->enable_msix)
- ixl_disable_adminq(hw);
- else
- ixl_disable_legacy(hw);
-}
-
-void
-ixl_enable_adminq(struct i40e_hw *hw)
+ixl_enable_intr0(struct i40e_hw *hw)
{
u32 reg;
+ /* Use IXL_ITR_NONE so ITR isn't updated here */
reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
(IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
wr32(hw, I40E_PFINT_DYN_CTL0, reg);
- ixl_flush(hw);
}
void
-ixl_disable_adminq(struct i40e_hw *hw)
+ixl_disable_intr0(struct i40e_hw *hw)
{
u32 reg;
@@ -3630,25 +3733,6 @@ ixl_disable_queue(struct i40e_hw *hw, int id)
}
void
-ixl_enable_legacy(struct i40e_hw *hw)
-{
- u32 reg;
- reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
- (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
- wr32(hw, I40E_PFINT_DYN_CTL0, reg);
-}
-
-void
-ixl_disable_legacy(struct i40e_hw *hw)
-{
- u32 reg;
-
- reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
- wr32(hw, I40E_PFINT_DYN_CTL0, reg);
-}
-
-void
ixl_update_stats_counters(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
@@ -3842,7 +3926,7 @@ ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
if (error)
device_printf(dev,
"Shutdown LAN HMC failed with code %d\n", error);
- ixl_disable_adminq(hw);
+ ixl_disable_intr0(hw);
ixl_teardown_adminq_msix(pf);
error = i40e_shutdown_adminq(hw);
if (error)
@@ -3861,7 +3945,7 @@ ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
error);
}
ixl_configure_intr0_msix(pf);
- ixl_enable_adminq(hw);
+ ixl_enable_intr0(hw);
error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
if (error) {
@@ -3943,7 +4027,7 @@ ixl_do_adminq(void *context, int pending)
break;
opcode = LE16_TO_CPU(event.desc.opcode);
ixl_dbg(pf, IXL_DBG_AQ,
- "%s: Admin Queue event: %#06x\n", __func__, opcode);
+ "Admin Queue event: %#06x\n", opcode);
switch (opcode) {
case i40e_aqc_opc_get_link_status:
ixl_link_event(pf, &event);
@@ -3969,7 +4053,7 @@ ixl_do_adminq(void *context, int pending)
if (result > 0)
taskqueue_enqueue(pf->tq, &pf->adminq);
else
- ixl_enable_adminq(hw);
+ ixl_enable_intr0(hw);
IXL_PF_UNLOCK(pf);
}
@@ -4156,6 +4240,7 @@ void
ixl_add_device_sysctls(struct ixl_pf *pf)
{
device_t dev = pf->dev;
+ struct i40e_hw *hw = &pf->hw;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid_list *ctx_list =
@@ -4164,6 +4249,9 @@ ixl_add_device_sysctls(struct ixl_pf *pf)
struct sysctl_oid *debug_node;
struct sysctl_oid_list *debug_list;
+ struct sysctl_oid *fec_node;
+ struct sysctl_oid_list *fec_list;
+
/* Set up sysctls */
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
@@ -4204,6 +4292,38 @@ ixl_add_device_sysctls(struct ixl_pf *pf)
OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
&pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
+ /* Add FEC sysctls for 25G adapters */
+ /*
+ * XXX: These settings can be changed, but that isn't supported,
+ * so these are read-only for now.
+ */
+ if (hw->device_id == I40E_DEV_ID_25G_B
+ || hw->device_id == I40E_DEV_ID_25G_SFP28) {
+ fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
+ OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
+ fec_list = SYSCTL_CHILDREN(fec_node);
+
+ SYSCTL_ADD_PROC(ctx, fec_list,
+ OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
+
+ SYSCTL_ADD_PROC(ctx, fec_list,
+ OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
+
+ SYSCTL_ADD_PROC(ctx, fec_list,
+ OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
+
+ SYSCTL_ADD_PROC(ctx, fec_list,
+ OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
+
+ SYSCTL_ADD_PROC(ctx, fec_list,
+ OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
+ }
+
/* Add sysctls meant to print debug information, but don't list them
* in "sysctl -a" output. */
debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
@@ -4245,6 +4365,25 @@ ixl_add_device_sysctls(struct ixl_pf *pf)
SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
+ pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
+
+ if (pf->has_i2c) {
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
+ pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
+ pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus");
+ }
+
#ifdef PCI_IOV
SYSCTL_ADD_UINT(ctx, debug_list,
OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
@@ -4316,25 +4455,22 @@ ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
return (0);
}
-int
-ixl_current_speed(SYSCTL_HANDLER_ARGS)
+char *
+ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- int error = 0, index = 0;
+ int index;
char *speeds[] = {
"Unknown",
- "100M",
- "1G",
- "10G",
- "40G",
- "20G"
+ "100 Mbps",
+ "1 Gbps",
+ "10 Gbps",
+ "40 Gbps",
+ "20 Gbps",
+ "25 Gbps",
};
- ixl_update_link_status(pf);
-
- switch (hw->phy.link_info.link_speed) {
+ switch (link_speed) {
case I40E_LINK_SPEED_100MB:
index = 1;
break;
@@ -4350,17 +4486,56 @@ ixl_current_speed(SYSCTL_HANDLER_ARGS)
case I40E_LINK_SPEED_20GB:
index = 5;
break;
+ case I40E_LINK_SPEED_25GB:
+ index = 6;
+ break;
case I40E_LINK_SPEED_UNKNOWN:
default:
index = 0;
break;
}
- error = sysctl_handle_string(oidp, speeds[index],
- strlen(speeds[index]), req);
+ return speeds[index];
+}
+
+int
+ixl_current_speed(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ int error = 0;
+
+ ixl_update_link_status(pf);
+
+ error = sysctl_handle_string(oidp,
+ ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
+ 8, req);
return (error);
}
+static u8
+ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
+{
+ static u16 speedmap[6] = {
+ (I40E_LINK_SPEED_100MB | (0x1 << 8)),
+ (I40E_LINK_SPEED_1GB | (0x2 << 8)),
+ (I40E_LINK_SPEED_10GB | (0x4 << 8)),
+ (I40E_LINK_SPEED_20GB | (0x8 << 8)),
+ (I40E_LINK_SPEED_25GB | (0x10 << 8)),
+ (I40E_LINK_SPEED_40GB | (0x20 << 8))
+ };
+ u8 retval = 0;
+
+ for (int i = 0; i < 6; i++) {
+ if (to_aq)
+ retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
+ else
+ retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
+ }
+
+ return (retval);
+}
+
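
To make the table concrete: each speedmap entry keeps the AQ speed bit in its low byte and the sysctl flag in its high byte, so converting a sysctl value of 0x24 (0x4 | 0x20) with to_aq set yields I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_40GB, while running the same table in the other direction folds an AQ link_speed bitmap back into the 0x1-0x20 flag encoding used by the advertise sysctl documented further below.
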
int
ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
{
@@ -4383,23 +4558,14 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
/* Prepare new config */
bzero(&config, sizeof(config));
+ config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
config.abilities = abilities.abilities
| I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
- /* Translate into aq cmd link_speed */
- if (speeds & 0x10)
- config.link_speed |= I40E_LINK_SPEED_40GB;
- if (speeds & 0x8)
- config.link_speed |= I40E_LINK_SPEED_20GB;
- if (speeds & 0x4)
- config.link_speed |= I40E_LINK_SPEED_10GB;
- if (speeds & 0x2)
- config.link_speed |= I40E_LINK_SPEED_1GB;
- if (speeds & 0x1)
- config.link_speed |= I40E_LINK_SPEED_100MB;
/* Do aq command & restart link */
aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
@@ -4408,18 +4574,9 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
"%s: Error setting new phy config %d,"
" aq error: %d\n", __func__, aq_error,
hw->aq.asq_last_status);
- return (EAGAIN);
+ return (EIO);
}
- /*
- ** This seems a bit heavy handed, but we
- ** need to get a reinit on some devices
- */
- IXL_PF_LOCK(pf);
- ixl_stop_locked(pf);
- ixl_init_locked(pf);
- IXL_PF_UNLOCK(pf);
-
return (0);
}
@@ -4430,7 +4587,8 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
** 0x2 - advertise 1G
** 0x4 - advertise 10G
** 0x8 - advertise 20G
-** 0x10 - advertise 40G
+** 0x10 - advertise 25G
+** 0x20 - advertise 40G
**
** Set to 0 to disable link
*/
@@ -4440,6 +4598,7 @@ ixl_set_advertise(SYSCTL_HANDLER_ARGS)
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
+ u8 converted_speeds;
int requested_ls = 0;
int error = 0;
@@ -4448,63 +4607,25 @@ ixl_set_advertise(SYSCTL_HANDLER_ARGS)
error = sysctl_handle_int(oidp, &requested_ls, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
- /* Check for sane value */
- if (requested_ls > 0x10) {
- device_printf(dev, "Invalid advertised speed; "
- "valid modes are 0x1 through 0x10\n");
+ /* Check if changing speeds is supported */
+ switch (hw->device_id) {
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ device_printf(dev, "Changing advertised speeds not supported"
+ " on this device.\n");
return (EINVAL);
}
- /* Then check for validity based on adapter type */
- switch (hw->device_id) {
- case I40E_DEV_ID_1G_BASE_T_X722:
- /* 1G BaseT */
- if (requested_ls & ~(0x2)) {
- device_printf(dev,
- "Only 1G speeds supported on this device.\n");
- return (EINVAL);
- }
- break;
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- /* 10G BaseT */
- if (requested_ls & ~(0x7)) {
- device_printf(dev,
- "Only 100M/1G/10G speeds supported on this device.\n");
- return (EINVAL);
- }
- break;
- case I40E_DEV_ID_20G_KR2:
- case I40E_DEV_ID_20G_KR2_A:
- /* 20G */
- if (requested_ls & ~(0xE)) {
- device_printf(dev,
- "Only 1G/10G/20G speeds supported on this device.\n");
- return (EINVAL);
- }
- break;
- case I40E_DEV_ID_KX_B:
- case I40E_DEV_ID_QSFP_A:
- case I40E_DEV_ID_QSFP_B:
- /* 40G */
- if (requested_ls & ~(0x10)) {
- device_printf(dev,
- "Only 40G speeds supported on this device.\n");
- return (EINVAL);
- }
- break;
- default:
- /* 10G (1G) */
- if (requested_ls & ~(0x6)) {
- device_printf(dev,
- "Only 1/10G speeds supported on this device.\n");
- return (EINVAL);
- }
- break;
+ if (requested_ls < 0 || requested_ls > 0xff) {
}
- /* Exit if no change */
- if (pf->advertised_speed == requested_ls)
- return (0);
+ /* Check for valid value */
+ converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
+ if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
+ device_printf(dev, "Invalid advertised speed; "
+ "valid flags are: 0x%02x\n",
+ ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
+ return (EINVAL);
+ }
error = ixl_set_advertised_speeds(pf, requested_ls);
if (error)
@@ -4516,14 +4637,40 @@ ixl_set_advertise(SYSCTL_HANDLER_ARGS)
}
/*
+ * Input: bitmap of enum i40e_aq_link_speed
+ */
+static u64
+ixl_max_aq_speed_to_value(u8 link_speeds)
+{
+ if (link_speeds & I40E_LINK_SPEED_40GB)
+ return IF_Gbps(40);
+ if (link_speeds & I40E_LINK_SPEED_25GB)
+ return IF_Gbps(25);
+ if (link_speeds & I40E_LINK_SPEED_20GB)
+ return IF_Gbps(20);
+ if (link_speeds & I40E_LINK_SPEED_10GB)
+ return IF_Gbps(10);
+ if (link_speeds & I40E_LINK_SPEED_1GB)
+ return IF_Gbps(1);
+ if (link_speeds & I40E_LINK_SPEED_100MB)
+ return IF_Mbps(100);
+ else
+ /* Minimum supported link speed */
+ return IF_Mbps(100);
+}
+
+/*
** Get the width and transaction speed of
** the bus this adapter is plugged into.
*/
void
-ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
+ixl_get_bus_info(struct ixl_pf *pf)
{
- u16 link;
- u32 offset;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ u16 link;
+ u32 offset, num_ports;
+ u64 max_speed;
/* Some devices don't use PCIE */
if (hw->mac.type == I40E_MAC_X722)
@@ -4543,16 +4690,28 @@ ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
(hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
(hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
(hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
+ (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
(hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
("Unknown"));
- if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
- (hw->bus.speed < i40e_bus_speed_8000)) {
+ /*
+ * If the adapter is in a slot that already supplies its maximum
+ * supported speed and width, no warning needs to be printed.
+ */
+ if (hw->bus.speed >= i40e_bus_speed_8000
+ && hw->bus.width >= i40e_bus_width_pcie_x8)
+ return;
+
+ num_ports = bitcount32(hw->func_caps.valid_functions);
+ max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
+
+ if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
device_printf(dev, "PCI-Express bandwidth available"
" for this device may be insufficient for"
" optimal performance.\n");
- device_printf(dev, "For optimal performance, a x8 "
- "PCIE Gen3 slot is required.\n");
+ device_printf(dev, "Please move the device to a different"
+ " PCI-e link with more lanes and/or higher"
+ " transfer rate.\n");
}
}
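
As a worked example of the check above (assuming the enum encodings in i40e_type.h, where i40e_bus_speed_8000 is 8000 and i40e_bus_width_pcie_x4 is 4): a dual-port 40G adapter needs 2 * 40000 Mb/s = 80000, a Gen3 x4 slot supplies 8000 * 4 = 32000, so the warning is printed; any adapter sitting in a Gen3 x8 slot is skipped by the early return above regardless of port count.
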
@@ -4650,8 +4809,8 @@ ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
}
if (status)
- device_printf(dev, "i40e_nvmupd_command status %d, perrno %d\n",
- status, perrno);
+ device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
+ i40e_stat_str(hw, status), perrno);
/*
* -EPERM is actually ERESTART, which the kernel interprets as it needing
@@ -4734,6 +4893,19 @@ ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
case I40E_PHY_TYPE_10GBASE_AOC:
ifmr->ifm_active |= IFM_OTHER;
break;
+ /* 25 G */
+ case I40E_PHY_TYPE_25GBASE_KR:
+ ifmr->ifm_active |= IFM_25G_KR;
+ break;
+ case I40E_PHY_TYPE_25GBASE_CR:
+ ifmr->ifm_active |= IFM_25G_CR;
+ break;
+ case I40E_PHY_TYPE_25GBASE_SR:
+ ifmr->ifm_active |= IFM_25G_SR;
+ break;
+ case I40E_PHY_TYPE_25GBASE_LR:
+ ifmr->ifm_active |= IFM_UNKNOWN;
+ break;
/* 40 G */
case I40E_PHY_TYPE_40GBASE_CR4:
case I40E_PHY_TYPE_40GBASE_CR4_CU:
@@ -4797,48 +4969,6 @@ void
ixl_init(void *arg)
{
struct ixl_pf *pf = arg;
- struct ixl_vsi *vsi = &pf->vsi;
- device_t dev = pf->dev;
- int error = 0;
-
- /*
- * If the aq is dead here, it probably means something outside of the driver
- * did something to the adapter, like a PF reset.
- * So rebuild the driver's state here if that occurs.
- */
- if (!i40e_check_asq_alive(&pf->hw)) {
- device_printf(dev, "Admin Queue is down; resetting...\n");
- IXL_PF_LOCK(pf);
- ixl_teardown_hw_structs(pf);
- ixl_reset(pf);
- IXL_PF_UNLOCK(pf);
- }
-
- /*
- * Set up LAN queue interrupts here.
- * Kernel interrupt setup functions cannot be called while holding a lock,
- * so this is done outside of init_locked().
- */
- if (pf->msix > 1) {
- /* Teardown existing interrupts, if they exist */
- ixl_teardown_queue_msix(vsi);
- ixl_free_queue_tqs(vsi);
- /* Then set them up again */
- error = ixl_setup_queue_msix(vsi);
- if (error)
- device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
- error);
- error = ixl_setup_queue_tqs(vsi);
- if (error)
- device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
- error);
- } else
- // possibly broken
- error = ixl_assign_vsi_legacy(pf);
- if (error) {
- device_printf(pf->dev, "assign_vsi_msix/legacy error: %d\n", error);
- return;
- }
IXL_PF_LOCK(pf);
ixl_init_locked(pf);
@@ -4891,6 +5021,7 @@ ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
switch (command) {
case SIOCSIFADDR:
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFADDR (Set Interface Address)");
#ifdef INET
if (ifa->ifa_addr->sa_family == AF_INET)
avoid_reset = TRUE;
@@ -4948,9 +5079,7 @@ ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
}
} else {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXL_PF_UNLOCK(pf);
- ixl_stop(pf);
- IXL_PF_LOCK(pf);
+ ixl_stop_locked(pf);
}
}
pf->if_flags = ifp->if_flags;
@@ -4971,7 +5100,7 @@ ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
IXL_PF_LOCK(pf);
- ixl_disable_intr(vsi);
+ ixl_disable_rings_intr(vsi);
ixl_add_multi(vsi);
ixl_enable_intr(vsi);
IXL_PF_UNLOCK(pf);
@@ -4981,7 +5110,7 @@ ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
IXL_PF_LOCK(pf);
- ixl_disable_intr(vsi);
+ ixl_disable_rings_intr(vsi);
ixl_del_multi(vsi);
ixl_enable_intr(vsi);
IXL_PF_UNLOCK(pf);
@@ -5021,7 +5150,37 @@ ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
break;
}
+#if __FreeBSD_version >= 1003000
+ case SIOCGI2C:
+ {
+ struct ifi2creq i2c;
+ int i;
+
+ IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
+ if (!pf->has_i2c)
+ return (ENOTTY);
+
+ error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
+ if (error != 0)
+ break;
+ if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
+ error = EINVAL;
+ break;
+ }
+ if (i2c.len > sizeof(i2c.data)) {
+ error = EINVAL;
+ break;
+ }
+ for (i = 0; i < i2c.len; i++)
+ if (ixl_read_i2c_byte(pf, i2c.offset + i,
+ i2c.dev_addr, &i2c.data[i]))
+ return (EIO);
+
+ error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
+ break;
+ }
+#endif
default:
IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
error = ether_ioctl(ifp, command, data);
@@ -5031,8 +5190,28 @@ ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
return (error);
}
+int
+ixl_find_i2c_interface(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ bool i2c_en, port_matched;
+ u32 reg;
+
+ for (int i = 0; i < 4; i++) {
+ reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
+ i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
+ port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
+ >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+ & BIT(hw->port);
+ if (i2c_en && port_matched)
+ return (i);
+ }
+
+ return (-1);
+}
+
static char *
-ixl_phy_type_string(u32 bit_pos)
+ixl_phy_type_string(u32 bit_pos, bool ext)
{
static char * phy_types_str[32] = {
"SGMII",
@@ -5068,20 +5247,59 @@ ixl_phy_type_string(u32 bit_pos)
"20GBASE-KR2",
"Reserved (31)"
};
+ static char * ext_phy_types_str[4] = {
+ "25GBASE-KR",
+ "25GBASE-CR",
+ "25GBASE-SR",
+ "25GBASE-LR"
+ };
+ if (ext && bit_pos > 3) return "Invalid_Ext";
if (bit_pos > 31) return "Invalid";
- return phy_types_str[bit_pos];
+
+ return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
+}
+
+int
+ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
+{
+ device_t dev = pf->dev;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ struct i40e_aqc_get_link_status *aq_link_status =
+ (struct i40e_aqc_get_link_status *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+ link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ if (status) {
+ device_printf(dev,
+ "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
+ __func__, i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return (EIO);
+ }
+
+ bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
+ return (0);
}
+static char *
+ixl_phy_type_string_ls(u8 val)
+{
+ if (val >= 0x1F)
+ return ixl_phy_type_string(val - 0x1F, true);
+ else
+ return ixl_phy_type_string(val, false);
+}
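
For example, a link_status.phy_type value of 0x1F or above is treated as an extended type here, so 0x20 maps to index 1 of ext_phy_types_str and is printed as "25GBASE-CR", while values below 0x1F index the original 32-entry table unchanged.
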
static int
ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
- struct i40e_link_status link_status;
- enum i40e_status_code status;
struct sbuf *buf;
int error = 0;
@@ -5091,31 +5309,34 @@ ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
return (ENOMEM);
}
- status = i40e_aq_get_link_info(hw, true, &link_status, NULL);
- if (status) {
- device_printf(dev,
- "%s: i40e_aq_get_link_info() status %s, aq error %s\n",
- __func__, i40e_stat_str(hw, status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ struct i40e_aqc_get_link_status link_status;
+ error = ixl_aq_get_link_status(pf, &link_status);
+ if (error) {
sbuf_delete(buf);
- return (EIO);
+ return (error);
}
+ /* TODO: Add 25G types */
sbuf_printf(buf, "\n"
"PHY Type : 0x%02x<%s>\n"
"Speed : 0x%02x\n"
"Link info: 0x%02x\n"
"AN info : 0x%02x\n"
"Ext info : 0x%02x\n"
+ "Loopback : 0x%02x\n"
"Max Frame: %d\n"
- "Pacing : 0x%02x\n"
- "CRC En? : %s\n",
- link_status.phy_type, ixl_phy_type_string(link_status.phy_type),
+ "Config : 0x%02x\n"
+ "Power : 0x%02x",
+ link_status.phy_type,
+ ixl_phy_type_string_ls(link_status.phy_type),
link_status.link_speed,
- link_status.link_info, link_status.an_info,
- link_status.ext_info, link_status.max_frame_size,
- link_status.pacing,
- (link_status.crc_enable) ? "Yes" : "No");
+ link_status.link_info,
+ link_status.an_info,
+ link_status.ext_info,
+ link_status.loopback,
+ link_status.max_frame_size,
+ link_status.config,
+ link_status.power_desc);
error = sbuf_finish(buf);
if (error)
@@ -5143,7 +5364,7 @@ ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
}
status = i40e_aq_get_phy_capabilities(hw,
- TRUE, FALSE, &abilities, NULL);
+ FALSE, FALSE, &abilities, NULL);
if (status) {
device_printf(dev,
"%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
@@ -5161,10 +5382,22 @@ ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
sbuf_printf(buf, "<");
for (int i = 0; i < 32; i++)
if ((1 << i) & abilities.phy_type)
- sbuf_printf(buf, "%s,", ixl_phy_type_string(i));
+ sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
sbuf_printf(buf, ">\n");
}
+ sbuf_printf(buf, "PHY Ext : %02x",
+ abilities.phy_type_ext);
+
+ if (abilities.phy_type_ext != 0) {
+ sbuf_printf(buf, "<");
+ for (int i = 0; i < 4; i++)
+ if ((1 << i) & abilities.phy_type_ext)
+ sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
+ sbuf_printf(buf, ">");
+ }
+ sbuf_printf(buf, "\n");
+
sbuf_printf(buf,
"Speed : %02x\n"
"Abilities: %02x\n"
@@ -5172,14 +5405,19 @@ ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
"EEER reg : %08x\n"
"D3 Lpan : %02x\n"
"ID : %02x %02x %02x %02x\n"
- "ModType : %02x %02x %02x",
+ "ModType : %02x %02x %02x\n"
+ "ModType E: %01x\n"
+ "FEC Cfg : %02x\n"
+ "Ext CC : %02x",
abilities.link_speed,
abilities.abilities, abilities.eee_capability,
abilities.eeer_val, abilities.d3_lpan,
abilities.phy_id[0], abilities.phy_id[1],
abilities.phy_id[2], abilities.phy_id[3],
abilities.module_type[0], abilities.module_type[1],
- abilities.module_type[2]);
+ abilities.module_type[2], abilities.phy_type_ext >> 5,
+ abilities.phy_type_ext & 0x1F,
+ abilities.ext_comp_code);
error = sbuf_finish(buf);
if (error)
@@ -5252,7 +5490,7 @@ ixl_res_alloc_cmp(const void *a, const void *b)
char *
ixl_switch_res_type_string(u8 type)
{
- char * ixl_switch_res_type_strings[0x14] = {
+ static char * ixl_switch_res_type_strings[0x14] = {
"VEB",
"VSI",
"Perfect Match MAC address",
@@ -5556,3 +5794,283 @@ ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
return (error);
}
+static int
+ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ u64 hena;
+
+ hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
+
+ return sysctl_handle_long(oidp, NULL, hena, req);
+}
+
+/*
+ * Sysctl to disable firmware's link management
+ *
+ * 1 - Disable link management on this port
+ * 0 - Re-enable link management
+ *
+ * On normal NVMs, firmware manages link by default.
+ */
+static int
+ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int requested_mode = -1;
+ enum i40e_status_code status = 0;
+ int error = 0;
+
+ /* Read in new mode */
+ error = sysctl_handle_int(oidp, &requested_mode, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+ /* Check for sane value */
+ if (requested_mode < 0 || requested_mode > 1) {
+ device_printf(dev, "Valid modes are 0 or 1\n");
+ return (EINVAL);
+ }
+
+ /* Set new mode */
+ status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
+ if (status) {
+ device_printf(dev,
+ "%s: Error setting new phy debug mode %s,"
+ " aq error: %s\n", __func__, i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return (EIO);
+ }
+
+ return (0);
+}
+
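
Since the handler is registered under the debug sysctl node (see ixl_add_device_sysctls() above), toggling it from the command line looks like "sysctl dev.ixl.0.debug.disable_fw_link_management=1", with a write of 0 handing link management back to firmware (the unit number is illustrative).
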
+/*
+ * Sysctl to read a byte from I2C bus.
+ *
+ * Input: 32-bit value:
+ * bits 0-7: device address (0xA0 or 0xA2)
+ * bits 8-15: offset (0-255)
+ * bits 16-31: unused
+ * Output: 8-bit value read
+ */
+static int
+ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ device_t dev = pf->dev;
+ int input = -1, error = 0;
+
+ device_printf(dev, "%s: start\n", __func__);
+
+ u8 dev_addr, offset, output;
+
+ /* Read in I2C read parameters */
+ error = sysctl_handle_int(oidp, &input, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+ /* Validate device address */
+ dev_addr = input & 0xFF;
+ if (dev_addr != 0xA0 && dev_addr != 0xA2) {
+ return (EINVAL);
+ }
+ offset = (input >> 8) & 0xFF;
+
+ error = ixl_read_i2c_byte(pf, offset, dev_addr, &output);
+ if (error)
+ return (error);
+
+ device_printf(dev, "%02X\n", output);
+ return (0);
+}
+
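
Packing the input as described: to read offset 0x60 from device address 0xA0, write 0x60A0 ((0x60 << 8) | 0xA0) to the sysctl; the handler takes the device address from the low byte, the offset from the next byte, and prints the byte it read on the console.
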
+/*
+ * Sysctl to write a byte to the I2C bus.
+ *
+ * Input: 32-bit value:
+ * bits 0-7: device address (0xA0 or 0xA2)
+ * bits 8-15: offset (0-255)
+ * bits 16-23: value to write
+ * bits 24-31: unused
+ * Output: 8-bit value written
+ */
+static int
+ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ device_t dev = pf->dev;
+ int input = -1, error = 0;
+
+ u8 dev_addr, offset, value;
+
+ /* Read in I2C write parameters */
+ error = sysctl_handle_int(oidp, &input, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+ /* Validate device address */
+ dev_addr = input & 0xFF;
+ if (dev_addr != 0xA0 && dev_addr != 0xA2) {
+ return (EINVAL);
+ }
+ offset = (input >> 8) & 0xFF;
+ value = (input >> 16) & 0xFF;
+
+ error = ixl_write_i2c_byte(pf, offset, dev_addr, value);
+ if (error)
+ return (error);
+
+ device_printf(dev, "%02X written\n", value);
+ return (0);
+}
+
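
The write variant adds the data byte in bits 16-23, so storing 0x5A at offset 0x7F of device 0xA2 uses a packed value of 0x5A7FA2 ((0x5A << 16) | (0x7F << 8) | 0xA2).
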
+static int
+ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
+ u8 bit_pos, int *is_set)
+{
+ device_t dev = pf->dev;
+ struct i40e_hw *hw = &pf->hw;
+ enum i40e_status_code status;
+
+ status = i40e_aq_get_phy_capabilities(hw,
+ FALSE, FALSE, abilities, NULL);
+ if (status) {
+ device_printf(dev,
+ "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
+ __func__, i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return (EIO);
+ }
+
+ *is_set = !!(abilities->phy_type_ext & bit_pos);
+ return (0);
+}
+
+static int
+ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
+ u8 bit_pos, int set)
+{
+ device_t dev = pf->dev;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_aq_set_phy_config config;
+ enum i40e_status_code status;
+
+ /* Set new PHY config */
+ memset(&config, 0, sizeof(config));
+ config.fec_config = abilities->phy_type_ext & ~(bit_pos);
+ if (set)
+ config.fec_config |= bit_pos;
+ if (config.fec_config != abilities->phy_type_ext) {
+ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ config.phy_type = abilities->phy_type;
+ config.phy_type_ext = abilities->phy_type_ext;
+ config.link_speed = abilities->link_speed;
+ config.eee_capability = abilities->eee_capability;
+ config.eeer = abilities->eeer_val;
+ config.low_power_ctrl = abilities->d3_lpan;
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+
+ if (status) {
+ device_printf(dev,
+ "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
+ __func__, i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return (EIO);
+ }
+ }
+
+ return (0);
+}
+
+static int
+ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ int mode, error = 0;
+
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, &mode);
+ if (error)
+ return (error);
+ /* Read in new mode */
+ error = sysctl_handle_int(oidp, &mode, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
+}
+
+static int
+ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ int mode, error = 0;
+
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, &mode);
+ if (error)
+ return (error);
+ /* Read in new mode */
+ error = sysctl_handle_int(oidp, &mode, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
+}
+
+static int
+ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ int mode, error = 0;
+
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, &mode);
+ if (error)
+ return (error);
+ /* Read in new mode */
+ error = sysctl_handle_int(oidp, &mode, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
+}
+
+static int
+ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ int mode, error = 0;
+
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, &mode);
+ if (error)
+ return (error);
+ /* Read in new mode */
+ error = sysctl_handle_int(oidp, &mode, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
+}
+
+static int
+ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ int mode, error = 0;
+
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, &mode);
+ if (error)
+ return (error);
+ /* Read in new mode */
+ error = sysctl_handle_int(oidp, &mode, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
+}
+
diff --git a/sys/dev/ixl/ixl_txrx.c b/sys/dev/ixl/ixl_txrx.c
index 9a7fae3b055f..7e168dd511bd 100644
--- a/sys/dev/ixl/ixl_txrx.c
+++ b/sys/dev/ixl/ixl_txrx.c
@@ -218,22 +218,27 @@ static inline bool
ixl_tso_detect_sparse(struct mbuf *mp)
{
struct mbuf *m;
- int num = 0, mss;
- bool ret = FALSE;
+ int num, mss;
+ num = 0;
mss = mp->m_pkthdr.tso_segsz;
+
+ /* Exclude first mbuf; assume it contains all headers */
for (m = mp->m_next; m != NULL; m = m->m_next) {
- num++;
- mss -= m->m_len;
- if (mss < 1)
- break;
- if (m->m_next == NULL)
+ if (m == NULL)
break;
+ num++;
+ mss -= m->m_len % mp->m_pkthdr.tso_segsz;
+
+ if (mss < 1) {
+ if (num > IXL_SPARSE_CHAIN)
+ return (true);
+ num = (mss == 0) ? 0 : 1;
+ mss += mp->m_pkthdr.tso_segsz;
+ }
}
- if (num > IXL_SPARSE_CHAIN)
- ret = TRUE;
- return (ret);
+ return (false);
}
@@ -312,18 +317,12 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
error = bus_dmamap_load_mbuf_sg(tag, map,
*m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
- if (error == ENOMEM) {
- que->tx_dmamap_failed++;
- return (error);
- } else if (error != 0) {
+ if (error != 0) {
que->tx_dmamap_failed++;
m_freem(*m_headp);
*m_headp = NULL;
return (error);
}
- } else if (error == ENOMEM) {
- que->tx_dmamap_failed++;
- return (error);
} else if (error != 0) {
que->tx_dmamap_failed++;
m_freem(*m_headp);
@@ -404,8 +403,7 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
wr32(hw, txr->tail, i);
/* Mark outstanding work */
- if (que->busy == 0)
- que->busy = 1;
+ atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG);
return (0);
xmit_fail:
@@ -524,12 +522,14 @@ ixl_init_tx_ring(struct ixl_queue *que)
txr->next_avail = 0;
txr->next_to_clean = 0;
+ /* Reset watchdog status */
+ txr->watchdog_timer = 0;
+
#ifdef IXL_FDIR
/* Initialize flow director */
txr->atr_rate = ixl_atr_rate;
txr->atr_count = 0;
#endif
-
/* Free any existing tx mbufs. */
buf = txr->buffers;
for (int i = 0; i < que->num_desc; i++, buf++) {
@@ -818,7 +818,11 @@ ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
type = I40E_TX_DESC_DTYPE_CONTEXT;
cmd = I40E_TX_CTX_DESC_TSO;
- /* ERJ: this must not be less than 64 */
+ /* TSO MSS must not be less than 64 */
+ if (mp->m_pkthdr.tso_segsz < IXL_MIN_TSO_MSS) {
+ que->mss_too_small++;
+ mp->m_pkthdr.tso_segsz = IXL_MIN_TSO_MSS;
+ }
mss = mp->m_pkthdr.tso_segsz;
type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
@@ -878,7 +882,7 @@ ixl_txeof(struct ixl_queue *que)
/* These are not the descriptors you seek, move along :) */
if (txr->avail == que->num_desc) {
- que->busy = 0;
+ atomic_store_rel_32(&txr->watchdog_timer, 0);
return FALSE;
}
@@ -957,25 +961,10 @@ ixl_txeof(struct ixl_queue *que)
/*
- ** Hang detection, we know there's
- ** work outstanding or the first return
- ** would have been taken, so indicate an
- ** unsuccessful pass, in local_timer if
- ** the value is too great the queue will
- ** be considered hung. If anything has been
- ** cleaned then reset the state.
- */
- if ((processed == 0) && (que->busy != IXL_QUEUE_HUNG))
- ++que->busy;
-
- if (processed)
- que->busy = 1; /* Note this turns off HUNG */
-
- /*
* If there are no pending descriptors, clear the timeout.
*/
if (txr->avail == que->num_desc) {
- que->busy = 0;
+ atomic_store_rel_32(&txr->watchdog_timer, 0);
return FALSE;
}
@@ -1753,8 +1742,16 @@ next_desc:
/*
* Flush any outstanding LRO work
*/
+#if __FreeBSD_version >= 1100105
tcp_lro_flush_all(lro);
+#else
+ struct lro_entry *queued;
+ while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
+ SLIST_REMOVE_HEAD(&lro->lro_active, next);
+ tcp_lro_flush(lro, queued);
+ }
#endif
+#endif /* defined(INET6) || defined(INET) */
IXL_RX_UNLOCK(rxr);
return (FALSE);
diff --git a/sys/dev/ixl/ixlv.h b/sys/dev/ixl/ixlv.h
index 888b4e0a2bc7..aa5fe5f2ecf6 100644
--- a/sys/dev/ixl/ixlv.h
+++ b/sys/dev/ixl/ixlv.h
@@ -38,7 +38,8 @@
#include "ixlv_vc_mgr.h"
-#define IXLV_AQ_MAX_ERR 200
+#define IXLV_AQ_MAX_ERR 30
+#define IXLV_MAX_INIT_WAIT 120
#define IXLV_MAX_FILTERS 128
#define IXLV_MAX_QUEUES 16
#define IXLV_AQ_TIMEOUT (1 * hz)
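
[Editor's note] This change tightens the admin-queue error budget (200 down to 30) and introduces a separate IXLV_MAX_INIT_WAIT bound for init-time polling. The loop that consumes these limits is in if_ixlv.c and is not shown here; the sketch below is only a user-space model of bounding such a poll, with a hypothetical pf_ready() check standing in for the real VF/PF handshake.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_INIT_WAIT	120	/* stand-in for IXLV_MAX_INIT_WAIT */

static bool
pf_ready(int attempt)
{
	/* Hypothetical readiness check; pretend the PF answers late. */
	return (attempt >= 3);
}

int
main(void)
{
	for (int wait = 0; wait < MAX_INIT_WAIT; wait++) {
		if (pf_ready(wait)) {
			printf("PF ready after %d polls\n", wait + 1);
			return (0);
		}
		usleep(10000);	/* back off before polling again */
	}
	fprintf(stderr, "giving up: PF never answered\n");
	return (1);
}
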
diff --git a/sys/dev/ixl/ixlvc.c b/sys/dev/ixl/ixlvc.c
index 7b041c18a8d7..9c6b869a13ab 100644
--- a/sys/dev/ixl/ixlvc.c
+++ b/sys/dev/ixl/ixlvc.c
@@ -178,8 +178,11 @@ ixlv_send_pf_msg(struct ixlv_sc *sc,
err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
if (err)
- device_printf(dev, "Unable to send opcode %d to PF, "
- "error %d, aq status %d\n", op, err, hw->aq.asq_last_status);
+ device_printf(dev, "Unable to send opcode %s to PF, "
+ "status %s, aq error %s\n",
+ ixl_vc_opcode_str(op),
+ i40e_stat_str(hw, err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
return err;
}
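
[Editor's note] The failure path above now reports symbolic names via ixl_vc_opcode_str(), i40e_stat_str(), and i40e_aq_str() rather than raw integers. As a rough sketch of the switch-based style such a mapper typically uses (the real ixl_vc_opcode_str() is defined elsewhere in ixlvc.c and covers the full i40e_virtchnl_ops set; the cases below are only a sample and assume i40e_virtchnl.h is in scope):

/* Illustrative only; not the driver's actual mapping table. */
static const char *
example_opcode_str(enum i40e_virtchnl_ops op)
{
	switch (op) {
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		return ("GET_VF_RESOURCES");
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		return ("CONFIG_VSI_QUEUES");
	case I40E_VIRTCHNL_OP_SET_RSS_HENA:
		return ("SET_RSS_HENA");
	default:
		return ("UNKNOWN");
	}
}
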
@@ -871,7 +874,7 @@ ixlv_set_rss_hena(struct ixlv_sc *sc)
{
struct i40e_virtchnl_rss_hena hena;
- hena.hena = IXL_DEFAULT_RSS_HENA;
+ hena.hena = IXL_DEFAULT_RSS_HENA_X722;
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_SET_RSS_HENA,
(u8 *)&hena, sizeof(hena));
@@ -972,8 +975,8 @@ ixlv_vc_completion(struct ixlv_sc *sc,
/* Catch-all error response */
if (v_retval) {
device_printf(dev,
- "%s: AQ returned error %d to our request %d!\n",
- __func__, v_retval, v_opcode);
+ "%s: AQ returned error %s to our request %s!\n",
+ __func__, i40e_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode));
}
#ifdef IXL_DEBUG
@@ -1055,8 +1058,8 @@ ixlv_vc_completion(struct ixlv_sc *sc,
default:
#ifdef IXL_DEBUG
device_printf(dev,
- "%s: Received unexpected message %d from PF.\n",
- __func__, v_opcode);
+ "%s: Received unexpected message %s from PF.\n",
+ __func__, ixl_vc_opcode_str(v_opcode));
#endif
break;
}
diff --git a/sys/modules/ixl/Makefile b/sys/modules/ixl/Makefile
index e962c31da78d..a6b01f11a480 100644
--- a/sys/modules/ixl/Makefile
+++ b/sys/modules/ixl/Makefile
@@ -4,9 +4,9 @@
KMOD = if_ixl
SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h
-SRCS += opt_inet.h opt_inet6.h opt_rss.h
-SRCS += if_ixl.c ixl_pf_main.c ixl_pf_qmgr.c ixl_txrx.c i40e_osdep.c
-SRCS += ixl_pf_iov.c
+SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h
+SRCS += if_ixl.c ixl_pf_main.c ixl_pf_qmgr.c ixl_txrx.c ixl_pf_i2c.c i40e_osdep.c
+SRCS += ixl_pf_iov.c ixl_iw.c
# Shared source
SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c
diff --git a/sys/modules/ixlv/Makefile b/sys/modules/ixlv/Makefile
index ac2c2c46edbb..b79d8ccb0394 100644
--- a/sys/modules/ixlv/Makefile
+++ b/sys/modules/ixlv/Makefile
@@ -3,8 +3,8 @@
.PATH: ${.CURDIR}/../../dev/ixl
KMOD = if_ixlv
-SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h
-SRCS += opt_inet.h opt_inet6.h opt_rss.h
+SRCS = device_if.h bus_if.h pci_if.h
+SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h
SRCS += if_ixlv.c ixlvc.c ixl_txrx.c i40e_osdep.c
# Shared source