-rw-r--r--  sys/conf/files.amd64            |    2
-rw-r--r--  sys/dev/ixl/i40e_adminq.c       |   45
-rw-r--r--  sys/dev/ixl/i40e_adminq.h       |    4
-rw-r--r--  sys/dev/ixl/i40e_adminq_cmd.h   |  185
-rw-r--r--  sys/dev/ixl/i40e_alloc.h        |    2
-rw-r--r--  sys/dev/ixl/i40e_common.c       |  463
-rw-r--r--  sys/dev/ixl/i40e_dcb.c          | 1385
-rw-r--r--  sys/dev/ixl/i40e_dcb.h          |  224
-rw-r--r--  sys/dev/ixl/i40e_devids.h       |    8
-rw-r--r--  sys/dev/ixl/i40e_hmc.c          |    3
-rw-r--r--  sys/dev/ixl/i40e_hmc.h          |    2
-rw-r--r--  sys/dev/ixl/i40e_lan_hmc.c      |    2
-rw-r--r--  sys/dev/ixl/i40e_lan_hmc.h      |    2
-rw-r--r--  sys/dev/ixl/i40e_nvm.c          |  450
-rw-r--r--  sys/dev/ixl/i40e_osdep.c        |   52
-rw-r--r--  sys/dev/ixl/i40e_osdep.h        |    5
-rw-r--r--  sys/dev/ixl/i40e_prototype.h    |   67
-rw-r--r--  sys/dev/ixl/i40e_register.h     |    4
-rw-r--r--  sys/dev/ixl/i40e_status.h       |    3
-rw-r--r--  sys/dev/ixl/i40e_type.h         |   90
-rw-r--r--  sys/dev/ixl/i40e_virtchnl.h     |  424
-rw-r--r--  sys/dev/ixl/if_ixl.c            |  162
-rw-r--r--  sys/dev/ixl/if_ixlv.c           |  608
-rw-r--r--  sys/dev/ixl/ixl.h               |   36
-rw-r--r--  sys/dev/ixl/ixl_iw.c            |   41
-rw-r--r--  sys/dev/ixl/ixl_iw.h            |    4
-rw-r--r--  sys/dev/ixl/ixl_iw_int.h        |    2
-rw-r--r--  sys/dev/ixl/ixl_pf.h            |   35
-rw-r--r--  sys/dev/ixl/ixl_pf_i2c.c        |    2
-rw-r--r--  sys/dev/ixl/ixl_pf_iov.c        |  311
-rw-r--r--  sys/dev/ixl/ixl_pf_iov.h        |    6
-rw-r--r--  sys/dev/ixl/ixl_pf_main.c       | 1117
-rw-r--r--  sys/dev/ixl/ixl_pf_qmgr.c       |    2
-rw-r--r--  sys/dev/ixl/ixl_pf_qmgr.h       |    2
-rw-r--r--  sys/dev/ixl/ixl_txrx.c          |  590
-rw-r--r--  sys/dev/ixl/ixlv.h              |   13
-rw-r--r--  sys/dev/ixl/ixlv_vc_mgr.h       |    2
-rw-r--r--  sys/dev/ixl/ixlvc.c             |  273
-rw-r--r--  sys/dev/ixl/virtchnl.h          |  747
-rw-r--r--  sys/modules/ixl/Makefile        |    2
40 files changed, 5490 insertions, 1887 deletions
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index 2e85a8603790..68191238ff97 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -290,6 +290,8 @@ dev/ixl/i40e_nvm.c optional ixl pci | ixlv pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_adminq.c optional ixl pci | ixlv pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
+dev/ixl/i40e_dcb.c optional ixl pci \
+ compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/fdc/fdc.c optional fdc
dev/fdc/fdc_acpi.c optional fdc
dev/fdc/fdc_isa.c optional fdc isa
diff --git a/sys/dev/ixl/i40e_adminq.c b/sys/dev/ixl/i40e_adminq.c
index 6e922f5bdb23..9baee72ef1ba 100644
--- a/sys/dev/ixl/i40e_adminq.c
+++ b/sys/dev/ixl/i40e_adminq.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -644,6 +644,24 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
&oem_lo);
hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
+ /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
+ if ((hw->aq.api_maj_ver > 1) ||
+ ((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver >= 7)))
+ hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ }
+
+ /* Newer versions of firmware require lock when reading the NVM */
+ if ((hw->aq.api_maj_ver > 1) ||
+ ((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver >= 5)))
+ hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
@@ -899,8 +917,8 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
*/
if (i40e_asq_done(hw))
break;
- i40e_msec_delay(1);
- total_delay++;
+ i40e_usec_delay(50);
+ total_delay += 50;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
@@ -941,10 +959,15 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
- "AQTX: Writeback timeout.\n");
- status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
}
asq_send_command_error:
@@ -1007,9 +1030,9 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
/* set next_to_use to head */
if (!i40e_is_vf(hw))
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
- if (i40e_is_vf(hw))
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
+ else
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
@@ -1067,7 +1090,7 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
- i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
+ i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
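
[Editor's note] The three capability flags introduced above all hang off the same AdminQ API version gate. As a minimal, self-contained sketch of that comparison pattern (the struct and helper names below are illustrative stand-ins, not the driver's):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the api_maj_ver/api_min_ver pair in struct i40e_hw. */
struct aq_api_ver {
    uint16_t maj;
    uint16_t min;
};

/*
 * True when firmware reports an API version >= maj.min; this mirrors the
 * "(maj > X) || (maj == X && min >= Y)" tests used for the new flags.
 */
static bool
aq_api_ver_ge(const struct aq_api_ver *v, uint16_t maj, uint16_t min)
{
    return v->maj > maj || (v->maj == maj && v->min >= min);
}

With such a helper, the 802.1ad flag would be gated on aq_api_ver_ge(&v, 1, 7) and the NVM-lock flag on aq_api_ver_ge(&v, 1, 5).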
diff --git a/sys/dev/ixl/i40e_adminq.h b/sys/dev/ixl/i40e_adminq.h
index a0279273bd94..f58dc9828951 100644
--- a/sys/dev/ixl/i40e_adminq.h
+++ b/sys/dev/ixl/i40e_adminq.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -159,7 +159,7 @@ static INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
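
[Editor's note] The timeout is now expressed in microseconds to match the finer 50 us polling step added to i40e_asq_send_command() in the i40e_adminq.c hunk above; the overall 250 ms budget is unchanged. A self-contained sketch of that wait loop, with asq_done() and delay_us() as hypothetical stand-ins for i40e_asq_done() and i40e_usec_delay():

#include <stdbool.h>

#define ASQ_CMD_TIMEOUT_US 250000 /* 250 ms budget, in usecs */
#define ASQ_POLL_STEP_US 50

extern bool asq_done(void);            /* stand-in: descriptor written back? */
extern void delay_us(unsigned int us);

static bool
wait_for_asq_writeback(void)
{
    unsigned int total_delay = 0;

    do {
        if (asq_done())
            return true;
        delay_us(ASQ_POLL_STEP_US);
        total_delay += ASQ_POLL_STEP_US;
    } while (total_delay < ASQ_CMD_TIMEOUT_US);

    return false; /* caller reports a timeout or, per the new check, a critical AQ error */
}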
diff --git a/sys/dev/ixl/i40e_adminq_cmd.h b/sys/dev/ixl/i40e_adminq_cmd.h
index 88da079bdf59..90bcad0449a1 100644
--- a/sys/dev/ixl/i40e_adminq_cmd.h
+++ b/sys/dev/ixl/i40e_adminq_cmd.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -41,8 +41,17 @@
* This file needs to comply with the Linux Kernel coding style.
*/
+
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0005
+#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+
+#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
+ I40E_FW_API_VERSION_MINOR_X710 : \
+ I40E_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
struct i40e_aq_desc {
__le16 flags;
@@ -202,6 +211,7 @@ enum i40e_admin_queue_opc {
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_set_dcb_parameters = 0x0303,
/* TX scheduler */
i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
@@ -241,6 +251,8 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
i40e_aqc_opc_run_phy_activity = 0x0626,
+ i40e_aqc_opc_set_phy_register = 0x0628,
+ i40e_aqc_opc_get_phy_register = 0x0629,
/* NVM commands */
i40e_aqc_opc_nvm_read = 0x0701,
@@ -248,6 +260,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_nvm_update = 0x0703,
i40e_aqc_opc_nvm_config_read = 0x0704,
i40e_aqc_opc_nvm_config_write = 0x0705,
+ i40e_aqc_opc_nvm_progress = 0x0706,
i40e_aqc_opc_oem_post_update = 0x0720,
i40e_aqc_opc_thermal_sensor = 0x0721,
@@ -771,8 +784,52 @@ struct i40e_aqc_set_switch_config {
/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
+#define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004
__le16 valid_flags;
- u8 reserved[12];
+ /* The ethertype in switch_tag is dropped on ingress and used
+ * internally by the switch. Set this to zero for the default
+ * of 0x88a8 (802.1ad). Should be zero for firmware API
+ * versions lower than 1.7.
+ */
+ __le16 switch_tag;
+ /* The ethertypes in first_tag and second_tag are used to
+ * match the outer and inner VLAN tags (respectively) when HW
+ * double VLAN tagging is enabled via the set port parameters
+ * AQ command. Otherwise these are both ignored. Set them to
+ * zero for their defaults of 0x8100 (802.1Q). Should be zero
+ * for firmware API versions lower than 1.7.
+ */
+ __le16 first_tag;
+ __le16 second_tag;
+ /* Next byte is split into following:
+ * Bit 7 : 0 : No action, 1: Switch to mode defined by bits 6:0
+ * Bit 6 : 0 : Destination Port, 1: source port
+ * Bit 5..4 : L4 type
+ * 0: rsvd
+ * 1: TCP
+ * 2: UDP
+ * 3: Both TCP and UDP
+ * Bits 3:0 Mode
+ * 0: default mode
+ * 1: L4 port only mode
+ * 2: non-tunneled mode
+ * 3: tunneled mode
+ */
+#define I40E_AQ_SET_SWITCH_BIT7_VALID 0x80
+
+#define I40E_AQ_SET_SWITCH_L4_SRC_PORT 0x40
+
+#define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD 0x00
+#define I40E_AQ_SET_SWITCH_L4_TYPE_TCP 0x10
+#define I40E_AQ_SET_SWITCH_L4_TYPE_UDP 0x20
+#define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH 0x30
+
+#define I40E_AQ_SET_SWITCH_MODE_DEFAULT 0x00
+#define I40E_AQ_SET_SWITCH_MODE_L4_PORT 0x01
+#define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL 0x02
+#define I40E_AQ_SET_SWITCH_MODE_TUNNEL 0x03
+ u8 mode;
+ u8 rsvd5[5];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
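
[Editor's note] The comment block in the hunk above defines the semantics of the three new tag fields: zero selects the hardware default ethertype, anything else overrides it, and all three must stay zero on firmware API versions before 1.7. A trimmed, hypothetical sketch of filling them (struct reduced to just the tag fields; the CPU_TO_LE16 conversion the driver performs is omitted):

#include <stdint.h>

/* Trimmed stand-in for the tag fields of struct i40e_aqc_set_switch_config. */
struct switch_cfg_tags {
    uint16_t switch_tag; /* 0 => default 0x88a8 (802.1ad) */
    uint16_t first_tag;  /* 0 => default 0x8100, outer VLAN */
    uint16_t second_tag; /* 0 => default 0x8100, inner VLAN */
};

/* State the defaults explicitly, which is equivalent to leaving them zero. */
static void
set_explicit_tags(struct switch_cfg_tags *t)
{
    t->switch_tag = 0x88a8;
    t->first_tag = 0x8100;
    t->second_tag = 0x8100;
}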
@@ -1703,6 +1760,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
I40E_PHY_TYPE_10GBASE_AOC = 0xC,
I40E_PHY_TYPE_40GBASE_AOC = 0xD,
+ I40E_PHY_TYPE_UNRECOGNIZED = 0xE,
+ I40E_PHY_TYPE_UNSUPPORTED = 0xF,
I40E_PHY_TYPE_100BASE_TX = 0x11,
I40E_PHY_TYPE_1000BASE_T = 0x12,
I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1721,8 +1780,50 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_25GBASE_CR = 0x20,
I40E_PHY_TYPE_25GBASE_SR = 0x21,
I40E_PHY_TYPE_25GBASE_LR = 0x22,
- I40E_PHY_TYPE_MAX
-};
+ I40E_PHY_TYPE_25GBASE_AOC = 0x23,
+ I40E_PHY_TYPE_25GBASE_ACC = 0x24,
+ I40E_PHY_TYPE_MAX,
+ I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
+ I40E_PHY_TYPE_EMPTY = 0xFE,
+ I40E_PHY_TYPE_DEFAULT = 0xFF,
+};
+
+#define I40E_PHY_TYPES_BITMASK (BIT_ULL(I40E_PHY_TYPE_SGMII) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) | \
+ BIT_ULL(I40E_PHY_TYPE_XAUI) | \
+ BIT_ULL(I40E_PHY_TYPE_XFI) | \
+ BIT_ULL(I40E_PHY_TYPE_SFI) | \
+ BIT_ULL(I40E_PHY_TYPE_XLAUI) | \
+ BIT_ULL(I40E_PHY_TYPE_XLPPI) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_UNRECOGNIZED) | \
+ BIT_ULL(I40E_PHY_TYPE_UNSUPPORTED) | \
+ BIT_ULL(I40E_PHY_TYPE_100BASE_TX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) | \
+ BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_KR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_CR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC))
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
@@ -1778,6 +1879,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
+#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
u8 fec_cfg_curr_mod_ext_info;
#define I40E_AQ_ENABLE_FEC_KR 0x01
#define I40E_AQ_ENABLE_FEC_RS 0x02
@@ -1907,19 +2010,31 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+/* Since firmware API 1.7 loopback field keeps power class info as well */
+#define I40E_AQ_LOOPBACK_MASK 0x07
+#define I40E_AQ_PWR_CLASS_SHIFT_LB 6
+#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
__le16 max_frame_size;
u8 config;
#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 power_desc;
+ union {
+ struct {
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
#define I40E_AQ_PWR_CLASS_MASK 0x03
- u8 reserved[4];
+ u8 reserved[4];
+ };
+ struct {
+ u8 link_type[4];
+ u8 link_type_ext;
+ };
+ };
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
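
[Editor's note] Because the loopback byte now multiplexes two fields, readers must mask before use; that is exactly what the i40e_common.c hunk below does with I40E_AQ_LOOPBACK_MASK. A small sketch of both extractions (macro values copied from the defines above, names shortened):

#include <stdint.h>

#define AQ_LOOPBACK_MASK 0x07
#define AQ_PWR_CLASS_SHIFT_LB 6
#define AQ_PWR_CLASS_MASK_LB (0x03 << AQ_PWR_CLASS_SHIFT_LB)

/* Low three bits: the loopback mode itself. */
static uint8_t
lb_mode(uint8_t loopback)
{
    return loopback & AQ_LOOPBACK_MASK;
}

/* Bits 7:6: module power class, reported since firmware API 1.7. */
static uint8_t
pwr_class(uint8_t loopback)
{
    return (loopback & AQ_PWR_CLASS_MASK_LB) >> AQ_PWR_CLASS_SHIFT_LB;
}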
@@ -1956,11 +2071,28 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
/* Set Loopback mode (0x0618) */
struct i40e_aqc_set_lb_mode {
- __le16 lb_mode;
+ u8 lb_level;
+#define I40E_AQ_LB_NONE 0
+#define I40E_AQ_LB_MAC 1
+#define I40E_AQ_LB_SERDES 2
+#define I40E_AQ_LB_PHY_INT 3
+#define I40E_AQ_LB_PHY_EXT 4
+#define I40E_AQ_LB_CPVL_PCS 5
+#define I40E_AQ_LB_CPVL_EXT 6
#define I40E_AQ_LB_PHY_LOCAL 0x01
#define I40E_AQ_LB_PHY_REMOTE 0x02
#define I40E_AQ_LB_MAC_LOCAL 0x04
- u8 reserved[14];
+ u8 lb_type;
+#define I40E_AQ_LB_LOCAL 0
+#define I40E_AQ_LB_FAR 0x01
+ u8 speed;
+#define I40E_AQ_LB_SPEED_NONE 0
+#define I40E_AQ_LB_SPEED_1G 1
+#define I40E_AQ_LB_SPEED_10G 2
+#define I40E_AQ_LB_SPEED_40G 3
+#define I40E_AQ_LB_SPEED_20G 4
+ u8 force_speed;
+ u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
@@ -2002,14 +2134,34 @@ struct i40e_aqc_run_phy_activity {
I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
+/* Set PHY Register command (0x0628) */
+/* Get PHY Register command (0x0629) */
+struct i40e_aqc_phy_register_access {
+ u8 phy_interface;
+#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
+ u8 dev_addres;
+ u8 reserved1[2];
+ __le32 reg_address;
+ __le32 reg_value;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
+
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
*/
struct i40e_aqc_nvm_update {
u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
u8 module_pointer;
__le16 length;
__le32 offset;
@@ -2269,6 +2421,17 @@ struct i40e_aqc_lldp_start {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+/* Set DCB (direct 0x0303) */
+struct i40e_aqc_set_dcb_parameters {
+ u8 command;
+#define I40E_AQ_DCB_SET_AGENT 0x1
+#define I40E_DCB_VALID 0x1
+ u8 valid_flags;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
+
/* Get CEE DCBX Oper Config (0x0A07)
* uses the generic descriptor struct
* returns below as indirect response
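
[Editor's note] One more detail worth noting from this header: the NVM preservation flags form a two-bit field starting at bit 1 of command_flags. A minimal sketch of the encode step, matching how i40e_aq_update_nvm() builds the byte in the i40e_common.c hunk below (names shortened for illustration):

#include <stdint.h>

#define NVM_LAST_CMD 0x01
#define NVM_PRESERVE_SHIFT 1
#define NVM_PRESERVE_ALL 0x01      /* preserve all preservation-capable fields */
#define NVM_PRESERVE_SELECTED 0x03 /* preserve only the selected fields */

/* Build the command_flags byte for an NVM update on X722-class parts. */
static uint8_t
nvm_update_flags(int last_command, uint8_t preservation)
{
    uint8_t flags = 0;

    if (last_command)
        flags |= NVM_LAST_CMD;
    flags |= (uint8_t)(preservation << NVM_PRESERVE_SHIFT);
    return flags;
}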
diff --git a/sys/dev/ixl/i40e_alloc.h b/sys/dev/ixl/i40e_alloc.h
index b09570855e58..292016b81fd0 100644
--- a/sys/dev/ixl/i40e_alloc.h
+++ b/sys/dev/ixl/i40e_alloc.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ixl/i40e_common.c b/sys/dev/ixl/i40e_common.c
index d405cddf4ccd..91b41d73af4a 100644
--- a/sys/dev/ixl/i40e_common.c
+++ b/sys/dev/ixl/i40e_common.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,7 @@
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
-#include "i40e_virtchnl.h"
+#include "virtchnl.h"
/**
@@ -68,7 +68,6 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_25G_SFP28:
hw->mac.type = I40E_MAC_XL710;
break;
- case I40E_DEV_ID_X722_A0:
case I40E_DEV_ID_KX_X722:
case I40E_DEV_ID_QSFP_X722:
case I40E_DEV_ID_SFP_X722:
@@ -78,11 +77,11 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
hw->mac.type = I40E_MAC_X722;
break;
case I40E_DEV_ID_X722_VF:
- case I40E_DEV_ID_X722_A0_VF:
hw->mac.type = I40E_MAC_X722_VF;
break;
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
+ case I40E_DEV_ID_ADAPTIVE_VF:
hw->mac.type = I40E_MAC_VF;
break;
default:
@@ -298,6 +297,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err)
return "I40E_NOT_SUPPORTED";
case I40E_ERR_FIRMWARE_API_VERSION:
return "I40E_ERR_FIRMWARE_API_VERSION";
+ case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@@ -318,13 +319,15 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
- u16 len = LE16_TO_CPU(aq_desc->datalen);
u8 *buf = (u8 *)buffer;
+ u16 len;
u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
+ len = LE16_TO_CPU(aq_desc->datalen);
+
i40e_debug(hw, mask,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
LE16_TO_CPU(aq_desc->opcode),
@@ -1008,7 +1011,8 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
hw->pf_id = (u8)(func_rid & 0x7);
if (hw->mac.type == I40E_MAC_X722)
- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+ I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
status = i40e_init_nvm(hw);
return status;
@@ -1242,6 +1246,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_40GBASE_AOC:
case I40E_PHY_TYPE_10GBASE_AOC:
case I40E_PHY_TYPE_25GBASE_CR:
+ case I40E_PHY_TYPE_25GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_ACC:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
@@ -1324,6 +1330,8 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
* we don't need to do the PF Reset
*/
if (!cnt) {
+ u32 reg2 = 0;
+
reg = rd32(hw, I40E_PFGEN_CTRL);
wr32(hw, I40E_PFGEN_CTRL,
(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
@@ -1331,6 +1339,12 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
+ reg2 = rd32(hw, I40E_GLGEN_RSTAT);
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ DEBUGOUT("Core reset upcoming. Skipping PF reset request.\n");
+ DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg2);
+ return I40E_ERR_NOT_READY;
+ }
i40e_msec_delay(1);
}
if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
@@ -1519,6 +1533,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
+ case I40E_LINK_ACTIVITY:
continue;
default:
break;
@@ -1567,6 +1582,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
+ case I40E_LINK_ACTIVITY:
continue;
default:
break;
@@ -1577,9 +1593,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
- if (mode == I40E_LINK_ACTIVITY)
- blink = FALSE;
-
if (blink)
gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
else
@@ -1609,35 +1622,58 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
{
struct i40e_aq_desc desc;
enum i40e_status_code status;
+ u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
if (!abilities)
return I40E_ERR_PARAM;
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_phy_abilities);
+ do {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_abilities);
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
- if (abilities_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (abilities_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
- if (qualified_modules)
- desc.params.external.param0 |=
+ if (qualified_modules)
+ desc.params.external.param0 |=
CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
- if (report_init)
- desc.params.external.param0 |=
+ if (report_init)
+ desc.params.external.param0 |=
CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
- status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
- cmd_details);
+ status = i40e_asq_send_command(hw, &desc, abilities,
+ abilities_size, cmd_details);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
- status = I40E_ERR_UNKNOWN_PHY;
+ if (status != I40E_SUCCESS)
+ break;
+
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
+ i40e_msec_delay(1);
+ total_delay++;
+ status = I40E_ERR_TIMEOUT;
+ }
+ } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
+ (total_delay < max_delay));
+
+ if (status != I40E_SUCCESS)
+ return status;
if (report_init) {
- hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
- hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+ } else {
+ hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
+ hw->phy.phy_types |=
+ ((u64)abilities->phy_type_ext << 32);
+ }
}
return status;
@@ -1680,6 +1716,8 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
/**
* i40e_set_fc
* @hw: pointer to the hw struct
+ * @aq_failures: buffer to return AdminQ failure information
+ * @atomic_restart: whether to enable atomic link restart
*
* Set the requested flow control mode using set_phy_config.
**/
@@ -1899,7 +1937,7 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
I40E_AQ_CONFIG_FEC_RS_ENA);
hw_link_info->ext_info = resp->ext_info;
- hw_link_info->loopback = resp->loopback;
+ hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
@@ -1930,6 +1968,16 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+ if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= 7) {
+ __le32 tmp;
+
+ i40e_memcpy(&tmp, resp->link_type, sizeof(tmp),
+ I40E_NONDMA_TO_NONDMA);
+ hw->phy.phy_types = LE32_TO_CPU(tmp);
+ hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
+ }
+
/* save link status information */
if (link)
i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
@@ -2069,9 +2117,9 @@ aq_get_partner_advt_exit:
*
* Sets loopback modes.
**/
-enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw,
- u16 lb_modes,
- struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code
+i40e_aq_set_lb_modes(struct i40e_hw *hw, u8 lb_level, u8 lb_type, u8 speed,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_lb_mode *cmd =
@@ -2081,7 +2129,11 @@ enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_lb_modes);
- cmd->lb_mode = CPU_TO_LE16(lb_modes);
+ cmd->lb_level = lb_level;
+ cmd->lb_type = lb_type;
+ cmd->speed = speed;
+ if (speed)
+ cmd->force_speed = 1;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2607,13 +2659,14 @@ enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
* i40e_aq_set_switch_config
* @hw: pointer to the hardware structure
* @flags: bit flag values to set
+ * @mode: cloud filter mode
* @valid_flags: which bit flags to set
* @cmd_details: pointer to command details structure or NULL
*
* Set switch configuration bits
**/
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
- u16 flags, u16 valid_flags,
+ u16 flags, u16 valid_flags, u8 mode,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -2625,7 +2678,12 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
i40e_aqc_opc_set_switch_config);
scfg->flags = CPU_TO_LE16(flags);
scfg->valid_flags = CPU_TO_LE16(valid_flags);
-
+ scfg->mode = mode;
+ if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+ scfg->switch_tag = CPU_TO_LE16(hw->switch_tag);
+ scfg->first_tag = CPU_TO_LE16(hw->first_tag);
+ scfg->second_tag = CPU_TO_LE16(hw->second_tag);
+ }
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -2771,6 +2829,10 @@ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
+ hw->phy.link_info.req_fec_info =
+ abilities.fec_cfg_curr_mod_ext_info &
+ (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
+
i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
}
@@ -3019,8 +3081,8 @@ enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
* @mr_list: list of mirrored VSI SEIDs or VLAN IDs
* @cmd_details: pointer to command details structure or NULL
* @rule_id: Rule ID returned from FW
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
*
* Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
* VEBs/VEPA elements only
@@ -3080,8 +3142,8 @@ static enum i40e_status_code i40e_mirrorrule_op(struct i40e_hw *hw,
* @mr_list: list of mirrored VSI SEIDs or VLAN IDs
* @cmd_details: pointer to command details structure or NULL
* @rule_id: Rule ID returned from FW
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
*
* Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
**/
@@ -3111,8 +3173,8 @@ enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
* add_mirrorrule.
* @mr_list: list of mirrored VLAN IDs to be removed
* @cmd_details: pointer to command details structure or NULL
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
*
* Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
**/
@@ -3515,6 +3577,8 @@ enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
/**
* i40e_aq_oem_post_update - triggers an OEM specific flow after update
* @hw: pointer to the hw struct
+ * @buff: buffer for result
+ * @buff_size: buffer size
* @cmd_details: pointer to command details structure or NULL
**/
enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
@@ -3593,9 +3657,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
+ enum i40e_status_code status;
+ u16 id, ocp_cfg_word0;
u8 major_rev;
u32 i = 0;
- u16 id;
cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
@@ -3887,6 +3952,26 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
hw->num_ports++;
}
+ /* OCP cards case: if a mezz is removed the ethernet port is at
+ * disabled state in PRTGEN_CNF register. Additional NVM read is
+ * needed in order to check if we are dealing with OCP card.
+ * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting
+ * physical ports results in wrong partition id calculation and thus
+ * not supporting WoL.
+ */
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ) == I40E_SUCCESS) {
+ status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
+ 2 * I40E_SR_OCP_CFG_WORD0,
+ sizeof(ocp_cfg_word0),
+ &ocp_cfg_word0, TRUE, NULL);
+ if (status == I40E_SUCCESS &&
+ (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
+ hw->num_ports = 4;
+ i40e_release_nvm(hw);
+ }
+ }
+
valid_functions = p->valid_functions;
num_functions = 0;
while (valid_functions) {
@@ -3964,13 +4049,14 @@ exit:
* @length: length of the section to be written (in bytes from the offset)
* @data: command buffer (size [bytes] = length)
* @last_command: tells if this is the last command in a series
+ * @preservation_flags: Preservation mode flags
* @cmd_details: pointer to command details structure or NULL
*
* Update the NVM using the admin queue commands
**/
enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
- bool last_command,
+ bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -3991,6 +4077,16 @@ enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
+ cmd->command_flags |=
+ (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
+ cmd->command_flags |=
+ (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ }
cmd->module_pointer = module_pointer;
cmd->offset = CPU_TO_LE32(offset);
cmd->length = CPU_TO_LE16(length);
@@ -4006,6 +4102,28 @@ i40e_aq_update_nvm_exit:
}
/**
+ * i40e_aq_nvm_progress
+ * @hw: pointer to the hw struct
+ * @progress: pointer to progress returned from AQ
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Gets progress of flash rearrangement process
+ **/
+enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+
+ DEBUGFUNC("i40e_aq_nvm_progress");
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_progress);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ *progress = desc.params.raw[0];
+ return status;
+}
+
+/**
* i40e_aq_get_lldp_mib
* @hw: pointer to the hw struct
* @bridge_type: type of bridge requested
@@ -4319,7 +4437,39 @@ enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
cmd->command = I40E_AQ_LLDP_AGENT_START;
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ return status;
+}
+
+/**
+ * i40e_aq_set_dcb_parameters
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ * @dcb_enable: True if DCB configuration needs to be applied
+ *
+ **/
+enum i40e_status_code
+i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_dcb_parameters *cmd =
+ (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if ((hw->mac.type != I40E_MAC_XL710) ||
+ ((hw->aq.api_maj_ver < 1) ||
+ ((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 6))))
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_dcb_parameters);
+
+ if (dcb_enable) {
+ cmd->valid_flags = I40E_DCB_VALID;
+ cmd->command = I40E_AQ_DCB_SET_AGENT;
+ }
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -4387,7 +4537,6 @@ enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
* @udp_port: the UDP port to add in Host byte order
- * @header_len: length of the tunneling header length in DWords
* @protocol_index: protocol index type
* @filter_index: pointer to filter index
* @cmd_details: pointer to command details structure or NULL
@@ -5434,7 +5583,7 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
}
if (mac_addr)
- i40e_memcpy(cmd->mac, mac_addr, I40E_ETH_LENGTH_OF_ADDRESS,
+ i40e_memcpy(cmd->mac, mac_addr, ETH_ALEN,
I40E_NONDMA_TO_NONDMA);
cmd->etype = CPU_TO_LE16(ethtype);
@@ -5458,10 +5607,10 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @seid: VSI seid to add ethertype filter from
**/
-#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 seid)
{
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
@@ -5892,6 +6041,7 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
* @ret_buff_size: actual buffer size returned
* @ret_next_table: next block to read
* @ret_next_index: next index to read
+ * @cmd_details: pointer to command details structure or NULL
*
* Dump internal FW/HW data for debug purposes.
*
@@ -6014,7 +6164,7 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
* i40e_read_phy_register_clause22
* @hw: pointer to the HW structure
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@@ -6059,7 +6209,7 @@ enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
* i40e_write_phy_register_clause22
* @hw: pointer to the HW structure
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes specified PHY register value
@@ -6100,7 +6250,7 @@ enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@@ -6174,7 +6324,7 @@ phy_read_end:
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes value to specified PHY register
@@ -6241,7 +6391,7 @@ phy_write_end:
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes value to specified PHY register
@@ -6277,7 +6427,7 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@@ -6312,7 +6462,6 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
- * @phy_addr: Returned PHY address
*
* Gets PHY address for current port
**/
@@ -6399,6 +6548,64 @@ phy_blinking_end:
}
/**
+ * i40e_led_get_reg - read LED register
+ * @hw: pointer to the HW structure
+ * @led_addr: LED register address
+ * @reg_val: read register value
+ **/
+static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 *reg_val)
+{
+ enum i40e_status_code status;
+ u8 phy_addr = 0;
+
+ *reg_val = 0;
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ reg_val, NULL);
+ } else {
+ phy_addr = i40e_get_phy_address(hw, hw->port);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16 *)reg_val);
+ }
+ return status;
+}
+
+/**
+ * i40e_led_set_reg - write LED register
+ * @hw: pointer to the HW structure
+ * @led_addr: LED register address
+ * @reg_val: register value to write
+ **/
+static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 reg_val)
+{
+ enum i40e_status_code status;
+ u8 phy_addr = 0;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status = i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ reg_val, NULL);
+ } else {
+ phy_addr = i40e_get_phy_address(hw, hw->port);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)reg_val);
+ }
+
+ return status;
+}
+
+/**
* i40e_led_get_phy - return current on/off mode
* @hw: pointer to the hw struct
* @led_addr: address of led register to use
@@ -6410,17 +6617,23 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
{
enum i40e_status_code status = I40E_SUCCESS;
u16 gpio_led_port;
+ u32 reg_val_aq;
+ u16 temp_addr;
u8 phy_addr = 0;
u16 reg_val;
- u16 temp_addr;
- u8 port_num;
- u32 i;
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ &reg_val_aq, NULL);
+ if (status == I40E_SUCCESS)
+ *val = (u16)reg_val_aq;
+ return status;
+ }
temp_addr = I40E_PHY_LED_PROV_REG_1;
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
-
+ phy_addr = i40e_get_phy_address(hw, hw->port);
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
temp_addr++) {
status = i40e_read_phy_register_clause45(hw,
@@ -6442,7 +6655,9 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
* i40e_led_set_phy
* @hw: pointer to the HW structure
* @on: TRUE or FALSE
+ * @led_addr: address of led register to use
* @mode: original val plus bit for set or ignore
+ *
* Set led's on or off when controlled by the PHY
*
**/
@@ -6450,51 +6665,37 @@ enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
u16 led_addr, u32 mode)
{
enum i40e_status_code status = I40E_SUCCESS;
- u16 led_ctl = 0;
- u16 led_reg = 0;
- u8 phy_addr = 0;
- u8 port_num;
- u32 i;
+ u32 led_ctl = 0;
+ u32 led_reg = 0;
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
- status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_led_get_reg(hw, led_addr, &led_reg);
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
+ status = i40e_led_set_reg(hw, led_addr, led_reg);
if (status)
return status;
}
- status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_led_get_reg(hw, led_addr, &led_reg);
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
- status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+ status = i40e_led_set_reg(hw, led_addr, led_reg);
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
+ status = i40e_led_set_reg(hw, led_addr, led_ctl);
}
return status;
+
restore_config:
- status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
+ status = i40e_led_set_reg(hw, led_addr, led_ctl);
return status;
}
@@ -6544,7 +6745,9 @@ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
int retry = 5;
u32 val = 0;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
@@ -6603,7 +6806,9 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
bool use_register;
int retry = 5;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
@@ -6621,6 +6826,76 @@ do_retry:
}
/**
+ * i40e_aq_set_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: new register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write the external PHY register.
+ **/
+enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_phy_register_access *cmd =
+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_addres = dev_addr;
+ cmd->reg_address = CPU_TO_LE32(reg_addr);
+ cmd->reg_value = CPU_TO_LE32(reg_val);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: read register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the external PHY register.
+ **/
+enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_phy_register_access *cmd =
+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_addres = dev_addr;
+ cmd->reg_address = CPU_TO_LE32(reg_addr);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (!status)
+ *reg_val = LE32_TO_CPU(cmd->reg_value);
+
+ return status;
+}
+
+
+/**
* i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
* @v_opcode: opcodes for VF-PF communication
@@ -6634,7 +6909,7 @@ do_retry:
* completion before returning.
**/
enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
enum i40e_status_code v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
@@ -6673,9 +6948,9 @@ enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
* with appropriate information.
**/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg)
+ struct virtchnl_vf_resource *msg)
{
- struct i40e_virtchnl_vsi_resource *vsi_res;
+ struct virtchnl_vsi_resource *vsi_res;
int i;
vsi_res = &msg->vsi_res[0];
@@ -6684,20 +6959,18 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
- hw->dev_caps.dcb = msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_L2;
- hw->dev_caps.fcoe = (msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
- hw->dev_caps.iwarp = (msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
+ hw->dev_caps.dcb = msg->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.iwarp = (msg->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
for (i = 0; i < msg->num_vsis; i++) {
- if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+ if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
i40e_memcpy(hw->mac.perm_addr,
vsi_res->default_mac_addr,
- I40E_ETH_LENGTH_OF_ADDRESS,
+ ETH_ALEN,
I40E_NONDMA_TO_NONDMA);
i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
- I40E_ETH_LENGTH_OF_ADDRESS,
+ ETH_ALEN,
I40E_NONDMA_TO_NONDMA);
}
vsi_res++;
@@ -6714,14 +6987,14 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
**/
enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
{
- return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+ return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
I40E_SUCCESS, NULL, 0, NULL);
}
/**
* i40e_aq_set_arp_proxy_config
* @hw: pointer to the HW structure
- * @proxy_config - pointer to proxy config command table struct
+ * @proxy_config: pointer to proxy config command table struct
* @cmd_details: pointer to command details
*
* Set ARP offload parameters from pre-populated
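
[Editor's note] One pattern worth calling out from this file: i40e_aq_get_phy_capabilities() now retries while firmware returns EAGAIN, sleeping 1 ms per attempt up to I40E_MAX_PHY_TIMEOUT, and bails out immediately on EIO. A stripped-down sketch of that shape (every name here is a stand-in, not the driver's API, and the timeout bound is illustrative):

#include <stdbool.h>

enum aq_rc { AQ_RC_OK, AQ_RC_EAGAIN, AQ_RC_EIO };

extern enum aq_rc send_get_phy_caps(void); /* stand-in for the AQ send */
extern void msec_delay(unsigned int ms);

#define MAX_PHY_TIMEOUT_MS 10 /* illustrative; the driver uses I40E_MAX_PHY_TIMEOUT */

static bool
get_phy_caps_with_retry(void)
{
    unsigned int total_delay = 0;
    enum aq_rc rc;

    do {
        rc = send_get_phy_caps();
        if (rc == AQ_RC_EIO)
            return false;  /* unknown PHY: no point retrying */
        if (rc == AQ_RC_EAGAIN) {
            msec_delay(1); /* firmware busy: back off and retry */
            total_delay++;
        }
    } while (rc != AQ_RC_OK && total_delay < MAX_PHY_TIMEOUT_MS);

    return rc == AQ_RC_OK;
}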
diff --git a/sys/dev/ixl/i40e_dcb.c b/sys/dev/ixl/i40e_dcb.c
new file mode 100644
index 000000000000..46ea4e184f42
--- /dev/null
+++ b/sys/dev/ixl/i40e_dcb.c
@@ -0,0 +1,1385 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2017, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_dcb.h"
+
+/**
+ * i40e_get_dcbx_status
+ * @hw: pointer to the hw struct
+ * @status: Embedded DCBX Engine Status
+ *
+ * Get the DCBX status from the Firmware
+ **/
+enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+{
+ u32 reg;
+
+ if (!status)
+ return I40E_ERR_PARAM;
+
+ reg = rd32(hw, I40E_PRTDCB_GENS);
+ *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
+ I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
+ I40E_IEEE_ETS_WILLING_SHIFT);
+ etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
+ I40E_IEEE_ETS_CBS_SHIFT);
+ etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
+ I40E_IEEE_ETS_MAXTC_SHIFT);
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* Move offset to priority table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
+ I40E_IEEE_PFC_WILLING_SHIFT);
+ dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
+ I40E_IEEE_PFC_MBC_SHIFT);
+ dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
+ I40E_IEEE_PFC_CAP_SHIFT);
+ dcbcfg->pfc.pfcenable = buf[1];
+}
+
+/**
+ * i40e_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ **/
+static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength;
+ u16 offset = 0;
+ u16 length;
+ int i = 0;
+ u8 *buf;
+
+ typelength = I40E_NTOHS(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ buf = tlv->tlvinfo;
+
+ /* The App priority table starts 5 octets after TLV header */
+ length -= (sizeof(tlv->ouisubtype) + 1);
+
+ /* Move offset to App Priority Table */
+ offset++;
+
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (offset < length) {
+ dcbcfg->app[i].priority = (u8)((buf[offset] &
+ I40E_IEEE_APP_PRIO_MASK) >>
+ I40E_IEEE_APP_PRIO_SHIFT);
+ dcbcfg->app[i].selector = (u8)((buf[offset] &
+ I40E_IEEE_APP_SEL_MASK) >>
+ I40E_IEEE_APP_SEL_SHIFT);
+ dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
+ buf[offset + 2];
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+
+ dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u8 subtype;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ switch (subtype) {
+ case I40E_IEEE_SUBTYPE_ETS_CFG:
+ i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_ETS_REC:
+ i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_PFC_CFG:
+ i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_APP_PRI:
+ i40e_parse_ieee_app_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ **/
+static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ etscfg = &dcbcfg->etscfg;
+
+ if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+ etscfg->willing = 1;
+
+ etscfg->cbs = 0;
+ /* Priority Group Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* PG Percentage Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* Number of TCs supported (1 octet) */
+ etscfg->maxtcs = buf[offset];
+}
+
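/*
 * Illustrative sketch (not part of the patch): each octet of the CEE
 * Priority Group table packs two 4-bit PGIDs. As the loop above shows,
 * the high nibble (the PRIO_1 mask) belongs to the even-numbered
 * priority and the low nibble (PRIO_0) to the odd one.
 */
#include <stdint.h>

static void unpack_pgid_octet(uint8_t octet, uint8_t *even_prio_pgid,
			      uint8_t *odd_prio_pgid)
{
	*even_prio_pgid = (octet >> 4) & 0xF;	/* e.g. pri0 from octet 0 */
	*odd_prio_pgid  = octet & 0xF;		/* e.g. pri1 from octet 0 */
}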
+/**
+ * i40e_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ **/
+static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+ dcbcfg->pfc.willing = 1;
+
+ /* ------------------------
+ * | PFC Enable | PFC TCs |
+ * ------------------------
+ * | 1 octet | 1 octet |
+ */
+ dcbcfg->pfc.pfcenable = buf[0];
+ dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * i40e_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ **/
+static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 length, typelength, offset = 0;
+ struct i40e_cee_app_prio *app;
+ u8 i;
+
+ typelength = I40E_NTOHS(tlv->hdr.typelen);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+
+ dcbcfg->numapps = length / sizeof(*app);
+ if (!dcbcfg->numapps)
+ return;
+ if (dcbcfg->numapps > I40E_DCBX_MAX_APPS)
+ dcbcfg->numapps = I40E_DCBX_MAX_APPS;
+
+ for (i = 0; i < dcbcfg->numapps; i++) {
+ u8 up, selector;
+
+ app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
+ for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
+ if (app->prio_map & BIT(up))
+ break;
+ }
+ dcbcfg->app[i].priority = up;
+
+ /* Get Selector from lower 2 bits, and convert to IEEE */
+ selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK);
+ switch (selector) {
+ case I40E_CEE_APP_SEL_ETHTYPE:
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ break;
+ case I40E_CEE_APP_SEL_TCPIP:
+ dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+ break;
+ default:
+ /* Keep selector as it is for unknown types */
+ dcbcfg->app[i].selector = selector;
+ }
+
+ dcbcfg->app[i].protocolid = I40E_NTOHS(app->protocol);
+ /* Move to next app */
+ offset += sizeof(*app);
+ }
+}
+
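/*
 * Illustrative sketch (not part of the patch): the loop above picks the
 * lowest set bit of prio_map as the entry's user priority. An
 * equivalent standalone scan; like the original, it falls through to
 * max_up when no bit is set.
 */
#include <stdint.h>

static uint8_t lowest_prio_bit(uint8_t prio_map, uint8_t max_up)
{
	uint8_t up;

	for (up = 0; up < max_up; up++)
		if (prio_map & (1u << up))
			break;
	return up;	/* == max_up when prio_map is zero */
}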
+/**
+ * i40e_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 len, tlvlen, sublen, typelength;
+ struct i40e_cee_feat_tlv *sub_tlv;
+ u8 subtype, feat_tlv_count = 0;
+ u32 ouisubtype;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ /* Return if not CEE DCBX */
+ if (subtype != I40E_CEE_DCBX_TYPE)
+ return;
+
+ typelength = I40E_NTOHS(tlv->typelength);
+ tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
+ sizeof(struct i40e_cee_ctrl_tlv);
+ /* Return if no CEE DCBX Feature TLVs */
+ if (tlvlen <= len)
+ return;
+
+ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
+ while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
+ typelength = I40E_NTOHS(sub_tlv->hdr.typelen);
+ sublen = (u16)((typelength &
+ I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ switch (subtype) {
+ case I40E_CEE_SUBTYPE_PG_CFG:
+ i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case I40E_CEE_SUBTYPE_PFC_CFG:
+ i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case I40E_CEE_SUBTYPE_APP_PRI:
+ i40e_parse_cee_app_tlv(sub_tlv, dcbcfg);
+ break;
+ default:
+ return; /* Invalid Sub-type return */
+ }
+ feat_tlv_count++;
+ /* Move to next sub TLV */
+ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv +
+ sizeof(sub_tlv->hdr.typelen) +
+ sublen);
+ }
+}
+
+/**
+ * i40e_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Currently only IEEE 802.1Qaz TLV is supported, all others
+ * will be returned
+ **/
+static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u32 oui;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
+ I40E_LLDP_TLV_OUI_SHIFT);
+ switch (oui) {
+ case I40E_IEEE_8021QAZ_OUI:
+ i40e_parse_ieee_tlv(tlv, dcbcfg);
+ break;
+ case I40E_CEE_DCBX_OUI:
+ i40e_parse_cee_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_lldp_to_dcb_config
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ **/
+enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_org_tlv *tlv;
+ u16 type;
+ u16 length;
+ u16 typelength;
+ u16 offset = 0;
+
+ if (!lldpmib || !dcbcfg)
+ return I40E_ERR_PARAM;
+
+ /* set to the start of LLDPDU */
+ lldpmib += I40E_LLDP_MIB_HLEN;
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ while (1) {
+ typelength = I40E_NTOHS(tlv->typelength);
+ type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ offset += sizeof(typelength) + length;
+
+ /* END TLV or beyond LLDPDU size */
+ if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE))
+ break;
+
+ switch (type) {
+ case I40E_TLV_TYPE_ORG:
+ i40e_parse_org_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+
+ /* Move to next TLV */
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) +
+ length);
+ }
+
+ return ret;
+}
+
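/*
 * Illustrative sketch (not part of the patch): the 16-bit LLDP TLV header
 * packs a 7-bit type above a 9-bit length, which is all the loop above
 * needs to step through an LLDPDU. Reduced to a standalone skeleton:
 */
#include <stdint.h>
#include <stddef.h>

static void walk_lldp_tlvs(const uint8_t *du, size_t du_len)
{
	size_t off = 0;

	while (off + 2 <= du_len) {
		uint16_t tl   = ((uint16_t)du[off] << 8) | du[off + 1];
		uint16_t type = (tl >> 9) & 0x7F;	/* top 7 bits */
		uint16_t len  = tl & 0x1FF;		/* low 9 bits */

		if (type == 0)		/* End of LLDPDU TLV */
			break;
		/* type 127 is organizationally specific; dispatch here */
		off += 2 + len;
	}
}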
+/**
+ * i40e_aq_get_dcb_config
+ * @hw: pointer to the hw struct
+ * @mib_type: mib type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_virt_mem mem;
+ u8 *lldpmib;
+
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type,
+ (void *)lldpmib, I40E_LLDPDU_SIZE,
+ NULL, NULL, NULL);
+ if (ret)
+ goto free_mem;
+
+ /* Parse LLDP MIB to get dcb configuration */
+ ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg);
+
+free_mem:
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_cee_to_dcb_v1_config
+ * @cee_cfg: pointer to CEE v1 response configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE v1 configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_v1_config(
+ struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 status, tlv_status = LE16_TO_CPU(cee_cfg->tlv_status);
+ u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
+ u8 i, tc, err;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
+ for (i = 0; i < 4; i++) {
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prioritytable[i] =
+ cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to PFC config */
+ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+ status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
+ I40E_AQC_CEE_APP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ /* Add APPs if Error is False */
+ if (!err) {
+ /* CEE operating configuration supports FCoE/iSCSI/FIP only */
+ dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
+
+ /* FCoE APP */
+ dcbcfg->app[0].priority =
+ (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+ I40E_AQC_CEE_APP_FCOE_SHIFT;
+ dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ /* iSCSI APP */
+ dcbcfg->app[1].priority =
+ (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+ I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
+
+ /* FIP APP */
+ dcbcfg->app[2].priority =
+ (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+ I40E_AQC_CEE_APP_FIP_SHIFT;
+ dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
+ }
+}
+
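/*
 * Worked example (not part of the patch): why this conversion reads the
 * nibbles in the opposite order from i40e_parse_cee_pgcfg_tlv(). If
 * priorities 0 and 1 map to TCs 2 and 5, the on-wire CEE sub-TLV octet
 * is 0x25 (pri0 in the high nibble), while the firmware reports
 * oper_prio_tc[0] as 0x52 (pri0 in the low nibble). Either way both
 * decoders end up with prioritytable[0] == 2 and prioritytable[1] == 5.
 */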
+/**
+ * i40e_cee_to_dcb_config
+ * @cee_cfg: pointer to CEE configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_config(
+ struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status);
+ u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
+ u8 i, tc, err, sync, oper;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
+ for (i = 0; i < 4; i++) {
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prioritytable[i] =
+ cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to PFC config */
+ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+ i = 0;
+ status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
+ I40E_AQC_CEE_FCOE_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FCoE APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* FCoE APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+ I40E_AQC_CEE_APP_FCOE_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE;
+ i++;
+ }
+
+ status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
+ I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add iSCSI APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* iSCSI APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+ I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI;
+ i++;
+ }
+
+ status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
+ I40E_AQC_CEE_FIP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FIP APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* FIP APP */
+ dcbcfg->app[i].priority =
+ (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+ I40E_AQC_CEE_APP_FIP_SHIFT;
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP;
+ i++;
+ }
+ dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_get_ieee_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get IEEE mode DCB configuration from the Firmware
+ **/
+static enum i40e_status_code i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+
+ /* IEEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+ /* Get Local DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->local_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ ret = I40E_SUCCESS;
+
+out:
+ return ret;
+}
+
+/**
+ * i40e_get_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
+
+ /* If Firmware version < v4.33 on X710/XL710, IEEE only */
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4)))
+ return i40e_get_ieee_dcb_config(hw);
+
+ /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) {
+ ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
+ sizeof(cee_v1_cfg), NULL);
+ if (ret == I40E_SUCCESS) {
+ /* CEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ hw->local_dcbx_config.tlv_status =
+ LE16_TO_CPU(cee_v1_cfg.tlv_status);
+ i40e_cee_to_dcb_v1_config(&cee_v1_cfg,
+ &hw->local_dcbx_config);
+ }
+ } else {
+ ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg,
+ sizeof(cee_cfg), NULL);
+ if (ret == I40E_SUCCESS) {
+ /* CEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ hw->local_dcbx_config.tlv_status =
+ LE32_TO_CPU(cee_cfg.tlv_status);
+ i40e_cee_to_dcb_config(&cee_cfg,
+ &hw->local_dcbx_config);
+ }
+ }
+
+ /* CEE mode not enabled; try querying IEEE data */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ return i40e_get_ieee_dcb_config(hw);
+
+ if (ret != I40E_SUCCESS)
+ goto out;
+
+ /* Get CEE DCB Desired Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->desired_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ ret = I40E_SUCCESS;
+
+out:
+ return ret;
+}
+
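/*
 * Illustrative sketch (not part of the patch): the version gating above,
 * restated as a predicate. XL710 firmware below 4.33 is treated as
 * IEEE-only, and exactly 4.33 answers the CEE query with the v1 layout.
 */
#include <stdbool.h>

static bool fw_older_than(unsigned int maj, unsigned int min,
			  unsigned int ref_maj, unsigned int ref_min)
{
	return maj < ref_maj || (maj == ref_maj && min < ref_min);
}

/* e.g. fw_older_than(hw->aq.fw_maj_ver, hw->aq.fw_min_ver, 4, 33) */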
+/**
+ * i40e_init_dcb
+ * @hw: pointer to the hw struct
+ *
+ * Update DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_variables lldp_cfg;
+ u8 adminstatus = 0;
+
+ if (!hw->func_caps.dcb)
+ return ret;
+
+ /* Read LLDP NVM area */
+ ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ if (ret)
+ return ret;
+
+ /* Get the LLDP AdminStatus for the current port */
+ adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
+ adminstatus &= 0xF;
+
+ /* LLDP agent disabled */
+ if (!adminstatus) {
+ hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
+ return ret;
+ }
+
+ /* Get DCBX status */
+ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
+ if (ret)
+ return ret;
+
+ /* Check the DCBX Status */
+ switch (hw->dcbx_status) {
+ case I40E_DCBX_STATUS_DONE:
+ case I40E_DCBX_STATUS_IN_PROGRESS:
+ /* Get current DCBX configuration */
+ ret = i40e_get_dcb_config(hw);
+ if (ret)
+ return ret;
+ break;
+ case I40E_DCBX_STATUS_DISABLED:
+ return ret;
+ case I40E_DCBX_STATUS_NOT_STARTED:
+ case I40E_DCBX_STATUS_MULTIPLE_PEERS:
+ default:
+ break;
+ }
+
+ /* Configure the LLDP MIB change event */
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, TRUE, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
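/*
 * Illustrative sketch (not part of the patch): the NVM packs one 4-bit
 * LLDP AdminStatus per port into a single field, so port N's status is
 * the Nth nibble, exactly as extracted above. Status 0 means the agent
 * is disabled; 1, 2 and 3 select RX, TX and RX+TX (see the
 * I40E_LLDP_ADMINSTATUS_* defines in i40e_dcb.h below).
 */
#include <stdint.h>

static uint8_t port_adminstatus(uint32_t adminstatus, uint8_t port)
{
	return (adminstatus >> (port * 4)) & 0xF;
}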
+/**
+ * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
+ * @tlv: Fill the ETS config data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 priority0, priority1, maxtcwilling = 0;
+ struct i40e_dcb_ets_config *etscfg;
+ u16 offset = 0, typelength, i;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_ETS_TLV_LENGTH);
+ tlv->typelength = I40E_HTONS(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ if (etscfg->willing)
+ maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT);
+ maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK;
+ buf[offset] = maxtcwilling;
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority0 = etscfg->prioritytable[i * 2] & 0xF;
+ priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+ priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etscfg->tcbwtable[i];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etscfg->tsatable[i];
+}
+
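/*
 * Illustrative sketch (not part of the patch): packing the first ETS
 * octet from the diagram above: willing in bit 7, CBS in bit 6 (left
 * zero here, as in the function), and max TCs in the low three bits.
 */
#include <stdint.h>
#include <stdbool.h>

static uint8_t pack_ets_octet0(bool willing, uint8_t maxtcs)
{
	uint8_t octet = maxtcs & 0x7;

	if (willing)
		octet |= 1u << 7;
	return octet;
}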
+/**
+ * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
+ * @tlv: Fill ETS Recommended TLV in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etsrec;
+ u16 offset = 0, typelength, i;
+ u8 priority0, priority1;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_ETS_TLV_LENGTH);
+ tlv->typelength = I40E_HTONS(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ etsrec = &dcbcfg->etsrec;
+ /* First Octet is reserved */
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority0 = etsrec->prioritytable[i * 2] & 0xF;
+ priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+ priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etsrec->tcbwtable[i];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etsrec->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store to get PFC CFG data
+ *
+ * Prepare IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelength;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_PFC_TLV_LENGTH);
+ tlv->typelength = I40E_HTONS(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ if (dcbcfg->pfc.willing)
+ buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT);
+
+ if (dcbcfg->pfc.mbc)
+ buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT);
+
+ buf[0] |= dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcenable;
+}
+
+/**
+ * i40e_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format
+ * @tlv: Fill APP TLV in IEEE format
+ * @dcbcfg: Local store to get APP CFG data
+ *
+ * Prepare IEEE 802.1Qaz APP CFG TLV
+ **/
+static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength, length, offset = 0;
+ u8 priority, selector, i = 0;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ /* If there are no APP TLVs, just return */
+ if (dcbcfg->numapps == 0)
+ return;
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_APP_PRI);
+ tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+ /* Move offset to App Priority Table */
+ offset++;
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (i < dcbcfg->numapps) {
+ priority = dcbcfg->app[i].priority & 0x7;
+ selector = dcbcfg->app[i].selector & 0x7;
+ buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
+ buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
+ buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+ /* length includes size of ouisubtype + 1 reserved + 3*numapps */
+ length = sizeof(tlv->ouisubtype) + 1 + (i*3);
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ (length & 0x1FF));
+ tlv->typelength = I40E_HTONS(typelength);
+}
+
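/*
 * Worked example (not part of the patch): for two APP entries the length
 * computed above is sizeof(ouisubtype) + 1 reserved + 3 * 2 = 11 octets
 * (matching I40E_IEEE_APP_TLV_LENGTH in i40e_dcb.h), and the 16-bit
 * header packs type 127 into the top 7 bits:
 *
 *	typelength = (127 << 9) | (11 & 0x1FF) = 0xFE0B
 */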
+/**
+ * i40e_add_dcb_tlv - Prepare one IEEE TLV by ID
+ * @tlv: pointer to org tlv
+ * @dcbcfg: Local store which holds the DCB Config
+ * @tlvid: ID of the TLV to prepare
+ *
+ * Fills in the TLV that corresponds to the given TLV ID
+ **/
+static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg,
+ u16 tlvid)
+{
+ switch (tlvid) {
+ case I40E_IEEE_TLV_ID_ETS_CFG:
+ i40e_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_ETS_REC:
+ i40e_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_PFC_CFG:
+ i40e_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_APP_PRI:
+ i40e_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_set_dcb_config - Set the local LLDP MIB to FW
+ * @hw: pointer to the hw struct
+ *
+ * Set DCB configuration to the Firmware
+ **/
+enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_dcbx_config *dcbcfg;
+ struct i40e_virt_mem mem;
+ u8 mib_type, *lldpmib;
+ u16 miblen;
+
+ /* update the hw local config */
+ dcbcfg = &hw->local_dcbx_config;
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB;
+ if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) {
+ mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS <<
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT;
+ }
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg);
+ ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL);
+
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_dcb_config_to_lldp - Convert DCB configuration to MIB format
+ * @lldpmib: buffer that receives the LLDPDU
+ * @miblen: returned length of the LLDPDU
+ * @dcbcfg: DCB configuration to convert
+ *
+ * Serializes the local DCB configuration into LLDP MIB format
+ **/
+enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 length, offset = 0, tlvid = I40E_TLV_ID_START;
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_org_tlv *tlv;
+ u16 typelength;
+
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ while (1) {
+ i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
+ typelength = I40E_NTOHS(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ if (length)
+ offset += length + 2;
+ /* END TLV or beyond LLDPDU size */
+ if ((tlvid >= I40E_TLV_ID_END_OF_LLDPPDU) ||
+ (offset > I40E_LLDPDU_SIZE))
+ break;
+ /* Move to next TLV */
+ if (length)
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) + length);
+ }
+ *miblen = offset;
+ return ret;
+}
+
+/**
+ * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ * @module: address of the module pointer
+ * @word_offset: offset of LLDP configuration
+ *
+ * Reads the LLDP configuration data from NVM using passed addresses
+ **/
+static enum i40e_status_code _i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg,
+ u8 module, u32 word_offset)
+{
+ u32 address, offset = (2 * word_offset);
+ enum i40e_status_code ret;
+ __le16 raw_mem;
+ u16 mem;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(raw_mem), &raw_mem,
+ TRUE, NULL);
+ i40e_release_nvm(hw);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ mem = LE16_TO_CPU(raw_mem);
+ /* Check if this pointer needs to be read in word size or 4K sector
+ * units.
+ */
+ if (mem & I40E_PTR_TYPE)
+ address = (0x7FFF & mem) * 4096;
+ else
+ address = (0x7FFF & mem) * 2;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, module, offset, sizeof(raw_mem), &raw_mem,
+ TRUE, NULL);
+ i40e_release_nvm(hw);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ mem = LE16_TO_CPU(raw_mem);
+ offset = mem + word_offset;
+ offset *= 2;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, 0, address + offset,
+ sizeof(struct i40e_lldp_variables), lldp_cfg,
+ TRUE, NULL);
+ i40e_release_nvm(hw);
+
+err_lldp_cfg:
+ return ret;
+}
+
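/*
 * Illustrative sketch (not part of the patch): NVM pointers carry a type
 * flag in bit 15 (I40E_PTR_TYPE). When set, the low 15 bits count 4KB
 * sectors; when clear, they count 16-bit words, which is how the byte
 * address is derived above.
 */
#include <stdint.h>

static uint32_t nvm_ptr_to_byte_addr(uint16_t ptr)
{
	uint32_t units = ptr & 0x7FFF;

	return (ptr & 0x8000) ? units * 4096	/* 4KB sector units */
			      : units * 2;	/* word units */
}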
+/**
+ * i40e_read_lldp_cfg - read LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ *
+ * Reads the LLDP configuration data from NVM
+ **/
+enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ u32 mem;
+
+ if (!lldp_cfg)
+ return I40E_ERR_PARAM;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem),
+ &mem, TRUE, NULL);
+ i40e_release_nvm(hw);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ /* Read the bit that tells whether we are running a flat or
+ * structured NVM image. A flat image keeps the LLDP configuration
+ * in shadow RAM, so different addresses must be passed in each case.
+ */
+ if (mem & I40E_SR_NVM_MAP_STRUCTURE_TYPE) {
+ /* Flat NVM case */
+ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_SR_EMP_MODULE_PTR,
+ I40E_SR_LLDP_CFG_PTR);
+ } else {
+ /* Good old structured NVM image */
+ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_EMP_MODULE_PTR,
+ I40E_NVM_LLDP_CFG_PTR);
+ }
+
+ return ret;
+}
diff --git a/sys/dev/ixl/i40e_dcb.h b/sys/dev/ixl/i40e_dcb.h
new file mode 100644
index 000000000000..8bb011623c1c
--- /dev/null
+++ b/sys/dev/ixl/i40e_dcb.h
@@ -0,0 +1,224 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2017, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _I40E_DCB_H_
+#define _I40E_DCB_H_
+
+#include "i40e_type.h"
+
+#define I40E_DCBX_OFFLOAD_DISABLED 0
+#define I40E_DCBX_OFFLOAD_ENABLED 1
+
+#define I40E_DCBX_STATUS_NOT_STARTED 0
+#define I40E_DCBX_STATUS_IN_PROGRESS 1
+#define I40E_DCBX_STATUS_DONE 2
+#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3
+#define I40E_DCBX_STATUS_DISABLED 7
+
+#define I40E_TLV_TYPE_END 0
+#define I40E_TLV_TYPE_ORG 127
+
+#define I40E_IEEE_8021QAZ_OUI 0x0080C2
+#define I40E_IEEE_SUBTYPE_ETS_CFG 9
+#define I40E_IEEE_SUBTYPE_ETS_REC 10
+#define I40E_IEEE_SUBTYPE_PFC_CFG 11
+#define I40E_IEEE_SUBTYPE_APP_PRI 12
+
+#define I40E_CEE_DCBX_OUI 0x001b21
+#define I40E_CEE_DCBX_TYPE 2
+
+#define I40E_CEE_SUBTYPE_CTRL 1
+#define I40E_CEE_SUBTYPE_PG_CFG 2
+#define I40E_CEE_SUBTYPE_PFC_CFG 3
+#define I40E_CEE_SUBTYPE_APP_PRI 4
+
+#define I40E_CEE_MAX_FEAT_TYPE 3
+#define I40E_LLDP_ADMINSTATUS_DISABLED 0
+#define I40E_LLDP_ADMINSTATUS_ENABLED_RX 1
+#define I40E_LLDP_ADMINSTATUS_ENABLED_TX 2
+#define I40E_LLDP_ADMINSTATUS_ENABLED_RXTX 3
+
+/* Defines for LLDP TLV header */
+#define I40E_LLDP_MIB_HLEN 14
+#define I40E_LLDP_TLV_LEN_SHIFT 0
+#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
+#define I40E_LLDP_TLV_TYPE_SHIFT 9
+#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT)
+#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
+#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
+#define I40E_LLDP_TLV_OUI_SHIFT 8
+#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
+
+/* Defines for IEEE ETS TLV */
+#define I40E_IEEE_ETS_MAXTC_SHIFT 0
+#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
+#define I40E_IEEE_ETS_CBS_SHIFT 6
+#define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_WILLING_SHIFT 7
+#define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_PRIO_0_SHIFT 0
+#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
+#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
+#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_PRIO_0_SHIFT 0
+#define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT)
+#define I40E_CEE_PGID_PRIO_1_SHIFT 4
+#define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_STRICT 15
+
+/* Defines for IEEE TSA types */
+#define I40E_IEEE_TSA_STRICT 0
+#define I40E_IEEE_TSA_CBS 1
+#define I40E_IEEE_TSA_ETS 2
+#define I40E_IEEE_TSA_VENDOR 255
+
+/* Defines for IEEE PFC TLV */
+#define I40E_IEEE_PFC_CAP_SHIFT 0
+#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
+#define I40E_IEEE_PFC_MBC_SHIFT 6
+#define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_WILLING_SHIFT 7
+#define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT)
+
+/* Defines for IEEE APP TLV */
+#define I40E_IEEE_APP_SEL_SHIFT 0
+#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT)
+#define I40E_IEEE_APP_PRIO_SHIFT 5
+#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
+
+/* TLV definitions for preparing MIB */
+#define I40E_TLV_ID_CHASSIS_ID 0
+#define I40E_TLV_ID_PORT_ID 1
+#define I40E_TLV_ID_TIME_TO_LIVE 2
+#define I40E_IEEE_TLV_ID_ETS_CFG 3
+#define I40E_IEEE_TLV_ID_ETS_REC 4
+#define I40E_IEEE_TLV_ID_PFC_CFG 5
+#define I40E_IEEE_TLV_ID_APP_PRI 6
+#define I40E_TLV_ID_END_OF_LLDPPDU 7
+#define I40E_TLV_ID_START I40E_IEEE_TLV_ID_ETS_CFG
+
+#define I40E_IEEE_ETS_TLV_LENGTH 25
+#define I40E_IEEE_PFC_TLV_LENGTH 6
+#define I40E_IEEE_APP_TLV_LENGTH 11
+
+#pragma pack(1)
+
+/* IEEE 802.1AB LLDP TLV structure */
+struct i40e_lldp_generic_tlv {
+ __be16 typelength;
+ u8 tlvinfo[1];
+};
+
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct i40e_lldp_org_tlv {
+ __be16 typelength;
+ __be32 ouisubtype;
+ u8 tlvinfo[1];
+};
+
+struct i40e_cee_tlv_hdr {
+ __be16 typelen;
+ u8 operver;
+ u8 maxver;
+};
+
+struct i40e_cee_ctrl_tlv {
+ struct i40e_cee_tlv_hdr hdr;
+ __be32 seqno;
+ __be32 ackno;
+};
+
+struct i40e_cee_feat_tlv {
+ struct i40e_cee_tlv_hdr hdr;
+ u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80
+#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40
+#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20
+ u8 subtype;
+ u8 tlvinfo[1];
+};
+
+struct i40e_cee_app_prio {
+ __be16 protocol;
+ u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define I40E_CEE_APP_SELECTOR_MASK 0x03
+ __be16 lower_oui;
+ u8 prio_map;
+};
+#pragma pack()
+
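/*
 * Illustrative check (not part of the patch): #pragma pack(1) matters
 * here because these structs mirror on-wire octet layouts. Mirroring
 * two of them with stdint types, C11 static assertions pin the
 * intended sizes:
 */
#include <stdint.h>

#pragma pack(1)
struct cee_tlv_hdr  { uint16_t typelen; uint8_t operver; uint8_t maxver; };
struct cee_app_prio { uint16_t protocol; uint8_t upper_oui_sel;
		      uint16_t lower_oui; uint8_t prio_map; };
#pragma pack()

_Static_assert(sizeof(struct cee_tlv_hdr) == 4, "CEE header is 4 octets");
_Static_assert(sizeof(struct cee_app_prio) == 6, "APP entry is 6 octets");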
+/*
+ * TODO: The structures below define LLDP/DCBX variables and
+ * statistics, but we still need to determine how to retrieve the
+ * required information from the Firmware before using them
+ */
+
+/* IEEE 802.1AB LLDP Agent Statistics */
+struct i40e_lldp_stats {
+ u64 remtablelastchangetime;
+ u64 remtableinserts;
+ u64 remtabledeletes;
+ u64 remtabledrops;
+ u64 remtableageouts;
+ u64 txframestotal;
+ u64 rxframesdiscarded;
+ u64 rxportframeerrors;
+ u64 rxportframestotal;
+ u64 rxporttlvsdiscardedtotal;
+ u64 rxporttlvsunrecognizedtotal;
+ u64 remtoomanyneighbors;
+};
+
+/* IEEE 802.1Qaz DCBX variables */
+struct i40e_dcbx_variables {
+ u32 defmaxtrafficclasses;
+ u32 defprioritytcmapping;
+ u32 deftcbandwidth;
+ u32 deftsaassignment;
+};
+
+enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw,
+ u16 *status);
+enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg);
+enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg);
+enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw);
+enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw);
+enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg);
+
+#endif /* _I40E_DCB_H_ */
diff --git a/sys/dev/ixl/i40e_devids.h b/sys/dev/ixl/i40e_devids.h
index 12ba99f83823..939bbcd5319e 100644
--- a/sys/dev/ixl/i40e_devids.h
+++ b/sys/dev/ixl/i40e_devids.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -54,8 +54,7 @@
#define I40E_DEV_ID_25G_SFP28 0x158B
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
-#define I40E_DEV_ID_X722_A0 0x374C
-#define I40E_DEV_ID_X722_A0_VF 0x374D
+#define I40E_DEV_ID_ADAPTIVE_VF 0x1889
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
#define I40E_DEV_ID_SFP_X722 0x37D0
@@ -68,4 +67,7 @@
(d) == I40E_DEV_ID_QSFP_B || \
(d) == I40E_DEV_ID_QSFP_C)
+#define i40e_is_25G_device(d) ((d) == I40E_DEV_ID_25G_B || \
+ (d) == I40E_DEV_ID_25G_SFP28)
+
#endif /* _I40E_DEVIDS_H_ */
diff --git a/sys/dev/ixl/i40e_hmc.c b/sys/dev/ixl/i40e_hmc.c
index 4f0de109c341..f8072751d9d6 100644
--- a/sys/dev/ixl/i40e_hmc.c
+++ b/sys/dev/ixl/i40e_hmc.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -211,7 +211,6 @@ exit:
* @hw: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
- * @is_pf: distinguishes a VF from a PF
*
* This function:
* 1. Marks the entry in pd tabe (for paged address mode) or in sd table
diff --git a/sys/dev/ixl/i40e_hmc.h b/sys/dev/ixl/i40e_hmc.h
index 1a9995ab6afc..398332effa28 100644
--- a/sys/dev/ixl/i40e_hmc.h
+++ b/sys/dev/ixl/i40e_hmc.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ixl/i40e_lan_hmc.c b/sys/dev/ixl/i40e_lan_hmc.c
index a6716a913ce0..5472352b5d34 100644
--- a/sys/dev/ixl/i40e_lan_hmc.c
+++ b/sys/dev/ixl/i40e_lan_hmc.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ixl/i40e_lan_hmc.h b/sys/dev/ixl/i40e_lan_hmc.h
index cce4a3931c9f..e6d0ab6644c9 100644
--- a/sys/dev/ixl/i40e_lan_hmc.h
+++ b/sys/dev/ixl/i40e_lan_hmc.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ixl/i40e_nvm.c b/sys/dev/ixl/i40e_nvm.c
index 3d36c6433a9e..81fd5a97b3f9 100644
--- a/sys/dev/ixl/i40e_nvm.c
+++ b/sys/dev/ixl/i40e_nvm.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -34,18 +34,6 @@
#include "i40e_prototype.h"
-enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
- u16 *data);
-enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
- u16 *data);
-enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data);
-enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data);
-enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 words, void *data,
- bool last_command);
-
/**
* i40e_init_nvm_ops - Initialize NVM function pointers
* @hw: pointer to the HW structure
@@ -208,52 +196,6 @@ static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
}
/**
- * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
- * @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
- * @data: word read from the Shadow RAM
- *
- * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
- **/
-enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data)
-{
- enum i40e_status_code ret_code = I40E_SUCCESS;
-
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
- ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- } else {
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
- }
- i40e_release_nvm(hw);
- }
- return ret_code;
-}
-
-/**
- * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
- * @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
- * @data: word read from the Shadow RAM
- *
- * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
- **/
-enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
- u16 offset,
- u16 *data)
-{
- enum i40e_status_code ret_code = I40E_SUCCESS;
-
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- else
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
- return ret_code;
-}
-
-/**
* i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
@@ -304,15 +246,68 @@ read_nvm_exit:
}
/**
+ * i40e_read_nvm_aq - Read Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer for the words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
+ **/
+static enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data,
+ bool last_command)
+{
+ enum i40e_status_code ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
+
+ DEBUGFUNC("i40e_read_nvm_aq");
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+ /* We can read only up to 4KB (one sector) in one AQ read */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read fail error: tried to read %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+ /* A single read cannot spread over two sectors */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
+ offset, words);
+ else
+ ret_code = i40e_aq_read_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, &cmd_details);
+
+ return ret_code;
+}
+
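/*
 * Worked example (not part of the patch): the last check above rejects
 * accesses that straddle a 4KB sector. With I40E_SR_SECTOR_SIZE_IN_WORDS
 * == 2048, offset 2040 with 16 words fails, since
 * (2040 + 15) / 2048 == 1 while 2040 / 2048 == 0 (the first and last
 * words land in different sectors); offset 2040 with 8 words stays
 * inside sector 0 and passes.
 */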
+/**
* i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
- * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ * Reads one 16 bit word from the Shadow RAM using the AdminQ
**/
-enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
- u16 *data)
+static enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
@@ -325,55 +320,49 @@ enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
}
/**
- * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
+ * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
* @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @words: (in) number of words to read; (out) number of words actually read
- * @data: words read from the Shadow RAM
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
*
- * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
- * method. The buffer read is preceded by the NVM ownership take
- * and followed by the release.
+ * Reads one 16 bit word from the Shadow RAM.
+ *
+ * Do not use this function except in cases where the nvm lock is already
+ * taken via i40e_acquire_nvm().
**/
-enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
- u16 offset,
- u16 *words, u16 *data)
+enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
+ u16 offset,
+ u16 *data)
{
- enum i40e_status_code ret_code = I40E_SUCCESS;
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
- else
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
- return ret_code;
+ return i40e_read_nvm_word_aq(hw, offset, data);
+
+ return i40e_read_nvm_word_srctl(hw, offset, data);
}
/**
- * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acuire lock if necessary
+ * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
* @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @words: (in) number of words to read; (out) number of words actually read
- * @data: words read from the Shadow RAM
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
*
- * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
- * method. The buffer read is preceded by the NVM ownership take
- * and followed by the release.
+ * Reads one 16 bit word from the Shadow RAM.
**/
-enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
- data);
- i40e_release_nvm(hw);
- }
- } else {
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
- }
+
+ if (ret_code)
+ return ret_code;
+ ret_code = __i40e_read_nvm_word(hw, offset, data);
+
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ i40e_release_nvm(hw);
return ret_code;
}
@@ -388,8 +377,8 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+static enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
u16 index, word;
@@ -421,8 +410,8 @@ enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+static enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
enum i40e_status_code ret_code;
u16 read_size = *words;
@@ -470,57 +459,55 @@ read_nvm_buffer_aq_exit:
}
/**
- * i40e_read_nvm_aq - Read Shadow RAM.
- * @hw: pointer to the HW structure.
- * @module_pointer: module pointer location in words from the NVM beginning
- * @offset: offset in words from module start
- * @words: number of words to write
- * @data: buffer with words to write to the Shadow RAM
- * @last_command: tells the AdminQ that this is the last command
+ * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
*
- * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+ * method.
**/
-enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 words, void *data,
- bool last_command)
+enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
+ u16 offset,
+ u16 *words, u16 *data)
{
- enum i40e_status_code ret_code = I40E_ERR_NVM;
- struct i40e_asq_cmd_details cmd_details;
-
- DEBUGFUNC("i40e_read_nvm_aq");
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ return i40e_read_nvm_buffer_aq(hw, offset, words, data);
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+}
- /* Here we are checking the SR limit only for the flat memory model.
- * We cannot do it for the module-based model, as we did not acquire
- * the NVM resource yet (we cannot get the module pointer value).
- * Firmware will check the module-based model.
- */
- if ((offset + words) > hw->nvm.sr_size)
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVM write error: offset %d beyond Shadow RAM limit %d\n",
- (offset + words), hw->nvm.sr_size);
- else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
- /* We can write only up to 4KB (one sector), in one AQ write */
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVM write fail error: tried to write %d words, limit is %d.\n",
- words, I40E_SR_SECTOR_SIZE_IN_WORDS);
- else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
- != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
- /* A single write cannot spread over two sectors */
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
- offset, words);
- else
- ret_code = i40e_aq_read_nvm(hw, module_pointer,
- 2 * offset, /*bytes*/
- 2 * words, /*bytes*/
- data, last_command, &cmd_details);
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquires lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+ data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ }
return ret_code;
}
+
/**
* i40e_write_nvm_aq - Writes Shadow RAM.
* @hw: pointer to the HW structure.
@@ -562,7 +549,8 @@ enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
ret_code = i40e_aq_update_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
- data, last_command, &cmd_details);
+ data, last_command, 0,
+ &cmd_details);
return ret_code;
}
@@ -651,16 +639,14 @@ enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
data = (u16 *)vmem.va;
/* read pointer to VPD area */
- ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR,
- &vpd_module);
+ ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
if (ret_code != I40E_SUCCESS) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
}
/* read pointer to PCIe Alt Auto-load module */
- ret_code = __i40e_read_nvm_word(hw,
- I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
&pcie_alt_module);
if (ret_code != I40E_SUCCESS) {
ret_code = I40E_ERR_NVM_CHECKSUM;
@@ -750,19 +736,19 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
DEBUGFUNC("i40e_validate_nvm_checksum");
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- i40e_release_nvm(hw);
- if (ret_code != I40E_SUCCESS)
- goto i40e_validate_nvm_checksum_exit;
- } else {
- goto i40e_validate_nvm_checksum_exit;
- }
-
- i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+ /* We must acquire the NVM lock in order to correctly synchronize the
+ * NVM accesses across multiple PFs. Without doing so it is possible
+ * for one of the PFs to read invalid data potentially indicating that
+ * the checksum is invalid.
+ */
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret_code)
+ return ret_code;
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+ __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+ i40e_release_nvm(hw);
+ if (ret_code)
+ return ret_code;
/* Verify read checksum from EEPROM is the same as
* calculated checksum
@@ -774,7 +760,6 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
if (checksum)
*checksum = checksum_local;
-i40e_validate_nvm_checksum_exit:
return ret_code;
}
@@ -805,6 +790,9 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *perrno);
+static enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
static INLINE u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@@ -814,6 +802,12 @@ static INLINE u8 i40e_nvmupd_get_transaction(u32 val)
return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
+static INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val)
+{
+ return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
+ I40E_NVM_PRESERVATION_FLAGS_SHIFT);
+}
+
static const char *i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_INVALID",
"I40E_NVMUPD_READ_CON",
@@ -831,6 +825,7 @@ static const char *i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_STATUS",
"I40E_NVMUPD_EXEC_AQ",
"I40E_NVMUPD_GET_AQ_RESULT",
+ "I40E_NVMUPD_GET_AQ_EVENT",
};
/**
@@ -900,6 +895,15 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
}
+ /* Acquire lock to prevent race condition where adminq_task
+ * can execute after i40e_nvmupd_nvm_read/write but before state
+ * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
+ *
+	 * During NVMUpdate, the lock is observed to be held for ~5ms for
+	 * most commands; however, it is held for ~60ms for the
+	 * NVMUPD_CSUM_LCB command.
+ */
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@@ -919,8 +923,9 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
* the wait info and return before doing anything else
*/
if (cmd->offset == 0xffff) {
- i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
- return I40E_SUCCESS;
+ i40e_nvmupd_clear_wait_state(hw);
+ status = I40E_SUCCESS;
+ break;
}
status = I40E_ERR_NOT_READY;
@@ -935,6 +940,8 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
*perrno = -ESRCH;
break;
}
+
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
return status;
}
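
Note how the early return in the I40E_NVMUPD_STATE_INIT_WAIT case above becomes a break: with the AdminQ receive spinlock now held across the whole state machine, every path must fall through to the single i40e_release_spinlock() at the bottom of the function. A minimal standalone illustration of this single-exit locking pattern, in portable C with pthreads rather than the driver's spinlock wrappers:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state = 0;

/* Single-exit pattern: every case breaks out of the switch so that the
 * unlock below is always reached; an early return would leak the lock. */
static int
run_state_machine(void)
{
	int status;

	pthread_mutex_lock(&lock);
	switch (state) {
	case 0:
		status = 1;
		break;		/* not "return (1);" */
	default:
		status = -1;
		break;
	}
	pthread_mutex_unlock(&lock);
	return (status);
}

int main(void)
{
	printf("status %d\n", run_state_machine());
	return (0);
}
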
@@ -1064,6 +1071,10 @@ static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
break;
+ case I40E_NVMUPD_GET_AQ_EVENT:
+ status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
+ break;
+
default:
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: bad cmd %s in init state\n",
@@ -1242,39 +1253,55 @@ retry:
}
/**
- * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * i40e_nvmupd_clear_wait_state - clear wait state on hw
* @hw: pointer to the hardware structure
- * @opcode: the event that just happened
**/
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
{
- if (opcode == hw->nvm_wait_opcode) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n",
+ hw->nvm_wait_opcode);
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
- if (hw->nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->nvm_release_on_done = FALSE;
- }
- hw->nvm_wait_opcode = 0;
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = FALSE;
+ }
+ hw->nvm_wait_opcode = 0;
- if (hw->aq.arq_last_status) {
- hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
- return;
- }
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
- default:
- break;
- }
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ * @desc: AdminQ descriptor
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc)
+{
+ u32 aq_desc_len = sizeof(struct i40e_aq_desc);
+
+ if (opcode == hw->nvm_wait_opcode) {
+ i40e_memcpy(&hw->nvm_aq_event_desc, desc,
+ aq_desc_len, I40E_NONDMA_TO_NONDMA);
+ i40e_nvmupd_clear_wait_state(hw);
}
}
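
The split into i40e_nvmupd_clear_wait_state() and a check_wait_event() that first snapshots the completion descriptor exists so that a later GET_AQ_EVENT request can hand that descriptor back to the NVM update tool. A hypothetical miniature of the save-then-fetch pattern (the names and the 32-byte size are illustrative, though struct i40e_aq_desc is in fact 32 bytes):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct desc { unsigned char raw[32]; };	/* stand-in for i40e_aq_desc */

static struct desc saved_event;

/* AdminQ task side: snapshot the awaited completion descriptor. */
static void
on_awaited_event(const struct desc *d)
{
	memcpy(&saved_event, d, sizeof(saved_event));
}

/* GET_AQ_EVENT side: copy it out, trimmed to the caller's buffer,
 * as i40e_nvmupd_get_aq_event() does with cmd->data_size. */
static size_t
fetch_event(void *buf, size_t len)
{
	if (len > sizeof(saved_event))
		len = sizeof(saved_event);
	memcpy(buf, &saved_event, len);
	return (len);
}

int main(void)
{
	struct desc d = { { 0xAA } };
	unsigned char out[64];

	on_awaited_event(&d);
	printf("copied %zu bytes\n", fetch_event(out, sizeof(out)));
	return (0);
}
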
@@ -1332,6 +1359,9 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
else if (module == 0)
upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
break;
+ case I40E_NVM_AQE:
+ upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
+ break;
}
break;
@@ -1394,6 +1424,9 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
u32 aq_data_len;
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ if (cmd->offset == 0xffff)
+ return I40E_SUCCESS;
+
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@@ -1430,6 +1463,9 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
}
}
+ if (cmd->offset)
+ memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
+
/* and away we go! */
status = i40e_asq_send_command(hw, aq_desc, buff,
buff_size, &cmd_details);
@@ -1439,6 +1475,7 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ return status;
}
/* should we wait for a followup event? */
@@ -1520,6 +1557,41 @@ static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
}
/**
+ * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen);
+
+ /* check copylength range */
+ if (cmd->data_size > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, aq_total_len);
+ cmd->data_size = aq_total_len;
+ }
+
+ i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size,
+ I40E_NONDMA_TO_NONDMA);
+
+ return I40E_SUCCESS;
+}
+
+/**
* i40e_nvmupd_nvm_read - Read NVM
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -1614,18 +1686,20 @@ static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
enum i40e_status_code status = I40E_SUCCESS;
struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
+ u8 preservation_flags;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
+ preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
status = i40e_aq_update_nvm(hw, module, cmd->offset,
(u16)cmd->data_size, bytes, last,
- &cmd_details);
+ preservation_flags, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
diff --git a/sys/dev/ixl/i40e_osdep.c b/sys/dev/ixl/i40e_osdep.c
index a2b25ccf1437..35031ab000f2 100644
--- a/sys/dev/ixl/i40e_osdep.c
+++ b/sys/dev/ixl/i40e_osdep.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -59,6 +59,8 @@ i40e_status
i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
free(mem->va, M_DEVBUF);
+ mem->va = NULL;
+
return(0);
}
@@ -207,47 +209,47 @@ const char *
ixl_vc_opcode_str(uint16_t op)
{
switch (op) {
- case I40E_VIRTCHNL_OP_VERSION:
+ case VIRTCHNL_OP_VERSION:
return ("VERSION");
- case I40E_VIRTCHNL_OP_RESET_VF:
+ case VIRTCHNL_OP_RESET_VF:
return ("RESET_VF");
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
return ("GET_VF_RESOURCES");
- case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
return ("CONFIG_TX_QUEUE");
- case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
return ("CONFIG_RX_QUEUE");
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
return ("CONFIG_VSI_QUEUES");
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
return ("CONFIG_IRQ_MAP");
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_ENABLE_QUEUES:
return ("ENABLE_QUEUES");
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
return ("DISABLE_QUEUES");
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
- return ("ADD_ETHER_ADDRESS");
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
- return ("DEL_ETHER_ADDRESS");
- case I40E_VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ return ("ADD_ETH_ADDR");
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ return ("DEL_ETH_ADDR");
+ case VIRTCHNL_OP_ADD_VLAN:
return ("ADD_VLAN");
- case I40E_VIRTCHNL_OP_DEL_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
return ("DEL_VLAN");
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
return ("CONFIG_PROMISCUOUS_MODE");
- case I40E_VIRTCHNL_OP_GET_STATS:
+ case VIRTCHNL_OP_GET_STATS:
return ("GET_STATS");
- case I40E_VIRTCHNL_OP_FCOE:
- return ("FCOE");
- case I40E_VIRTCHNL_OP_EVENT:
+ case VIRTCHNL_OP_RSVD:
+ return ("RSVD");
+ case VIRTCHNL_OP_EVENT:
return ("EVENT");
- case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
return ("CONFIG_RSS_KEY");
- case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
return ("CONFIG_RSS_LUT");
- case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
return ("GET_RSS_HENA_CAPS");
- case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ case VIRTCHNL_OP_SET_RSS_HENA:
return ("SET_RSS_HENA");
default:
return ("UNKNOWN");
diff --git a/sys/dev/ixl/i40e_osdep.h b/sys/dev/ixl/i40e_osdep.h
index 5467745ba857..ec1f5b1552a8 100644
--- a/sys/dev/ixl/i40e_osdep.h
+++ b/sys/dev/ixl/i40e_osdep.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -239,4 +239,7 @@ ixl_flush_osdep(struct i40e_osdep *osdep)
#define ixl_flush(a) ixl_flush_osdep((a)->back)
+enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+
#endif /* _I40E_OSDEP_H_ */
diff --git a/sys/dev/ixl/i40e_prototype.h b/sys/dev/ixl/i40e_prototype.h
index 22b264d095e1..2f675231b9a7 100644
--- a/sys/dev/ixl/i40e_prototype.h
+++ b/sys/dev/ixl/i40e_prototype.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -37,7 +37,7 @@
#include "i40e_type.h"
#include "i40e_alloc.h"
-#include "i40e_virtchnl.h"
+#include "virtchnl.h"
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
@@ -140,8 +140,9 @@ enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
u64 *advt_reg,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes,
- struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_set_lb_modes(struct i40e_hw *hw, u8 lb_level, u8 lb_type, u8 speed,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
@@ -226,7 +227,7 @@ enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
- u16 flags, u16 valid_flags,
+ u16 flags, u16 valid_flags, u8 mode,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
enum i40e_aq_resources_ids resource,
@@ -261,7 +262,9 @@ enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
- bool last_command,
+ bool last_command, u8 preservation_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
@@ -288,6 +291,10 @@ enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
+ bool dcb_enable,
+ struct i40e_asq_cmd_details
+ *cmd_details);
enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
@@ -459,7 +466,9 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc);
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw);
@@ -471,6 +480,37 @@ static INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
return i40e_ptype_lookup[ptype];
}
+/**
+ * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
+ * @link_speed: the speed to convert
+ *
+ * Returns the link_speed in terms of the virtchnl interface, for use in
+ * converting link_speed as reported by the AdminQ into the format used for
+ * talking to virtchnl devices. If we can't represent the link speed properly,
+ * report LINK_SPEED_UNKNOWN.
+ **/
+static INLINE enum virtchnl_link_speed
+i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
+{
+ switch (link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ return VIRTCHNL_LINK_SPEED_100MB;
+ case I40E_LINK_SPEED_1GB:
+ return VIRTCHNL_LINK_SPEED_1GB;
+ case I40E_LINK_SPEED_10GB:
+ return VIRTCHNL_LINK_SPEED_10GB;
+ case I40E_LINK_SPEED_40GB:
+ return VIRTCHNL_LINK_SPEED_40GB;
+ case I40E_LINK_SPEED_20GB:
+ return VIRTCHNL_LINK_SPEED_20GB;
+ case I40E_LINK_SPEED_25GB:
+ return VIRTCHNL_LINK_SPEED_25GB;
+ case I40E_LINK_SPEED_UNKNOWN:
+ default:
+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
+ }
+}
+
/* prototype for functions used for SW spinlocks */
void i40e_init_spinlock(struct i40e_spinlock *sp);
void i40e_acquire_spinlock(struct i40e_spinlock *sp);
@@ -479,10 +519,10 @@ void i40e_destroy_spinlock(struct i40e_spinlock *sp);
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg);
+ struct virtchnl_vf_resource *msg);
enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
enum i40e_status_code v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
@@ -508,6 +548,15 @@ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+
enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
struct i40e_aqc_arp_proxy_data *proxy_config,
struct i40e_asq_cmd_details *cmd_details);
diff --git a/sys/dev/ixl/i40e_register.h b/sys/dev/ixl/i40e_register.h
index fb41ea23a3d0..6e74929a505b 100644
--- a/sys/dev/ixl/i40e_register.h
+++ b/sys/dev/ixl/i40e_register.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -2803,7 +2803,7 @@
#define I40E_GLV_RUPP_MAX_INDEX 383
#define I40E_GLV_RUPP_RUPP_SHIFT 0
#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_TEPC_MAX_INDEX 383
#define I40E_GLV_TEPC_TEPC_SHIFT 0
#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
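
The I40E_GLV_TEPC fix changes both the parameter name and, more importantly, the stride from 4 to 8 bytes; with the old macro every index past zero landed on the wrong address. A quick standalone check of the difference (macro values copied from the lines above):

#include <stdio.h>

#define I40E_GLV_TEPC_OLD(_i)	(0x00344000 + ((_i) * 4))
#define I40E_GLV_TEPC_NEW(_i)	(0x00344000 + ((_i) * 8))

int main(void)
{
	/* Index 0 is unchanged; every index past it moves. */
	printf("index 1: old %#x, new %#x\n",
	    I40E_GLV_TEPC_OLD(1), I40E_GLV_TEPC_NEW(1));
	return (0);
}
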
diff --git a/sys/dev/ixl/i40e_status.h b/sys/dev/ixl/i40e_status.h
index 08f09d0c8a25..bed25f14b436 100644
--- a/sys/dev/ixl/i40e_status.h
+++ b/sys/dev/ixl/i40e_status.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -103,6 +103,7 @@ enum i40e_status_code {
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
I40E_ERR_FIRMWARE_API_VERSION = -65,
+ I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
#endif /* _I40E_STATUS_H_ */
diff --git a/sys/dev/ixl/i40e_type.h b/sys/dev/ixl/i40e_type.h
index 73af9653b5d4..f36824860206 100644
--- a/sys/dev/ixl/i40e_type.h
+++ b/sys/dev/ixl/i40e_type.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -66,6 +66,9 @@
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
+/* Max timeout in ms for the phy to respond */
+#define I40E_MAX_PHY_TIMEOUT 500
+
/* Check whether address is multicast. */
#define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
@@ -81,7 +84,7 @@
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
-#define I40E_ETH_LENGTH_OF_ADDRESS 6
+#define ETH_ALEN 6
/* Data type manipulation macros. */
#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
#define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
@@ -185,9 +188,6 @@ enum i40e_memcpy_type {
I40E_DMA_TO_NONDMA
};
-#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#define I40E_FW_API_VERSION_MINOR_X710 0x0005
-
/* These are structs for managing the hardware information and the operations.
* The structures of function pointers are filled out at init time when we
@@ -257,6 +257,7 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
+ u8 req_fec_info;
u8 fec_info;
u8 ext_info;
u8 loopback;
@@ -340,6 +341,10 @@ struct i40e_phy_info {
I40E_PHY_TYPE_OFFSET)
#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_AOC BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
+ I40E_PHY_TYPE_OFFSET)
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
@@ -360,6 +365,15 @@ struct i40e_hw_capabilities {
#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+ /* Cloud filter modes:
+ * Mode1: Filter on L4 port only
+ * Mode2: Filter for non-tunneled traffic
+ * Mode3: Filter for tunnel traffic
+ */
+#define I40E_CLOUD_FILTER_MODE1 0x6
+#define I40E_CLOUD_FILTER_MODE2 0x7
+#define I40E_CLOUD_FILTER_MODE3 0x8
+
u32 management_mode;
u32 mng_protocols_over_mctp;
#define I40E_MNG_PROTOCOL_PLDM 0x2
@@ -427,10 +441,10 @@ struct i40e_hw_capabilities {
struct i40e_mac_info {
enum i40e_mac_type type;
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 port_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u8 port_addr[ETH_ALEN];
u16 max_fcoeq;
};
@@ -472,6 +486,7 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_STATUS,
I40E_NVMUPD_EXEC_AQ,
I40E_NVMUPD_GET_AQ_RESULT,
+ I40E_NVMUPD_GET_AQ_EVENT,
};
enum i40e_nvmupd_state {
@@ -491,15 +506,21 @@ enum i40e_nvmupd_state {
#define I40E_NVM_MOD_PNT_MASK 0xFF
-#define I40E_NVM_TRANS_SHIFT 8
-#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
-#define I40E_NVM_CON 0x0
-#define I40E_NVM_SNT 0x1
-#define I40E_NVM_LCB 0x2
-#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
-#define I40E_NVM_ERA 0x4
-#define I40E_NVM_CSUM 0x8
-#define I40E_NVM_EXEC 0xf
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
+ (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
+#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_AQE 0xe
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT)
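
These defines describe a single packed config word: the module pointer in bits 7:0, the transaction type in bits 11:8, and the new preservation flags in bits 13:12. A standalone sketch of packing and unpacking such a word, mirroring the mask values above (the module value 0x42 is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Mask values copied from the defines above. */
#define NVM_MOD_PNT_MASK	0xFFu
#define NVM_TRANS_SHIFT		8
#define NVM_TRANS_MASK		(0xFu << NVM_TRANS_SHIFT)
#define NVM_PRESERVE_SHIFT	12
#define NVM_PRESERVE_MASK	(0x3u << NVM_PRESERVE_SHIFT)

int main(void)
{
	/* Hypothetical word: module 0x42, I40E_NVM_SA (0x3),
	 * I40E_NVM_PRESERVATION_FLAGS_ALL (0x2). */
	uint32_t config = 0x42 | (0x3u << NVM_TRANS_SHIFT) |
	    (0x2u << NVM_PRESERVE_SHIFT);

	printf("module 0x%02x, trans 0x%x, preserve 0x%x\n",
	    (unsigned)(config & NVM_MOD_PNT_MASK),
	    (unsigned)((config & NVM_TRANS_MASK) >> NVM_TRANS_SHIFT),
	    (unsigned)((config & NVM_PRESERVE_MASK) >> NVM_PRESERVE_SHIFT));
	return (0);
}

This is the same extraction the new i40e_nvmupd_get_preservation_flags() helper performs in i40e_nvm.c.
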
@@ -515,6 +536,19 @@ struct i40e_nvm_access {
u8 data[1];
};
+/* (Q)SFP module access definitions */
+#define I40E_I2C_EEPROM_DEV_ADDR 0xA0
+#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2
+#define I40E_MODULE_TYPE_ADDR 0x00
+#define I40E_MODULE_REVISION_ADDR 0x01
+#define I40E_MODULE_SFF_8472_COMP 0x5E
+#define I40E_MODULE_SFF_8472_SWAP 0x5C
+#define I40E_MODULE_SFF_ADDR_MODE 0x04
+#define I40E_MODULE_SFF_DIAG_CAPAB 0x40
+#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D
+#define I40E_MODULE_TYPE_QSFP28 0x11
+#define I40E_MODULE_QSFP_MAX_LEN 640
+
/* PCI bus types */
enum i40e_bus_type {
i40e_bus_type_unknown = 0,
@@ -669,6 +703,7 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_aq_desc nvm_aq_event_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;
@@ -689,8 +724,16 @@ struct i40e_hw {
u16 wol_proxy_vsi_seid;
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
+#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
u64 flags;
+ /* Used in set switch config AQ command */
+ u16 switch_tag;
+ u16 first_tag;
+ u16 second_tag;
+
/* debug mask */
u32 debug_mask;
char err_str[16];
@@ -1433,7 +1476,8 @@ struct i40e_hw_port_stats {
#define I40E_SR_PE_IMAGE_PTR 0x0C
#define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D
#define I40E_SR_MNG_CONFIG_PTR 0x0E
-#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_EMP_MODULE_PTR 0x0F
+#define I40E_SR_EMP_MODULE_PTR 0x48
#define I40E_SR_PBA_FLAGS 0x15
#define I40E_SR_PBA_BLOCK_PTR 0x16
#define I40E_SR_BOOT_CONFIG_PTR 0x17
@@ -1474,6 +1518,11 @@ struct i40e_hw_port_stats {
#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
+#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
+#define I40E_PTR_TYPE BIT(15)
+#define I40E_SR_OCP_CFG_WORD0 0x2B
+#define I40E_SR_OCP_ENABLED BIT(15)
/* Shadow RAM related */
#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
@@ -1589,7 +1638,8 @@ enum i40e_reset_type {
};
/* IEEE 802.1AB LLDP Agent Variables from NVM */
-#define I40E_NVM_LLDP_CFG_PTR 0xD
+#define I40E_NVM_LLDP_CFG_PTR 0x06
+#define I40E_SR_LLDP_CFG_PTR 0x31
struct i40e_lldp_variables {
u16 length;
u16 adminstatus;
diff --git a/sys/dev/ixl/i40e_virtchnl.h b/sys/dev/ixl/i40e_virtchnl.h
deleted file mode 100644
index 32af9c8e28c3..000000000000
--- a/sys/dev/ixl/i40e_virtchnl.h
+++ /dev/null
@@ -1,424 +0,0 @@
-/******************************************************************************
-
- Copyright (c) 2013-2015, Intel Corporation
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifndef _I40E_VIRTCHNL_H_
-#define _I40E_VIRTCHNL_H_
-
-#include "i40e_type.h"
-
-/* Description:
- * This header file describes the VF-PF communication protocol used
- * by the various i40e drivers.
- *
- * Admin queue buffer usage:
- * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
- * flags, retval, datalen, and data addr are all used normally.
- * Firmware copies the cookie fields when sending messages between the PF and
- * VF, but uses all other fields internally. Due to this limitation, we
- * must send all messages as "indirect", i.e. using an external buffer.
- *
- * All the vsi indexes are relative to the VF. Each VF can have maximum of
- * three VSIs. All the queue indexes are relative to the VSI. Each VF can
- * have a maximum of sixteen queues for all of its VSIs.
- *
- * The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value is of
- * i40e_status_code type, defined in the i40e_type.h.
- *
- * In general, VF driver initialization should roughly follow the order of these
- * opcodes. The VF driver must first validate the API version of the PF driver,
- * then request a reset, then get resources, then configure queues and
- * interrupts. After these operations are complete, the VF driver may start
- * its queues, optionally add MAC and VLAN filters, and process traffic.
- */
-
-/* Opcodes for VF-PF communication. These are placed in the v_opcode field
- * of the virtchnl_msg structure.
- */
-enum i40e_virtchnl_ops {
-/* The PF sends status change events to VFs using
- * the I40E_VIRTCHNL_OP_EVENT opcode.
- * VFs send requests to the PF using the other ops.
- */
- I40E_VIRTCHNL_OP_UNKNOWN = 0,
- I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
- I40E_VIRTCHNL_OP_RESET_VF = 2,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
- I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
- I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
- I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
- I40E_VIRTCHNL_OP_ADD_VLAN = 12,
- I40E_VIRTCHNL_OP_DEL_VLAN = 13,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
- I40E_VIRTCHNL_OP_GET_STATS = 15,
- I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
- I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
- I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
- I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
- I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
-
-};
-
-/* Virtual channel message descriptor. This overlays the admin queue
- * descriptor. All other data is passed in external buffers.
- */
-
-struct i40e_virtchnl_msg {
- u8 pad[8]; /* AQ flags/opcode/len/retval fields */
- enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
- enum i40e_status_code v_retval; /* ditto for desc->retval */
- u32 vfid; /* used by PF when sending to VF */
-};
-
-/* Message descriptions and data structures.*/
-
-/* I40E_VIRTCHNL_OP_VERSION
- * VF posts its version number to the PF. PF responds with its version number
- * in the same format, along with a return code.
- * Reply from PF has its major/minor versions also in param0 and param1.
- * If there is a major version mismatch, then the VF cannot operate.
- * If there is a minor version mismatch, then the VF can operate but should
- * add a warning to the system log.
- *
- * This enum element MUST always be specified as == 1, regardless of other
- * changes in the API. The PF must always respond to this message without
- * error regardless of version mismatch.
- */
-#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
-
-struct i40e_virtchnl_version_info {
- u32 major;
- u32 minor;
-};
-
-/* I40E_VIRTCHNL_OP_RESET_VF
- * VF sends this request to PF with no parameters
- * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
- * until reset completion is indicated. The admin queue must be reinitialized
- * after this operation.
- *
- * When reset is complete, PF must ensure that all queues in all VSIs associated
- * with the VF are stopped, all queue configurations in the HMC are set to 0,
- * and all MAC and VLAN filters (except the default MAC address) on all VSIs
- * are cleared.
- */
-
-/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * Version 1.0 VF sends this request to PF with no parameters
- * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
- * PF responds with an indirect message containing
- * i40e_virtchnl_vf_resource and one or more
- * i40e_virtchnl_vsi_resource structures.
- */
-
-struct i40e_virtchnl_vsi_resource {
- u16 vsi_id;
- u16 num_queue_pairs;
- enum i40e_vsi_type vsi_type;
- u16 qset_handle;
- u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
-};
-/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
-
-#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
-
-struct i40e_virtchnl_vf_resource {
- u16 num_vsis;
- u16 num_queue_pairs;
- u16 max_vectors;
- u16 max_mtu;
-
- u32 vf_offload_flags;
- u32 rss_key_size;
- u32 rss_lut_size;
-
- struct i40e_virtchnl_vsi_resource vsi_res[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
- * VF sends this message to set up parameters for one TX queue.
- * External data buffer contains one instance of i40e_virtchnl_txq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Tx queue config info */
-struct i40e_virtchnl_txq_info {
- u16 vsi_id;
- u16 queue_id;
- u16 ring_len; /* number of descriptors, multiple of 8 */
- u16 headwb_enabled;
- u64 dma_ring_addr;
- u64 dma_headwb_addr;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
- * VF sends this message to set up parameters for one RX queue.
- * External data buffer contains one instance of i40e_virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Rx queue config info */
-struct i40e_virtchnl_rxq_info {
- u16 vsi_id;
- u16 queue_id;
- u32 ring_len; /* number of descriptors, multiple of 32 */
- u16 hdr_size;
- u16 splithdr_enabled;
- u32 databuffer_size;
- u32 max_pkt_size;
- u64 dma_ring_addr;
- enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
- * VF sends this message to set parameters for all active TX and RX queues
- * associated with the specified VSI.
- * PF configures queues and returns status.
- * If the number of queues specified is greater than the number of queues
- * associated with the VSI, an error is returned and no queues are configured.
- */
-struct i40e_virtchnl_queue_pair_info {
- /* NOTE: vsi_id and queue_id should be identical for both queues. */
- struct i40e_virtchnl_txq_info txq;
- struct i40e_virtchnl_rxq_info rxq;
-};
-
-struct i40e_virtchnl_vsi_queue_config_info {
- u16 vsi_id;
- u16 num_queue_pairs;
- struct i40e_virtchnl_queue_pair_info qpair[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
- * VF uses this message to map vectors to queues.
- * The rxq_map and txq_map fields are bitmaps used to indicate which queues
- * are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
- * PF configures interrupt mapping and returns status.
- */
-struct i40e_virtchnl_vector_map {
- u16 vsi_id;
- u16 vector_id;
- u16 rxq_map;
- u16 txq_map;
- u16 rxitr_idx;
- u16 txitr_idx;
-};
-
-struct i40e_virtchnl_irq_map_info {
- u16 num_vectors;
- struct i40e_virtchnl_vector_map vecmap[1];
-};
-
-/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
- * I40E_VIRTCHNL_OP_DISABLE_QUEUES
- * VF sends these message to enable or disable TX/RX queue pairs.
- * The queues fields are bitmaps indicating which queues to act upon.
- * (Currently, we only support 16 queues per VF, but we make the field
- * u32 to allow for expansion.)
- * PF performs requested action and returns status.
- */
-struct i40e_virtchnl_queue_select {
- u16 vsi_id;
- u16 pad;
- u32 rx_queues;
- u32 tx_queues;
-};
-
-/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
- * VF sends this message in order to add one or more unicast or multicast
- * address filters for the specified VSI.
- * PF adds the filters and returns status.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
- * VF sends this message in order to remove one or more unicast or multicast
- * filters for the specified VSI.
- * PF removes the filters and returns status.
- */
-
-struct i40e_virtchnl_ether_addr {
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 pad[2];
-};
-
-struct i40e_virtchnl_ether_addr_list {
- u16 vsi_id;
- u16 num_elements;
- struct i40e_virtchnl_ether_addr list[1];
-};
-
-/* I40E_VIRTCHNL_OP_ADD_VLAN
- * VF sends this message to add one or more VLAN tag filters for receives.
- * PF adds the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_VLAN
- * VF sends this message to remove one or more VLAN tag filters for receives.
- * PF removes the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-struct i40e_virtchnl_vlan_filter_list {
- u16 vsi_id;
- u16 num_elements;
- u16 vlan_id[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
- * VF sends VSI id and flags.
- * PF returns status code in retval.
- * Note: we assume that broadcast accept mode is always enabled.
- */
-struct i40e_virtchnl_promisc_info {
- u16 vsi_id;
- u16 flags;
-};
-
-#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
-#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
-
-/* I40E_VIRTCHNL_OP_GET_STATS
- * VF sends this message to request stats for the selected VSI. VF uses
- * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
- * field is ignored by the PF.
- *
- * PF replies with struct i40e_eth_stats in an external buffer.
- */
-
-/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
- * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
- * VF sends these messages to configure RSS. Only supported if both PF
- * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
- * configuration negotiation. If this is the case, then the rss fields in
- * the vf resource struct are valid.
- * Both the key and LUT are initialized to 0 by the PF, meaning that
- * RSS is effectively disabled until set up by the VF.
- */
-struct i40e_virtchnl_rss_key {
- u16 vsi_id;
- u16 key_len;
- u8 key[1]; /* RSS hash key, packed bytes */
-};
-
-struct i40e_virtchnl_rss_lut {
- u16 vsi_id;
- u16 lut_entries;
- u8 lut[1]; /* RSS lookup table*/
-};
-
-/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
- * I40E_VIRTCHNL_OP_SET_RSS_HENA
- * VF sends these messages to get and set the hash filter enable bits for RSS.
- * By default, the PF sets these to all possible traffic types that the
- * hardware supports. The VF can query this value if it wants to change the
- * traffic types that are hashed by the hardware.
- * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
- */
-struct i40e_virtchnl_rss_hena {
- u64 hena;
-};
-
-/* I40E_VIRTCHNL_OP_EVENT
- * PF sends this message to inform the VF driver of events that may affect it.
- * No direct response is expected from the VF, though it may generate other
- * messages in response to this one.
- */
-enum i40e_virtchnl_event_codes {
- I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
- I40E_VIRTCHNL_EVENT_LINK_CHANGE,
- I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
- I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
-};
-#define I40E_PF_EVENT_SEVERITY_INFO 0
-#define I40E_PF_EVENT_SEVERITY_ATTENTION 1
-#define I40E_PF_EVENT_SEVERITY_ACTION_REQUIRED 2
-#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
-
-struct i40e_virtchnl_pf_event {
- enum i40e_virtchnl_event_codes event;
- union {
- struct {
- enum i40e_aq_link_speed link_speed;
- bool link_status;
- } link_event;
- } event_data;
-
- int severity;
-};
-
-/* VF reset states - these are written into the RSTAT register:
- * I40E_VFGEN_RSTAT1 on the PF
- * I40E_VFGEN_RSTAT on the VF
- * When the PF initiates a reset, it writes 0
- * When the reset is complete, it writes 1
- * When the PF detects that the VF has recovered, it writes 2
- * VF checks this register periodically to determine if a reset has occurred,
- * then polls it to know when the reset is complete.
- * If either the PF or VF reads the register while the hardware
- * is in a reset state, it will return DEADBEEF, which, when masked
- * will result in 3.
- */
-enum i40e_vfr_states {
- I40E_VFR_INPROGRESS = 0,
- I40E_VFR_COMPLETED,
- I40E_VFR_VFACTIVE,
- I40E_VFR_UNKNOWN,
-};
-
-#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
index f814855c187c..105bc9ce4972 100644
--- a/sys/dev/ixl/if_ixl.c
+++ b/sys/dev/ixl/if_ixl.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,13 @@
/*********************************************************************
* Driver version
*********************************************************************/
-char ixl_driver_version[] = "1.7.12-k";
+#define IXL_DRIVER_VERSION_MAJOR 1
+#define IXL_DRIVER_VERSION_MINOR 9
+#define IXL_DRIVER_VERSION_BUILD 9
+
+char ixl_driver_version[] = __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."
+ __XSTRING(IXL_DRIVER_VERSION_MINOR) "."
+ __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k";
/*********************************************************************
* PCI Device ID Table
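
The version string now comes from numeric macros via __XSTRING, FreeBSD's expand-then-stringify helper from <sys/cdefs.h>; the resulting string literals are concatenated by the compiler. A minimal standalone equivalent:

#include <stdio.h>

/* Minimal equivalent of __STRING/__XSTRING from <sys/cdefs.h>: the
 * extra macro level forces expansion before stringification, so the
 * value (not the name) of each macro becomes a string literal. */
#define STRING(x)	#x
#define XSTRING(x)	STRING(x)

#define VER_MAJOR	1
#define VER_MINOR	9
#define VER_BUILD	9

/* Adjacent string literals are concatenated at compile time. */
static const char version[] = XSTRING(VER_MAJOR) "."
    XSTRING(VER_MINOR) "." XSTRING(VER_BUILD) "-k";

int main(void)
{
	printf("%s\n", version);	/* prints 1.9.9-k */
	return (0);
}
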
@@ -86,7 +92,7 @@ static ixl_vendor_info_t ixl_vendor_info_array[] =
*********************************************************************/
static char *ixl_strings[] = {
- "Intel(R) Ethernet Connection XL710/X722 Driver"
+ "Intel(R) Ethernet Connection 700 Series PF Driver"
};
@@ -99,7 +105,6 @@ static int ixl_detach(device_t);
static int ixl_shutdown(device_t);
static int ixl_save_pf_tunables(struct ixl_pf *);
-static int ixl_attach_get_link_status(struct ixl_pf *);
/*********************************************************************
* FreeBSD Device Interface Entry Points
@@ -151,13 +156,18 @@ SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
"Enable MSI-X interrupts");
/*
-** Number of descriptors per ring:
-** - TX and RX are the same size
+** Number of descriptors per ring
+** - TX and RX sizes are independently configurable
*/
-static int ixl_ring_size = IXL_DEFAULT_RING;
-TUNABLE_INT("hw.ixl.ring_size", &ixl_ring_size);
-SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
- &ixl_ring_size, 0, "Descriptor Ring Size");
+static int ixl_tx_ring_size = IXL_DEFAULT_RING;
+TUNABLE_INT("hw.ixl.tx_ring_size", &ixl_tx_ring_size);
+SYSCTL_INT(_hw_ixl, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN,
+ &ixl_tx_ring_size, 0, "TX Descriptor Ring Size");
+
+static int ixl_rx_ring_size = IXL_DEFAULT_RING;
+TUNABLE_INT("hw.ixl.rx_ring_size", &ixl_rx_ring_size);
+SYSCTL_INT(_hw_ixl, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN,
+ &ixl_rx_ring_size, 0, "RX Descriptor Ring Size");
/*
** This can be set manually, if left as 0 the
@@ -169,6 +179,10 @@ TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
&ixl_max_queues, 0, "Number of Queues");
+/*
+ * Leave this on unless you need to send flow control
+ * frames (or other control frames) from software
+ */
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
&ixl_enable_tx_fc_filter);
@@ -176,6 +190,17 @@ SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
&ixl_enable_tx_fc_filter, 0,
"Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
+/*
+ * Different method for processing TX descriptor
+ * completion.
+ */
+static int ixl_enable_head_writeback = 1;
+TUNABLE_INT("hw.ixl.enable_head_writeback",
+ &ixl_enable_head_writeback);
+SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
+ &ixl_enable_head_writeback, 0,
+ "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
+
static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
&ixl_core_debug_mask);
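
The new enable_head_writeback tunable selects between the two ways a driver can learn which TX descriptors the NIC has finished: reading a head index that the hardware DMA-writes back to host memory, or scanning descriptors for a completion marker. A rough, driver-agnostic sketch of the two strategies (the types and DONE marker are illustrative, not the actual i40e descriptor layout):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE	1024
#define DONE_BIT	(1u << 0)	/* illustrative completion marker */

struct tx_desc { uint32_t cmd_status; };

/* Head writeback: the NIC DMA-writes the index of the last completed
 * descriptor to a host memory word; software just reads it. */
static uint32_t
completed_by_head_wb(const volatile uint32_t *head_wb)
{
	return (*head_wb);
}

/* Descriptor checking: software scans forward from its last-known
 * position until it finds a descriptor the NIC has not completed. */
static uint32_t
completed_by_scan(const struct tx_desc *ring, uint32_t next, uint32_t tail)
{
	while (next != tail && (ring[next].cmd_status & DONE_BIT) != 0)
		next = (next + 1) % RING_SIZE;
	return (next);
}

int main(void)
{
	static struct tx_desc ring[RING_SIZE];
	uint32_t head_wb = 5;

	ring[0].cmd_status = ring[1].cmd_status = DONE_BIT;
	printf("head-wb: %u  scan: %u\n",
	    completed_by_head_wb(&head_wb),
	    completed_by_scan(ring, 0, 10));
	return (0);
}

Head writeback costs an extra DMA write per batch but avoids touching every descriptor; as the netmap hunk later in this diff shows, it is also the only mode netmap supports in the PF driver.
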
@@ -218,6 +243,17 @@ SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
#ifdef IXL_IW
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
+SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
+ &ixl_enable_iwarp, 0, "iWARP enabled");
+
+#if __FreeBSD_version < 1100000
+int ixl_limit_iwarp_msix = 1;
+#else
+int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
+#endif
+TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
+SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
+ &ixl_limit_iwarp_msix, 0, "Limit MSIX vectors assigned to iWARP");
#endif
#ifdef DEV_NETMAP
@@ -275,30 +311,6 @@ ixl_probe(device_t dev)
return (ENXIO);
}
-static int
-ixl_attach_get_link_status(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- int error = 0;
-
- if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
- (hw->aq.fw_maj_ver < 4)) {
- i40e_msec_delay(75);
- error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
- if (error) {
- device_printf(dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
- return error;
- }
- }
-
- /* Determine link state */
- hw->phy.get_link_info = TRUE;
- i40e_get_link_status(hw, &pf->link_up);
- return (0);
-}
-
/*
* Sanity check and save off tunable values.
*/
@@ -315,20 +327,16 @@ ixl_save_pf_tunables(struct ixl_pf *pf)
pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
pf->dbg_mask = ixl_core_debug_mask;
pf->hw.debug_mask = ixl_shared_debug_mask;
+#ifdef DEV_NETMAP
+ if (ixl_enable_head_writeback == 0)
+ device_printf(dev, "Head writeback mode cannot be disabled "
+ "when netmap is enabled\n");
+ pf->vsi.enable_head_writeback = 1;
+#else
+ pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
+#endif
- if (ixl_ring_size < IXL_MIN_RING
- || ixl_ring_size > IXL_MAX_RING
- || ixl_ring_size % IXL_RING_INCREMENT != 0) {
- device_printf(dev, "Invalid ring_size value of %d set!\n",
- ixl_ring_size);
- device_printf(dev, "ring_size must be between %d and %d, "
- "inclusive, and must be a multiple of %d\n",
- IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT);
- device_printf(dev, "Using default value of %d instead\n",
- IXL_DEFAULT_RING);
- pf->ringsz = IXL_DEFAULT_RING;
- } else
- pf->ringsz = ixl_ring_size;
+ ixl_vsi_setup_rings_size(&pf->vsi, ixl_tx_ring_size, ixl_rx_ring_size);
if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
device_printf(dev, "Invalid tx_itr value of %d set!\n",
@@ -389,6 +397,7 @@ ixl_attach(device_t dev)
*/
vsi = &pf->vsi;
vsi->dev = pf->dev;
+ vsi->back = pf;
/* Save tunable values */
error = ixl_save_pf_tunables(pf);
@@ -427,12 +436,6 @@ ixl_attach(device_t dev)
goto err_out;
}
- /*
- * Allocate interrupts and figure out number of queues to use
- * for PF interface
- */
- pf->msix = ixl_init_msix(pf);
-
/* Set up the admin queue */
hw->aq.num_arq_entries = IXL_AQ_LEN;
hw->aq.num_asq_entries = IXL_AQ_LEN;
@@ -450,23 +453,24 @@ ixl_attach(device_t dev)
if (status == I40E_ERR_FIRMWARE_API_VERSION) {
device_printf(dev, "The driver for the device stopped "
- "because the NVM image is newer than expected.\n"
- "You must install the most recent version of "
+ "because the NVM image is newer than expected.\n");
+ device_printf(dev, "You must install the most recent version of "
"the network driver.\n");
error = EIO;
goto err_out;
}
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
+ hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
device_printf(dev, "The driver for the device detected "
- "a newer version of the NVM image than expected.\n"
- "Please install the most recent version of the network driver.\n");
- else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
- hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
+ "a newer version of the NVM image than expected.\n");
+ device_printf(dev, "Please install the most recent version "
+ "of the network driver.\n");
+ } else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
device_printf(dev, "The driver for the device detected "
- "an older version of the NVM image than expected.\n"
- "Please update the NVM image.\n");
+ "an older version of the NVM image than expected.\n");
+ device_printf(dev, "Please update the NVM image.\n");
+ }
/* Clear PXE mode */
i40e_clear_pxe_mode(hw);
@@ -478,6 +482,12 @@ ixl_attach(device_t dev)
goto err_get_cap;
}
+ /*
+ * Allocate interrupts and figure out number of queues to use
+ * for PF interface
+ */
+ pf->msix = ixl_init_msix(pf);
+
/* Set up host memory cache */
status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
@@ -513,8 +523,10 @@ ixl_attach(device_t dev)
/* Disable LLDP from the firmware for certain NVM versions */
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
- (pf->hw.aq.fw_maj_ver < 4))
+ (pf->hw.aq.fw_maj_ver < 4)) {
i40e_aq_stop_lldp(hw, TRUE, NULL);
+ pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
+ }
/* Get MAC addresses from hardware */
i40e_get_mac_addr(hw, hw->mac.addr);
@@ -526,6 +538,14 @@ ixl_attach(device_t dev)
bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
i40e_get_port_mac_addr(hw, hw->mac.port_addr);
+ /* Query device FW LLDP status */
+ ixl_get_fw_lldp_status(pf);
+ /* Tell FW to apply DCB config on link up */
+ if ((hw->mac.type != I40E_MAC_X722)
+ && ((pf->hw.aq.api_maj_ver > 1)
+ || (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver >= 7)))
+ i40e_aq_set_dcb_parameters(hw, true, NULL);
+
/* Initialize mac filter list for VSI */
SLIST_INIT(&vsi->ftl);
@@ -619,7 +639,7 @@ ixl_attach(device_t dev)
}
/* Set initial advertised speed sysctl value */
- ixl_get_initial_advertised_speeds(pf);
+ ixl_set_initial_advertised_speeds(pf);
/* Initialize statistics & add sysctls */
ixl_add_device_sysctls(pf);
@@ -639,7 +659,13 @@ ixl_attach(device_t dev)
#endif
#ifdef DEV_NETMAP
- ixl_netmap_attach(vsi);
+ if (vsi->num_rx_desc == vsi->num_tx_desc) {
+ vsi->queues[0].num_desc = vsi->num_rx_desc;
+ ixl_netmap_attach(vsi);
+ } else
+ device_printf(dev,
+ "Netmap is not supported when RX and TX descriptor ring sizes differ\n");
+
#endif /* DEV_NETMAP */
#ifdef IXL_IW
@@ -652,7 +678,8 @@ ixl_attach(device_t dev)
"interfacing to iwarp driver failed: %d\n",
error);
goto err_late;
- }
+ } else
+ device_printf(dev, "iWARP ready\n");
} else
device_printf(dev,
"iwarp disabled on this device (no msix vectors)\n");
@@ -718,6 +745,9 @@ ixl_detach(device_t dev)
}
#endif
+ /* Remove all previously allocated media types */
+ ifmedia_removeall(&vsi->media);
+
ether_ifdetach(vsi->ifp);
if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
ixl_stop(pf);
diff --git a/sys/dev/ixl/if_ixlv.c b/sys/dev/ixl/if_ixlv.c
index 390ef51e5ce8..5c86b93e7ff2 100644
--- a/sys/dev/ixl/if_ixlv.c
+++ b/sys/dev/ixl/if_ixlv.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -38,7 +38,13 @@
/*********************************************************************
* Driver version
*********************************************************************/
-char ixlv_driver_version[] = "1.4.12-k";
+#define IXLV_DRIVER_VERSION_MAJOR 1
+#define IXLV_DRIVER_VERSION_MINOR 5
+#define IXLV_DRIVER_VERSION_BUILD 4
+
+char ixlv_driver_version[] = __XSTRING(IXLV_DRIVER_VERSION_MAJOR) "."
+ __XSTRING(IXLV_DRIVER_VERSION_MINOR) "."
+ __XSTRING(IXLV_DRIVER_VERSION_BUILD) "-k";
/*********************************************************************
* PCI Device ID Table
@@ -54,7 +60,7 @@ static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
- {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -64,7 +70,7 @@ static ixl_vendor_info_t ixlv_vendor_info_array[] =
*********************************************************************/
static char *ixlv_strings[] = {
- "Intel(R) Ethernet Connection XL710/X722 VF Driver"
+ "Intel(R) Ethernet Connection 700 Series VF Driver"
};
@@ -86,6 +92,7 @@ static void ixlv_config_rss(struct ixlv_sc *);
static void ixlv_stop(struct ixlv_sc *);
static void ixlv_add_multi(struct ixl_vsi *);
static void ixlv_del_multi(struct ixl_vsi *);
+static void ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que);
static void ixlv_free_queues(struct ixl_vsi *);
static int ixlv_setup_interface(device_t, struct ixlv_sc *);
static int ixlv_teardown_adminq_msix(struct ixlv_sc *);
@@ -129,6 +136,9 @@ static int ixlv_vf_config(struct ixlv_sc *);
static void ixlv_cap_txcsum_tso(struct ixl_vsi *,
struct ifnet *, int);
+static char *ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed);
+static int ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
+
static void ixlv_add_sysctls(struct ixlv_sc *);
#ifdef IXL_DEBUG
static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
@@ -167,12 +177,17 @@ static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
/*
** Number of descriptors per ring:
-** - TX and RX are the same size
+** - TX and RX sizes are independently configurable
*/
-static int ixlv_ringsz = IXL_DEFAULT_RING;
-TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
-SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
- &ixlv_ringsz, 0, "Descriptor Ring Size");
+static int ixlv_tx_ring_size = IXL_DEFAULT_RING;
+TUNABLE_INT("hw.ixlv.tx_ring_size", &ixlv_tx_ring_size);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN,
+ &ixlv_tx_ring_size, 0, "TX Descriptor Ring Size");
+
+static int ixlv_rx_ring_size = IXL_DEFAULT_RING;
+TUNABLE_INT("hw.ixlv.rx_ring_size", &ixlv_rx_ring_size);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN,
+    &ixlv_rx_ring_size, 0, "RX Descriptor Ring Size");
/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
@@ -192,6 +207,17 @@ SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
&ixlv_txbrsz, 0, "TX Buf Ring Size");
/*
+ * Different method for processing TX descriptor
+ * completion.
+ */
+static int ixlv_enable_head_writeback = 0;
+TUNABLE_INT("hw.ixlv.enable_head_writeback",
+ &ixlv_enable_head_writeback);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
+ &ixlv_enable_head_writeback, 0,
+ "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
+
+/*
** Controls for Interrupt Throttling
** - true/false for dynamic adjustment
** - default values for static ITR
@@ -300,6 +326,9 @@ ixlv_attach(device_t dev)
/* Allocate filter lists */
ixlv_init_filters(sc);
+ /* Save this tunable */
+ vsi->enable_head_writeback = ixlv_enable_head_writeback;
+
/* Core Lock Init */
mtx_init(&sc->mtx, device_get_nameunit(dev),
"IXL SC Lock", MTX_DEF);
@@ -402,7 +431,9 @@ ixlv_attach(device_t dev)
vsi->id = sc->vsi_res->vsi_id;
vsi->back = (void *)sc;
- sc->link_up = TRUE;
+ vsi->flags |= IXL_FLAGS_IS_VF | IXL_FLAGS_USES_MSIX;
+
+ ixl_vsi_setup_rings_size(vsi, ixlv_tx_ring_size, ixlv_rx_ring_size);
/* This allocates the memory and early settings */
if (ixlv_setup_queues(sc) != 0) {
@@ -412,27 +443,32 @@ ixlv_attach(device_t dev)
goto out;
}
- /* Setup the stack interface */
- if (ixlv_setup_interface(dev, sc) != 0) {
- device_printf(dev, "%s: setup interface failed!\n",
+ /* Do queue interrupt setup */
+ if (ixlv_assign_msix(sc) != 0) {
+ device_printf(dev, "%s: allocating queue interrupts failed!\n",
__func__);
- error = EIO;
+ error = ENXIO;
goto out;
}
- INIT_DBG_DEV(dev, "Queue memory and interface setup");
+ INIT_DBG_DEV(dev, "Queue memory and interrupts setup");
- /* Do queue interrupt setup */
- if (ixlv_assign_msix(sc) != 0) {
- device_printf(dev, "%s: allocating queue interrupts failed!\n",
+ /* Setup the stack interface */
+ if (ixlv_setup_interface(dev, sc) != 0) {
+ device_printf(dev, "%s: setup interface failed!\n",
__func__);
- error = ENXIO;
+ error = EIO;
goto out;
}
+ INIT_DBG_DEV(dev, "Interface setup complete");
+
/* Start AdminQ taskqueue */
ixlv_init_taskqueue(sc);
+ /* We expect a link state message, so schedule the AdminQ task now */
+ taskqueue_enqueue(sc->tq, &sc->aq_irq);
+
/* Initialize stats */
bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
ixlv_add_sysctls(sc);
@@ -456,6 +492,7 @@ ixlv_attach(device_t dev)
out:
ixlv_free_queues(vsi);
+ ixlv_teardown_adminq_msix(sc);
err_res_buf:
free(sc->vf_res, M_DEVBUF);
err_aq:
@@ -495,6 +532,9 @@ ixlv_detach(device_t dev)
return (EBUSY);
}
+ /* Remove all the media and link information */
+ ifmedia_removeall(&sc->media);
+
/* Stop driver */
ether_ifdetach(vsi->ifp);
if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
@@ -525,8 +565,8 @@ ixlv_detach(device_t dev)
if_free(vsi->ifp);
free(sc->vf_res, M_DEVBUF);
- ixlv_free_pci_resources(sc);
ixlv_free_queues(vsi);
+ ixlv_free_pci_resources(sc);
ixlv_free_filters(sc);
bus_generic_detach(dev);
@@ -1138,8 +1178,8 @@ retry_config:
retried + 1);
if (!sc->vf_res) {
- bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
- (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
+ bufsz = sizeof(struct virtchnl_vf_resource) +
+ (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
if (!sc->vf_res) {
device_printf(dev,
@@ -1318,29 +1358,10 @@ ixlv_allocate_pci_resources(struct ixlv_sc *sc)
sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
-
sc->hw.back = &sc->osdep;
- /*
- ** Explicitly set the guest PCI BUSMASTER capability
- ** and we must rewrite the ENABLE in the MSIX control
- ** register again at this point to cause the host to
- ** successfully initialize us.
- **
- ** This must be set before accessing any registers.
- */
- {
- u16 pci_cmd_word;
- int msix_ctrl;
- pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
- pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
- pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
- pci_find_cap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
- }
+ ixl_set_busmaster(dev);
+ ixl_set_msix_enable(dev);
/* Disable adminq interrupts (just in case) */
ixlv_disable_adminq_irq(&sc->hw);
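The inline block deleted above is factored into two helpers shared with
the PF driver (prototypes are added to ixl.h later in this diff). A
sketch of their presumed bodies, reconstructed one-for-one from the
removed code:

void
ixl_set_busmaster(device_t dev)
{
	u16 pci_cmd_word;

	/* Explicitly set the guest PCI BUSMASTER capability */
	pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
}

void
ixl_set_msix_enable(device_t dev)
{
	int msix_ctrl, rid;

	/* Rewrite the ENABLE bit in the MSIX control register */
	pci_find_cap(dev, PCIY_MSIX, &rid);
	rid += PCIR_MSIX_CTRL;
	msix_ctrl = pci_read_config(dev, rid, 2);
	msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(dev, rid, msix_ctrl, 2);
}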
@@ -1348,33 +1369,37 @@ ixlv_allocate_pci_resources(struct ixlv_sc *sc)
return (0);
}
+/*
+ * Free MSI-X related resources for a single queue
+ */
static void
-ixlv_free_pci_resources(struct ixlv_sc *sc)
+ixlv_free_msix_resources(struct ixlv_sc *sc, struct ixl_queue *que)
{
- struct ixl_vsi *vsi = &sc->vsi;
- struct ixl_queue *que = vsi->queues;
device_t dev = sc->dev;
- /* We may get here before stations are setup */
- if (que == NULL)
- goto early;
-
/*
** Release all msix queue resources:
*/
- for (int i = 0; i < vsi->num_queues; i++, que++) {
+ if (que->tag != NULL) {
+ bus_teardown_intr(dev, que->res, que->tag);
+ que->tag = NULL;
+ }
+ if (que->res != NULL) {
int rid = que->msix + 1;
- if (que->tag != NULL) {
- bus_teardown_intr(dev, que->res, que->tag);
- que->tag = NULL;
- }
- if (que->res != NULL) {
- bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
- que->res = NULL;
- }
+ bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
+ que->res = NULL;
}
-
-early:
+ if (que->tq != NULL) {
+ taskqueue_free(que->tq);
+ que->tq = NULL;
+ }
+}
+
+static void
+ixlv_free_pci_resources(struct ixlv_sc *sc)
+{
+ device_t dev = sc->dev;
+
pci_release_msi(dev);
if (sc->msix_mem != NULL)
@@ -1437,7 +1462,7 @@ ixlv_assign_msix(struct ixlv_sc *sc)
INTR_TYPE_NET | INTR_MPSAFE, NULL,
ixlv_msix_que, que, &que->tag);
if (error) {
- que->res = NULL;
+ que->tag = NULL;
device_printf(dev, "Failed to register que handler");
return (error);
}
@@ -1518,8 +1543,8 @@ ixlv_reset_complete(struct i40e_hw *hw)
reg = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if ((reg == I40E_VFR_VFACTIVE) ||
- (reg == I40E_VFR_COMPLETED))
+ if ((reg == VIRTCHNL_VFR_VFACTIVE) ||
+ (reg == VIRTCHNL_VFR_COMPLETED))
return (0);
i40e_msec_pause(100);
}
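The I40E_VFR_* reset states polled from I40E_VFGEN_RSTAT are renamed
here; the new virtchnl.h added by this commit presumably defines them
with the same register values, along these lines:

enum virtchnl_vfr_states {
	VIRTCHNL_VFR_INPROGRESS = 0,
	VIRTCHNL_VFR_COMPLETED,
	VIRTCHNL_VFR_VFACTIVE,
};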
@@ -1552,7 +1577,11 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_mtu = ETHERMTU;
+#if __FreeBSD_version >= 1100000
ifp->if_baudrate = IF_Gbps(40);
+#else
+ if_initbaudrate(ifp, IF_Gbps(40));
+#endif
ifp->if_init = ixlv_init;
ifp->if_softc = vsi;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
@@ -1565,7 +1594,7 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
ifp->if_transmit = ixl_mq_start;
ifp->if_qflush = ixl_qflush;
- ifp->if_snd.ifq_maxlen = que->num_desc - 2;
+ ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2;
ether_ifattach(ifp, sc->hw.mac.addr);
@@ -1573,6 +1602,10 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ ETHER_VLAN_ENCAP_LEN;
+ ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
+ ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
+ ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE;
+
/*
* Tell the upper layer(s) we support long frames.
*/
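The TSO limits set above advertise at most IXL_MAX_TSO_SEGS segments of
at most IXL_MAX_DMA_SEG_SIZE bytes each (both defined in ixl.h later in
this diff). A trivial compile-time check, using the literal values from
this change, that the segment budget is not the limiting factor for
if_hw_tsomax:

/* 128 segments * 16383 bytes comfortably exceeds IP_MAXPACKET (65535) */
_Static_assert(128 * ((16 * 1024) - 1) >= 65535,
    "TSO segment budget must cover a maximum-size IP packet");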
@@ -1607,7 +1640,12 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
ixlv_media_status);
- // JFV Add media types later?
+ /* Media types based on reported link speed over AdminQ */
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
@@ -1617,6 +1655,116 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
}
/*
+** Allocate and setup a single queue
+*/
+static int
+ixlv_setup_queue(struct ixlv_sc *sc, struct ixl_queue *que)
+{
+ device_t dev = sc->dev;
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+ int rsize, tsize;
+ int error = I40E_SUCCESS;
+
+ txr = &que->txr;
+ txr->que = que;
+ txr->tail = I40E_QTX_TAIL1(que->me);
+ /* Initialize the TX lock */
+ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
+ device_get_nameunit(dev), que->me);
+ mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
+ /*
+ * Create the TX descriptor ring
+ *
+ * In Head Writeback mode, the descriptor ring is one bigger
+ * than the number of descriptors for space for the HW to
+ * write back index of last completed descriptor.
+ */
+ if (sc->vsi.enable_head_writeback) {
+ tsize = roundup2((que->num_tx_desc *
+ sizeof(struct i40e_tx_desc)) +
+ sizeof(u32), DBA_ALIGN);
+ } else {
+ tsize = roundup2((que->num_tx_desc *
+ sizeof(struct i40e_tx_desc)), DBA_ALIGN);
+ }
+ if (i40e_allocate_dma_mem(&sc->hw,
+ &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
+ device_printf(dev,
+ "Unable to allocate TX Descriptor memory\n");
+ error = ENOMEM;
+ goto err_destroy_tx_mtx;
+ }
+ txr->base = (struct i40e_tx_desc *)txr->dma.va;
+ bzero((void *)txr->base, tsize);
+ /* Now allocate transmit soft structs for the ring */
+ if (ixl_allocate_tx_data(que)) {
+ device_printf(dev,
+ "Critical Failure setting up TX structures\n");
+ error = ENOMEM;
+ goto err_free_tx_dma;
+ }
+ /* Allocate a buf ring */
+ txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
+ M_WAITOK, &txr->mtx);
+ if (txr->br == NULL) {
+ device_printf(dev,
+ "Critical Failure setting up TX buf ring\n");
+ error = ENOMEM;
+ goto err_free_tx_data;
+ }
+
+ /*
+ * Next the RX queues...
+ */
+ rsize = roundup2(que->num_rx_desc *
+ sizeof(union i40e_rx_desc), DBA_ALIGN);
+ rxr = &que->rxr;
+ rxr->que = que;
+ rxr->tail = I40E_QRX_TAIL1(que->me);
+
+ /* Initialize the RX side lock */
+ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
+ device_get_nameunit(dev), que->me);
+ mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
+
+ if (i40e_allocate_dma_mem(&sc->hw,
+ &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
+ device_printf(dev,
+ "Unable to allocate RX Descriptor memory\n");
+ error = ENOMEM;
+ goto err_destroy_rx_mtx;
+ }
+ rxr->base = (union i40e_rx_desc *)rxr->dma.va;
+ bzero((void *)rxr->base, rsize);
+
+ /* Allocate receive soft structs for the ring */
+ if (ixl_allocate_rx_data(que)) {
+ device_printf(dev,
+ "Critical Failure setting up receive structs\n");
+ error = ENOMEM;
+ goto err_free_rx_dma;
+ }
+
+ return (0);
+
+err_free_rx_dma:
+ i40e_free_dma_mem(&sc->hw, &rxr->dma);
+err_destroy_rx_mtx:
+ mtx_destroy(&rxr->mtx);
+ /* err_free_tx_buf_ring */
+ buf_ring_free(txr->br, M_DEVBUF);
+err_free_tx_data:
+ ixl_free_que_tx(que);
+err_free_tx_dma:
+ i40e_free_dma_mem(&sc->hw, &txr->dma);
+err_destroy_tx_mtx:
+ mtx_destroy(&txr->mtx);
+
+ return (error);
+}
+
+/*
** Allocate and setup the interface queues
*/
static int
@@ -1625,9 +1773,7 @@ ixlv_setup_queues(struct ixlv_sc *sc)
device_t dev = sc->dev;
struct ixl_vsi *vsi;
struct ixl_queue *que;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- int rsize, tsize;
+ int i;
int error = I40E_SUCCESS;
vsi = &sc->vsi;
@@ -1640,104 +1786,30 @@ ixlv_setup_queues(struct ixlv_sc *sc)
(struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate queue memory\n");
- error = ENOMEM;
- goto early;
+ return (ENOMEM);
}
- for (int i = 0; i < vsi->num_queues; i++) {
+ for (i = 0; i < vsi->num_queues; i++) {
que = &vsi->queues[i];
- que->num_desc = ixlv_ringsz;
+ que->num_tx_desc = vsi->num_tx_desc;
+ que->num_rx_desc = vsi->num_rx_desc;
que->me = i;
que->vsi = vsi;
- txr = &que->txr;
- txr->que = que;
- txr->tail = I40E_QTX_TAIL1(que->me);
- /* Initialize the TX lock */
- snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
- device_get_nameunit(dev), que->me);
- mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
- /*
- ** Create the TX descriptor ring, the extra int is
- ** added as the location for HEAD WB.
- */
- tsize = roundup2((que->num_desc *
- sizeof(struct i40e_tx_desc)) +
- sizeof(u32), DBA_ALIGN);
- if (i40e_allocate_dma_mem(&sc->hw,
- &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
- device_printf(dev,
- "Unable to allocate TX Descriptor memory\n");
- error = ENOMEM;
- goto fail;
- }
- txr->base = (struct i40e_tx_desc *)txr->dma.va;
- bzero((void *)txr->base, tsize);
- /* Now allocate transmit soft structs for the ring */
- if (ixl_allocate_tx_data(que)) {
- device_printf(dev,
- "Critical Failure setting up TX structures\n");
- error = ENOMEM;
- goto fail;
- }
- /* Allocate a buf ring */
- txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
- M_WAITOK, &txr->mtx);
- if (txr->br == NULL) {
- device_printf(dev,
- "Critical Failure setting up TX buf ring\n");
- error = ENOMEM;
- goto fail;
- }
-
- /*
- * Next the RX queues...
- */
- rsize = roundup2(que->num_desc *
- sizeof(union i40e_rx_desc), DBA_ALIGN);
- rxr = &que->rxr;
- rxr->que = que;
- rxr->tail = I40E_QRX_TAIL1(que->me);
-
- /* Initialize the RX side lock */
- snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
- device_get_nameunit(dev), que->me);
- mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
-
- if (i40e_allocate_dma_mem(&sc->hw,
- &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
- device_printf(dev,
- "Unable to allocate RX Descriptor memory\n");
- error = ENOMEM;
- goto fail;
- }
- rxr->base = (union i40e_rx_desc *)rxr->dma.va;
- bzero((void *)rxr->base, rsize);
-
- /* Allocate receive soft structs for the ring */
- if (ixl_allocate_rx_data(que)) {
- device_printf(dev,
- "Critical Failure setting up receive structs\n");
+ if (ixlv_setup_queue(sc, que)) {
error = ENOMEM;
- goto fail;
+ goto err_free_queues;
}
}
return (0);
-fail:
- for (int i = 0; i < vsi->num_queues; i++) {
- que = &vsi->queues[i];
- rxr = &que->rxr;
- txr = &que->txr;
- if (rxr->base)
- i40e_free_dma_mem(&sc->hw, &rxr->dma);
- if (txr->base)
- i40e_free_dma_mem(&sc->hw, &txr->dma);
- }
+err_free_queues:
+ while (i--)
+ ixlv_free_queue(sc, &vsi->queues[i]);
+
free(vsi->queues, M_DEVBUF);
-early:
return (error);
}
@@ -2255,6 +2327,34 @@ ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
ifmr->ifm_status |= IFM_ACTIVE;
/* Hardware is always full-duplex */
ifmr->ifm_active |= IFM_FDX;
+
+ /* Based on the link speed reported by the PF over the AdminQ, choose a
+ * PHY type to report. This isn't 100% correct since we don't really
+ * know the underlying PHY type of the PF, but at least we can report
+ * a valid link speed...
+ */
+ switch (sc->link_speed) {
+ case VIRTCHNL_LINK_SPEED_100MB:
+ ifmr->ifm_active |= IFM_100_TX;
+ break;
+ case VIRTCHNL_LINK_SPEED_1GB:
+ ifmr->ifm_active |= IFM_1000_T;
+ break;
+ case VIRTCHNL_LINK_SPEED_10GB:
+ ifmr->ifm_active |= IFM_10G_SR;
+ break;
+ case VIRTCHNL_LINK_SPEED_20GB:
+ case VIRTCHNL_LINK_SPEED_25GB:
+ ifmr->ifm_active |= IFM_25G_SR;
+ break;
+ case VIRTCHNL_LINK_SPEED_40GB:
+ ifmr->ifm_active |= IFM_40G_SR4;
+ break;
+ default:
+ ifmr->ifm_active |= IFM_UNKNOWN;
+ break;
+ }
+
mtx_unlock(&sc->mtx);
INIT_DBG_IF(ifp, "end");
return;
@@ -2279,8 +2379,10 @@ ixlv_media_change(struct ifnet * ifp)
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
return (EINVAL);
+ if_printf(ifp, "Changing speed is not supported\n");
+
INIT_DBG_IF(ifp, "end");
- return (0);
+ return (ENODEV);
}
@@ -2342,7 +2444,7 @@ ixlv_add_multi(struct ixl_vsi *vsi)
if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
/* delete all multicast filters */
ixlv_init_multi(vsi);
- sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
+ sc->promiscuous_flags |= FLAG_VF_MULTICAST_PROMISC;
ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
sc);
@@ -2435,15 +2537,10 @@ ixlv_del_multi(struct ixl_vsi *vsi)
static void
ixlv_local_timer(void *arg)
{
- struct ixlv_sc *sc = arg;
+ struct ixlv_sc *sc = arg;
struct i40e_hw *hw = &sc->hw;
struct ixl_vsi *vsi = &sc->vsi;
- struct ixl_queue *que = vsi->queues;
- device_t dev = sc->dev;
- struct tx_ring *txr;
- int hung = 0;
- u32 mask, val;
- s32 timer, new_timer;
+ u32 val;
IXLV_CORE_LOCK_ASSERT(sc);
@@ -2455,9 +2552,9 @@ ixlv_local_timer(void *arg)
val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (val != I40E_VFR_VFACTIVE
- && val != I40E_VFR_COMPLETED) {
- DDPRINTF(dev, "reset in progress! (%d)", val);
+ if (val != VIRTCHNL_VFR_VFACTIVE
+ && val != VIRTCHNL_VFR_COMPLETED) {
+ DDPRINTF(sc->dev, "reset in progress! (%d)", val);
return;
}
@@ -2466,48 +2563,11 @@ ixlv_local_timer(void *arg)
/* clean and process any events */
taskqueue_enqueue(sc->tq, &sc->aq_irq);
- /*
- ** Check status on the queues for a hang
- */
- mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- txr = &que->txr;
- timer = atomic_load_acq_32(&txr->watchdog_timer);
- if (timer > 0) {
- new_timer = timer - hz;
- if (new_timer <= 0) {
- atomic_store_rel_32(&txr->watchdog_timer, -1);
- device_printf(dev, "WARNING: queue %d "
- "appears to be hung!\n", que->me);
- ++hung;
- } else {
- /*
- * If this fails, that means something in the TX path has updated
- * the watchdog, so it means the TX path is still working and
- * the watchdog doesn't need to countdown.
- */
- atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
- /* Any queues with outstanding work get a sw irq */
- wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
- }
- }
- }
- /* Reset when a queue shows hung */
- if (hung)
- goto hung;
+ /* Increment stat when a queue shows hung */
+ if (ixl_queue_hang_check(vsi))
+ sc->watchdog_events++;
callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
- return;
-
-hung:
- device_printf(dev, "WARNING: Resetting!\n");
- sc->init_state = IXLV_RESET_REQUIRED;
- sc->watchdog_events++;
- ixlv_stop(sc);
- ixlv_init_locked(sc);
}
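The per-queue countdown deleted above moves into the shared
ixl_queue_hang_check() (prototyped in ixl.h later in this diff), and a
hang now only bumps a counter instead of forcing a reset. A sketch of
the helper's likely shape, reconstructed from the removed loop; the
real implementation lives in ixl_txrx.c and may differ:

int
ixl_queue_hang_check(struct ixl_vsi *vsi)
{
	struct ixl_queue *que = vsi->queues;
	device_t dev = vsi->dev;
	struct tx_ring *txr;
	s32 timer, new_timer;
	int hung = 0;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		txr = &que->txr;
		timer = atomic_load_acq_32(&txr->watchdog_timer);
		if (timer <= 0)
			continue;
		new_timer = timer - hz;
		if (new_timer <= 0) {
			atomic_store_rel_32(&txr->watchdog_timer, -1);
			device_printf(dev, "WARNING: queue %d "
			    "appears to be hung!\n", que->me);
			++hung;
		} else {
			/*
			 * If this fails, the TX path has updated the
			 * watchdog, i.e. it is still making progress.
			 */
			atomic_cmpset_rel_32(&txr->watchdog_timer,
			    timer, new_timer);
		}
	}
	return (hung);
}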
/*
@@ -2524,8 +2584,8 @@ ixlv_update_link_status(struct ixlv_sc *sc)
if (sc->link_up){
if (vsi->link_active == FALSE) {
if (bootverbose)
- if_printf(ifp,"Link is Up, %d Gbps\n",
- (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
+ if_printf(ifp,"Link is Up, %s\n",
+ ixlv_vc_speed_to_string(sc->link_speed));
vsi->link_active = TRUE;
if_link_state_change(ifp, LINK_STATE_UP);
}
@@ -2573,6 +2633,33 @@ ixlv_stop(struct ixlv_sc *sc)
INIT_DBG_IF(ifp, "end");
}
+/* Free a single queue struct */
+static void
+ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que)
+{
+ struct tx_ring *txr = &que->txr;
+ struct rx_ring *rxr = &que->rxr;
+
+ if (!mtx_initialized(&txr->mtx)) /* uninitialized */
+ return;
+ IXL_TX_LOCK(txr);
+ if (txr->br)
+ buf_ring_free(txr->br, M_DEVBUF);
+ ixl_free_que_tx(que);
+ if (txr->base)
+ i40e_free_dma_mem(&sc->hw, &txr->dma);
+ IXL_TX_UNLOCK(txr);
+ IXL_TX_LOCK_DESTROY(txr);
+
+ if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
+ return;
+ IXL_RX_LOCK(rxr);
+ ixl_free_que_rx(que);
+ if (rxr->base)
+ i40e_free_dma_mem(&sc->hw, &rxr->dma);
+ IXL_RX_UNLOCK(rxr);
+ IXL_RX_LOCK_DESTROY(rxr);
+}
/*********************************************************************
*
@@ -2586,28 +2673,12 @@ ixlv_free_queues(struct ixl_vsi *vsi)
struct ixl_queue *que = vsi->queues;
for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct tx_ring *txr = &que->txr;
- struct rx_ring *rxr = &que->rxr;
-
- if (!mtx_initialized(&txr->mtx)) /* uninitialized */
- continue;
- IXL_TX_LOCK(txr);
- ixl_free_que_tx(que);
- if (txr->base)
- i40e_free_dma_mem(&sc->hw, &txr->dma);
- IXL_TX_UNLOCK(txr);
- IXL_TX_LOCK_DESTROY(txr);
-
- if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
- continue;
- IXL_RX_LOCK(rxr);
- ixl_free_que_rx(que);
- if (rxr->base)
- i40e_free_dma_mem(&sc->hw, &rxr->dma);
- IXL_RX_UNLOCK(rxr);
- IXL_RX_LOCK_DESTROY(rxr);
-
+ /* First, free the MSI-X resources */
+ ixlv_free_msix_resources(sc, que);
+ /* Then free other queue data */
+ ixlv_free_queue(sc, que);
}
+
free(vsi->queues, M_DEVBUF);
}
@@ -2716,10 +2787,10 @@ ixlv_config_rss_pf(struct ixlv_sc *sc)
static void
ixlv_config_rss(struct ixlv_sc *sc)
{
- if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG) {
+ if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) {
DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
ixlv_config_rss_reg(sc);
- } else if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
ixlv_config_rss_pf(sc);
} else
@@ -2823,7 +2894,7 @@ ixlv_do_adminq_locked(struct ixlv_sc *sc)
{
struct i40e_hw *hw = &sc->hw;
struct i40e_arq_event_info event;
- struct i40e_virtchnl_msg *v_msg;
+ struct virtchnl_msg *v_msg;
device_t dev = sc->dev;
u16 result = 0;
u32 reg, oldreg;
@@ -2834,7 +2905,7 @@ ixlv_do_adminq_locked(struct ixlv_sc *sc)
event.buf_len = IXL_AQ_BUF_SZ;
event.msg_buf = sc->aq_buffer;
- v_msg = (struct i40e_virtchnl_msg *)&event.desc;
+ v_msg = (struct virtchnl_msg *)&event.desc;
do {
ret = i40e_clean_arq_element(hw, &event, &result);
@@ -2917,13 +2988,25 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
struct rx_ring *rxr;
/* Driver statistics sysctls */
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
CTLFLAG_RD, &sc->watchdog_events,
"Watchdog timeouts");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
CTLFLAG_RD, &sc->admin_irq,
"Admin Queue IRQ Handled");
+ SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_size",
+ CTLFLAG_RD, &vsi->num_tx_desc, 0,
+ "TX ring size");
+ SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_size",
+ CTLFLAG_RD, &vsi->num_rx_desc, 0,
+ "RX ring size");
+
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "current_speed",
+ CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ixlv_sysctl_current_speed,
+ "A", "Current Port Speed");
+
/* VSI statistics sysctls */
vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
CTLFLAG_RD, NULL, "VSI-specific statistics");
@@ -3048,14 +3131,71 @@ ixlv_free_filters(struct ixlv_sc *sc)
SLIST_REMOVE_HEAD(sc->mac_filters, next);
free(f, M_DEVBUF);
}
+ free(sc->mac_filters, M_DEVBUF);
while (!SLIST_EMPTY(sc->vlan_filters)) {
v = SLIST_FIRST(sc->vlan_filters);
SLIST_REMOVE_HEAD(sc->vlan_filters, next);
free(v, M_DEVBUF);
}
+ free(sc->vlan_filters, M_DEVBUF);
return;
}
+static char *
+ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed)
+{
+ int index;
+
+ char *speeds[] = {
+ "Unknown",
+ "100 Mbps",
+ "1 Gbps",
+ "10 Gbps",
+ "40 Gbps",
+ "20 Gbps",
+ "25 Gbps",
+ };
+
+ switch (link_speed) {
+ case VIRTCHNL_LINK_SPEED_100MB:
+ index = 1;
+ break;
+ case VIRTCHNL_LINK_SPEED_1GB:
+ index = 2;
+ break;
+ case VIRTCHNL_LINK_SPEED_10GB:
+ index = 3;
+ break;
+ case VIRTCHNL_LINK_SPEED_40GB:
+ index = 4;
+ break;
+ case VIRTCHNL_LINK_SPEED_20GB:
+ index = 5;
+ break;
+ case VIRTCHNL_LINK_SPEED_25GB:
+ index = 6;
+ break;
+ case VIRTCHNL_LINK_SPEED_UNKNOWN:
+ default:
+ index = 0;
+ break;
+ }
+
+ return speeds[index];
+}
+
+static int
+ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
+{
+ struct ixlv_sc *sc = (struct ixlv_sc *)arg1;
+ int error = 0;
+
+ error = sysctl_handle_string(oidp,
+ ixlv_vc_speed_to_string(sc->link_speed),
+ 8, req);
+ return (error);
+}
+
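A userland sketch of reading the new node via sysctlbyname(3); the OID
path below assumes device unit 0, i.e. what "sysctl
dev.ixlv.0.current_speed" would print:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	char speed[16];
	size_t len = sizeof(speed);

	/* dev.<driver>.<unit>.<node>; unit 0 assumed */
	if (sysctlbyname("dev.ixlv.0.current_speed", speed, &len,
	    NULL, 0) != 0) {
		perror("sysctlbyname");
		return (1);
	}
	printf("link speed: %s\n", speed);
	return (0);
}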
#ifdef IXL_DEBUG
/**
* ixlv_sysctl_qtx_tail_handler
diff --git a/sys/dev/ixl/ixl.h b/sys/dev/ixl/ixl.h
index 03851b52d9cf..286afc7224eb 100644
--- a/sys/dev/ixl/ixl.h
+++ b/sys/dev/ixl/ixl.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -188,8 +188,8 @@ enum ixl_dbg_mask {
* The driver currently always uses 32 byte Rx descriptors.
*/
#define IXL_DEFAULT_RING 1024
-#define IXL_MAX_RING 8160
-#define IXL_MIN_RING 32
+#define IXL_MAX_RING 4096
+#define IXL_MIN_RING 64
#define IXL_RING_INCREMENT 32
#define IXL_AQ_LEN 256
@@ -207,6 +207,9 @@ enum ixl_dbg_mask {
* This is the max watchdog interval, i.e. the time that can
* pass between any two TX clean operations; such cleans only
* happen while the TX hardware is functioning.
+ *
+ * XXX: The watchdog currently counts down in units of (hz).
+ * Setting it to just (hz) makes queues report as hung under
+ * even light stress.
*/
#define IXL_WATCHDOG (10 * hz)
@@ -214,8 +217,8 @@ enum ixl_dbg_mask {
* This parameters control when the driver calls the routine to reclaim
* transmit descriptors.
*/
-#define IXL_TX_CLEANUP_THRESHOLD (que->num_desc / 8)
-#define IXL_TX_OP_THRESHOLD (que->num_desc / 32)
+#define IXL_TX_CLEANUP_THRESHOLD (que->num_tx_desc / 8)
+#define IXL_TX_OP_THRESHOLD (que->num_tx_desc / 32)
#define MAX_MULTICAST_ADDR 128
@@ -232,9 +235,10 @@ enum ixl_dbg_mask {
#define IXL_MAX_FRAME 9728
#define IXL_MAX_TX_SEGS 8
#define IXL_MAX_TSO_SEGS 128
-#define IXL_SPARSE_CHAIN 6
+#define IXL_SPARSE_CHAIN 7
#define IXL_QUEUE_HUNG 0x80000000
#define IXL_MIN_TSO_MSS 64
+#define IXL_MAX_DMA_SEG_SIZE ((16 * 1024) - 1)
#define IXL_RSS_KEY_SIZE_REG 13
#define IXL_RSS_KEY_SIZE (IXL_RSS_KEY_SIZE_REG * 4)
@@ -287,6 +291,8 @@ enum ixl_dbg_mask {
/* Misc flags for ixl_vsi.flags */
#define IXL_FLAGS_KEEP_TSO4 (1 << 0)
#define IXL_FLAGS_KEEP_TSO6 (1 << 1)
+#define IXL_FLAGS_USES_MSIX (1 << 2)
+#define IXL_FLAGS_IS_VF (1 << 3)
#define IXL_VF_RESET_TIMEOUT 100
@@ -505,7 +511,11 @@ struct ixl_queue {
u32 eims; /* This queue's EIMS bit */
struct resource *res;
void *tag;
- int num_desc; /* both tx and rx */
+ int num_tx_desc; /* # of TX descriptors */
+ int num_rx_desc; /* # of RX descriptors */
+#ifdef DEV_NETMAP
+ int num_desc; /* for compatibility with current netmap code in kernel */
+#endif
struct tx_ring txr;
struct rx_ring rxr;
struct task task;
@@ -536,9 +546,12 @@ struct ixl_vsi {
enum i40e_vsi_type type;
int id;
u16 num_queues;
+ int num_tx_desc;
+ int num_rx_desc;
u32 rx_itr_setting;
u32 tx_itr_setting;
u16 max_frame_size;
+ bool enable_head_writeback;
struct ixl_queue *queues; /* head of queues */
@@ -596,7 +609,7 @@ ixl_rx_unrefreshed(struct ixl_queue *que)
if (rxr->next_check > rxr->next_refresh)
return (rxr->next_check - rxr->next_refresh - 1);
else
- return ((que->num_desc + rxr->next_check) -
+ return ((que->num_rx_desc + rxr->next_check) -
rxr->next_refresh - 1);
}
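A worked instance of the wrap-around branch above, with illustrative
values: num_rx_desc = 1024, next_check = 10, next_refresh = 1000 gives
(1024 + 10) - 1000 - 1 = 33 descriptors processed but not yet
refreshed. As a self-checking snippet:

#include <assert.h>

int
main(void)
{
	int num_rx_desc = 1024, next_check = 10, next_refresh = 1000;

	/* wrap-around branch of ixl_rx_unrefreshed() */
	assert((num_rx_desc + next_check) - next_refresh - 1 == 33);
	return (0);
}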
@@ -681,6 +694,9 @@ void ixl_free_que_rx(struct ixl_queue *);
int ixl_mq_start(struct ifnet *, struct mbuf *);
int ixl_mq_start_locked(struct ifnet *, struct tx_ring *);
void ixl_deferred_mq_start(void *, int);
+
+void ixl_vsi_setup_rings_size(struct ixl_vsi *, int, int);
+int ixl_queue_hang_check(struct ixl_vsi *);
void ixl_free_vsi(struct ixl_vsi *);
void ixl_qflush(struct ifnet *);
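ixl_vsi_setup_rings_size(), prototyped above and called from
ixlv_attach() earlier in this diff, validates the loader tunables
against the new ring bounds before storing them per-VSI. A sketch of
its presumed shape; the shared implementation lives in ixl_txrx.c and
may report errors differently:

void
ixl_vsi_setup_rings_size(struct ixl_vsi *vsi, int tx_ring_size,
    int rx_ring_size)
{
	device_t dev = vsi->dev;

	if (tx_ring_size < IXL_MIN_RING || tx_ring_size > IXL_MAX_RING ||
	    tx_ring_size % IXL_RING_INCREMENT != 0) {
		device_printf(dev, "Invalid tx_ring_size %d; using %d\n",
		    tx_ring_size, IXL_DEFAULT_RING);
		vsi->num_tx_desc = IXL_DEFAULT_RING;
	} else
		vsi->num_tx_desc = tx_ring_size;

	if (rx_ring_size < IXL_MIN_RING || rx_ring_size > IXL_MAX_RING ||
	    rx_ring_size % IXL_RING_INCREMENT != 0) {
		device_printf(dev, "Invalid rx_ring_size %d; using %d\n",
		    rx_ring_size, IXL_DEFAULT_RING);
		vsi->num_rx_desc = IXL_DEFAULT_RING;
	} else
		vsi->num_rx_desc = rx_ring_size;
}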
@@ -689,4 +705,8 @@ void ixl_qflush(struct ifnet *);
uint64_t ixl_get_counter(if_t ifp, ift_counter cnt);
#endif
void ixl_get_default_rss_key(u32 *);
+const char * i40e_vc_stat_str(struct i40e_hw *hw,
+ enum virtchnl_status_code stat_err);
+void ixl_set_busmaster(device_t);
+void ixl_set_msix_enable(device_t);
#endif /* _IXL_H_ */
diff --git a/sys/dev/ixl/ixl_iw.c b/sys/dev/ixl/ixl_iw.c
index e1b99e48eb48..06b0e7e70ae7 100644
--- a/sys/dev/ixl/ixl_iw.c
+++ b/sys/dev/ixl/ixl_iw.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -374,9 +374,16 @@ ixl_iw_register(struct ixl_iw_ops *ops)
{
struct ixl_iw_pf_entry *pf_entry;
int err = 0;
+ int iwarp_cap_on_pfs = 0;
INIT_DEBUGOUT("begin");
-
+ LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
+ iwarp_cap_on_pfs += pf_entry->pf->hw.func_caps.iwarp;
+ if (!iwarp_cap_on_pfs && ixl_enable_iwarp) {
+ printf("%s: the device is not iwarp-capable, registering dropped\n",
+ __func__);
+ return (ENODEV);
+ }
if (ixl_enable_iwarp == 0) {
printf("%s: enable_iwarp is off, registering dropped\n",
__func__);
@@ -389,19 +396,20 @@ ixl_iw_register(struct ixl_iw_ops *ops)
}
mtx_lock(&ixl_iw.mtx);
-
if (ixl_iw.registered) {
printf("%s: iwarp driver already registered\n", __func__);
- err = EBUSY;
+ err = (EBUSY);
goto out;
}
+ ixl_iw.registered = true;
+ mtx_unlock(&ixl_iw.mtx);
ixl_iw.tq = taskqueue_create("ixl_iw", M_NOWAIT,
taskqueue_thread_enqueue, &ixl_iw.tq);
if (ixl_iw.tq == NULL) {
printf("%s: failed to create queue\n", __func__);
- err = ENOMEM;
- goto out;
+ ixl_iw.registered = false;
+ return (ENOMEM);
}
taskqueue_start_threads(&ixl_iw.tq, 1, PI_NET, "ixl iw");
@@ -410,20 +418,19 @@ ixl_iw_register(struct ixl_iw_ops *ops)
if (ixl_iw.ops == NULL) {
printf("%s: failed to allocate memory\n", __func__);
taskqueue_free(ixl_iw.tq);
- err = ENOMEM;
- goto out;
+ ixl_iw.registered = false;
+ return (ENOMEM);
}
ixl_iw.ops->init = ops->init;
ixl_iw.ops->stop = ops->stop;
- ixl_iw.registered = true;
+ mtx_lock(&ixl_iw.mtx);
LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
if (pf_entry->state.pf == IXL_IW_PF_STATE_ON) {
pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_ON;
taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
}
-
out:
mtx_unlock(&ixl_iw.mtx);
@@ -434,9 +441,23 @@ int
ixl_iw_unregister(void)
{
struct ixl_iw_pf_entry *pf_entry;
+ int iwarp_cap_on_pfs = 0;
INIT_DEBUGOUT("begin");
+ LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
+ iwarp_cap_on_pfs += pf_entry->pf->hw.func_caps.iwarp;
+ if (!iwarp_cap_on_pfs && ixl_enable_iwarp) {
+ printf("%s: attempt to unregister driver when no iwarp-capable device present\n",
+ __func__);
+ return (ENODEV);
+ }
+
+ if (ixl_enable_iwarp == 0) {
+ printf("%s: attempt to unregister driver when enable_iwarp is off\n",
+ __func__);
+ return (ENODEV);
+ }
mtx_lock(&ixl_iw.mtx);
if (!ixl_iw.registered) {
diff --git a/sys/dev/ixl/ixl_iw.h b/sys/dev/ixl/ixl_iw.h
index 7f4de0aebe76..bd59b2d45d01 100644
--- a/sys/dev/ixl/ixl_iw.h
+++ b/sys/dev/ixl/ixl_iw.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -36,7 +36,7 @@
#define _IXL_IW_H_
#define IXL_IW_MAX_USER_PRIORITY 8
-
+#define IXL_IW_MAX_MSIX 64
struct ixl_iw_msix_mapping {
u8 itr_indx;
diff --git a/sys/dev/ixl/ixl_iw_int.h b/sys/dev/ixl/ixl_iw_int.h
index 12fb6ded1c09..66253701663e 100644
--- a/sys/dev/ixl/ixl_iw_int.h
+++ b/sys/dev/ixl/ixl_iw_int.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ixl/ixl_pf.h b/sys/dev/ixl/ixl_pf.h
index e57e5b80c2fe..1a09a4b67e4b 100644
--- a/sys/dev/ixl/ixl_pf.h
+++ b/sys/dev/ixl/ixl_pf.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -46,6 +46,7 @@
#define VF_FLAG_MAC_ANTI_SPOOF 0x10
#define IXL_PF_STATE_EMPR_RESETTING (1 << 0)
+#define IXL_PF_STATE_FW_LLDP_DISABLED (1 << 1)
struct ixl_vf {
struct ixl_vsi vsi;
@@ -94,7 +95,6 @@ struct ixl_pf {
/* Tunable values */
bool enable_msix;
int max_queues;
- int ringsz;
bool enable_tx_fc_filter;
int dynamic_rx_itr;
int dynamic_tx_itr;
@@ -157,6 +157,17 @@ struct ixl_pf {
"Set to 0 to disable link.\n" \
"Use \"sysctl -x\" to view flags properly."
+#define IXL_SYSCTL_HELP_SUPPORTED_SPEED \
+"\nSupported link speeds.\n" \
+"Flags:\n" \
+"\t 0x1 - 100M\n" \
+"\t 0x2 - 1G\n" \
+"\t 0x4 - 10G\n" \
+"\t 0x8 - 20G\n" \
+"\t0x10 - 25G\n" \
+"\t0x20 - 40G\n\n" \
+"Use \"sysctl -x\" to view flags properly."
+
#define IXL_SYSCTL_HELP_FC \
"\nSet flow control mode using the values below.\n" \
"\t0 - off\n" \
@@ -168,6 +179,11 @@ struct ixl_pf {
"\nExecutes a \"Get Link Status\" command on the Admin Queue, and displays" \
" the response." \
+#define IXL_SYSCTL_HELP_FW_LLDP \
+"\nFW LLDP engine:\n" \
+"\t0 - disable\n" \
+"\t1 - enable\n"
+
extern const char * const ixl_fc_string[6];
MALLOC_DECLARE(M_IXL);
@@ -204,8 +220,6 @@ void ixl_debug_core(struct ixl_pf *, enum ixl_dbg_mask, char *, ...);
* PF-only function declarations
*/
-void ixl_set_busmaster(device_t);
-void ixl_set_msix_enable(device_t);
int ixl_setup_interface(device_t, struct ixl_vsi *);
void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
char * ixl_aq_speed_to_str(enum i40e_aq_link_speed);
@@ -273,8 +287,8 @@ void ixl_configure_legacy(struct ixl_pf *);
void ixl_free_pci_resources(struct ixl_pf *);
void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
void ixl_config_rss(struct ixl_pf *);
-int ixl_set_advertised_speeds(struct ixl_pf *, int);
-void ixl_get_initial_advertised_speeds(struct ixl_pf *);
+int ixl_set_advertised_speeds(struct ixl_pf *, int, bool);
+void ixl_set_initial_advertised_speeds(struct ixl_pf *);
void ixl_print_nvm_version(struct ixl_pf *pf);
void ixl_add_device_sysctls(struct ixl_pf *);
void ixl_handle_mdd_event(struct ixl_pf *);
@@ -287,7 +301,8 @@ int ixl_aq_get_link_status(struct ixl_pf *,
int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
void ixl_handle_empr_reset(struct ixl_pf *);
-int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *);
+int ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up);
+int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *, bool is_up);
void ixl_set_queue_rx_itr(struct ixl_queue *);
void ixl_set_queue_tx_itr(struct ixl_queue *);
@@ -330,6 +345,9 @@ void ixl_free_mac_filters(struct ixl_vsi *vsi);
void ixl_update_vsi_stats(struct ixl_vsi *);
void ixl_vsi_reset_stats(struct ixl_vsi *);
+int ixl_vsi_setup_queues(struct ixl_vsi *vsi);
+void ixl_vsi_free_queues(struct ixl_vsi *vsi);
+
/*
* I2C Function prototypes
*/
@@ -339,4 +357,7 @@ s32 ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
s32 ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 data);
+int ixl_get_fw_lldp_status(struct ixl_pf *pf);
+int ixl_attach_get_link_status(struct ixl_pf *);
+
#endif /* _IXL_PF_H_ */
diff --git a/sys/dev/ixl/ixl_pf_i2c.c b/sys/dev/ixl/ixl_pf_i2c.c
index 23531274dbcc..3258d72ba70f 100644
--- a/sys/dev/ixl/ixl_pf_i2c.c
+++ b/sys/dev/ixl/ixl_pf_i2c.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ixl/ixl_pf_iov.c b/sys/dev/ixl/ixl_pf_iov.c
index 2662d0df65a2..541b99d899f3 100644
--- a/sys/dev/ixl/ixl_pf_iov.c
+++ b/sys/dev/ixl/ixl_pf_iov.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -60,12 +60,12 @@ static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t
static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
-static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_txq_info *info);
-static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_rxq_info *info);
+static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info);
+static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info);
static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
-static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct i40e_virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
+static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
enum i40e_queue_type *last_type, uint16_t *last_queue);
-static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct i40e_virtchnl_vector_map *vector);
+static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector);
static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
@@ -403,7 +403,7 @@ ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
if (i == IXL_VF_RESET_TIMEOUT)
device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED);
vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
@@ -416,7 +416,7 @@ ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
ixl_vf_setup_vsi(pf, vf);
ixl_vf_map_queues(pf, vf);
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
ixl_flush(hw);
}
@@ -424,7 +424,7 @@ static int
ixl_vc_opcode_level(uint16_t opcode)
{
switch (opcode) {
- case I40E_VIRTCHNL_OP_GET_STATS:
+ case VIRTCHNL_OP_GET_STATS:
return (10);
default:
return (5);
@@ -471,19 +471,19 @@ static void
ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
- struct i40e_virtchnl_version_info reply;
+ struct virtchnl_version_info reply;
- if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
+ if (msg_size != sizeof(struct virtchnl_version_info)) {
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_VERSION,
I40E_ERR_PARAM);
return;
}
- vf->version = ((struct i40e_virtchnl_version_info *)msg)->minor;
+ vf->version = ((struct virtchnl_version_info *)msg)->minor;
- reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
- reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
- ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
+ reply.major = VIRTCHNL_VERSION_MAJOR;
+ reply.minor = VIRTCHNL_VERSION_MINOR;
+ ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
sizeof(reply));
}
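For reference, the renamed negotiation constants in virtchnl.h
presumably keep the old values:

#define VIRTCHNL_VERSION_MAJOR			1
#define VIRTCHNL_VERSION_MINOR			1
#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0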
@@ -493,7 +493,7 @@ ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
{
if (msg_size != 0) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_RESET_VF,
I40E_ERR_PARAM);
return;
}
@@ -507,30 +507,30 @@ static void
ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
- struct i40e_virtchnl_vf_resource reply;
+ struct virtchnl_vf_resource reply;
if ((vf->version == 0 && msg_size != 0) ||
(vf->version == 1 && msg_size != 4)) {
device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
- " for VF version %d.%d\n", I40E_VIRTCHNL_VERSION_MAJOR,
+ " for VF version %d.%d\n", VIRTCHNL_VERSION_MAJOR,
vf->version);
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
I40E_ERR_PARAM);
return;
}
bzero(&reply, sizeof(reply));
- if (vf->version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
- reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ if (vf->version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
+ reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
else
/* Force VF RSS setup by PF in 1.1+ VFs */
- reply.vf_offload_flags = *(u32 *)msg & (
- I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN);
+ reply.vf_cap_flags = *(u32 *)msg & (
+ VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_PF |
+ VIRTCHNL_VF_OFFLOAD_VLAN);
reply.num_vsis = 1;
reply.num_queue_pairs = vf->vsi.num_queues;
@@ -538,17 +538,17 @@ ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
reply.rss_key_size = 52;
reply.rss_lut_size = 64;
reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
- reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+ reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
- ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
I40E_SUCCESS, &reply, sizeof(reply));
}
static int
ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
- struct i40e_virtchnl_txq_info *info)
+ struct virtchnl_txq_info *info)
{
struct i40e_hw *hw;
struct i40e_hmc_obj_txq txq;
@@ -593,7 +593,7 @@ ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
static int
ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
- struct i40e_virtchnl_rxq_info *info)
+ struct virtchnl_rxq_info *info)
{
struct i40e_hw *hw;
struct i40e_hmc_obj_rxq rxq;
@@ -662,13 +662,13 @@ static void
ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
- struct i40e_virtchnl_vsi_queue_config_info *info;
- struct i40e_virtchnl_queue_pair_info *pair;
+ struct virtchnl_vsi_queue_config_info *info;
+ struct virtchnl_queue_pair_info *pair;
uint16_t expected_msg_size;
int i;
if (msg_size < sizeof(*info)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_ERR_PARAM);
return;
}
@@ -677,7 +677,7 @@ ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) {
device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues);
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_ERR_PARAM);
return;
}
@@ -686,7 +686,7 @@ ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (msg_size != expected_msg_size) {
device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
vf->vf_num, msg_size, expected_msg_size);
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_ERR_PARAM);
return;
}
@@ -694,7 +694,7 @@ ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (info->vsi_id != vf->vsi.vsi_num) {
device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_ERR_PARAM);
return;
}
@@ -708,29 +708,29 @@ ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
pair->txq.queue_id >= vf->vsi.num_queues) {
i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
return;
}
if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
return;
}
if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
return;
}
}
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
+ ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES);
}
static void
ixl_vf_set_qctl(struct ixl_pf *pf,
- const struct i40e_virtchnl_vector_map *vector,
+ const struct virtchnl_vector_map *vector,
enum i40e_queue_type cur_type, uint16_t cur_queue,
enum i40e_queue_type *last_type, uint16_t *last_queue)
{
@@ -759,7 +759,7 @@ ixl_vf_set_qctl(struct ixl_pf *pf,
static void
ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
- const struct i40e_virtchnl_vector_map *vector)
+ const struct virtchnl_vector_map *vector)
{
struct i40e_hw *hw;
u_int qindex;
@@ -816,28 +816,28 @@ static void
ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
- struct i40e_virtchnl_irq_map_info *map;
- struct i40e_virtchnl_vector_map *vector;
+ struct virtchnl_irq_map_info *map;
+ struct virtchnl_vector_map *vector;
struct i40e_hw *hw;
int i, largest_txq, largest_rxq;
hw = &pf->hw;
if (msg_size < sizeof(*map)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
map = msg;
if (map->num_vectors == 0) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
@@ -848,7 +848,7 @@ ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
vector->vsi_id != vf->vsi.vsi_num) {
i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
+ VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
return;
}
@@ -856,7 +856,7 @@ ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
largest_rxq = fls(vector->rxq_map) - 1;
if (largest_rxq >= vf->vsi.num_queues) {
i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
@@ -866,7 +866,7 @@ ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
largest_txq = fls(vector->txq_map) - 1;
if (largest_txq >= vf->vsi.num_queues) {
i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
@@ -875,7 +875,7 @@ ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
vector->txitr_idx > IXL_MAX_ITR_IDX) {
i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
@@ -883,18 +883,18 @@ ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
ixl_vf_config_vector(pf, vf, vector);
}
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
+ ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP);
}
static void
ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
- struct i40e_virtchnl_queue_select *select;
+ struct virtchnl_queue_select *select;
int error = 0;
if (msg_size != sizeof(*select)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
I40E_ERR_PARAM);
return;
}
@@ -902,7 +902,7 @@ ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
select = msg;
if (select->vsi_id != vf->vsi.vsi_num ||
select->rx_queues == 0 || select->tx_queues == 0) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
I40E_ERR_PARAM);
return;
}
@@ -957,23 +957,23 @@ ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
}
if (error) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
I40E_ERR_TIMEOUT);
return;
}
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
+ ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES);
}
static void
ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
void *msg, uint16_t msg_size)
{
- struct i40e_virtchnl_queue_select *select;
+ struct virtchnl_queue_select *select;
int error = 0;
if (msg_size != sizeof(*select)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
I40E_ERR_PARAM);
return;
}
@@ -981,7 +981,7 @@ ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
select = msg;
if (select->vsi_id != vf->vsi.vsi_num ||
select->rx_queues == 0 || select->tx_queues == 0) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
I40E_ERR_PARAM);
return;
}
@@ -1039,12 +1039,12 @@ ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
}
if (error) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
I40E_ERR_TIMEOUT);
return;
}
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
+ ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
}
static bool
@@ -1085,8 +1085,8 @@ static void
ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
- struct i40e_virtchnl_ether_addr_list *addr_list;
- struct i40e_virtchnl_ether_addr *addr;
+ struct virtchnl_ether_addr_list *addr_list;
+ struct virtchnl_ether_addr *addr;
struct ixl_vsi *vsi;
int i;
size_t expected_size;
@@ -1094,7 +1094,7 @@ ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
vsi = &vf->vsi;
if (msg_size < sizeof(*addr_list)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
I40E_ERR_PARAM);
return;
}
@@ -1106,7 +1106,7 @@ ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (addr_list->num_elements == 0 ||
addr_list->vsi_id != vsi->vsi_num ||
msg_size != expected_size) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
I40E_ERR_PARAM);
return;
}
@@ -1114,7 +1114,7 @@ ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
for (i = 0; i < addr_list->num_elements; i++) {
if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
+ VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
return;
}
}
@@ -1124,20 +1124,20 @@ ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
}
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
+ ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR);
}
static void
ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
- struct i40e_virtchnl_ether_addr_list *addr_list;
- struct i40e_virtchnl_ether_addr *addr;
+ struct virtchnl_ether_addr_list *addr_list;
+ struct virtchnl_ether_addr *addr;
size_t expected_size;
int i;
if (msg_size < sizeof(*addr_list)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
I40E_ERR_PARAM);
return;
}
@@ -1149,7 +1149,7 @@ ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (addr_list->num_elements == 0 ||
addr_list->vsi_id != vf->vsi.vsi_num ||
msg_size != expected_size) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
I40E_ERR_PARAM);
return;
}
@@ -1158,7 +1158,7 @@ ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
addr = &addr_list->list[i];
if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
+ VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM);
return;
}
}
@@ -1168,7 +1168,7 @@ ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
}
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
+ ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR);
}
static enum i40e_status_code
@@ -1189,13 +1189,13 @@ static void
ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
- struct i40e_virtchnl_vlan_filter_list *filter_list;
+ struct virtchnl_vlan_filter_list *filter_list;
enum i40e_status_code code;
size_t expected_size;
int i;
if (msg_size < sizeof(*filter_list)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
}
@@ -1206,20 +1206,20 @@ ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (filter_list->num_elements == 0 ||
filter_list->vsi_id != vf->vsi.vsi_num ||
msg_size != expected_size) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
}
if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
}
for (i = 0; i < filter_list->num_elements; i++) {
if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
}
@@ -1227,26 +1227,26 @@ ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
code = ixl_vf_enable_vlan_strip(pf, vf);
if (code != I40E_SUCCESS) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
}
for (i = 0; i < filter_list->num_elements; i++)
ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
+ ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN);
}
static void
ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
- struct i40e_virtchnl_vlan_filter_list *filter_list;
+ struct virtchnl_vlan_filter_list *filter_list;
int i;
size_t expected_size;
if (msg_size < sizeof(*filter_list)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
I40E_ERR_PARAM);
return;
}
@@ -1257,21 +1257,21 @@ ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (filter_list->num_elements == 0 ||
filter_list->vsi_id != vf->vsi.vsi_num ||
msg_size != expected_size) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
I40E_ERR_PARAM);
return;
}
for (i = 0; i < filter_list->num_elements; i++) {
if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
I40E_ERR_PARAM);
return;
}
}
if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
I40E_ERR_PARAM);
return;
}
@@ -1279,76 +1279,76 @@ ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
for (i = 0; i < filter_list->num_elements; i++)
ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
+ ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN);
}
static void
ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
void *msg, uint16_t msg_size)
{
- struct i40e_virtchnl_promisc_info *info;
+ struct virtchnl_promisc_info *info;
enum i40e_status_code code;
if (msg_size != sizeof(*info)) {
i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
return;
}
if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
return;
}
info = msg;
if (info->vsi_id != vf->vsi.vsi_num) {
i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
return;
}
code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
- info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
+ info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
if (code != I40E_SUCCESS) {
i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
return;
}
code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
- info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
+ info->flags & FLAG_VF_MULTICAST_PROMISC, NULL);
if (code != I40E_SUCCESS) {
i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
return;
}
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
+ ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
}
static void
ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
- struct i40e_virtchnl_queue_select *queue;
+ struct virtchnl_queue_select *queue;
if (msg_size != sizeof(*queue)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
I40E_ERR_PARAM);
return;
}
queue = msg;
if (queue->vsi_id != vf->vsi.vsi_num) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
I40E_ERR_PARAM);
return;
}
ixl_update_eth_stats(&vf->vsi);
- ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
+ ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS,
I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
}
@@ -1357,14 +1357,14 @@ ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_hw *hw;
- struct i40e_virtchnl_rss_key *key;
+ struct virtchnl_rss_key *key;
struct i40e_aqc_get_set_rss_key_data key_data;
enum i40e_status_code status;
hw = &pf->hw;
if (msg_size < sizeof(*key)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
I40E_ERR_PARAM);
return;
}
@@ -1374,7 +1374,7 @@ ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (key->key_len > 52) {
device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
vf->vf_num, key->key_len, 52);
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
I40E_ERR_PARAM);
return;
}
@@ -1382,7 +1382,7 @@ ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (key->vsi_id != vf->vsi.vsi_num) {
device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
I40E_ERR_PARAM);
return;
}
@@ -1400,19 +1400,19 @@ ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (status) {
device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
I40E_ERR_ADMIN_QUEUE_ERROR);
return;
}
} else {
for (int i = 0; i < (key->key_len / 4); i++)
- i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, IXL_GLOBAL_VF_NUM(hw, vf)), ((u32 *)key->key)[i]);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
}
DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
vf->vf_num, key->key[0]);
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY);
+ ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY);
}
static void
@@ -1420,13 +1420,13 @@ ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_hw *hw;
- struct i40e_virtchnl_rss_lut *lut;
+ struct virtchnl_rss_lut *lut;
enum i40e_status_code status;
hw = &pf->hw;
if (msg_size < sizeof(*lut)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
I40E_ERR_PARAM);
return;
}
@@ -1436,7 +1436,7 @@ ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (lut->lut_entries > 64) {
device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
vf->vf_num, lut->lut_entries, 64);
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
I40E_ERR_PARAM);
return;
}
@@ -1444,7 +1444,7 @@ ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (lut->vsi_id != vf->vsi.vsi_num) {
device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
I40E_ERR_PARAM);
return;
}
@@ -1455,19 +1455,19 @@ ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (status) {
device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
I40E_ERR_ADMIN_QUEUE_ERROR);
return;
}
} else {
for (int i = 0; i < (lut->lut_entries / 4); i++)
- i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, IXL_GLOBAL_VF_NUM(hw, vf)), ((u32 *)lut->lut)[i]);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
}
DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
vf->vf_num, lut->lut[0], lut->lut_entries);
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT);
+ ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT);
}
static void
@@ -1475,12 +1475,12 @@ ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_hw *hw;
- struct i40e_virtchnl_rss_hena *hena;
+ struct virtchnl_rss_hena *hena;
hw = &pf->hw;
if (msg_size < sizeof(*hena)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA,
I40E_ERR_PARAM);
return;
}
@@ -1488,13 +1488,39 @@ ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
hena = msg;
/* Set HENA */
- i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, IXL_GLOBAL_VF_NUM(hw, vf)), (u32)hena->hena);
- i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, IXL_GLOBAL_VF_NUM(hw, vf)), (u32)(hena->hena >> 32));
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
vf->vf_num, hena->hena);
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA);
+ ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA);
+}
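
The two HENA writes above split a 64-bit hash-enable word across two 32-bit registers. A minimal standalone sketch of that pattern (helper name hypothetical; i40e_write_rx_ctl and the I40E_VFQF_HENA1 register macro are the real interfaces used above):

        /* Write a u64 as two consecutive 32-bit register halves */
        static inline void
        ixl_write_u64_split(struct i40e_hw *hw, u32 reg_lo, u32 reg_hi, u64 val)
        {
                i40e_write_rx_ctl(hw, reg_lo, (u32)val);         /* low 32 bits */
                i40e_write_rx_ctl(hw, reg_hi, (u32)(val >> 32)); /* high 32 bits */
        }

With it, the HENA programming collapses to ixl_write_u64_split(hw, I40E_VFQF_HENA1(0, vf->vf_num), I40E_VFQF_HENA1(1, vf->vf_num), hena->hena).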
+
+static void
+ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct virtchnl_pf_event event;
+ struct i40e_hw *hw;
+
+ hw = &pf->hw;
+ event.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ event.severity = PF_EVENT_SEVERITY_INFO;
+ event.event_data.link_event.link_status = pf->vsi.link_active;
+ event.event_data.link_event.link_speed =
+ (enum virtchnl_link_speed)hw->phy.link_info.link_speed;
+
+ ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
+ sizeof(event));
+}
+
+void
+ixl_broadcast_link_state(struct ixl_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_vfs; i++)
+ ixl_notify_vf_link_state(pf, &pf->vfs[i]);
}
void
@@ -1528,58 +1554,66 @@ ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
return;
switch (opcode) {
- case I40E_VIRTCHNL_OP_VERSION:
+ case VIRTCHNL_OP_VERSION:
ixl_vf_version_msg(pf, vf, msg, msg_size);
break;
- case I40E_VIRTCHNL_OP_RESET_VF:
+ case VIRTCHNL_OP_RESET_VF:
ixl_vf_reset_msg(pf, vf, msg, msg_size);
break;
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
+ /* Notify VF of link state after it obtains queues, as this is
+ * the last thing it will do as part of initialization
+ */
+ ixl_notify_vf_link_state(pf, vf);
break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
break;
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_ENABLE_QUEUES:
ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
+ /* Notify VF of link state after it obtains queues, as this is
+ * the last thing it will do as part of initialization
+ */
+ ixl_notify_vf_link_state(pf, vf);
break;
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
break;
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
break;
- case I40E_VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_ADD_VLAN:
ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
break;
- case I40E_VIRTCHNL_OP_DEL_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
break;
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
break;
- case I40E_VIRTCHNL_OP_GET_STATS:
+ case VIRTCHNL_OP_GET_STATS:
ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
break;
- case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
break;
- case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
break;
- case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ case VIRTCHNL_OP_SET_RSS_HENA:
ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
break;
/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
- case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
- case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
default:
i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
break;
@@ -1746,6 +1780,7 @@ ixl_iov_uninit(device_t dev)
if (pf->vfs[i].vsi.seid != 0)
i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
+ ixl_free_mac_filters(&pf->vfs[i].vsi);
DDPRINTF(dev, "VF %d: %d released\n",
i, pf->vfs[i].qtag.num_allocated);
DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
diff --git a/sys/dev/ixl/ixl_pf_iov.h b/sys/dev/ixl/ixl_pf_iov.h
index 569226825294..68c37074e634 100644
--- a/sys/dev/ixl/ixl_pf_iov.h
+++ b/sys/dev/ixl/ixl_pf_iov.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -42,9 +42,6 @@
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
-#define IXL_GLOBAL_VF_NUM(hw, vf) \
- (vf->vf_num + hw->func_caps.vf_base_id)
-
/* Public functions */
/*
@@ -61,5 +58,6 @@ int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
void ixl_initialize_sriov(struct ixl_pf *pf);
void ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event);
void ixl_handle_vflr(void *arg, int pending);
+void ixl_broadcast_link_state(struct ixl_pf *pf);
#endif /* _IXL_PF_IOV_H_ */
diff --git a/sys/dev/ixl/ixl_pf_main.c b/sys/dev/ixl/ixl_pf_main.c
index 1befa463154e..0351d5b0852c 100644
--- a/sys/dev/ixl/ixl_pf_main.c
+++ b/sys/dev/ixl/ixl_pf_main.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -50,14 +50,16 @@
#include <dev/netmap/netmap_kern.h>
#endif /* DEV_NETMAP */
-static int ixl_setup_queue(struct ixl_queue *, struct ixl_pf *, int);
+static int ixl_vsi_setup_queue(struct ixl_vsi *, struct ixl_queue *, int);
static u64 ixl_max_aq_speed_to_value(u8);
static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
+static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
/* Sysctls */
-static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
-static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
-static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
@@ -80,6 +82,8 @@ static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
@@ -87,6 +91,7 @@ static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#ifdef IXL_IW
extern int ixl_enable_iwarp;
+extern int ixl_limit_iwarp_msix;
#endif
const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
@@ -101,6 +106,12 @@ const char * const ixl_fc_string[6] = {
"Default"
};
+static char *ixl_fec_string[3] = {
+ "CL108 RS-FEC",
+ "CL74 FC-FEC/BASE-R",
+ "None"
+};
+
MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
void
@@ -242,12 +253,13 @@ ixl_init_locked(struct ixl_pf *pf)
/* Get the latest mac address... User might use a LAA */
bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
- I40E_ETH_LENGTH_OF_ADDRESS);
+ ETH_ALEN);
if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
(i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
+ device_printf(dev, "ixl_init_locked: reconfigure MAC addr\n");
ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
bcopy(tmpaddr, hw->mac.addr,
- I40E_ETH_LENGTH_OF_ADDRESS);
+ ETH_ALEN);
ret = i40e_aq_mac_address_write(hw,
I40E_AQC_WRITE_TYPE_LAA_ONLY,
hw->mac.addr, NULL);
@@ -328,7 +340,6 @@ ixl_init_locked(struct ixl_pf *pf)
"initialize iwarp failed, code %d\n", ret);
}
#endif
-
}
@@ -387,19 +398,20 @@ retry:
hw->func_caps.num_rx_qp,
hw->func_caps.base_queue);
#endif
+ struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back;
+ osdep->i2c_intfc_num = ixl_find_i2c_interface(pf);
+ if (osdep->i2c_intfc_num != -1)
+ pf->has_i2c = true;
+
/* Print a subset of the capability information. */
device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
(hw->func_caps.mdio_port_mode == 2) ? "I2C" :
+ (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
(hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
"MDIO shared");
- struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back;
- osdep->i2c_intfc_num = ixl_find_i2c_interface(pf);
- if (osdep->i2c_intfc_num != -1)
- pf->has_i2c = true;
-
return (error);
}
@@ -477,25 +489,25 @@ ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
/* For the set_advertise sysctl */
void
-ixl_get_initial_advertised_speeds(struct ixl_pf *pf)
+ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
- enum i40e_status_code status;
- struct i40e_aq_get_phy_abilities_resp abilities;
+ int err;
- /* Set initial sysctl values */
- status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities,
- NULL);
- if (status) {
+ /* Make sure to initialize the device to the complete list of
+ * supported speeds on driver load, to ensure unloading and
+ * reloading the driver will restore this value.
+ */
+ err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
+ if (err) {
/* Non-fatal error */
- device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n",
- __func__, status);
+ device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
+ __func__, err);
return;
}
pf->advertised_speed =
- ixl_convert_sysctl_aq_link_speed(abilities.link_speed, false);
+ ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}
int
@@ -515,10 +527,8 @@ ixl_teardown_hw_structs(struct ixl_pf *pf)
}
}
- // XXX: This gets called when we know the adminq is inactive;
- // so we already know it's setup when we get here.
-
/* Shutdown admin queue */
+ ixl_disable_intr0(hw);
status = i40e_shutdown_adminq(hw);
if (status)
device_printf(dev,
@@ -540,7 +550,7 @@ ixl_reset(struct ixl_pf *pf)
i40e_clear_hw(hw);
error = i40e_pf_reset(hw);
if (error) {
- device_printf(dev, "init: PF reset failure");
+ device_printf(dev, "init: PF reset failure\n");
error = EIO;
goto err_out;
}
@@ -548,7 +558,7 @@ ixl_reset(struct ixl_pf *pf)
error = i40e_init_adminq(hw);
if (error) {
device_printf(dev, "init: Admin queue init failure;"
- " status code %d", error);
+ " status code %d\n", error);
error = EIO;
goto err_out;
}
@@ -618,6 +628,12 @@ ixl_reset(struct ixl_pf *pf)
}
+ /* Re-enable admin queue interrupt */
+ if (pf->msix > 1) {
+ ixl_configure_intr0_msix(pf);
+ ixl_enable_intr0(hw);
+ }
+
err_out:
return (error);
}
@@ -630,6 +646,7 @@ ixl_handle_que(void *context, int pending)
{
struct ixl_queue *que = context;
struct ixl_vsi *vsi = que->vsi;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct i40e_hw *hw = vsi->hw;
struct tx_ring *txr = &que->txr;
struct ifnet *ifp = vsi->ifp;
@@ -648,9 +665,11 @@ ixl_handle_que(void *context, int pending)
}
}
- /* Reenable this interrupt - hmmm */
- ixl_enable_queue(hw, que->me);
- return;
+ /* Re-enable queue interrupt */
+ if (pf->msix > 1)
+ ixl_enable_queue(hw, que->me);
+ else
+ ixl_enable_intr0(hw);
}
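
With MSI-X, each queue pair owns a vector that must be re-armed once the deferred task finishes; in MSI/legacy mode everything shares interrupt cause 0, so that is re-armed instead. Re-arming a queue vector is a single register write; a sketch using the dynamic-control masks that appear elsewhere in this file (helper name hypothetical):

        static void
        ixl_rearm_queue_vector(struct i40e_hw *hw, int qidx)
        {
                u32 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
                    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
                    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);

                /* Enable the vector and clear its pending-bit-array bit */
                wr32(hw, I40E_PFINT_DYN_CTLN(qidx), reg);
        }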
@@ -669,13 +688,15 @@ ixl_intr(void *arg)
struct ifnet *ifp = vsi->ifp;
struct tx_ring *txr = &que->txr;
u32 icr0;
- bool more_tx, more_rx;
+ bool more;
pf->admin_irq++;
- /* Protect against spurious interrupts */
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
+ /* Clear PBA at start of ISR if using legacy interrupts */
+ if (pf->msix == 0)
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
icr0 = rd32(hw, I40E_PFINT_ICR0);
@@ -685,20 +706,22 @@ ixl_intr(void *arg)
taskqueue_enqueue(pf->tq, &pf->vflr_task);
#endif
- if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
taskqueue_enqueue(pf->tq, &pf->adminq);
- }
- if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
++que->irqs;
- more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
+ more = ixl_rxeof(que, IXL_RX_LIMIT);
IXL_TX_LOCK(txr);
- more_tx = ixl_txeof(que);
+ ixl_txeof(que);
if (!drbr_empty(vsi->ifp, txr->br))
- more_tx = 1;
+ ixl_mq_start_locked(ifp, txr);
IXL_TX_UNLOCK(txr);
+
+ if (more)
+ taskqueue_enqueue(que->tq, &que->task);
}
ixl_enable_intr0(hw);
@@ -713,7 +736,7 @@ ixl_intr(void *arg)
void
ixl_msix_que(void *arg)
{
- struct ixl_queue *que = arg;
+ struct ixl_queue *que = arg;
struct ixl_vsi *vsi = que->vsi;
struct i40e_hw *hw = vsi->hw;
struct tx_ring *txr = &que->txr;
@@ -846,26 +869,25 @@ ixl_set_promisc(struct ixl_vsi *vsi)
int err, mcnt = 0;
bool uni = FALSE, multi = FALSE;
- if (ifp->if_flags & IFF_ALLMULTI)
- multi = TRUE;
+ if (ifp->if_flags & IFF_PROMISC)
+ uni = multi = TRUE;
+ else if (ifp->if_flags & IFF_ALLMULTI)
+ multi = TRUE;
else { /* Need to count the multicast addresses */
struct ifmultiaddr *ifma;
if_maddr_rlock(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- if (mcnt == MAX_MULTICAST_ADDR)
- break;
- mcnt++;
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ if (mcnt == MAX_MULTICAST_ADDR) {
+ multi = TRUE;
+ break;
+ }
+ mcnt++;
}
if_maddr_runlock(ifp);
}
- if (mcnt >= MAX_MULTICAST_ADDR)
- multi = TRUE;
- if (ifp->if_flags & IFF_PROMISC)
- uni = TRUE;
-
err = i40e_aq_set_vsi_unicast_promiscuous(hw,
vsi->seid, uni, NULL, TRUE);
err = i40e_aq_set_vsi_multicast_promiscuous(hw,
@@ -965,11 +987,10 @@ ixl_del_multi(struct ixl_vsi *vsi)
ixl_del_hw_filters(vsi, mcnt);
}
-
/*********************************************************************
* Timer routine
*
- * This routine checks for link status,updates statistics,
+ * This routine checks for link status, updates statistics,
* and runs the watchdog check.
*
* Only runs when the driver is configured UP and RUNNING.
@@ -980,14 +1001,6 @@ void
ixl_local_timer(void *arg)
{
struct ixl_pf *pf = arg;
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- device_t dev = pf->dev;
- struct tx_ring *txr;
- int hung = 0;
- u32 mask;
- s32 timer, new_timer;
IXL_PF_LOCK_ASSERT(pf);
@@ -997,44 +1010,11 @@ ixl_local_timer(void *arg)
/* Update stats */
ixl_update_stats_counters(pf);
- /* Check status of the queues */
- mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
- I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- txr = &que->txr;
- timer = atomic_load_acq_32(&txr->watchdog_timer);
- if (timer > 0) {
- new_timer = timer - hz;
- if (new_timer <= 0) {
- atomic_store_rel_32(&txr->watchdog_timer, -1);
- device_printf(dev, "WARNING: queue %d "
- "appears to be hung!\n", que->me);
- ++hung;
- } else {
- /*
- * If this fails, that means something in the TX path has updated
- * the watchdog, so it means the TX path is still working and
- * the watchdog doesn't need to countdown.
- */
- atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
- /* Any queues with outstanding work get a sw irq */
- wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
- }
- }
- }
- /* Reset when a queue shows hung */
- if (hung)
- goto hung;
+ /* Increment stat when a queue shows hung */
+ if (ixl_queue_hang_check(&pf->vsi))
+ pf->watchdog_events++;
callout_reset(&pf->timer, hz, ixl_local_timer, pf);
- return;
-
-hung:
- device_printf(dev, "WARNING: Resetting!\n");
- pf->watchdog_events++;
- ixl_init_locked(pf);
}
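
The per-queue hung-transmit scan moves out of the timer into ixl_queue_hang_check(), and a hang now only bumps a counter instead of reinitializing the interface. A plausible shape for the helper, reconstructed from the countdown logic removed above (sketch; the real implementation lives in ixl_txrx.c):

        int
        ixl_queue_hang_check(struct ixl_vsi *vsi)
        {
                struct ixl_queue *que = vsi->queues;
                s32 timer, new_timer;
                int hung = 0;

                for (int i = 0; i < vsi->num_queues; i++, que++) {
                        struct tx_ring *txr = &que->txr;

                        timer = atomic_load_acq_32(&txr->watchdog_timer);
                        if (timer <= 0)
                                continue;
                        new_timer = timer - hz;
                        if (new_timer <= 0) {
                                atomic_store_rel_32(&txr->watchdog_timer, -1);
                                hung++;
                        } else {
                                /* If this fails, the TX path refreshed the
                                 * watchdog, so no countdown is needed. */
                                atomic_cmpset_rel_32(&txr->watchdog_timer,
                                    timer, new_timer);
                        }
                }
                return (hung);
        }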
void
@@ -1042,13 +1022,29 @@ ixl_link_up_msg(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
struct ifnet *ifp = pf->vsi.ifp;
+ char *req_fec_string, *neg_fec_string;
+ u8 fec_abilities;
+
+ fec_abilities = hw->phy.link_info.req_fec_info;
+ /* If both RS and KR are requested, only show RS */
+ if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
+ req_fec_string = ixl_fec_string[0];
+ else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
+ req_fec_string = ixl_fec_string[1];
+ else
+ req_fec_string = ixl_fec_string[2];
- log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, FEC: %s, Autoneg: %s, Flow Control: %s\n",
+ if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
+ neg_fec_string = ixl_fec_string[0];
+ else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
+ neg_fec_string = ixl_fec_string[1];
+ else
+ neg_fec_string = ixl_fec_string[2];
+
+ log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
ifp->if_xname,
ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
- (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) ?
- "Clause 74 BASE-R FEC" : (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) ?
- "Clause 108 RS-FEC" : "None",
+ req_fec_string, neg_fec_string,
(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
(hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
@@ -1072,9 +1068,16 @@ ixl_update_link_status(struct ixl_pf *pf)
if (pf->link_up) {
if (vsi->link_active == FALSE) {
vsi->link_active = TRUE;
+#if __FreeBSD_version >= 1100000
ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed);
+#else
+ if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->link_speed));
+#endif
if_link_state_change(ifp, LINK_STATE_UP);
ixl_link_up_msg(pf);
+#ifdef PCI_IOV
+ ixl_broadcast_link_state(pf);
+#endif
}
} else { /* Link down */
if (vsi->link_active == TRUE) {
@@ -1082,10 +1085,11 @@ ixl_update_link_status(struct ixl_pf *pf)
device_printf(dev, "Link is Down\n");
if_link_state_change(ifp, LINK_STATE_DOWN);
vsi->link_active = FALSE;
+#ifdef PCI_IOV
+ ixl_broadcast_link_state(pf);
+#endif
}
}
-
- return;
}
/*********************************************************************
@@ -1348,37 +1352,6 @@ ixl_setup_queue_msix(struct ixl_vsi *vsi)
}
/*
- * When used in a virtualized environment PCI BUSMASTER capability may not be set
- * so explicity set it here and rewrite the ENABLE in the MSIX control register
- * at this point to cause the host to successfully initialize us.
- */
-void
-ixl_set_busmaster(device_t dev)
-{
- u16 pci_cmd_word;
-
- pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
- pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
- pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
-}
-
-/*
- * rewrite the ENABLE in the MSIX control register
- * to cause the host to successfully initialize us.
- */
-void
-ixl_set_msix_enable(device_t dev)
-{
- int msix_ctrl, rid;
-
- pci_find_cap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
-}
-
-/*
* Allocate MSI/X vectors from the OS.
* Returns 0 for legacy, 1 for MSI, >1 for MSIX.
*/
@@ -1387,10 +1360,15 @@ ixl_init_msix(struct ixl_pf *pf)
{
device_t dev = pf->dev;
struct i40e_hw *hw = &pf->hw;
+#ifdef IXL_IW
+#if __FreeBSD_version >= 1100000
+ cpuset_t cpu_set;
+#endif
+#endif
int auto_max_queues;
int rid, want, vectors, queues, available;
#ifdef IXL_IW
- int iw_want, iw_vectors;
+ int iw_want = 0, iw_vectors;
pf->iw_msix = 0;
#endif
@@ -1399,9 +1377,6 @@ ixl_init_msix(struct ixl_pf *pf)
if (!pf->enable_msix)
goto no_msix;
- /* Ensure proper operation in virtualized environment */
- ixl_set_busmaster(dev);
-
/* First try MSI/X */
rid = PCIR_BAR(IXL_MSIX_BAR);
pf->msix_mem = bus_alloc_resource_any(dev,
@@ -1416,6 +1391,7 @@ ixl_init_msix(struct ixl_pf *pf)
available = pci_msix_count(dev);
if (available < 2) {
/* system has msix disabled (0), or only one vector (1) */
+ device_printf(pf->dev, "Less than two MSI-X vectors available\n");
bus_release_resource(dev, SYS_RES_MEMORY,
rid, pf->msix_mem);
pf->msix_mem = NULL;
@@ -1470,9 +1446,20 @@ ixl_init_msix(struct ixl_pf *pf)
}
#ifdef IXL_IW
- if (ixl_enable_iwarp) {
- /* iWARP wants additional vector for CQP */
- iw_want = mp_ncpus + 1;
+ if (ixl_enable_iwarp && hw->func_caps.iwarp) {
+#if __FreeBSD_version >= 1100000
+ if (bus_get_cpus(dev, INTR_CPUS, sizeof(cpu_set), &cpu_set) == 0) {
+ iw_want = min(CPU_COUNT(&cpu_set), IXL_IW_MAX_MSIX);
+ }
+#endif
+ if (!iw_want)
+ iw_want = min(mp_ncpus, IXL_IW_MAX_MSIX);
+ if (ixl_limit_iwarp_msix > 0)
+ iw_want = min(iw_want, ixl_limit_iwarp_msix);
+ else
+ iw_want = min(iw_want, 1);
+
available -= vectors;
if (available > 0) {
iw_vectors = (available >= iw_want) ?
@@ -1489,8 +1476,13 @@ ixl_init_msix(struct ixl_pf *pf)
"Using MSIX interrupts with %d vectors\n", vectors);
pf->msix = vectors;
#ifdef IXL_IW
- if (ixl_enable_iwarp)
+ if (ixl_enable_iwarp && hw->func_caps.iwarp) {
pf->iw_msix = iw_vectors;
+ device_printf(pf->dev,
+ "Reserving %d MSIX interrupts for iWARP CEQ and AEQ\n",
+ iw_vectors);
+ }
#endif
pf->vsi.num_queues = queues;
@@ -1547,6 +1539,7 @@ ixl_configure_intr0_msix(struct ixl_pf *pf)
I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
I40E_PFINT_ICR0_ENA_VFLR_MASK |
+ I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
@@ -1627,8 +1620,6 @@ ixl_configure_legacy(struct ixl_pf *pf)
| I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
| I40E_PFINT_ICR0_ENA_GRST_MASK
| I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
- | I40E_PFINT_ICR0_ENA_GPIO_MASK
- | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
| I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
| I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
| I40E_PFINT_ICR0_ENA_VFLR_MASK
@@ -1646,7 +1637,7 @@ ixl_configure_legacy(struct ixl_pf *pf)
/* Associate the queue pair to the vector and enable the q int */
reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
| (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
- | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+ | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
wr32(hw, I40E_QINT_RQCTL(0), reg);
reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
@@ -1671,6 +1662,8 @@ ixl_allocate_pci_resources(struct ixl_pf *pf)
device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
return (ENXIO);
}
+ /* Ensure proper PCI device operation */
+ ixl_set_busmaster(dev);
/* Save off the PCI information */
hw->vendor_id = pci_get_vendor(dev);
@@ -1844,7 +1837,7 @@ ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
|| phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
@@ -1867,7 +1860,11 @@ ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_UNKNOWN, 0, NULL);
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
+ if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
+ if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}
/*********************************************************************
@@ -1907,7 +1904,7 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
ifp->if_qflush = ixl_qflush;
- ifp->if_snd.ifq_maxlen = que->num_desc - 2;
+ ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2;
vsi->max_frame_size =
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
@@ -1916,7 +1913,7 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
/* Set TSO limits */
ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
- ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
+ ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE;
/*
* Tell the upper layer(s) we support long frames.
@@ -1968,12 +1965,16 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
device_printf(dev,
"Error getting supported media types, err %d,"
" AQ error %d\n", aq_error, hw->aq.asq_last_status);
- return (0);
- }
- pf->supported_speeds = abilities.link_speed;
- ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);
+ } else {
+ pf->supported_speeds = abilities.link_speed;
+#if __FreeBSD_version >= 1100000
+ ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);
+#else
+ if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
+#endif
- ixl_add_ifmedia(vsi, hw->phy.phy_types);
+ ixl_add_ifmedia(vsi, hw->phy.phy_types);
+ }
/* Use autoselect media by default */
ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
@@ -2001,6 +2002,7 @@ ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
/* Print out message if an unqualified module is found */
if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ (pf->advertised_speed) &&
(!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
(!(status->link_info & I40E_AQ_LINK_UP)))
device_printf(dev, "Link failed because "
@@ -2157,17 +2159,27 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
u16 size;
/* Setup the HMC TX Context */
- size = que->num_desc * sizeof(struct i40e_tx_desc);
- memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
+ size = que->num_tx_desc * sizeof(struct i40e_tx_desc);
+ bzero(&tctx, sizeof(tctx));
tctx.new_context = 1;
tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
- tctx.qlen = que->num_desc;
- tctx.fc_ena = 0;
- tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
- /* Enable HEAD writeback */
- tctx.head_wb_ena = 1;
- tctx.head_wb_addr = txr->dma.pa +
- (que->num_desc * sizeof(struct i40e_tx_desc));
+ tctx.qlen = que->num_tx_desc;
+ tctx.fc_ena = 0; /* Disable FCoE */
+ /*
+ * This value needs to be pulled from the VSI that this queue
+ * is assigned to. Index into array is traffic class.
+ */
+ tctx.rdylist = vsi->info.qs_handle[0];
+ /*
+ * Set these to enable Head Writeback
+ * - Address is last entry in TX ring (reserved for HWB index)
+ * Leave these as 0 for Descriptor Writeback
+ */
+ if (vsi->enable_head_writeback) {
+ tctx.head_wb_ena = 1;
+ tctx.head_wb_addr = txr->dma.pa +
+ (que->num_tx_desc * sizeof(struct i40e_tx_desc));
+ }
tctx.rdylist_act = 0;
err = i40e_clear_lan_tx_queue_context(hw, i);
if (err) {
@@ -2205,20 +2217,20 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
vsi->max_frame_size : max_rxmax;
rctx.dtype = 0;
- rctx.dsize = 1; /* do 32byte descriptors */
- rctx.hsplit_0 = 0; /* no HDR split initially */
+ rctx.dsize = 1; /* do 32byte descriptors */
+ rctx.hsplit_0 = 0; /* no header split */
rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
- rctx.qlen = que->num_desc;
+ rctx.qlen = que->num_rx_desc;
rctx.tphrdesc_ena = 1;
rctx.tphwdesc_ena = 1;
- rctx.tphdata_ena = 0;
- rctx.tphhead_ena = 0;
- rctx.lrxqthresh = 2;
+ rctx.tphdata_ena = 0; /* Header Split related */
+ rctx.tphhead_ena = 0; /* Header Split related */
+ rctx.lrxqthresh = 2; /* Interrupt at <128 desc avail */
rctx.crcstrip = 1;
rctx.l2tsel = 1;
- rctx.showiv = 1;
- rctx.fc_ena = 0;
- rctx.prefena = 1;
+ rctx.showiv = 1; /* Strip inner VLAN header */
+ rctx.fc_ena = 0; /* Disable FCoE */
+ rctx.prefena = 1; /* Prefetch descriptors */
err = i40e_clear_lan_rx_queue_context(hw, i);
if (err) {
@@ -2245,26 +2257,22 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
} else
#endif /* DEV_NETMAP */
- wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
+ wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_rx_desc - 1);
}
return (err);
}
-/*********************************************************************
- *
- * Free all VSI structs.
- *
- **********************************************************************/
+
+
void
-ixl_free_vsi(struct ixl_vsi *vsi)
+ixl_vsi_free_queues(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct ixl_queue *que = vsi->queues;
- /* Free station queues */
- if (!vsi->queues)
- goto free_filters;
+ if (NULL == vsi->queues)
+ return;
for (int i = 0; i < vsi->num_queues; i++, que++) {
struct tx_ring *txr = &que->txr;
@@ -2273,6 +2281,8 @@ ixl_free_vsi(struct ixl_vsi *vsi)
if (!mtx_initialized(&txr->mtx)) /* uninitialized */
continue;
IXL_TX_LOCK(txr);
+ if (txr->br)
+ buf_ring_free(txr->br, M_DEVBUF);
ixl_free_que_tx(que);
if (txr->base)
i40e_free_dma_mem(&pf->hw, &txr->dma);
@@ -2288,9 +2298,23 @@ ixl_free_vsi(struct ixl_vsi *vsi)
IXL_RX_UNLOCK(rxr);
IXL_RX_LOCK_DESTROY(rxr);
}
- free(vsi->queues, M_DEVBUF);
+}
+
+
+/*********************************************************************
+ *
+ * Free all VSI structs.
+ *
+ **********************************************************************/
+void
+ixl_free_vsi(struct ixl_vsi *vsi)
+{
+
+ /* Free station queues */
+ ixl_vsi_free_queues(vsi);
+ if (vsi->queues)
+ free(vsi->queues, M_DEVBUF);
-free_filters:
/* Free VSI filter list */
ixl_free_mac_filters(vsi);
}
@@ -2311,17 +2335,18 @@ ixl_free_mac_filters(struct ixl_vsi *vsi)
* Fill out fields in queue struct and setup tx/rx memory and structs
*/
static int
-ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
+ixl_vsi_setup_queue(struct ixl_vsi *vsi, struct ixl_queue *que, int index)
{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
device_t dev = pf->dev;
struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
struct tx_ring *txr = &que->txr;
struct rx_ring *rxr = &que->rxr;
int error = 0;
int rsize, tsize;
- que->num_desc = pf->ringsz;
+ que->num_tx_desc = vsi->num_tx_desc;
+ que->num_rx_desc = vsi->num_rx_desc;
que->me = index;
que->vsi = vsi;
@@ -2332,16 +2357,27 @@ ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
device_get_nameunit(dev), que->me);
mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
- /* Create the TX descriptor ring */
- tsize = roundup2((que->num_desc *
- sizeof(struct i40e_tx_desc)) +
- sizeof(u32), DBA_ALIGN);
+ /*
+ * Create the TX descriptor ring
+ *
+ * In Head Writeback mode, the descriptor ring is one bigger
+ * than the number of descriptors for space for the HW to
+ * write back index of last completed descriptor.
+ */
+ if (vsi->enable_head_writeback) {
+ tsize = roundup2((que->num_tx_desc *
+ sizeof(struct i40e_tx_desc)) +
+ sizeof(u32), DBA_ALIGN);
+ } else {
+ tsize = roundup2((que->num_tx_desc *
+ sizeof(struct i40e_tx_desc)), DBA_ALIGN);
+ }
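        /*
         * Worked example of the two sizes (a sketch; assumes 1024 TX
         * descriptors, the 16-byte struct i40e_tx_desc, and DBA_ALIGN
         * of 128):
         *   Head Writeback:       roundup2(1024 * 16 + sizeof(u32), 128) = 16512
         *   Descriptor Writeback: roundup2(1024 * 16, 128)               = 16384
         */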
if (i40e_allocate_dma_mem(hw,
&txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
device_printf(dev,
"Unable to allocate TX Descriptor memory\n");
error = ENOMEM;
- goto fail;
+ goto err_destroy_tx_mtx;
}
txr->base = (struct i40e_tx_desc *)txr->dma.va;
bzero((void *)txr->base, tsize);
@@ -2350,7 +2386,7 @@ ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
device_printf(dev,
"Critical Failure setting up TX structures\n");
error = ENOMEM;
- goto fail;
+ goto err_free_tx_dma;
}
/* Allocate a buf ring */
txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
@@ -2359,10 +2395,10 @@ ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
device_printf(dev,
"Critical Failure setting up TX buf ring\n");
error = ENOMEM;
- goto fail;
+ goto err_free_tx_data;
}
- rsize = roundup2(que->num_desc *
+ rsize = roundup2(que->num_rx_desc *
sizeof(union i40e_rx_desc), DBA_ALIGN);
rxr->que = que;
rxr->tail = I40E_QRX_TAIL(que->me);
@@ -2377,7 +2413,7 @@ ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
device_printf(dev,
"Unable to allocate RX Descriptor memory\n");
error = ENOMEM;
- goto fail;
+ goto err_destroy_rx_mtx;
}
rxr->base = (union i40e_rx_desc *)rxr->dma.va;
bzero((void *)rxr->base, rsize);
@@ -2386,27 +2422,43 @@ ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
device_printf(dev,
"Critical Failure setting up receive structs\n");
error = ENOMEM;
- goto fail;
+ goto err_free_rx_dma;
}
return (0);
-fail:
- if (rxr->base)
- i40e_free_dma_mem(&pf->hw, &rxr->dma);
- if (mtx_initialized(&rxr->mtx))
- mtx_destroy(&rxr->mtx);
- if (txr->br) {
- buf_ring_free(txr->br, M_DEVBUF);
- txr->br = NULL;
- }
- if (txr->base)
- i40e_free_dma_mem(&pf->hw, &txr->dma);
- if (mtx_initialized(&txr->mtx))
- mtx_destroy(&txr->mtx);
+err_free_rx_dma:
+ i40e_free_dma_mem(&pf->hw, &rxr->dma);
+err_destroy_rx_mtx:
+ mtx_destroy(&rxr->mtx);
+ /* err_free_tx_buf_ring */
+ buf_ring_free(txr->br, M_DEVBUF);
+err_free_tx_data:
+ ixl_free_que_tx(que);
+err_free_tx_dma:
+ i40e_free_dma_mem(&pf->hw, &txr->dma);
+err_destroy_tx_mtx:
+ mtx_destroy(&txr->mtx);
+
+ return (error);
+}
+
+int
+ixl_vsi_setup_queues(struct ixl_vsi *vsi)
+{
+ struct ixl_queue *que;
+ int error = 0;
+
+ for (int i = 0; i < vsi->num_queues; i++) {
+ que = &vsi->queues[i];
+ error = ixl_vsi_setup_queue(vsi, que, i);
+ if (error)
+ break;
+ }
return (error);
}
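
ixl_vsi_setup_queues() stops at the first queue that fails; each queue unwinds its own partial state via the goto chain above, while queues set up earlier remain allocated. A caller-side sketch of the expected pairing (hypothetical error path, not verbatim driver code):

        if (ixl_vsi_setup_queues(vsi) != 0) {
                /* Tears down rings and locks of the queues that did succeed */
                ixl_vsi_free_queues(vsi);
                free(vsi->queues, M_DEVBUF);
                vsi->queues = NULL;
        }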
+
/*********************************************************************
*
* Allocate memory for the VSI (virtual station interface) and their
@@ -2419,7 +2471,6 @@ ixl_setup_stations(struct ixl_pf *pf)
{
device_t dev = pf->dev;
struct ixl_vsi *vsi;
- struct ixl_queue *que;
int error = 0;
vsi = &pf->vsi;
@@ -2429,24 +2480,22 @@ ixl_setup_stations(struct ixl_pf *pf)
vsi->num_vlans = 0;
vsi->back = pf;
+ if (pf->msix > 1)
+ vsi->flags |= IXL_FLAGS_USES_MSIX;
+
/* Get memory for the station queues */
if (!(vsi->queues =
(struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate queue memory\n");
error = ENOMEM;
- return (error);
+ goto ixl_setup_stations_err;
}
/* Then setup each queue */
- for (int i = 0; i < vsi->num_queues; i++) {
- que = &vsi->queues[i];
- error = ixl_setup_queue(que, pf, i);
- if (error)
- return (error);
- }
-
- return (0);
+ error = ixl_vsi_setup_queues(vsi);
+ixl_setup_stations_err:
+ return (error);
}
/*
@@ -2749,10 +2798,10 @@ ixl_add_hw_stats(struct ixl_pf *pf)
char queue_namebuf[QUEUE_NAME_LEN];
/* Driver statistics */
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
CTLFLAG_RD, &pf->watchdog_events,
"Watchdog timeouts");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
CTLFLAG_RD, &pf->admin_irq,
"Admin Queue IRQ Handled");
@@ -2809,6 +2858,15 @@ ixl_add_hw_stats(struct ixl_pf *pf)
CTLFLAG_RD, &(txr->itr), 0,
"Queue Tx ITR Interval");
#ifdef IXL_DEBUG
+ SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "txr_watchdog",
+ CTLFLAG_RD, &(txr->watchdog_timer), 0,
+ "Ticks before watchdog timer causes interface reinit");
+ SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_avail",
+ CTLFLAG_RD, &(txr->next_avail), 0,
+ "Next TX descriptor to be used");
+ SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_to_clean",
+ CTLFLAG_RD, &(txr->next_to_clean), 0,
+ "Next TX descriptor to be cleaned");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
CTLFLAG_RD, &(rxr->not_done),
"Queue Rx Descriptors not Done");
@@ -2818,16 +2876,16 @@ ixl_add_hw_stats(struct ixl_pf *pf)
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
CTLFLAG_RD, &(rxr->next_check), 0,
"Queue Rx Descriptors not Done");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
+ CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
+ sizeof(struct ixl_queue),
+ ixl_sysctl_qrx_tail_handler, "IU",
+ "Queue Receive Descriptor Tail");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
sizeof(struct ixl_queue),
ixl_sysctl_qtx_tail_handler, "IU",
"Queue Transmit Descriptor Tail");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
- CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
- sizeof(struct ixl_queue),
- ixl_sysctl_qrx_tail_handler, "IU",
- "Queue Receive Descriptor Tail");
#endif
}
@@ -2935,29 +2993,25 @@ ixl_set_rss_key(struct ixl_pf *pf)
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
device_t dev = pf->dev;
+ u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
enum i40e_status_code status;
-#ifdef RSS
- u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
-#else
- u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687,
- 0x183cfd8c, 0xce880440, 0x580cbc3c,
- 0x35897377, 0x328b25e1, 0x4fa98922,
- 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
- 0x0, 0x0, 0x0};
-#endif
#ifdef RSS
/* Fetch the configured RSS key */
rss_getkey((uint8_t *) &rss_seed);
+#else
+ ixl_get_default_rss_key(rss_seed);
#endif
/* Fill out hash function seed */
if (hw->mac.type == I40E_MAC_X722) {
struct i40e_aqc_get_set_rss_key_data key_data;
- bcopy(rss_seed, key_data.standard_rss_key, 40);
+ bcopy(rss_seed, &key_data, 52);
status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
if (status)
- device_printf(dev, "i40e_aq_set_rss_key status %s, error %s\n",
- i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ device_printf(dev,
+ "i40e_aq_set_rss_key status %s, error %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
@@ -3016,10 +3070,7 @@ ixl_set_rss_hlut(struct ixl_pf *pf)
u32 lut = 0;
enum i40e_status_code status;
- if (hw->mac.type == I40E_MAC_X722)
- lut_entry_width = 7;
- else
- lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
+ lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
/* Populate the LUT with max no. of queues in round robin fashion */
u8 hlut_buf[512];
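
The LUT is populated round-robin: entry i selects queue (i % num_queues), masked to the entry width the device reports, so with four queues the table reads 0,1,2,3,0,1,... An illustrative loop consistent with the code that follows (sketch):

        for (int i = 0; i < 512; i++) {
                lut = (i % vsi->num_queues) & ((0x1 << lut_entry_width) - 1);
                hlut_buf[i] = lut;
        }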
@@ -3330,7 +3381,7 @@ ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
** the add bit.
*/
SLIST_FOREACH(f, &vsi->ftl, next) {
- if (f->flags == flags) {
+ if ((f->flags & flags) == flags) {
b = &a[j]; // a pox on fvl long names :)
bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
if (f->vlan == IXL_VLAN_ANY) {
@@ -3391,8 +3442,13 @@ ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
if (f->flags & IXL_FILTER_DEL) {
e = &d[j]; // a pox on fvl long names :)
bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
- e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ if (f->vlan == IXL_VLAN_ANY) {
+ e->vlan_tag = 0;
+ e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ } else {
+ e->vlan_tag = f->vlan;
+ }
/* delete entry from vsi list */
SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
free(f, M_DEVBUF);
@@ -3445,7 +3501,7 @@ ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
break;
- i40e_msec_delay(10);
+ i40e_usec_delay(10);
}
if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
device_printf(pf->dev, "TX queue %d still disabled!\n",
@@ -3479,7 +3535,7 @@ ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
break;
- i40e_msec_delay(10);
+ i40e_usec_delay(10);
}
if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
device_printf(pf->dev, "RX queue %d still disabled!\n",
@@ -3519,6 +3575,9 @@ ixl_enable_rings(struct ixl_vsi *vsi)
return (error);
}
+/*
+ * Returns error on first ring that is detected hung.
+ */
int
ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
@@ -3551,6 +3610,9 @@ ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
return (error);
}
+/*
+ * Returns error on first ring that is detected hung.
+ */
int
ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
@@ -3661,14 +3723,14 @@ ixl_handle_mdd_event(struct ixl_pf *pf)
if (reg & I40E_PF_MDET_TX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
device_printf(dev,
- "MDD TX event is for this function!");
+ "MDD TX event is for this function!\n");
pf_mdd_detected = true;
}
reg = rd32(hw, I40E_PF_MDET_RX);
if (reg & I40E_PF_MDET_RX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
device_printf(dev,
- "MDD RX event is for this function!");
+ "MDD RX event is for this function!\n");
pf_mdd_detected = true;
}
}
@@ -3923,81 +3985,185 @@ ixl_update_stats_counters(struct ixl_pf *pf)
}
int
-ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
+ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
device_t dev = pf->dev;
- bool is_up = false;
int error = 0;
- is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
-
/* Teardown */
if (is_up)
ixl_stop(pf);
+
+ ixl_teardown_queue_msix(vsi);
+
error = i40e_shutdown_lan_hmc(hw);
if (error)
device_printf(dev,
"Shutdown LAN HMC failed with code %d\n", error);
+
ixl_disable_intr0(hw);
ixl_teardown_adminq_msix(pf);
+
error = i40e_shutdown_adminq(hw);
if (error)
device_printf(dev,
"Shutdown Admin queue failed with code %d\n", error);
+ callout_drain(&pf->timer);
+
+ /* Free ring buffers, locks and filters */
+ ixl_vsi_free_queues(vsi);
+
+ /* Free VSI filter list */
+ ixl_free_mac_filters(vsi);
+
+ ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
+
+ return (error);
+}
+
+int
+ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = pf->dev;
+ int error = 0;
+
+ device_printf(dev, "Rebuilding driver state...\n");
+
+ error = i40e_pf_reset(hw);
+ if (error) {
+ device_printf(dev, "PF reset failure %s\n",
+ i40e_stat_str(hw, error));
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+
/* Setup */
error = i40e_init_adminq(hw);
if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
}
- error = ixl_setup_adminq_msix(pf);
+
+ i40e_clear_pxe_mode(hw);
+
+ error = ixl_get_hw_capabilities(pf);
if (error) {
- device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
- error);
+ device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
}
- ixl_configure_intr0_msix(pf);
- ixl_enable_intr0(hw);
+
error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
if (error) {
device_printf(dev, "init_lan_hmc failed: %d\n", error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
}
+
error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
if (error) {
device_printf(dev, "configure_lan_hmc failed: %d\n", error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
}
+
+ /* reserve a contiguous allocation for the PF's VSI */
+ error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag);
+ if (error) {
+ device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
+ error);
+ /* TODO: error handling */
+ }
+
+ device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
+ pf->qtag.num_allocated, pf->qtag.num_active);
+
+ error = ixl_switch_config(pf);
+ if (error) {
+ device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
+ error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+
+ if (ixl_vsi_setup_queues(vsi)) {
+ device_printf(dev, "setup queues failed!\n");
+ error = ENOMEM;
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+
+ if (pf->msix > 1) {
+ error = ixl_setup_adminq_msix(pf);
+ if (error) {
+ device_printf(dev, "ixl_setup_adminq_msix() error: %d\n",
+ error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+
+ ixl_configure_intr0_msix(pf);
+ ixl_enable_intr0(hw);
+
+ error = ixl_setup_queue_msix(vsi);
+ if (error) {
+ device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
+ error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+ } else {
+ error = ixl_setup_legacy(pf);
+ if (error) {
+ device_printf(dev, "ixl_setup_legacy() error: %d\n",
+ error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+ }
+
+ /* Determine link state */
+ if (ixl_attach_get_link_status(pf)) {
+ error = EINVAL;
+ /* TODO: error handling */
+ }
+
+ i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
+ ixl_get_fw_lldp_status(pf);
+
if (is_up)
ixl_init(pf);
+ device_printf(dev, "Rebuilding driver state done.\n");
return (0);
+
+ixl_rebuild_hw_structs_after_reset_err:
+ device_printf(dev, "Reload the driver to recover\n");
+ return (error);
}
void
ixl_handle_empr_reset(struct ixl_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct i40e_hw *hw = &pf->hw;
+ bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
int count = 0;
u32 reg;
+ ixl_prepare_for_reset(pf, is_up);
+
/* Typically finishes within 3-4 seconds */
while (count++ < 100) {
reg = rd32(hw, I40E_GLGEN_RSTAT)
- & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
+ & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
if (reg)
i40e_msec_delay(100);
else
break;
}
ixl_dbg(pf, IXL_DBG_INFO,
- "EMPR reset wait count: %d\n", count);
+ "EMPR reset wait count: %d\n", count);
- device_printf(dev, "Rebuilding driver state...\n");
- ixl_rebuild_hw_structs_after_reset(pf);
- device_printf(dev, "Rebuilding driver state done.\n");
+ ixl_rebuild_hw_structs_after_reset(pf, is_up);
atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
}
@@ -4269,15 +4435,19 @@ ixl_add_device_sysctls(struct ixl_pf *pf)
/* Set up sysctls */
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
- pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
+ pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
- pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
+ pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_current_speed, "A", "Current Port Speed");
+ pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
@@ -4306,38 +4476,45 @@ ixl_add_device_sysctls(struct ixl_pf *pf)
OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
&pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
+ SYSCTL_ADD_INT(ctx, ctx_list,
+ OID_AUTO, "tx_ring_size", CTLFLAG_RD,
+ &pf->vsi.num_tx_desc, 0, "TX ring size");
+
+ SYSCTL_ADD_INT(ctx, ctx_list,
+ OID_AUTO, "rx_ring_size", CTLFLAG_RD,
+ &pf->vsi.num_rx_desc, 0, "RX ring size");
+
/* Add FEC sysctls for 25G adapters */
- /*
- * XXX: These settings can be changed, but that isn't supported,
- * so these are read-only for now.
- */
- if (hw->device_id == I40E_DEV_ID_25G_B
- || hw->device_id == I40E_DEV_ID_25G_SFP28) {
+ if (i40e_is_25G_device(hw->device_id)) {
fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
fec_list = SYSCTL_CHILDREN(fec_node);
SYSCTL_ADD_PROC(ctx, fec_list,
- OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RD,
+ OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
SYSCTL_ADD_PROC(ctx, fec_list,
- OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RD,
+ OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
SYSCTL_ADD_PROC(ctx, fec_list,
- OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RD,
+ OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
SYSCTL_ADD_PROC(ctx, fec_list,
- OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RD,
+ OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
SYSCTL_ADD_PROC(ctx, fec_list,
- OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RD,
+ OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
}
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
+ pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
+
/* Add sysctls meant to print debug information, but don't list them
* in "sysctl -a" output. */
debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
@@ -4388,6 +4565,10 @@ ixl_add_device_sysctls(struct ixl_pf *pf)
OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
+
if (pf->has_i2c) {
SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
@@ -4430,7 +4611,7 @@ ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
** 3 - full
*/
int
-ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
+ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
@@ -4513,7 +4694,7 @@ ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
}
int
-ixl_current_speed(SYSCTL_HANDLER_ARGS)
+ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
@@ -4527,6 +4708,10 @@ ixl_current_speed(SYSCTL_HANDLER_ARGS)
return (error);
}
+/*
+ * Converts 8-bit speeds value to and from sysctl flags and
+ * Admin Queue flags.
+ */
static u8
ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
{
@@ -4551,7 +4736,7 @@ ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
}
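
The conversion is a bit-for-bit translation between the sysctl flag layout documented later in this file and the I40E_LINK_SPEED_* Admin Queue bits. One direction is sketched here (hypothetical helper; the enum values come from i40e_type.h):

        static u8
        sysctl_to_aq_speed(u8 speeds)
        {
                u8 aq = 0;

                if (speeds & 0x1)  aq |= I40E_LINK_SPEED_100MB;
                if (speeds & 0x2)  aq |= I40E_LINK_SPEED_1GB;
                if (speeds & 0x4)  aq |= I40E_LINK_SPEED_10GB;
                if (speeds & 0x8)  aq |= I40E_LINK_SPEED_20GB;
                if (speeds & 0x10) aq |= I40E_LINK_SPEED_25GB;
                if (speeds & 0x20) aq |= I40E_LINK_SPEED_40GB;
                return (aq);
        }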
int
-ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
+ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
@@ -4572,7 +4757,10 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
/* Prepare new config */
bzero(&config, sizeof(config));
- config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
+ if (from_aq)
+ config.link_speed = speeds;
+ else
+ config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
config.phy_type = abilities.phy_type;
config.phy_type_ext = abilities.phy_type_ext;
config.abilities = abilities.abilities
@@ -4580,6 +4768,7 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
/* Do aq command & restart link */
aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
@@ -4595,6 +4784,25 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
}
/*
+** Supported link speeds:
+** Flags:
+** 0x1 - 100 Mb
+** 0x2 - 1G
+** 0x4 - 10G
+** 0x8 - 20G
+** 0x10 - 25G
+** 0x20 - 40G
+*/
+static int
+ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
+
+ return sysctl_handle_int(oidp, NULL, supported, req);
+}
+
+/*
** Control link advertise speed:
** Flags:
** 0x1 - advertise 100 Mb
@@ -4607,10 +4815,9 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
** Set to 0 to disable link
*/
int
-ixl_set_advertise(SYSCTL_HANDLER_ARGS)
+ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
u8 converted_speeds;
int requested_ls = 0;
@@ -4621,18 +4828,16 @@ ixl_set_advertise(SYSCTL_HANDLER_ARGS)
error = sysctl_handle_int(oidp, &requested_ls, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
- /* Check if changing speeds is supported */
- switch (hw->device_id) {
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- device_printf(dev, "Changing advertised speeds not supported"
- " on this device.\n");
+
+ /* Error out if bits outside of possible flag range are set */
+ if ((requested_ls & ~((u8)0x3F)) != 0) {
+ device_printf(dev, "Input advertised speed out of range; "
+ "valid flags are: 0x%02x\n",
+ ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
return (EINVAL);
}
- if (requested_ls < 0 || requested_ls > 0xff) {
- }
- /* Check for valid value */
+ /* Check if adapter supports input value */
converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
device_printf(dev, "Invalid advertised speed; "
@@ -4641,7 +4846,7 @@ ixl_set_advertise(SYSCTL_HANDLER_ARGS)
return (EINVAL);
}
- error = ixl_set_advertised_speeds(pf, requested_ls);
+ error = ixl_set_advertised_speeds(pf, requested_ls, false);
if (error)
return (error);
@@ -4741,7 +4946,7 @@ ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
sbuf_finish(sbuf);
sbuf_delete(sbuf);
- return 0;
+ return (0);
}
void
@@ -4793,7 +4998,7 @@ ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
ifd->ifd_data == NULL) {
device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
__func__);
- device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n",
+ device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
__func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
device_printf(dev, "%s: data pointer: %p\n", __func__,
ifd->ifd_data);
@@ -4822,7 +5027,8 @@ ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
perrno = -EBUSY;
}
- if (status)
+ /* Let nvmupdate report errors; show them only when debug is enabled */
+ if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
i40e_stat_str(hw, status), perrno);
@@ -4843,6 +5049,9 @@ ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
* This routine is called whenever the user queries the status of
* the interface using ifconfig.
*
+ * When adding new media types here, make sure to add them to
+ * ixl_add_ifmedia(), too.
+ *
**********************************************************************/
void
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
@@ -4852,9 +5061,13 @@ ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
struct i40e_hw *hw = &pf->hw;
INIT_DEBUGOUT("ixl_media_status: begin");
+
+ /* Don't touch PF during reset */
+ if (atomic_load_acq_int(&pf->state) & IXL_PF_STATE_EMPR_RESETTING)
+ return;
+
IXL_PF_LOCK(pf);
- hw->phy.get_link_info = TRUE;
i40e_get_link_status(hw, &pf->link_up);
ixl_update_link_status(pf);
@@ -4887,7 +5100,7 @@ ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
ifmr->ifm_active |= IFM_1000_LX;
break;
case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
- ifmr->ifm_active |= IFM_OTHER;
+ ifmr->ifm_active |= IFM_1000_T;
break;
/* 10 G */
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
@@ -4904,8 +5117,10 @@ ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
break;
case I40E_PHY_TYPE_XAUI:
case I40E_PHY_TYPE_XFI:
+ ifmr->ifm_active |= IFM_10G_TWINAX;
+ break;
case I40E_PHY_TYPE_10GBASE_AOC:
- ifmr->ifm_active |= IFM_OTHER;
+ ifmr->ifm_active |= IFM_10G_AOC;
break;
/* 25 G */
case I40E_PHY_TYPE_25GBASE_KR:
@@ -4918,7 +5133,13 @@ ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
ifmr->ifm_active |= IFM_25G_SR;
break;
case I40E_PHY_TYPE_25GBASE_LR:
- ifmr->ifm_active |= IFM_UNKNOWN;
+ ifmr->ifm_active |= IFM_25G_LR;
+ break;
+ case I40E_PHY_TYPE_25GBASE_AOC:
+ ifmr->ifm_active |= IFM_25G_AOC;
+ break;
+ case I40E_PHY_TYPE_25GBASE_ACC:
+ ifmr->ifm_active |= IFM_25G_ACC;
break;
/* 40 G */
case I40E_PHY_TYPE_40GBASE_CR4:
@@ -5240,8 +5461,8 @@ ixl_phy_type_string(u32 bit_pos, bool ext)
"XLPPI",
"40GBASE-CR4",
"10GBASE-CR1",
- "Reserved (12)",
- "Reserved (13)",
+ "SFP+ Active DA",
+ "QSFP+ Active DA",
"Reserved (14)",
"Reserved (15)",
"Reserved (16)",
@@ -5261,14 +5482,18 @@ ixl_phy_type_string(u32 bit_pos, bool ext)
"20GBASE-KR2",
"Reserved (31)"
};
- static char * ext_phy_types_str[4] = {
+ static char * ext_phy_types_str[8] = {
"25GBASE-KR",
"25GBASE-CR",
"25GBASE-SR",
- "25GBASE-LR"
+ "25GBASE-LR",
+ "25GBASE-AOC",
+ "25GBASE-ACC",
+ "Reserved (6)",
+ "Reserved (7)"
};
- if (ext && bit_pos > 3) return "Invalid_Ext";
+ if (ext && bit_pos > 7) return "Invalid_Ext";
if (bit_pos > 31) return "Invalid";
return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
@@ -5330,7 +5555,6 @@ ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
return (error);
}
- /* TODO: Add 25G types */
sbuf_printf(buf, "\n"
"PHY Type : 0x%02x<%s>\n"
"Speed : 0x%02x\n"
@@ -5429,8 +5653,8 @@ ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
abilities.phy_id[0], abilities.phy_id[1],
abilities.phy_id[2], abilities.phy_id[3],
abilities.module_type[0], abilities.module_type[1],
- abilities.module_type[2], abilities.phy_type_ext >> 5,
- abilities.phy_type_ext & 0x1F,
+ abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
+ abilities.fec_cfg_curr_mod_ext_info & 0x1F,
abilities.ext_comp_code);
error = sbuf_finish(buf);
@@ -5744,21 +5968,23 @@ ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
return (ENOMEM);
}
+ bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
+
sbuf_cat(buf, "\n");
if (hw->mac.type == I40E_MAC_X722) {
- bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
if (status)
device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
- sbuf_printf(buf, "%40D", (u_char *)key_data.standard_rss_key, "");
} else {
for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
- sbuf_printf(buf, "%4D", (u_char *)&reg, "");
+ bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
}
}
+ ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
+
error = sbuf_finish(buf);
if (error)
device_printf(dev, "Error finishing sbuf: %d\n", error);
@@ -5767,6 +5993,52 @@ ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
return (error);
}
+static void
+ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
+{
+ int i, j, k, width;
+ char c;
+
+ if (length < 1 || buf == NULL) return;
+
+ int byte_stride = 16;
+ int lines = length / byte_stride;
+ int rem = length % byte_stride;
+ if (rem > 0)
+ lines++;
+
+ for (i = 0; i < lines; i++) {
+ width = (rem > 0 && i == lines - 1)
+ ? rem : byte_stride;
+
+ sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
+
+ for (j = 0; j < width; j++)
+ sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
+
+ if (width < byte_stride) {
+ for (k = 0; k < (byte_stride - width); k++)
+ sbuf_printf(sb, " ");
+ }
+
+ if (!text) {
+ sbuf_printf(sb, "\n");
+ continue;
+ }
+
+ for (j = 0; j < width; j++) {
+ c = (char)buf[i * byte_stride + j];
+ if (c < 32 || c > 126)
+ sbuf_printf(sb, ".");
+ else
+ sbuf_printf(sb, "%c", c);
+
+ if (j == width - 1)
+ sbuf_printf(sb, "\n");
+ }
+ }
+}
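/*
 * A minimal usage sketch of ixl_sbuf_print_bytes(); the byte values are
 * made up for illustration. With text=true, each 16-byte row is followed
 * by its printable-ASCII rendering (non-printable bytes become '.'):
 *
 *	u8 example[20] = "RSS hash key bytes!";
 *	ixl_sbuf_print_bytes(buf, example, sizeof(example), 0, true);
 *
 * appends output shaped like:
 *
 *	   0 | 52 53 53 20 68 61 73 68 20 6b 65 79 20 62 79 74 RSS hash key byt
 *	  16 | 65 73 21 00                                      es!.
 */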
+
static int
ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
{
@@ -5785,20 +6057,20 @@ ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
return (ENOMEM);
}
+ bzero(hlut, sizeof(hlut));
sbuf_cat(buf, "\n");
if (hw->mac.type == I40E_MAC_X722) {
- bzero(hlut, sizeof(hlut));
status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
if (status)
device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
- sbuf_printf(buf, "%512D", (u_char *)hlut, "");
} else {
for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
reg = rd32(hw, I40E_PFQF_HLUT(i));
- sbuf_printf(buf, "%4D", (u_char *)&reg, "");
+ bcopy(&reg, &hlut[i << 2], 4);
}
}
+ ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
error = sbuf_finish(buf);
if (error)
@@ -5958,7 +6230,7 @@ ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abi
return (EIO);
}
- *is_set = !!(abilities->phy_type_ext & bit_pos);
+ *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
return (0);
}
@@ -5973,10 +6245,10 @@ ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abi
/* Set new PHY config */
memset(&config, 0, sizeof(config));
- config.fec_config = abilities->phy_type_ext & ~(bit_pos);
+ config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
if (set)
config.fec_config |= bit_pos;
- if (config.fec_config != abilities->phy_type_ext) {
+ if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
config.phy_type = abilities->phy_type;
config.phy_type_ext = abilities->phy_type_ext;
@@ -6005,7 +6277,7 @@ ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
int mode, error = 0;
struct i40e_aq_get_phy_abilities_resp abilities;
- error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, &mode);
+ error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
if (error)
return (error);
/* Read in new mode */
@@ -6023,7 +6295,7 @@ ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
int mode, error = 0;
struct i40e_aq_get_phy_abilities_resp abilities;
- error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, &mode);
+ error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
if (error)
return (error);
/* Read in new mode */
@@ -6041,7 +6313,7 @@ ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
int mode, error = 0;
struct i40e_aq_get_phy_abilities_resp abilities;
- error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, &mode);
+ error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
if (error)
return (error);
/* Read in new mode */
@@ -6059,7 +6331,7 @@ ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
int mode, error = 0;
struct i40e_aq_get_phy_abilities_resp abilities;
- error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, &mode);
+ error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
if (error)
return (error);
/* Read in new mode */
@@ -6077,7 +6349,7 @@ ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
int mode, error = 0;
struct i40e_aq_get_phy_abilities_resp abilities;
- error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, &mode);
+ error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
if (error)
return (error);
/* Read in new mode */
@@ -6088,3 +6360,192 @@ ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
}
+static int
+ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct sbuf *buf;
+ int error = 0;
+ enum i40e_status_code status;
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for output.\n");
+ return (ENOMEM);
+ }
+
+ u8 *final_buff;
+ /* This amount is only necessary if reading the entire cluster into memory */
+#define IXL_FINAL_BUFF_SIZE (1280 * 1024)
+ final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
+ if (final_buff == NULL) {
+ device_printf(dev, "Could not allocate memory for output.\n");
+ goto out;
+ }
+ int final_buff_len = 0;
+
+ u8 cluster_id = 1;
+ bool more = true;
+
+ u8 dump_buf[4096];
+ u16 curr_buff_size = 4096;
+ u8 curr_next_table = 0;
+ u32 curr_next_index = 0;
+
+ u16 ret_buff_size;
+ u8 ret_next_table;
+ u32 ret_next_index;
+
+ sbuf_cat(buf, "\n");
+
+ while (more) {
+ status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
+ dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
+ if (status) {
+ device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
+ i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ goto free_out;
+ }
+
+ /* copy info out of temp buffer */
+ bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
+ final_buff_len += ret_buff_size;
+
+ if (ret_next_table != curr_next_table) {
+ /* We're done with the current table; we can dump out read data. */
+ sbuf_printf(buf, "%d:", curr_next_table);
+ int bytes_printed = 0;
+ while (bytes_printed < final_buff_len) {
+ sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
+ bytes_printed += 16;
+ }
+ sbuf_cat(buf, "\n");
+
+ /* The entire cluster has been read; we're finished */
+ if (ret_next_table == 0xFF)
+ break;
+
+ /* Otherwise clear the output buffer and continue reading */
+ bzero(final_buff, IXL_FINAL_BUFF_SIZE);
+ final_buff_len = 0;
+ }
+
+ if (ret_next_index == 0xFFFFFFFF)
+ ret_next_index = 0;
+
+ bzero(dump_buf, sizeof(dump_buf));
+ curr_next_table = ret_next_table;
+ curr_next_index = ret_next_index;
+ }
+
+free_out:
+ free(final_buff, M_DEVBUF);
+out:
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+ sbuf_delete(buf);
+
+ return (error);
+}
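/*
 * A condensed, illustrative restatement of the traversal contract the loop
 * above relies on (same cluster and sentinel semantics, error handling
 * elided): firmware walks a cluster table by table, ret_next_table == 0xFF
 * terminates the cluster, and ret_next_index == 0xFFFFFFFF means "restart
 * the next table at index 0".
 *
 *	u8 table = 0;
 *	u32 index = 0;
 *	do {
 *		i40e_aq_debug_dump(hw, cluster_id, table, index,
 *		    sizeof(dump_buf), dump_buf, &ret_buff_size,
 *		    &ret_next_table, &ret_next_index, NULL);
 *		if (ret_next_index == 0xFFFFFFFF)
 *			ret_next_index = 0;
 *		table = ret_next_table;
 *		index = ret_next_index;
 *	} while (table != 0xFF);
 */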
+
+static int
+ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int error = 0;
+ int state, new_state;
+ enum i40e_status_code status;
+ state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
+
+ /* Read in new mode */
+ error = sysctl_handle_int(oidp, &new_state, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ /* Already in requested state */
+ if (new_state == state)
+ return (error);
+
+ if (new_state == 0) {
+ if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
+ device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
+ return (EINVAL);
+ }
+
+ if (pf->hw.aq.api_maj_ver < 1 ||
+ (pf->hw.aq.api_maj_ver == 1 &&
+ pf->hw.aq.api_min_ver < 7)) {
+ device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
+ return (EINVAL);
+ }
+
+ i40e_aq_stop_lldp(&pf->hw, true, NULL);
+ i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
+ atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
+ } else {
+ status = i40e_aq_start_lldp(&pf->hw, NULL);
+ if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
+ device_printf(dev, "FW LLDP agent is already running\n");
+ atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
+ }
+
+ return (0);
+}
+
+/*
+ * Get FW LLDP Agent status
+ */
+int
+ixl_get_fw_lldp_status(struct ixl_pf *pf)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_variables lldp_cfg;
+ struct i40e_hw *hw = &pf->hw;
+ u8 adminstatus = 0;
+
+ ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ if (ret)
+ return (ret);
+
+ /* Get the LLDP AdminStatus for the current port */
+ adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
+ adminstatus &= 0xf;
+
+ /* Check if LLDP agent is disabled */
+ if (!adminstatus) {
+ device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
+ atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
+ } else
+ atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
+
+ return (0);
+}
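/*
 * Worked example for the decoding above: the NVM packs one 4-bit
 * AdminStatus per port into lldp_cfg.adminstatus. For a hypothetical
 * value of 0x2220, ports 1-3 decode to nonzero (agent enabled) while
 * port 0 decodes to zero (agent disabled):
 *
 *	adminstatus = 0x2220;
 *	(adminstatus >> (0 * 4)) & 0xf;	// 0x0 -> disabled on port 0
 *	(adminstatus >> (2 * 4)) & 0xf;	// 0x2 -> enabled on port 2
 */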
+
+int
+ixl_attach_get_link_status(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int error = 0;
+
+ if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4)) {
+ i40e_msec_delay(75);
+ error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
+ if (error) {
+ device_printf(dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ return (error);
+ }
+ }
+
+ /* Determine link state */
+ hw->phy.get_link_info = TRUE;
+ i40e_get_link_status(hw, &pf->link_up);
+ return (0);
+}
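/*
 * The version gate above reads as "firmware older than 4.33": those
 * revisions need an explicit restart of link autonegotiation (after a
 * short settle delay) before the link status can be trusted. A hedged
 * restatement of the condition:
 *
 *	bool old_fw = (hw->aq.fw_maj_ver < 4) ||
 *	    (hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver < 33);
 */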
diff --git a/sys/dev/ixl/ixl_pf_qmgr.c b/sys/dev/ixl/ixl_pf_qmgr.c
index f2842e584dc4..fade11cae918 100644
--- a/sys/dev/ixl/ixl_pf_qmgr.c
+++ b/sys/dev/ixl/ixl_pf_qmgr.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ixl/ixl_pf_qmgr.h b/sys/dev/ixl/ixl_pf_qmgr.h
index d6ad431bd605..0ecda0791931 100644
--- a/sys/dev/ixl/ixl_pf_qmgr.h
+++ b/sys/dev/ixl/ixl_pf_qmgr.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ixl/ixl_txrx.c b/sys/dev/ixl/ixl_txrx.c
index cda3c528b947..57c9387d99ee 100644
--- a/sys/dev/ixl/ixl_txrx.c
+++ b/sys/dev/ixl/ixl_txrx.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -57,6 +57,7 @@ static int ixl_xmit(struct ixl_queue *, struct mbuf **);
static int ixl_tx_setup_offload(struct ixl_queue *,
struct mbuf *, u32 *, u32 *);
static bool ixl_tso_setup(struct ixl_queue *, struct mbuf *);
+static void ixl_queue_sw_irq(struct ixl_vsi *, int);
static inline void ixl_rx_discard(struct rx_ring *, int);
static inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
@@ -67,7 +68,9 @@ static inline u32 ixl_get_tx_head(struct ixl_queue *que);
#ifdef DEV_NETMAP
#include <dev/netmap/if_ixl_netmap.h>
+#if __FreeBSD_version >= 1200000
int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip = 1;
+#endif
#endif /* DEV_NETMAP */
/*
@@ -87,6 +90,62 @@ ixl_get_default_rss_key(u32 *key)
bcopy(rss_seed, key, IXL_RSS_KEY_SIZE);
}
+/**
+ * i40e_vc_stat_str - convert virtchnl status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+const char *
+i40e_vc_stat_str(struct i40e_hw *hw, enum virtchnl_status_code stat_err)
+{
+ switch (stat_err) {
+ case VIRTCHNL_STATUS_SUCCESS:
+ return "OK";
+ case VIRTCHNL_ERR_PARAM:
+ return "VIRTCHNL_ERR_PARAM";
+ case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
+ return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH";
+ case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
+ return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR";
+ case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
+ return "VIRTCHNL_STATUS_ERR_INVALID_VF_ID";
+ case VIRTCHNL_STATUS_NOT_SUPPORTED:
+ return "VIRTCHNL_STATUS_NOT_SUPPORTED";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
+/*
+ * PCI BUSMASTER needs to be set for proper operation.
+ */
+void
+ixl_set_busmaster(device_t dev)
+{
+ u16 pci_cmd_word;
+
+ pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+ pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
+ pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
+}
+
+/*
+ * Rewrite the ENABLE bit in the MSIX control register
+ */
+void
+ixl_set_msix_enable(device_t dev)
+{
+ int msix_ctrl, rid;
+
+ pci_find_cap(dev, PCIY_MSIX, &rid);
+ rid += PCIR_MSIX_CTRL;
+ msix_ctrl = pci_read_config(dev, rid, 2);
+ msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
+ pci_write_config(dev, rid, msix_ctrl, 2);
+}
+
+
/*
** Multiqueue Transmit driver
*/
@@ -102,13 +161,13 @@ ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
#endif
/*
- ** Which queue to use:
- **
- ** When doing RSS, map it to the same outbound
- ** queue as the incoming flow would be mapped to.
- ** If everything is setup correctly, it should be
- ** the same bucket that the current CPU we're on is.
- */
+ * Which queue to use:
+ *
+ * When doing RSS, map it to the same outbound
+ * queue as the incoming flow would be mapped to.
+ * If everything is setup correctly, it should be
+ * the same bucket that the current CPU we're on is.
+ */
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
#ifdef RSS
if (rss_hash2bucket(m->m_pkthdr.flowid,
@@ -207,11 +266,6 @@ ixl_qflush(struct ifnet *ifp)
if_qflush(ifp);
}
-/*
-** Find mbuf chains passed to the driver
-** that are 'sparse', using more than 8
-** mbufs to deliver an mss-size chunk of data
-*/
static inline bool
ixl_tso_detect_sparse(struct mbuf *mp)
{
@@ -228,9 +282,9 @@ ixl_tso_detect_sparse(struct mbuf *mp)
num++;
mss -= m->m_len % mp->m_pkthdr.tso_segsz;
+ if (num > IXL_SPARSE_CHAIN)
+ return (true);
if (mss < 1) {
- if (num > IXL_SPARSE_CHAIN)
- return (true);
num = (mss == 0) ? 0 : 1;
mss += mp->m_pkthdr.tso_segsz;
}
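/*
 * Illustration of the reordered test above: the chain is now declared
 * sparse as soon as more than IXL_SPARSE_CHAIN mbufs are consumed
 * without finishing an mss-sized chunk, rather than only at chunk
 * boundaries. E.g. (made-up numbers):
 *
 *	tso_segsz = 1448, chain of 100-byte mbufs
 *	=> num exceeds IXL_SPARSE_CHAIN mid-chunk, the function returns
 *	   true, and the caller falls back to defragmenting the chain
 *	   (m_defrag()).
 */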
@@ -369,7 +423,7 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
last = i; /* descriptor that will get completion IRQ */
- if (++i == que->num_desc)
+ if (++i == que->num_tx_desc)
i = 0;
buf->m_head = NULL;
@@ -382,7 +436,10 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
txr->next_avail = i;
buf->m_head = m_head;
- /* Swap the dma map between the first and last descriptor */
+ /* Swap the dma map between the first and last descriptor.
+ * The descriptor that gets checked on completion will now
+ * have the real map from the first descriptor.
+ */
txr->buffers[first].map = buf->map;
buf->map = map;
bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
@@ -424,7 +481,7 @@ ixl_allocate_tx_data(struct ixl_queue *que)
struct ixl_vsi *vsi = que->vsi;
device_t dev = vsi->dev;
struct ixl_tx_buf *buf;
- int error = 0;
+ int i, error = 0;
/*
* Setup DMA descriptor areas.
@@ -436,13 +493,13 @@ ixl_allocate_tx_data(struct ixl_queue *que)
NULL, NULL, /* filter, filterarg */
IXL_TSO_SIZE, /* maxsize */
IXL_MAX_TX_SEGS, /* nsegments */
- PAGE_SIZE, /* maxsegsize */
+ IXL_MAX_DMA_SEG_SIZE, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&txr->tx_tag))) {
device_printf(dev,"Unable to allocate TX DMA tag\n");
- goto fail;
+ return (error);
}
/* Make a special tag for TSO */
@@ -453,34 +510,51 @@ ixl_allocate_tx_data(struct ixl_queue *que)
NULL, NULL, /* filter, filterarg */
IXL_TSO_SIZE, /* maxsize */
IXL_MAX_TSO_SEGS, /* nsegments */
- PAGE_SIZE, /* maxsegsize */
+ IXL_MAX_DMA_SEG_SIZE, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&txr->tso_tag))) {
device_printf(dev,"Unable to allocate TX TSO DMA tag\n");
- goto fail;
+ goto free_tx_dma;
}
if (!(txr->buffers =
(struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) *
- que->num_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ que->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
error = ENOMEM;
- goto fail;
+ goto free_tx_tso_dma;
}
/* Create the descriptor buffer default dma maps */
buf = txr->buffers;
- for (int i = 0; i < que->num_desc; i++, buf++) {
+ for (i = 0; i < que->num_tx_desc; i++, buf++) {
buf->tag = txr->tx_tag;
error = bus_dmamap_create(buf->tag, 0, &buf->map);
if (error != 0) {
device_printf(dev, "Unable to create TX DMA map\n");
- goto fail;
+ goto free_buffers;
}
}
-fail:
+
+ return 0;
+
+free_buffers:
+ while (i--) {
+ buf--;
+ bus_dmamap_destroy(buf->tag, buf->map);
+ }
+
+ free(txr->buffers, M_DEVBUF);
+ txr->buffers = NULL;
+free_tx_tso_dma:
+ bus_dma_tag_destroy(txr->tso_tag);
+ txr->tso_tag = NULL;
+free_tx_dma:
+ bus_dma_tag_destroy(txr->tx_tag);
+ txr->tx_tag = NULL;
+
return (error);
}
@@ -514,7 +588,7 @@ ixl_init_tx_ring(struct ixl_queue *que)
#endif /* DEV_NETMAP */
bzero((void *)txr->base,
- (sizeof(struct i40e_tx_desc)) * que->num_desc);
+ (sizeof(struct i40e_tx_desc)) * que->num_tx_desc);
/* Reset indices */
txr->next_avail = 0;
@@ -523,14 +597,9 @@ ixl_init_tx_ring(struct ixl_queue *que)
/* Reset watchdog status */
txr->watchdog_timer = 0;
-#ifdef IXL_FDIR
- /* Initialize flow director */
- txr->atr_rate = ixl_atr_rate;
- txr->atr_count = 0;
-#endif
/* Free any existing tx mbufs. */
buf = txr->buffers;
- for (int i = 0; i < que->num_desc; i++, buf++) {
+ for (int i = 0; i < que->num_tx_desc; i++, buf++) {
if (buf->m_head != NULL) {
bus_dmamap_sync(buf->tag, buf->map,
BUS_DMASYNC_POSTWRITE);
@@ -556,7 +625,7 @@ ixl_init_tx_ring(struct ixl_queue *que)
}
/* Set number of descriptors available */
- txr->avail = que->num_desc;
+ txr->avail = que->num_tx_desc;
bus_dmamap_sync(txr->dma.tag, txr->dma.map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -577,30 +646,17 @@ ixl_free_que_tx(struct ixl_queue *que)
INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
- for (int i = 0; i < que->num_desc; i++) {
+ for (int i = 0; i < que->num_tx_desc; i++) {
buf = &txr->buffers[i];
if (buf->m_head != NULL) {
bus_dmamap_sync(buf->tag, buf->map,
BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(buf->tag,
- buf->map);
m_freem(buf->m_head);
buf->m_head = NULL;
- if (buf->map != NULL) {
- bus_dmamap_destroy(buf->tag,
- buf->map);
- buf->map = NULL;
}
- } else if (buf->map != NULL) {
- bus_dmamap_unload(buf->tag,
- buf->map);
- bus_dmamap_destroy(buf->tag,
- buf->map);
- buf->map = NULL;
- }
+ bus_dmamap_unload(buf->tag, buf->map);
+ bus_dmamap_destroy(buf->tag, buf->map);
}
- if (txr->br != NULL)
- buf_ring_free(txr->br, M_DEVBUF);
if (txr->buffers != NULL) {
free(txr->buffers, M_DEVBUF);
txr->buffers = NULL;
@@ -702,9 +758,6 @@ ixl_tx_setup_offload(struct ixl_queue *que,
*off |= (tcp_hlen >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
}
-#ifdef IXL_FDIR
- ixl_atr(que, th, etype);
-#endif
break;
case IPPROTO_UDP:
if (mp->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) {
@@ -713,7 +766,6 @@ ixl_tx_setup_offload(struct ixl_queue *que,
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
}
break;
-
case IPPROTO_SCTP:
if (mp->m_pkthdr.csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) {
*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
@@ -833,7 +885,7 @@ ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
buf->m_head = NULL;
buf->eop_index = -1;
- if (++idx == que->num_desc)
+ if (++idx == que->num_tx_desc)
idx = 0;
txr->avail--;
@@ -842,34 +894,32 @@ ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
return TRUE;
}
-/*
-** ixl_get_tx_head - Retrieve the value from the
-** location the HW records its HEAD index
-*/
+/*
+ * ixl_get_tx_head - Retrieve the value from the
+ * location the HW records its HEAD index
+ */
static inline u32
ixl_get_tx_head(struct ixl_queue *que)
{
struct tx_ring *txr = &que->txr;
- void *head = &txr->base[que->num_desc];
+ void *head = &txr->base[que->num_tx_desc];
return LE32_TO_CPU(*(volatile __le32 *)head);
}
/**********************************************************************
*
- * Examine each tx_buffer in the used queue. If the hardware is done
- * processing the packet then free associated resources. The
- * tx_buffer is put back on the free queue.
+ * Get index of last used descriptor/buffer from hardware, and clean
+ * the descriptors/buffers up to that index.
*
**********************************************************************/
-bool
-ixl_txeof(struct ixl_queue *que)
+static bool
+ixl_txeof_hwb(struct ixl_queue *que)
{
struct tx_ring *txr = &que->txr;
- u32 first, last, head, done, processed;
+ u32 first, last, head, done;
struct ixl_tx_buf *buf;
struct i40e_tx_desc *tx_desc, *eop_desc;
-
mtx_assert(&txr->mtx, MA_OWNED);
#ifdef DEV_NETMAP
@@ -879,12 +929,11 @@ ixl_txeof(struct ixl_queue *que)
#endif /* DEF_NETMAP */
/* These are not the descriptors you seek, move along :) */
- if (txr->avail == que->num_desc) {
+ if (txr->avail == que->num_tx_desc) {
atomic_store_rel_32(&txr->watchdog_timer, 0);
return FALSE;
}
- processed = 0;
first = txr->next_to_clean;
buf = &txr->buffers[first];
tx_desc = (struct i40e_tx_desc *)&txr->base[first];
@@ -893,6 +942,10 @@ ixl_txeof(struct ixl_queue *que)
return FALSE;
eop_desc = (struct i40e_tx_desc *)&txr->base[last];
+ /* Sync DMA before reading head index from ring */
+ bus_dmamap_sync(txr->dma.tag, txr->dma.map,
+ BUS_DMASYNC_POSTREAD);
+
/* Get the Head WB value */
head = ixl_get_tx_head(que);
@@ -902,11 +955,9 @@ ixl_txeof(struct ixl_queue *que)
** I do this so the comparison in the
** inner while loop below can be simple
*/
- if (++last == que->num_desc) last = 0;
+ if (++last == que->num_tx_desc) last = 0;
done = last;
- bus_dmamap_sync(txr->dma.tag, txr->dma.map,
- BUS_DMASYNC_POSTREAD);
/*
** The HEAD index of the ring is written in a
** defined location, this rather than a done bit
@@ -917,7 +968,6 @@ ixl_txeof(struct ixl_queue *que)
/* We clean the range of the packet */
while (first != done) {
++txr->avail;
- ++processed;
if (buf->m_head) {
txr->bytes += /* for ITR adjustment */
@@ -934,19 +984,21 @@ ixl_txeof(struct ixl_queue *que)
}
buf->eop_index = -1;
- if (++first == que->num_desc)
+ if (++first == que->num_tx_desc)
first = 0;
buf = &txr->buffers[first];
tx_desc = &txr->base[first];
}
++txr->packets;
+ /* If a packet was successfully cleaned, reset the watchdog timer */
+ atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG);
/* See if there is more work now */
last = buf->eop_index;
if (last != -1) {
eop_desc = &txr->base[last];
/* Get next done point */
- if (++last == que->num_desc) last = 0;
+ if (++last == que->num_tx_desc) last = 0;
done = last;
} else
break;
@@ -956,11 +1008,10 @@ ixl_txeof(struct ixl_queue *que)
txr->next_to_clean = first;
-
/*
* If there are no pending descriptors, clear the timeout.
*/
- if (txr->avail == que->num_desc) {
+ if (txr->avail == que->num_tx_desc) {
atomic_store_rel_32(&txr->watchdog_timer, 0);
return FALSE;
}
@@ -968,6 +1019,142 @@ ixl_txeof(struct ixl_queue *que)
return TRUE;
}
+/**********************************************************************
+ *
+ * Use index kept by driver and the flag on each descriptor to find used
+ * descriptor/buffers and clean them up for re-use.
+ *
+ * This method of reclaiming descriptors is currently incompatible with
+ * DEV_NETMAP.
+ *
+ * Returns TRUE if there are more descriptors to be cleaned after this
+ * function exits.
+ *
+ **********************************************************************/
+static bool
+ixl_txeof_dwb(struct ixl_queue *que)
+{
+ struct tx_ring *txr = &que->txr;
+ u32 first, last, done;
+ u32 limit = 256;
+ struct ixl_tx_buf *buf;
+ struct i40e_tx_desc *tx_desc, *eop_desc;
+
+ mtx_assert(&txr->mtx, MA_OWNED);
+
+ /* There are no descriptors to clean */
+ if (txr->avail == que->num_tx_desc) {
+ atomic_store_rel_32(&txr->watchdog_timer, 0);
+ return FALSE;
+ }
+
+ /* Set starting index/descriptor/buffer */
+ first = txr->next_to_clean;
+ buf = &txr->buffers[first];
+ tx_desc = &txr->base[first];
+
+ /*
+ * This function operates per-packet: it identifies the first buffer
+ * of a packet and reads the index of the packet's last descriptor
+ * from that buffer's eop_index.
+ *
+ * If the last descriptor is marked "done" by the hardware, then all
+ * of the descriptors for the packet are cleaned.
+ */
+ last = buf->eop_index;
+ if (last == -1)
+ return FALSE;
+ eop_desc = &txr->base[last];
+
+ /* Sync DMA before reading from ring */
+ bus_dmamap_sync(txr->dma.tag, txr->dma.map, BUS_DMASYNC_POSTREAD);
+
+ /*
+ * Get the index of the first descriptor beyond the EOP and call that
+ * 'done'. Simplifies the comparison for the inner loop below.
+ */
+ if (++last == que->num_tx_desc)
+ last = 0;
+ done = last;
+
+ /*
+ * We find the last completed descriptor by examining each
+ * descriptor's status bits to see if it's done.
+ */
+ do {
+ /* Break if last descriptor in packet isn't marked done */
+ if ((eop_desc->cmd_type_offset_bsz & I40E_TXD_QW1_DTYPE_MASK)
+ != I40E_TX_DESC_DTYPE_DESC_DONE)
+ break;
+
+ /* Clean the descriptors that make up the processed packet */
+ while (first != done) {
+ /*
+ * If there was a buffer attached to this descriptor,
+ * prevent the adapter from accessing it, and add its
+ * length to the queue's TX stats.
+ */
+ if (buf->m_head) {
+ txr->bytes += buf->m_head->m_pkthdr.len;
+ txr->tx_bytes += buf->m_head->m_pkthdr.len;
+ bus_dmamap_sync(buf->tag, buf->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(buf->tag, buf->map);
+ m_freem(buf->m_head);
+ buf->m_head = NULL;
+ }
+ buf->eop_index = -1;
+ ++txr->avail;
+
+ if (++first == que->num_tx_desc)
+ first = 0;
+ buf = &txr->buffers[first];
+ tx_desc = &txr->base[first];
+ }
+ ++txr->packets;
+ /* If a packet was successfully cleaned, reset the watchdog timer */
+ atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG);
+
+ /*
+ * Since buf is the first buffer after the one that was just
+ * cleaned, check if the packet it starts is done, too.
+ */
+ last = buf->eop_index;
+ if (last != -1) {
+ eop_desc = &txr->base[last];
+ /* Get next done point */
+ if (++last == que->num_tx_desc) last = 0;
+ done = last;
+ } else
+ break;
+ } while (--limit);
+
+ bus_dmamap_sync(txr->dma.tag, txr->dma.map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ txr->next_to_clean = first;
+
+ /*
+ * If there are no pending descriptors, clear the watchdog timer.
+ */
+ if (txr->avail == que->num_tx_desc) {
+ atomic_store_rel_32(&txr->watchdog_timer, 0);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+bool
+ixl_txeof(struct ixl_queue *que)
+{
+ struct ixl_vsi *vsi = que->vsi;
+
+ return (vsi->enable_head_writeback) ? ixl_txeof_hwb(que)
+ : ixl_txeof_dwb(que);
+}
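/*
 * A condensed sketch of the two completion tests dispatched above,
 * using the same ring layout as the code. Head writeback (hwb): the
 * hardware DMAs its HEAD index into the slot just past the ring, so
 * completion is one read:
 *
 *	head = LE32_TO_CPU(*(volatile __le32 *)&txr->base[que->num_tx_desc]);
 *
 * Descriptor writeback (dwb): the hardware rewrites the DTYPE field of
 * each finished descriptor, so completion is a per-EOP-descriptor test:
 *
 *	done = (eop_desc->cmd_type_offset_bsz & I40E_TXD_QW1_DTYPE_MASK)
 *	    == I40E_TX_DESC_DTYPE_DESC_DONE;
 */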
+
+
/*********************************************************************
*
* Refresh mbuf buffers for RX descriptor rings
@@ -991,7 +1178,7 @@ ixl_refresh_mbufs(struct ixl_queue *que, int limit)
i = j = rxr->next_refresh;
/* Control the loop with one beyond */
- if (++j == que->num_desc)
+ if (++j == que->num_rx_desc)
j = 0;
while (j != limit) {
@@ -1057,7 +1244,7 @@ no_split:
/* Next is precalculated */
i = j;
rxr->next_refresh = i;
- if (++j == que->num_desc)
+ if (++j == que->num_rx_desc)
j = 0;
}
update:
@@ -1084,15 +1271,6 @@ ixl_allocate_rx_data(struct ixl_queue *que)
struct ixl_rx_buf *buf;
int i, bsize, error;
- bsize = sizeof(struct ixl_rx_buf) * que->num_desc;
- if (!(rxr->buffers =
- (struct ixl_rx_buf *) malloc(bsize,
- M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate rx_buffer memory\n");
- error = ENOMEM;
- return (error);
- }
-
if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
@@ -1122,25 +1300,50 @@ ixl_allocate_rx_data(struct ixl_queue *que)
NULL, /* lockfuncarg */
&rxr->ptag))) {
device_printf(dev, "Unable to create RX DMA ptag\n");
- return (error);
+ goto free_rx_htag;
}
- for (i = 0; i < que->num_desc; i++) {
+ bsize = sizeof(struct ixl_rx_buf) * que->num_rx_desc;
+ if (!(rxr->buffers =
+ (struct ixl_rx_buf *) malloc(bsize,
+ M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate rx_buffer memory\n");
+ error = ENOMEM;
+ goto free_rx_ptag;
+ }
+
+ for (i = 0; i < que->num_rx_desc; i++) {
buf = &rxr->buffers[i];
error = bus_dmamap_create(rxr->htag,
BUS_DMA_NOWAIT, &buf->hmap);
if (error) {
device_printf(dev, "Unable to create RX head map\n");
- break;
+ goto free_buffers;
}
error = bus_dmamap_create(rxr->ptag,
BUS_DMA_NOWAIT, &buf->pmap);
if (error) {
+ bus_dmamap_destroy(rxr->htag, buf->hmap);
device_printf(dev, "Unable to create RX pkt map\n");
- break;
+ goto free_buffers;
}
}
+ return (0);
+free_buffers:
+ while (i--) {
+ buf = &rxr->buffers[i];
+ bus_dmamap_destroy(rxr->ptag, buf->pmap);
+ bus_dmamap_destroy(rxr->htag, buf->hmap);
+ }
+ free(rxr->buffers, M_DEVBUF);
+ rxr->buffers = NULL;
+free_rx_ptag:
+ bus_dma_tag_destroy(rxr->ptag);
+ rxr->ptag = NULL;
+free_rx_htag:
+ bus_dma_tag_destroy(rxr->htag);
+ rxr->htag = NULL;
return (error);
}
@@ -1173,11 +1376,11 @@ ixl_init_rx_ring(struct ixl_queue *que)
slot = netmap_reset(na, NR_RX, que->me, 0);
#endif /* DEV_NETMAP */
/* Clear the ring contents */
- rsize = roundup2(que->num_desc *
+ rsize = roundup2(que->num_rx_desc *
sizeof(union i40e_rx_desc), DBA_ALIGN);
bzero((void *)rxr->base, rsize);
/* Cleanup any existing buffers */
- for (int i = 0; i < que->num_desc; i++) {
+ for (int i = 0; i < que->num_rx_desc; i++) {
buf = &rxr->buffers[i];
if (buf->m_head != NULL) {
bus_dmamap_sync(rxr->htag, buf->hmap,
@@ -1201,7 +1404,7 @@ ixl_init_rx_ring(struct ixl_queue *que)
rxr->hdr_split = FALSE;
/* Now replenish the mbufs */
- for (int j = 0; j != que->num_desc; ++j) {
+ for (int j = 0; j != que->num_rx_desc; ++j) {
struct mbuf *mh, *mp;
buf = &rxr->buffers[j];
@@ -1286,7 +1489,7 @@ skip_head:
rxr->bytes = 0;
rxr->discard = FALSE;
- wr32(vsi->hw, rxr->tail, que->num_desc - 1);
+ wr32(vsi->hw, rxr->tail, que->num_rx_desc - 1);
ixl_flush(vsi->hw);
#if defined(INET6) || defined(INET)
@@ -1325,41 +1528,19 @@ ixl_free_que_rx(struct ixl_queue *que)
struct rx_ring *rxr = &que->rxr;
struct ixl_rx_buf *buf;
- INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
-
/* Cleanup any existing buffers */
if (rxr->buffers != NULL) {
- for (int i = 0; i < que->num_desc; i++) {
+ for (int i = 0; i < que->num_rx_desc; i++) {
buf = &rxr->buffers[i];
- if (buf->m_head != NULL) {
- bus_dmamap_sync(rxr->htag, buf->hmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->htag, buf->hmap);
- buf->m_head->m_flags |= M_PKTHDR;
- m_freem(buf->m_head);
- }
- if (buf->m_pack != NULL) {
- bus_dmamap_sync(rxr->ptag, buf->pmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->ptag, buf->pmap);
- buf->m_pack->m_flags |= M_PKTHDR;
- m_freem(buf->m_pack);
- }
- buf->m_head = NULL;
- buf->m_pack = NULL;
- if (buf->hmap != NULL) {
- bus_dmamap_destroy(rxr->htag, buf->hmap);
- buf->hmap = NULL;
- }
- if (buf->pmap != NULL) {
- bus_dmamap_destroy(rxr->ptag, buf->pmap);
- buf->pmap = NULL;
- }
- }
- if (rxr->buffers != NULL) {
- free(rxr->buffers, M_DEVBUF);
- rxr->buffers = NULL;
+
+ /* Free buffers and unload dma maps */
+ ixl_rx_discard(rxr, i);
+
+ bus_dmamap_destroy(rxr->htag, buf->hmap);
+ bus_dmamap_destroy(rxr->ptag, buf->pmap);
}
+ free(rxr->buffers, M_DEVBUF);
+ rxr->buffers = NULL;
}
if (rxr->htag != NULL) {
@@ -1370,9 +1551,6 @@ ixl_free_que_rx(struct ixl_queue *que)
bus_dma_tag_destroy(rxr->ptag);
rxr->ptag = NULL;
}
-
- INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
- return;
}
static inline void
@@ -1409,32 +1587,35 @@ ixl_rx_discard(struct rx_ring *rxr, int i)
{
struct ixl_rx_buf *rbuf;
+ KASSERT(rxr != NULL, ("Receive ring pointer cannot be null"));
+ KASSERT(i < rxr->que->num_rx_desc, ("Descriptor index must be less than que->num_rx_desc"));
+
rbuf = &rxr->buffers[i];
- if (rbuf->fmp != NULL) {/* Partial chain ? */
- rbuf->fmp->m_flags |= M_PKTHDR;
+ /* Free the mbufs in the current chain for the packet */
+ if (rbuf->fmp != NULL) {
+ bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD);
m_freem(rbuf->fmp);
rbuf->fmp = NULL;
}
/*
- ** With advanced descriptors the writeback
- ** clobbers the buffer addrs, so its easier
- ** to just free the existing mbufs and take
- ** the normal refresh path to get new buffers
- ** and mapping.
- */
+ * Free the mbufs for the current descriptor and let ixl_refresh_mbufs()
+ * assign new mbufs to these.
+ */
if (rbuf->m_head) {
+ bus_dmamap_sync(rxr->htag, rbuf->hmap, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->htag, rbuf->hmap);
m_free(rbuf->m_head);
rbuf->m_head = NULL;
}
if (rbuf->m_pack) {
+ bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->ptag, rbuf->pmap);
m_free(rbuf->m_pack);
rbuf->m_pack = NULL;
}
-
- return;
}
#ifdef RSS
@@ -1505,7 +1686,6 @@ ixl_rxeof(struct ixl_queue *que, int count)
union i40e_rx_desc *cur;
struct ixl_rx_buf *rbuf, *nbuf;
-
IXL_RX_LOCK(rxr);
#ifdef DEV_NETMAP
@@ -1586,7 +1766,7 @@ ixl_rxeof(struct ixl_queue *que, int count)
/* Prefetch the next buffer */
if (!eop) {
nextp = i + 1;
- if (nextp == que->num_desc)
+ if (nextp == que->num_rx_desc)
nextp = 0;
nbuf = &rxr->buffers[nextp];
prefetch(nbuf);
@@ -1708,7 +1888,7 @@ next_desc:
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Advance our pointers to the next descriptor. */
- if (++i == que->num_desc)
+ if (++i == que->num_rx_desc)
i = 0;
/* Now send to the stack or do LRO */
@@ -1717,10 +1897,14 @@ next_desc:
IXL_RX_UNLOCK(rxr);
ixl_rx_input(rxr, ifp, sendmp, ptype);
IXL_RX_LOCK(rxr);
+ /*
+ * Update the index used in the loop in case another
+ * ixl_rxeof() call ran while the lock was released
+ */
i = rxr->next_check;
}
- /* Every 8 descriptors we go to refresh mbufs */
+ /* Every 8 descriptors we go to refresh mbufs */
if (processed == 8) {
ixl_refresh_mbufs(que, i);
processed = 0;
@@ -1837,3 +2021,119 @@ ixl_get_counter(if_t ifp, ift_counter cnt)
}
#endif
+/*
+ * Validate requested TX and RX ring sizes; fall back to the default for out-of-range values
+ */
+void
+ixl_vsi_setup_rings_size(struct ixl_vsi * vsi, int tx_ring_size, int rx_ring_size)
+{
+ struct device * dev = vsi->dev;
+
+ if (tx_ring_size < IXL_MIN_RING
+ || tx_ring_size > IXL_MAX_RING
+ || tx_ring_size % IXL_RING_INCREMENT != 0) {
+ device_printf(dev, "Invalid tx_ring_size value of %d set!\n",
+ tx_ring_size);
+ device_printf(dev, "tx_ring_size must be between %d and %d, "
+ "inclusive, and must be a multiple of %d\n",
+ IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT);
+ device_printf(dev, "Using default value of %d instead\n",
+ IXL_DEFAULT_RING);
+ vsi->num_tx_desc = IXL_DEFAULT_RING;
+ } else
+ vsi->num_tx_desc = tx_ring_size;
+
+ if (rx_ring_size < IXL_MIN_RING
+ || rx_ring_size > IXL_MAX_RING
+ || rx_ring_size % IXL_RING_INCREMENT != 0) {
+ device_printf(dev, "Invalid rx_ring_size value of %d set!\n",
+ rx_ring_size);
+ device_printf(dev, "rx_ring_size must be between %d and %d, "
+ "inclusive, and must be a multiple of %d\n",
+ IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT);
+ device_printf(dev, "Using default value of %d instead\n",
+ IXL_DEFAULT_RING);
+ vsi->num_rx_desc = IXL_DEFAULT_RING;
+ } else
+ vsi->num_rx_desc = rx_ring_size;
+
+ device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
+ vsi->num_tx_desc, vsi->num_rx_desc);
+
+}
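/*
 * Example of the fallback above, assuming the stock bounds from ixl.h
 * (IXL_MIN_RING 32, IXL_MAX_RING 4096, IXL_RING_INCREMENT 32,
 * IXL_DEFAULT_RING 1024 -- check the header for the authoritative
 * values):
 *
 *	ixl_vsi_setup_rings_size(vsi, 2048, 1000);
 *
 * accepts 2048 TX descriptors, but rejects 1000 for RX because it is
 * not a multiple of 32, leaving the RX ring at the 1024 default.
 */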
+
+static void
+ixl_queue_sw_irq(struct ixl_vsi *vsi, int qidx)
+{
+ struct i40e_hw *hw = vsi->hw;
+ u32 reg, mask;
+
+ if ((vsi->flags & IXL_FLAGS_IS_VF) != 0) {
+ mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
+
+ reg = I40E_VFINT_DYN_CTLN1(qidx);
+ } else {
+ mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
+
+ reg = ((vsi->flags & IXL_FLAGS_USES_MSIX) != 0) ?
+ I40E_PFINT_DYN_CTLN(qidx) : I40E_PFINT_DYN_CTL0;
+ }
+
+ wr32(hw, reg, mask);
+}
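/*
 * The single register write above fires a software interrupt on the
 * queue's vector: INTENA keeps the interrupt enabled, SWINT_TRIG
 * latches the SW interrupt, and a fully-set ITR_INDX selects "no ITR"
 * so the trigger is not rate-limited. Sketch of the PF/MSI-X case:
 *
 *	wr32(hw, I40E_PFINT_DYN_CTLN(qidx),
 *	    I40E_PFINT_DYN_CTLN_INTENA_MASK |
 *	    I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
 *	    I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
 */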
+
+int
+ixl_queue_hang_check(struct ixl_vsi *vsi)
+{
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = vsi->dev;
+ struct tx_ring *txr;
+ s32 timer, new_timer;
+ int hung = 0;
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ txr = &que->txr;
+ /*
+ * If watchdog_timer is equal to the default value set by ixl_txeof,
+ * just subtract hz and move on - the queue is most likely
+ * running. Otherwise check the value.
+ */
+ if (atomic_cmpset_rel_32(&txr->watchdog_timer,
+ IXL_WATCHDOG, (IXL_WATCHDOG) - hz) == 0) {
+ timer = atomic_load_acq_32(&txr->watchdog_timer);
+ /*
+ * Again - if the timer was reset to the default value,
+ * the queue is running. Otherwise check if the watchdog
+ * expired and act accordingly.
+ */
+
+ if (timer > 0 && timer != IXL_WATCHDOG) {
+ new_timer = timer - hz;
+ if (new_timer <= 0) {
+ atomic_store_rel_32(&txr->watchdog_timer, -1);
+ device_printf(dev, "WARNING: queue %d "
+ "appears to be hung!\n", que->me);
+ ++hung;
+ /* Try to unblock the queue with SW IRQ */
+ ixl_queue_sw_irq(vsi, i);
+ } else {
+ /*
+ * If this fails, something in the TX path has updated
+ * the watchdog since we read it, which means the TX path
+ * is still making progress and the watchdog doesn't
+ * need to count down.
+ */
+ atomic_cmpset_rel_32(&txr->watchdog_timer,
+ timer, new_timer);
+ }
+ }
+ }
+ }
+
+ return (hung);
+}
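/*
 * A compact model of the watchdog_timer protocol shared by ixl_txeof()
 * and the check above (a sketch of the contract, not new code):
 *
 *	0			ring empty, watchdog idle
 *	IXL_WATCHDOG		re-armed; a packet was cleaned recently
 *	(0, IXL_WATCHDOG)	counting down by hz per local-timer tick
 *	-1			expired; queue reported hung, SW IRQ fired
 *
 * Both atomic_cmpset calls only move the timer downward when the TX
 * path has not re-armed it in the meantime; a lost race is taken as
 * proof of forward progress and the countdown is skipped.
 */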
+
diff --git a/sys/dev/ixl/ixlv.h b/sys/dev/ixl/ixlv.h
index aa5fe5f2ecf6..741d677219f6 100644
--- a/sys/dev/ixl/ixlv.h
+++ b/sys/dev/ixl/ixlv.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -136,8 +136,8 @@ struct ixlv_sc {
int pf_version;
int if_flags;
- bool link_up;
- u32 link_speed;
+ bool link_up;
+ enum virtchnl_link_speed link_speed;
struct mtx mtx;
@@ -176,8 +176,8 @@ struct ixlv_sc {
struct ixl_vc_cmd config_rss_lut_cmd;
/* Virtual comm channel */
- struct i40e_virtchnl_vf_resource *vf_res;
- struct i40e_virtchnl_vsi_resource *vsi_res;
+ struct virtchnl_vf_resource *vf_res;
+ struct virtchnl_vsi_resource *vsi_res;
/* Misc stats maintained by the driver */
u64 watchdog_events;
@@ -222,7 +222,8 @@ void ixlv_del_ether_filters(struct ixlv_sc *);
void ixlv_request_stats(struct ixlv_sc *);
void ixlv_request_reset(struct ixlv_sc *);
void ixlv_vc_completion(struct ixlv_sc *,
- enum i40e_virtchnl_ops, i40e_status, u8 *, u16);
+ enum virtchnl_ops, enum virtchnl_status_code,
+ u8 *, u16);
void ixlv_add_ether_filter(struct ixlv_sc *);
void ixlv_add_vlans(struct ixlv_sc *);
void ixlv_del_vlans(struct ixlv_sc *);
diff --git a/sys/dev/ixl/ixlv_vc_mgr.h b/sys/dev/ixl/ixlv_vc_mgr.h
index 8c17de54c5ee..5cfc082aa9e3 100644
--- a/sys/dev/ixl/ixlv_vc_mgr.h
+++ b/sys/dev/ixl/ixlv_vc_mgr.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ixl/ixlvc.c b/sys/dev/ixl/ixlvc.c
index 9c6b869a13ab..3e1ef91c7fde 100644
--- a/sys/dev/ixl/ixlvc.c
+++ b/sys/dev/ixl/ixlvc.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2015, Intel Corporation
+ Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,7 @@
#define IXLV_BUSY_WAIT_COUNT 50
static void ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t,
- enum i40e_status_code);
+ enum virtchnl_status_code);
static void ixl_vc_process_next(struct ixl_vc_mgr *mgr);
static void ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr);
static void ixl_vc_send_current(struct ixl_vc_mgr *mgr);
@@ -65,81 +65,81 @@ static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
/* Validate message length. */
switch (v_opcode) {
- case I40E_VIRTCHNL_OP_VERSION:
- valid_len = sizeof(struct i40e_virtchnl_version_info);
+ case VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct virtchnl_version_info);
break;
- case I40E_VIRTCHNL_OP_RESET_VF:
+ case VIRTCHNL_OP_RESET_VF:
valid_len = 0;
break;
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
/* Valid length in api v1.0 is 0, v1.1 is 4 */
valid_len = 4;
break;
- case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
- valid_len = sizeof(struct i40e_virtchnl_txq_info);
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct virtchnl_txq_info);
break;
- case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
- valid_len = sizeof(struct i40e_virtchnl_rxq_info);
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct virtchnl_rxq_info);
break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
if (msglen >= valid_len) {
- struct i40e_virtchnl_vsi_queue_config_info *vqc =
- (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_vsi_queue_config_info *vqc =
+ (struct virtchnl_vsi_queue_config_info *)msg;
valid_len += (vqc->num_queue_pairs *
sizeof(struct
- i40e_virtchnl_queue_pair_info));
+ virtchnl_queue_pair_info));
if (vqc->num_queue_pairs == 0)
err_msg_format = true;
}
break;
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
- valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_irq_map_info);
if (msglen >= valid_len) {
- struct i40e_virtchnl_irq_map_info *vimi =
- (struct i40e_virtchnl_irq_map_info *)msg;
+ struct virtchnl_irq_map_info *vimi =
+ (struct virtchnl_irq_map_info *)msg;
valid_len += (vimi->num_vectors *
- sizeof(struct i40e_virtchnl_vector_map));
+ sizeof(struct virtchnl_vector_map));
if (vimi->num_vectors == 0)
err_msg_format = true;
}
break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
- valid_len = sizeof(struct i40e_virtchnl_queue_select);
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct virtchnl_queue_select);
break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
- valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ valid_len = sizeof(struct virtchnl_ether_addr_list);
if (msglen >= valid_len) {
- struct i40e_virtchnl_ether_addr_list *veal =
- (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct virtchnl_ether_addr_list *veal =
+ (struct virtchnl_ether_addr_list *)msg;
valid_len += veal->num_elements *
- sizeof(struct i40e_virtchnl_ether_addr);
+ sizeof(struct virtchnl_ether_addr);
if (veal->num_elements == 0)
err_msg_format = true;
}
break;
- case I40E_VIRTCHNL_OP_ADD_VLAN:
- case I40E_VIRTCHNL_OP_DEL_VLAN:
- valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
+ case VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct virtchnl_vlan_filter_list);
if (msglen >= valid_len) {
- struct i40e_virtchnl_vlan_filter_list *vfl =
- (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
valid_len += vfl->num_elements * sizeof(u16);
if (vfl->num_elements == 0)
err_msg_format = true;
}
break;
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- valid_len = sizeof(struct i40e_virtchnl_promisc_info);
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct virtchnl_promisc_info);
break;
- case I40E_VIRTCHNL_OP_GET_STATS:
- valid_len = sizeof(struct i40e_virtchnl_queue_select);
+ case VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct virtchnl_queue_select);
break;
/* These are always errors coming from the VF. */
- case I40E_VIRTCHNL_OP_EVENT:
- case I40E_VIRTCHNL_OP_UNKNOWN:
+ case VIRTCHNL_OP_EVENT:
+ case VIRTCHNL_OP_UNKNOWN:
default:
return EPERM;
break;
@@ -159,7 +159,7 @@ static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
*/
static int
ixlv_send_pf_msg(struct ixlv_sc *sc,
- enum i40e_virtchnl_ops op, u8 *msg, u16 len)
+ enum virtchnl_ops op, u8 *msg, u16 len)
{
struct i40e_hw *hw = &sc->hw;
device_t dev = sc->dev;
@@ -197,12 +197,12 @@ ixlv_send_pf_msg(struct ixlv_sc *sc,
int
ixlv_send_api_ver(struct ixlv_sc *sc)
{
- struct i40e_virtchnl_version_info vvi;
+ struct virtchnl_version_info vvi;
- vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
- vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
+ vvi.major = VIRTCHNL_VERSION_MAJOR;
+ vvi.minor = VIRTCHNL_VERSION_MINOR;
- return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION,
+ return ixlv_send_pf_msg(sc, VIRTCHNL_OP_VERSION,
(u8 *)&vvi, sizeof(vvi));
}
@@ -216,7 +216,7 @@ ixlv_send_api_ver(struct ixlv_sc *sc)
int
ixlv_verify_api_ver(struct ixlv_sc *sc)
{
- struct i40e_virtchnl_version_info *pf_vvi;
+ struct virtchnl_version_info *pf_vvi;
struct i40e_hw *hw = &sc->hw;
struct i40e_arq_event_info event;
device_t dev = sc->dev;
@@ -244,8 +244,8 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
goto out_alloc;
}
- if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
- I40E_VIRTCHNL_OP_VERSION) {
+ if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) !=
+ VIRTCHNL_OP_VERSION) {
DDPRINTF(dev, "Received unexpected op response: %d\n",
le32toh(event.desc.cookie_high));
/* Don't stop looking for expected response */
@@ -260,10 +260,10 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
break;
}
- pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
- if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
- ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
- (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR))) {
+ pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
+ if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
+ ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
+ (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) {
device_printf(dev, "Critical PF/VF API version mismatch!\n");
err = EIO;
} else
@@ -272,7 +272,7 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
/* Log PF/VF api versions */
device_printf(dev, "PF API %d.%d / VF API %d.%d\n",
pf_vvi->major, pf_vvi->minor,
- I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR);
+ VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
out_alloc:
free(event.msg_buf, M_DEVBUF);
@@ -292,15 +292,15 @@ ixlv_send_vf_config_msg(struct ixlv_sc *sc)
{
u32 caps;
- caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_PF |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
- if (sc->pf_version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
- return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ if (sc->pf_version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
+ return ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES,
NULL, 0);
else
- return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ return ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES,
(u8 *)&caps, sizeof(caps));
}
@@ -323,8 +323,8 @@ ixlv_get_vf_config(struct ixlv_sc *sc)
u32 retries = 0;
/* Note this assumes a single VSI */
- len = sizeof(struct i40e_virtchnl_vf_resource) +
- sizeof(struct i40e_virtchnl_vsi_resource);
+ len = sizeof(struct virtchnl_vf_resource) +
+ sizeof(struct virtchnl_vsi_resource);
event.buf_len = len;
event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
if (!event.msg_buf) {
@@ -337,8 +337,8 @@ ixlv_get_vf_config(struct ixlv_sc *sc)
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
if (++retries <= IXLV_AQ_MAX_ERR)
i40e_msec_pause(10);
- } else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
+ } else if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) !=
+ VIRTCHNL_OP_GET_VF_RESOURCES) {
DDPRINTF(dev, "Received a response from PF,"
" opcode %d, error %d",
le32toh(event.desc.cookie_high),
@@ -391,12 +391,12 @@ ixlv_configure_queues(struct ixlv_sc *sc)
struct rx_ring *rxr;
int len, pairs;
- struct i40e_virtchnl_vsi_queue_config_info *vqci;
- struct i40e_virtchnl_queue_pair_info *vqpi;
+ struct virtchnl_vsi_queue_config_info *vqci;
+ struct virtchnl_queue_pair_info *vqpi;
pairs = vsi->num_queues;
- len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
- (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
+ len = sizeof(struct virtchnl_vsi_queue_config_info) +
+ (sizeof(struct virtchnl_queue_pair_info) * pairs);
vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (!vqci) {
device_printf(dev, "%s: unable to allocate memory\n", __func__);
@@ -414,23 +414,25 @@ ixlv_configure_queues(struct ixlv_sc *sc)
rxr = &que->rxr;
vqpi->txq.vsi_id = vqci->vsi_id;
vqpi->txq.queue_id = i;
- vqpi->txq.ring_len = que->num_desc;
+ vqpi->txq.ring_len = que->num_tx_desc;
vqpi->txq.dma_ring_addr = txr->dma.pa;
/* Enable Head writeback */
- vqpi->txq.headwb_enabled = 1;
- vqpi->txq.dma_headwb_addr = txr->dma.pa +
- (que->num_desc * sizeof(struct i40e_tx_desc));
+ if (vsi->enable_head_writeback) {
+ vqpi->txq.headwb_enabled = 1;
+ vqpi->txq.dma_headwb_addr = txr->dma.pa +
+ (que->num_tx_desc * sizeof(struct i40e_tx_desc));
+ }
vqpi->rxq.vsi_id = vqci->vsi_id;
vqpi->rxq.queue_id = i;
- vqpi->rxq.ring_len = que->num_desc;
+ vqpi->rxq.ring_len = que->num_rx_desc;
vqpi->rxq.dma_ring_addr = rxr->dma.pa;
vqpi->rxq.max_pkt_size = vsi->max_frame_size;
vqpi->rxq.databuffer_size = rxr->mbuf_sz;
vqpi->rxq.splithdr_enabled = 0;
}
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
(u8 *)vqci, len);
free(vqci, M_DEVBUF);
}
@@ -443,12 +445,12 @@ ixlv_configure_queues(struct ixlv_sc *sc)
void
ixlv_enable_queues(struct ixlv_sc *sc)
{
- struct i40e_virtchnl_queue_select vqs;
+ struct virtchnl_queue_select vqs;
vqs.vsi_id = sc->vsi_res->vsi_id;
vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
vqs.rx_queues = vqs.tx_queues;
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ ixlv_send_pf_msg(sc, VIRTCHNL_OP_ENABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
}
@@ -460,12 +462,12 @@ ixlv_enable_queues(struct ixlv_sc *sc)
void
ixlv_disable_queues(struct ixlv_sc *sc)
{
- struct i40e_virtchnl_queue_select vqs;
+ struct virtchnl_queue_select vqs;
vqs.vsi_id = sc->vsi_res->vsi_id;
vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
vqs.rx_queues = vqs.tx_queues;
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ ixlv_send_pf_msg(sc, VIRTCHNL_OP_DISABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
}
@@ -478,7 +480,7 @@ ixlv_disable_queues(struct ixlv_sc *sc)
void
ixlv_map_queues(struct ixlv_sc *sc)
{
- struct i40e_virtchnl_irq_map_info *vm;
+ struct virtchnl_irq_map_info *vm;
int i, q, len;
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
@@ -486,8 +488,8 @@ ixlv_map_queues(struct ixlv_sc *sc)
/* How many queue vectors, adminq uses one */
q = sc->msix - 1;
- len = sizeof(struct i40e_virtchnl_irq_map_info) +
- (sc->msix * sizeof(struct i40e_virtchnl_vector_map));
+ len = sizeof(struct virtchnl_irq_map_info) +
+ (sc->msix * sizeof(struct virtchnl_vector_map));
vm = malloc(len, M_DEVBUF, M_NOWAIT);
if (!vm) {
printf("%s: unable to allocate memory\n", __func__);
@@ -514,7 +516,7 @@ ixlv_map_queues(struct ixlv_sc *sc)
vm->vecmap[i].rxitr_idx = 0;
vm->vecmap[i].txitr_idx = 0;
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_IRQ_MAP,
(u8 *)vm, len);
free(vm, M_DEVBUF);
}
@@ -527,7 +529,7 @@ ixlv_map_queues(struct ixlv_sc *sc)
void
ixlv_add_vlans(struct ixlv_sc *sc)
{
- struct i40e_virtchnl_vlan_filter_list *v;
+ struct virtchnl_vlan_filter_list *v;
struct ixlv_vlan_filter *f, *ftmp;
device_t dev = sc->dev;
int len, i = 0, cnt = 0;
@@ -540,11 +542,11 @@ ixlv_add_vlans(struct ixlv_sc *sc)
if (!cnt) { /* no work... */
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
- I40E_SUCCESS);
+ VIRTCHNL_STATUS_SUCCESS);
return;
}
- len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ len = sizeof(struct virtchnl_vlan_filter_list) +
(cnt * sizeof(u16));
if (len > IXL_AQ_BUF_SZ) {
@@ -576,7 +578,7 @@ ixlv_add_vlans(struct ixlv_sc *sc)
break;
}
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
+ ixlv_send_pf_msg(sc, VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
free(v, M_DEVBUF);
/* add stats? */
}
@@ -590,7 +592,7 @@ void
ixlv_del_vlans(struct ixlv_sc *sc)
{
device_t dev = sc->dev;
- struct i40e_virtchnl_vlan_filter_list *v;
+ struct virtchnl_vlan_filter_list *v;
struct ixlv_vlan_filter *f, *ftmp;
int len, i = 0, cnt = 0;
@@ -602,11 +604,11 @@ ixlv_del_vlans(struct ixlv_sc *sc)
if (!cnt) { /* no work... */
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
- I40E_SUCCESS);
+ VIRTCHNL_STATUS_SUCCESS);
return;
}
- len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ len = sizeof(struct virtchnl_vlan_filter_list) +
(cnt * sizeof(u16));
if (len > IXL_AQ_BUF_SZ) {
@@ -639,7 +641,7 @@ ixlv_del_vlans(struct ixlv_sc *sc)
break;
}
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
+ ixlv_send_pf_msg(sc, VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
free(v, M_DEVBUF);
/* add stats? */
}
@@ -653,7 +655,7 @@ ixlv_del_vlans(struct ixlv_sc *sc)
void
ixlv_add_ether_filters(struct ixlv_sc *sc)
{
- struct i40e_virtchnl_ether_addr_list *a;
+ struct virtchnl_ether_addr_list *a;
struct ixlv_mac_filter *f;
device_t dev = sc->dev;
int len, j = 0, cnt = 0;
@@ -666,12 +668,12 @@ ixlv_add_ether_filters(struct ixlv_sc *sc)
if (cnt == 0) { /* Should not happen... */
DDPRINTF(dev, "cnt == 0, exiting...");
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
- I40E_SUCCESS);
+ VIRTCHNL_STATUS_SUCCESS);
return;
}
- len = sizeof(struct i40e_virtchnl_ether_addr_list) +
- (cnt * sizeof(struct i40e_virtchnl_ether_addr));
+ len = sizeof(struct virtchnl_ether_addr_list) +
+ (cnt * sizeof(struct virtchnl_ether_addr));
a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (a == NULL) {
@@ -699,7 +701,7 @@ ixlv_add_ether_filters(struct ixlv_sc *sc)
DDPRINTF(dev, "len %d, j %d, cnt %d",
len, j, cnt);
ixlv_send_pf_msg(sc,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
+ VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)a, len);
/* add stats? */
free(a, M_DEVBUF);
return;
@@ -713,7 +715,7 @@ ixlv_add_ether_filters(struct ixlv_sc *sc)
void
ixlv_del_ether_filters(struct ixlv_sc *sc)
{
- struct i40e_virtchnl_ether_addr_list *d;
+ struct virtchnl_ether_addr_list *d;
device_t dev = sc->dev;
struct ixlv_mac_filter *f, *f_temp;
int len, j = 0, cnt = 0;
@@ -726,12 +728,12 @@ ixlv_del_ether_filters(struct ixlv_sc *sc)
if (cnt == 0) {
DDPRINTF(dev, "cnt == 0, exiting...");
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
- I40E_SUCCESS);
+ VIRTCHNL_STATUS_SUCCESS);
return;
}
- len = sizeof(struct i40e_virtchnl_ether_addr_list) +
- (cnt * sizeof(struct i40e_virtchnl_ether_addr));
+ len = sizeof(struct virtchnl_ether_addr_list) +
+ (cnt * sizeof(struct virtchnl_ether_addr));
d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (d == NULL) {
@@ -757,7 +759,7 @@ ixlv_del_ether_filters(struct ixlv_sc *sc)
break;
}
ixlv_send_pf_msg(sc,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
+ VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)d, len);
/* add stats? */
free(d, M_DEVBUF);
return;
@@ -775,8 +777,8 @@ ixlv_request_reset(struct ixlv_sc *sc)
** the request, this avoids any possibility of
** a mistaken early detection of completion.
*/
- wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
+ wr32(&sc->hw, I40E_VFGEN_RSTAT, VIRTCHNL_VFR_INPROGRESS);
+ ixlv_send_pf_msg(sc, VIRTCHNL_OP_RESET_VF, NULL, 0);
}
/*
@@ -786,12 +788,12 @@ ixlv_request_reset(struct ixlv_sc *sc)
void
ixlv_request_stats(struct ixlv_sc *sc)
{
- struct i40e_virtchnl_queue_select vqs;
+ struct virtchnl_queue_select vqs;
int error = 0;
vqs.vsi_id = sc->vsi_res->vsi_id;
/* Low priority, we don't need to error check */
- error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
+ error = ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_STATS,
(u8 *)&vqs, sizeof(vqs));
#ifdef IXL_DEBUG
if (error)
@@ -836,7 +838,7 @@ ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
void
ixlv_config_rss_key(struct ixlv_sc *sc)
{
- struct i40e_virtchnl_rss_key *rss_key_msg;
+ struct virtchnl_rss_key *rss_key_msg;
int msg_len, key_length;
u8 rss_seed[IXL_RSS_KEY_SIZE];
@@ -849,7 +851,7 @@ ixlv_config_rss_key(struct ixlv_sc *sc)
/* Send the fetched key */
key_length = IXL_RSS_KEY_SIZE;
- msg_len = sizeof(struct i40e_virtchnl_rss_key) + (sizeof(u8) * key_length) - 1;
+ msg_len = sizeof(struct virtchnl_rss_key) + (sizeof(u8) * key_length) - 1;
rss_key_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (rss_key_msg == NULL) {
device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n");
@@ -863,7 +865,7 @@ ixlv_config_rss_key(struct ixlv_sc *sc)
DDPRINTF(sc->dev, "config_rss: vsi_id %d, key_len %d",
rss_key_msg->vsi_id, rss_key_msg->key_len);
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_KEY,
(u8 *)rss_key_msg, msg_len);
free(rss_key_msg, M_DEVBUF);
@@ -872,25 +874,25 @@ ixlv_config_rss_key(struct ixlv_sc *sc)
void
ixlv_set_rss_hena(struct ixlv_sc *sc)
{
- struct i40e_virtchnl_rss_hena hena;
+ struct virtchnl_rss_hena hena;
hena.hena = IXL_DEFAULT_RSS_HENA_X722;
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ ixlv_send_pf_msg(sc, VIRTCHNL_OP_SET_RSS_HENA,
(u8 *)&hena, sizeof(hena));
}
void
ixlv_config_rss_lut(struct ixlv_sc *sc)
{
- struct i40e_virtchnl_rss_lut *rss_lut_msg;
+ struct virtchnl_rss_lut *rss_lut_msg;
int msg_len;
u16 lut_length;
u32 lut;
int i, que_id;
lut_length = IXL_RSS_VSI_LUT_SIZE;
- msg_len = sizeof(struct i40e_virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1;
+ msg_len = sizeof(struct virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1;
rss_lut_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (rss_lut_msg == NULL) {
device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n");
@@ -918,7 +920,7 @@ ixlv_config_rss_lut(struct ixlv_sc *sc)
rss_lut_msg->lut[i] = lut;
}
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_LUT,
(u8 *)rss_lut_msg, msg_len);
free(rss_lut_msg, M_DEVBUF);
@@ -933,18 +935,18 @@ ixlv_config_rss_lut(struct ixlv_sc *sc)
*/
void
ixlv_vc_completion(struct ixlv_sc *sc,
- enum i40e_virtchnl_ops v_opcode,
- i40e_status v_retval, u8 *msg, u16 msglen)
+ enum virtchnl_ops v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
device_t dev = sc->dev;
struct ixl_vsi *vsi = &sc->vsi;
- if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
- struct i40e_virtchnl_pf_event *vpe =
- (struct i40e_virtchnl_pf_event *)msg;
+ if (v_opcode == VIRTCHNL_OP_EVENT) {
+ struct virtchnl_pf_event *vpe =
+ (struct virtchnl_pf_event *)msg;
switch (vpe->event) {
- case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ case VIRTCHNL_EVENT_LINK_CHANGE:
#ifdef IXL_DEBUG
device_printf(dev, "Link change: status %d, speed %d\n",
vpe->event_data.link_event.link_status,
@@ -956,7 +958,7 @@ ixlv_vc_completion(struct ixlv_sc *sc,
vpe->event_data.link_event.link_speed;
ixlv_update_link_status(sc);
break;
- case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
device_printf(dev, "PF initiated reset!\n");
sc->init_state = IXLV_RESET_PENDING;
mtx_unlock(&sc->mtx);
@@ -976,19 +978,19 @@ ixlv_vc_completion(struct ixlv_sc *sc,
if (v_retval) {
device_printf(dev,
"%s: AQ returned error %s to our request %s!\n",
- __func__, i40e_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode));
+ __func__, i40e_vc_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode));
}
#ifdef IXL_DEBUG
- if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS)
+ if (v_opcode != VIRTCHNL_OP_GET_STATS)
DDPRINTF(dev, "opcode %d", v_opcode);
#endif
switch (v_opcode) {
- case I40E_VIRTCHNL_OP_GET_STATS:
+ case VIRTCHNL_OP_GET_STATS:
ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
v_retval);
if (v_retval) {
@@ -996,23 +998,23 @@ ixlv_vc_completion(struct ixlv_sc *sc,
device_printf(dev, "WARNING: Device may not receive traffic!\n");
}
break;
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
v_retval);
break;
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC,
v_retval);
break;
- case I40E_VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_ADD_VLAN:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
v_retval);
break;
- case I40E_VIRTCHNL_OP_DEL_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
v_retval);
break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_ENABLE_QUEUES:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES,
v_retval);
if (v_retval == 0) {
@@ -1025,7 +1027,7 @@ ixlv_vc_completion(struct ixlv_sc *sc,
/* TODO: Clear a state flag, so we know we're ready to run init again */
}
break;
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES,
v_retval);
if (v_retval == 0) {
@@ -1035,23 +1037,23 @@ ixlv_vc_completion(struct ixlv_sc *sc,
vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
}
break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES,
v_retval);
break;
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
v_retval);
break;
- case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_KEY,
v_retval);
break;
- case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ case VIRTCHNL_OP_SET_RSS_HENA:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_SET_RSS_HENA,
v_retval);
break;
- case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_LUT,
v_retval);
break;
@@ -1141,7 +1143,7 @@ ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err)
static void
ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request,
- enum i40e_status_code err)
+ enum virtchnl_status_code err)
{
struct ixl_vc_cmd *cmd;
@@ -1150,7 +1152,8 @@ ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request,
return;
callout_stop(&mgr->callout);
- ixl_vc_process_completion(mgr, err);
+	/* At the moment, the virtchnl status codes map directly to i40e ones */
+ ixl_vc_process_completion(mgr, (enum i40e_status_code)err);
}
static void
diff --git a/sys/dev/ixl/virtchnl.h b/sys/dev/ixl/virtchnl.h
new file mode 100644
index 000000000000..917f9d7d319e
--- /dev/null
+++ b/sys/dev/ixl/virtchnl.h
@@ -0,0 +1,747 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2017, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the drivers for all devices starting from our 40G product line
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The Firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum
+ * of three VSIs. All the queue indexes are relative to the VSI. Each VF
+ * can have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of virtchnl_status_code type, defined below.
+ *
+ * In general, VF driver initialization should roughly follow the order of
+ * these opcodes. The VF driver must first validate the API version of the
+ * PF driver, then request a reset, then get resources, then configure
+ * queues and interrupts. After these operations are complete, the VF
+ * driver may start its queues, optionally add MAC and VLAN filters, and
+ * process traffic.
+ */
+
+/* START GENERIC DEFINES
+ * Need to ensure the following enums and defines hold the same meaning and
+ * value in current and future projects
+ */
+
+/* Error Codes */
+enum virtchnl_status_code {
+ VIRTCHNL_STATUS_SUCCESS = 0,
+ VIRTCHNL_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
+ VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
+ VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
+ VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
+};
+
+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
+
+enum virtchnl_link_speed {
+ VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
+ VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+};
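+
+/* The speeds are single-bit flags, so BIT() is assumed to come from the
+ * OS compatibility layer; for example, VIRTCHNL_LINK_SPEED_10GB == BIT(3)
+ * == 0x08.
+ */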
+
+/* for hsplit_0 field of Rx HMC context */
+/* deprecated with AVF 1.0 */
+enum virtchnl_rx_hsplit {
+ VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
+ VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
+ VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
+ VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
+ VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
+};
+
+#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
+/* END GENERIC DEFINES */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ * Use of "advanced opcode" features must be negotiated as part of capabilities
+ * exchange and are not considered part of base mode feature set.
+ */
+ VIRTCHNL_OP_UNKNOWN = 0,
+ VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ VIRTCHNL_OP_RESET_VF = 2,
+ VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+ VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+ VIRTCHNL_OP_ADD_VLAN = 12,
+ VIRTCHNL_OP_DEL_VLAN = 13,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ VIRTCHNL_OP_GET_STATS = 15,
+ VIRTCHNL_OP_RSVD = 16,
+ VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+ VIRTCHNL_OP_REQUEST_QUEUES = 29,
+};
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ {virtchnl_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0)}
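+
+/* For illustration, VIRTCHNL_CHECK_STRUCT_LEN(8, foo) expands to
+ *
+ *	enum virtchnl_static_assert_enum_foo
+ *	    {virtchnl_static_assert_foo =
+ *		(8) / ((sizeof(struct foo) == (8)) ? 1 : 0)};
+ *
+ * so a structure whose wire size drifts from the expected value becomes
+ * a divide-by-zero compile error ('foo' is a hypothetical struct).
+ */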
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum virtchnl_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
+
+/* Message descriptions and data structures. */
+
+/* VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define VIRTCHNL_VERSION_MAJOR 1
+#define VIRTCHNL_VERSION_MINOR 1
+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
+struct virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
+
+#define VF_IS_V10(_ver) (((_ver)->major == 1) && ((_ver)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
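+
+/* For example, a PF supporting both minor versions might branch on the
+ * version received from the VF ('ver' is an illustrative name):
+ *
+ *	if (VF_IS_V11(&ver))
+ *		expect a u32 capability bitmap with GET_VF_RESOURCES;
+ *	else if (VF_IS_V10(&ver))
+ *		expect GET_VF_RESOURCES with no parameters;
+ */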
+
+/* VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
+ * vsi_type should always be 6 for backward compatibility. Add other types
+ * as needed.
+ */
+enum virtchnl_vsi_type {
+ VIRTCHNL_VSI_TYPE_INVALID = 0,
+ VIRTCHNL_VSI_SRIOV = 6,
+};
+
+/* VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * virtchnl_vf_resource and one or more
+ * virtchnl_vsi_resource structures.
+ */
+
+struct virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum virtchnl_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+
+/* VF capability flags
+ * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
+ * TX/RX Checksum offloading and TSO for non-tunnelled packets.
+ */
+#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
+#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP 0x00100000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0x00200000
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0x00400000
+
+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
+ VIRTCHNL_VF_OFFLOAD_VLAN | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
+struct virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_cap_flags;
+ u32 rss_key_size;
+ u32 rss_lut_size;
+
+ struct virtchnl_vsi_resource vsi_res[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
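+
+/* Sketch of a capability check after GET_VF_RESOURCES completes, where
+ * 'res' (an illustrative name) points at the PF's reply:
+ *
+ *	struct virtchnl_vf_resource *res = ...;
+ *	if (res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)
+ *		configure RSS via VIRTCHNL_OP_CONFIG_RSS_KEY/LUT;
+ */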
+
+/* VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled; /* deprecated with AVF 1.0 */
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+
+/* VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled; /* deprecated with AVF 1.0 */
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u32 pad1;
+ u64 dma_ring_addr;
+ enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+ u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
+
+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct virtchnl_txq_info txq;
+ struct virtchnl_rxq_info rxq;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
+
+struct virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ u32 pad;
+ struct virtchnl_queue_pair_info qpair[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF. Each VF gets a guaranteed number of queues on init but asking for
+ * additional queues must be negotiated. This is a best effort request as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support; otherwise it will respond with the
+ * number requested.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+ u16 num_queue_pairs;
+};
+
+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
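+
+/* The map is variable-length; ixlv_map_queues() in ixlvc.c sizes the
+ * buffer as
+ *
+ *	len = sizeof(struct virtchnl_irq_map_info) +
+ *	    (num_vectors * sizeof(struct virtchnl_vector_map));
+ *
+ * ('len' and 'num_vectors' are illustrative names for the same math.)
+ */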
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
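+
+/* A minimal sketch of filling the selection for the first n queue pairs,
+ * as the ixlv driver does when enabling or disabling all of its queues;
+ * the helper name is illustrative and not part of the protocol.
+ */
+static inline void
+virtchnl_select_first_n_queues(struct virtchnl_queue_select *vqs,
+    u16 vsi_id, u32 n)
+{
+	vqs->vsi_id = vsi_id;
+	vqs->pad = 0;
+	/* Bit i selects queue pair i; n is at most 16 today. */
+	vqs->tx_queues = ((u32)1 << n) - 1;
+	vqs->rx_queues = vqs->tx_queues;
+}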
+
+/* VIRTCHNL_OP_ADD_ETH_ADDR
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* VIRTCHNL_OP_DEL_ETH_ADDR
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct virtchnl_ether_addr {
+ u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
+
+struct virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct virtchnl_ether_addr list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
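+
+/* Address lists are variable-length; ixlv_add_ether_filters() sizes the
+ * buffer as
+ *
+ *	len = sizeof(struct virtchnl_ether_addr_list) +
+ *	    (cnt * sizeof(struct virtchnl_ether_addr));
+ *
+ * where 'cnt' is the number of filters (names are illustrative; the VLAN
+ * list below is sized the same way with u16 elements).
+ */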
+
+/* VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+
+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
+
+#define FLAG_VF_UNICAST_PROMISC 0x00000001
+#define FLAG_VF_MULTICAST_PROMISC 0x00000002
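+
+/* For example, to request both unicast and multicast promiscuous mode
+ * ('info' is an illustrative name):
+ *
+ *	info.flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
+ */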
+
+/* VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the virtchnl_queue_select struct to specify the VSI; the queue bitmap
+ * fields are ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* VIRTCHNL_OP_CONFIG_RSS_KEY
+ * VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
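+
+/* Unlike the filter lists above, key/LUT sizing subtracts the byte that
+ * the one-element array already contributes, matching the driver and the
+ * validator below:
+ *
+ *	msg_len = sizeof(struct virtchnl_rss_key) + key_len - 1;
+ *	msg_len = sizeof(struct virtchnl_rss_lut) + lut_entries - 1;
+ *
+ * ('msg_len' is an illustrative name.)
+ */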
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena {
+ u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum virtchnl_event_codes {
+ VIRTCHNL_EVENT_UNKNOWN = 0,
+ VIRTCHNL_EVENT_LINK_CHANGE,
+ VIRTCHNL_EVENT_RESET_IMPENDING,
+ VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+
+#define PF_EVENT_SEVERITY_INFO 0
+#define PF_EVENT_SEVERITY_ATTENTION 1
+#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct virtchnl_pf_event {
+ enum virtchnl_event_codes event;
+ union {
+ struct {
+ enum virtchnl_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
+
+
+/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request PF to map IWARP vectors to IWARP queues.
+ * The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector could have an AEQ and CEQ attached to it although
+ * there is a single AEQ per VF IWARP instance in which case
+ * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
+ * There will never be a case where there will be multiple CEQs attached
+ * to a single vector.
+ * PF configures interrupt mapping and returns status.
+ */
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * To keep the interface simple, SW defines a unique type value for
+ * the AEQ.
+ */
+#define QUEUE_TYPE_PE_AEQ 0x80
+#define QUEUE_INVALID_IDX 0xFFFF
+
+struct virtchnl_iwarp_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+
+struct virtchnl_iwarp_qvlist_info {
+ u32 num_vectors;
+ struct virtchnl_iwarp_qv_info qv_info[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+
+
+/* VF reset states - these are written into the RSTAT register
+ * (VFGEN_RSTAT on the VF).
+ * When the PF initiates a reset, it writes 0.
+ * When the reset is complete, it writes 1.
+ * When the PF detects that the VF has recovered, it writes 2.
+ * The VF checks this register periodically to determine if a reset has
+ * occurred, then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware is in a
+ * reset state, it will return 0xDEADBEEF, which, when masked, will
+ * result in 3.
+ */
+enum virtchnl_vfr_states {
+ VIRTCHNL_VFR_INPROGRESS = 0,
+ VIRTCHNL_VFR_COMPLETED,
+ VIRTCHNL_VFR_VFACTIVE,
+};
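+
+/* A sketch of the poll described above, assuming the rd32() accessor and
+ * I40E_VFGEN_RSTAT_VFR_STATE_MASK from i40e_register.h:
+ *
+ *	u32 rstat = rd32(hw, I40E_VFGEN_RSTAT) &
+ *	    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ *	if (rstat == VIRTCHNL_VFR_COMPLETED ||
+ *	    rstat == VIRTCHNL_VFR_VFACTIVE)
+ *		the reset is done; reinitialize the admin queue
+ */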
+
+/**
+ * virtchnl_vc_validate_vf_msg
+ * @ver: Virtchnl version info
+ * @v_opcode: Opcode for the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Validate that msglen matches the payload structure expected for v_opcode.
+ */
+static inline int
+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+ u8 *msg, u16 msglen)
+{
+ bool err_msg_format = FALSE;
+ int valid_len = 0;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct virtchnl_version_info);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ if (VF_IS_V11(ver))
+ valid_len = sizeof(u32);
+ break;
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct virtchnl_txq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct virtchnl_rxq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_vsi_queue_config_info *vqc =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct virtchnl_queue_pair_info));
+ if (vqc->num_queue_pairs == 0)
+ err_msg_format = TRUE;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_irq_map_info *vimi =
+ (struct virtchnl_irq_map_info *)msg;
+ valid_len += (vimi->num_vectors *
+ sizeof(struct virtchnl_vector_map));
+ if (vimi->num_vectors == 0)
+ err_msg_format = TRUE;
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ valid_len = sizeof(struct virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_ether_addr_list *veal =
+ (struct virtchnl_ether_addr_list *)msg;
+ valid_len += veal->num_elements *
+ sizeof(struct virtchnl_ether_addr);
+ if (veal->num_elements == 0)
+ err_msg_format = TRUE;
+ }
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ valid_len += vfl->num_elements * sizeof(u16);
+ if (vfl->num_elements == 0)
+ err_msg_format = TRUE;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct virtchnl_promisc_info);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_IWARP:
+ /* These messages are opaque to us and will be validated in
+ * the RDMA client code. We just need to check for nonzero
+ * length. The firmware will enforce max length restrictions.
+ */
+ if (msglen)
+ valid_len = msglen;
+ else
+ err_msg_format = TRUE;
+ break;
+ case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ break;
+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_iwarp_qvlist_info *qv =
+ (struct virtchnl_iwarp_qvlist_info *)msg;
+ if (qv->num_vectors == 0) {
+ err_msg_format = TRUE;
+ break;
+ }
+ valid_len += ((qv->num_vectors - 1) *
+ sizeof(struct virtchnl_iwarp_qv_info));
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_lut *vrl =
+ (struct virtchnl_rss_lut *)msg;
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct virtchnl_rss_hena);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ valid_len = sizeof(struct virtchnl_vf_res_request);
+ break;
+ /* These are always errors coming from the VF. */
+ case VIRTCHNL_OP_EVENT:
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ return VIRTCHNL_ERR_PARAM;
+ }
+ /* A few final checks */
+ if (err_msg_format || valid_len != msglen)
+ return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
+
+ return 0;
+}
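+
+/* Usage sketch (PF side), assuming 'ver', 'v_opcode', 'msg' and 'msglen'
+ * were taken from a message received on the admin queue:
+ *
+ *	int err = virtchnl_vc_validate_vf_msg(&ver, v_opcode, msg, msglen);
+ *	if (err)
+ *		reply to the VF with err in v_retval;
+ */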
+#endif /* _VIRTCHNL_H_ */
diff --git a/sys/modules/ixl/Makefile b/sys/modules/ixl/Makefile
index a5d6f50dbed7..f69846d5f314 100644
--- a/sys/modules/ixl/Makefile
+++ b/sys/modules/ixl/Makefile
@@ -10,7 +10,7 @@ SRCS += ixl_iw.c
SRCS.PCI_IOV= pci_iov_if.h ixl_pf_iov.c
# Shared source
-SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c
+SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c i40e_dcb.c
# Debug messages / sysctls
# CFLAGS += -DIXL_DEBUG