Diffstat (limited to 'sys/dev/ixl')
-rw-r--r--  sys/dev/ixl/i40e_adminq.c         37
-rw-r--r--  sys/dev/ixl/i40e_adminq.h          1
-rw-r--r--  sys/dev/ixl/i40e_adminq_cmd.h    192
-rw-r--r--  sys/dev/ixl/i40e_common.c        507
-rw-r--r--  sys/dev/ixl/i40e_devids.h         12
-rw-r--r--  sys/dev/ixl/i40e_nvm.c           118
-rw-r--r--  sys/dev/ixl/i40e_osdep.c          29
-rw-r--r--  sys/dev/ixl/i40e_osdep.h          15
-rw-r--r--  sys/dev/ixl/i40e_prototype.h      43
-rw-r--r--  sys/dev/ixl/i40e_register.h     1962
-rw-r--r--  sys/dev/ixl/i40e_type.h          164
-rw-r--r--  sys/dev/ixl/i40e_virtchnl.h       45
-rw-r--r--  sys/dev/ixl/if_ixl.c            7148
-rw-r--r--  sys/dev/ixl/if_ixlv.c            309
-rw-r--r--  sys/dev/ixl/ixl.h                160
-rw-r--r--  sys/dev/ixl/ixl_pf.h             208
-rw-r--r--  sys/dev/ixl/ixl_pf_iov.c        1925
-rw-r--r--  sys/dev/ixl/ixl_pf_iov.h          62
-rw-r--r--  sys/dev/ixl/ixl_pf_main.c       5556
-rw-r--r--  sys/dev/ixl/ixl_pf_qmgr.c        308
-rw-r--r--  sys/dev/ixl/ixl_pf_qmgr.h        109
-rw-r--r--  sys/dev/ixl/ixl_txrx.c            64
-rw-r--r--  sys/dev/ixl/ixlv.h                33
-rw-r--r--  sys/dev/ixl/ixlvc.c              150
24 files changed, 11697 insertions, 7460 deletions
diff --git a/sys/dev/ixl/i40e_adminq.c b/sys/dev/ixl/i40e_adminq.c
index 792f832ac70f..cfffc1f01909 100644
--- a/sys/dev/ixl/i40e_adminq.c
+++ b/sys/dev/ixl/i40e_adminq.c
@@ -39,16 +39,6 @@
#include "i40e_prototype.h"
/**
- * i40e_is_nvm_update_op - return TRUE if this is an NVM update operation
- * @desc: API request descriptor
- **/
-static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
-{
- return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase)) ||
- (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
-}
-
-/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
@@ -661,13 +651,9 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
- hw->aq.nvm_release_on_done = FALSE;
+ hw->nvm_release_on_done = FALSE;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- ret_code = i40e_aq_set_hmc_resource_profile(hw,
- I40E_HMC_PROFILE_DEFAULT,
- 0,
- NULL);
ret_code = I40E_SUCCESS;
/* success! */
@@ -1081,26 +1067,7 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
- if (i40e_is_nvm_update_op(&e->desc)) {
- if (hw->aq.nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->aq.nvm_release_on_done = FALSE;
- }
-
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
-
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
-
- default:
- break;
- }
- }
-
+ i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
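With i40e_is_nvm_update_op() removed, i40e_clean_arq_element() no longer special-cases NVM opcodes itself: it reports every completed opcode to i40e_nvmupd_check_wait_event() and lets the NVM state machine decide whether it was waiting on that event. A minimal sketch of the resulting receive-queue consumer loop, assuming a hypothetical ixl_process_event() dispatcher and an IXL_AQ_BUF_SZ buffer-size macro:

/* Sketch: draining the admin receive queue. The NVM wait-event
 * bookkeeping now happens inside i40e_clean_arq_element() itself,
 * so the caller just cleans and dispatches. */
static void
ixl_drain_arq(struct i40e_hw *hw)
{
	struct i40e_arq_event_info event;
	u16 pending = 0;

	event.buf_len = IXL_AQ_BUF_SZ;	/* assumed buffer-size macro */
	event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (event.msg_buf == NULL)
		return;

	do {
		if (i40e_clean_arq_element(hw, &event, &pending))
			break;
		ixl_process_event(hw, &event);	/* hypothetical dispatch */
	} while (pending > 0);

	free(event.msg_buf, M_DEVBUF);
}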
diff --git a/sys/dev/ixl/i40e_adminq.h b/sys/dev/ixl/i40e_adminq.h
index 6448b8be2cf0..a0279273bd94 100644
--- a/sys/dev/ixl/i40e_adminq.h
+++ b/sys/dev/ixl/i40e_adminq.h
@@ -105,7 +105,6 @@ struct i40e_adminq_info {
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
- bool nvm_release_on_done;
struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */
diff --git a/sys/dev/ixl/i40e_adminq_cmd.h b/sys/dev/ixl/i40e_adminq_cmd.h
index 7c0ace2e6b75..764ce11fb772 100644
--- a/sys/dev/ixl/i40e_adminq_cmd.h
+++ b/sys/dev/ixl/i40e_adminq_cmd.h
@@ -140,6 +140,10 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
+ /* Proxy commands */
+ i40e_aqc_opc_set_proxy_config = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
/* LAA */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -147,6 +151,10 @@ enum i40e_admin_queue_opc {
/* PXE */
i40e_aqc_opc_clear_pxe_mode = 0x0110,
+ /* WoL commands */
+ i40e_aqc_opc_set_wol_filter = 0x0120,
+ i40e_aqc_opc_get_wake_reason = 0x0121,
+
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -185,6 +193,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_remove_control_packet_filter = 0x025B,
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
+ i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
@@ -212,7 +221,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
-
/* hmc */
i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
@@ -271,6 +279,10 @@ enum i40e_admin_queue_opc {
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_set_rss_key = 0x0B02,
+ i40e_aqc_opc_set_rss_lut = 0x0B03,
+ i40e_aqc_opc_get_rss_key = 0x0B04,
+ i40e_aqc_opc_get_rss_lut = 0x0B05,
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
@@ -433,6 +445,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_SDP 0x0062
#define I40E_AQ_CAP_ID_MDIO 0x0063
#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
#define I40E_AQ_CAP_ID_FLEX10 0x00F1
#define I40E_AQ_CAP_ID_CEM 0x00F2
@@ -457,13 +470,15 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
__le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4 0x0008
-#define I40E_AQ_ARP_UNSUP_CTL 0x0010
-#define I40E_AQ_ARP_ENA 0x0020
-#define I40E_AQ_ARP_ADD_IPV4 0x0040
-#define I40E_AQ_ARP_DEL_IPV4 0x0080
+#define I40E_AQ_ARP_INIT_IPV4 0x0800
+#define I40E_AQ_ARP_UNSUP_CTL 0x1000
+#define I40E_AQ_ARP_ENA 0x2000
+#define I40E_AQ_ARP_ADD_IPV4 0x4000
+#define I40E_AQ_ARP_DEL_IPV4 0x8000
__le16 table_id;
- __le32 pfpm_proxyfc;
+ __le32 enabled_offloads;
+#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020
+#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800
__le32 ip_addr;
u8 mac_addr[6];
u8 reserved[2];
@@ -478,17 +493,19 @@ struct i40e_aqc_ns_proxy_data {
__le16 table_idx_ipv6_0;
__le16 table_idx_ipv6_1;
__le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0 0x0100
-#define I40E_AQ_NS_PROXY_DEL_0 0x0200
-#define I40E_AQ_NS_PROXY_ADD_1 0x0400
-#define I40E_AQ_NS_PROXY_DEL_1 0x0800
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+#define I40E_AQ_NS_PROXY_ADD_0 0x0001
+#define I40E_AQ_NS_PROXY_DEL_0 0x0002
+#define I40E_AQ_NS_PROXY_ADD_1 0x0004
+#define I40E_AQ_NS_PROXY_DEL_1 0x0008
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400
+#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800
+#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000
u8 mac_addr_0[6];
u8 mac_addr_1[6];
u8 local_mac_addr[6];
@@ -538,6 +555,7 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
/* Manage MAC Address Write Command (0x0108) */
struct i40e_aqc_mac_address_write {
__le16 command_flags;
+#define I40E_AQC_MC_MAG_EN 0x0100
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
@@ -561,6 +579,56 @@ struct i40e_aqc_clear_pxe {
I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+/* Set WoL Filter (0x0120) */
+
+struct i40e_aqc_set_wol_filter {
+ __le16 filter_index;
+#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
+ I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
+#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
+ I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
+ __le16 cmd_flags;
+#define I40E_AQC_SET_WOL_FILTER 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
+#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
+ __le16 valid_flags;
+#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+
+struct i40e_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
+/* Get Wake Reason (0x0121) */
+
+struct i40e_aqc_get_wake_reason_completion {
+ u8 reserved_1[2];
+ __le16 wake_reason;
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
+ u8 reserved_2[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
+
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -643,6 +711,8 @@ struct i40e_aqc_set_port_parameters {
#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
__le16 bad_frame_vsi;
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
__le16 default_seid; /* reserved for command */
u8 reserved[10];
};
@@ -694,6 +764,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
/* Set Switch Configuration (direct 0x0205) */
struct i40e_aqc_set_switch_config {
__le16 flags;
+/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
__le16 valid_flags;
@@ -862,8 +933,12 @@ struct i40e_aqc_vsi_properties_data {
I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
@@ -1597,15 +1672,12 @@ struct i40e_aq_get_set_hmc_resource_profile {
I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
enum i40e_aq_hmc_profile {
- /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
I40E_HMC_PROFILE_DEFAULT = 1,
I40E_HMC_PROFILE_FAVOR_VF = 2,
I40E_HMC_PROFILE_EQUAL = 3,
};
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
-
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
@@ -1641,6 +1713,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_1000BASE_LX = 0x1C,
I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ I40E_PHY_TYPE_25GBASE_KR = 0x1F,
+ I40E_PHY_TYPE_25GBASE_CR = 0x20,
+ I40E_PHY_TYPE_25GBASE_SR = 0x21,
+ I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_MAX
};
@@ -1649,6 +1725,7 @@ enum i40e_aq_phy_type {
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+#define I40E_LINK_SPEED_25GB_SHIFT 0x6
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
@@ -1656,7 +1733,8 @@ enum i40e_aq_link_speed {
I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT),
+ I40E_LINK_SPEED_25GB = (1 << I40E_LINK_SPEED_25GB_SHIFT),
};
struct i40e_aqc_module_desc {
@@ -1689,7 +1767,13 @@ struct i40e_aq_get_phy_abilities_resp {
__le32 eeer_val;
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 mod_type_ext;
+ u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
u8 qualified_module_count;
@@ -1711,7 +1795,12 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 reserved[2];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
@@ -1791,16 +1880,24 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
#define I40E_AQ_LINK_FORCED_40G 0x10
+/* 25G Error Codes */
+#define I40E_AQ_25G_NO_ERR 0X00
+#define I40E_AQ_25G_NOT_PRESENT 0X01
+#define I40E_AQ_25G_NVM_CRC_ERR 0X02
+#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03
+#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
+#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 external_power_ability;
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
+#define I40E_AQ_PWR_CLASS_MASK 0x03
u8 reserved[4];
};
@@ -1857,7 +1954,10 @@ struct i40e_aqc_set_phy_debug {
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+/* Disable link manageability on a single port */
#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
+/* Disabling link manageability on all ports requires both bits 4 and 5 */
+#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20
u8 reserved[15];
};
@@ -2296,6 +2396,46 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
/* tunnel key structure 0x0B10 */
struct i40e_aqc_tunnel_key_structure {
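One note on the new RSS descriptors above: vsi_id packs a 10-bit firmware VSI index behind a valid bit, following the header's usual SHIFT/MASK convention. A short sketch of composing that word, mirroring what the i40e_common.c wrappers in this commit do (ixl_pack_rss_lut_vsi_id() is an illustrative helper, not part of the change):

/* Sketch: packing the vsi_id word of struct i40e_aqc_get_set_rss_lut. */
static inline __le16
ixl_pack_rss_lut_vsi_id(u16 vsi_id)
{
	u16 v;

	v = (vsi_id << I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
	    I40E_AQC_SET_RSS_LUT_VSI_ID_MASK;
	v |= I40E_AQC_SET_RSS_LUT_VSI_VALID;
	return (CPU_TO_LE16(v));
}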
diff --git a/sys/dev/ixl/i40e_common.c b/sys/dev/ixl/i40e_common.c
index 5a8a8aec73ff..79229752ca29 100644
--- a/sys/dev/ixl/i40e_common.c
+++ b/sys/dev/ixl/i40e_common.c
@@ -64,8 +64,24 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_20G_KR2:
case I40E_DEV_ID_20G_KR2_A:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
hw->mac.type = I40E_MAC_XL710;
break;
+ case I40E_DEV_ID_X722_A0:
+ case I40E_DEV_ID_KX_X722:
+ case I40E_DEV_ID_QSFP_X722:
+ case I40E_DEV_ID_SFP_X722:
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_SFP_I_X722:
+ hw->mac.type = I40E_MAC_X722;
+ break;
+ case I40E_DEV_ID_X722_VF:
+ case I40E_DEV_ID_X722_VF_HV:
+ case I40E_DEV_ID_X722_A0_VF:
+ hw->mac.type = I40E_MAC_X722_VF;
+ break;
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
hw->mac.type = I40E_MAC_VF;
@@ -341,14 +357,15 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
/* the most we could have left is 16 bytes, pad with zeros */
if (i < len) {
char d_buf[16];
- int j;
+ int j, i_sav;
+ i_sav = i;
memset(d_buf, 0, sizeof(d_buf));
for (j = 0; i < len; j++, i++)
d_buf[j] = buf[i];
i40e_debug(hw, mask,
"\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- i, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
+ i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
d_buf[4], d_buf[5], d_buf[6], d_buf[7],
d_buf[8], d_buf[9], d_buf[10], d_buf[11],
d_buf[12], d_buf[13], d_buf[14], d_buf[15]);
@@ -400,6 +417,164 @@ enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set TRUE, for VSI table set FALSE
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set TRUE to set the table, FALSE to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+static enum i40e_status_code i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_lut);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set TRUE, for VSI table set FALSE
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ FALSE);
+}
+
+/**
+ * i40e_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set TRUE, for VSI table set FALSE
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, TRUE);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set TRUE to set the key, FALSE to get the key
+ *
+ * Internal function to get or set the RSS key per VSI
+ **/
+static enum i40e_status_code i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_key *cmd_resp =
+ (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_key);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ **/
+enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, FALSE);
+}
+
+/**
+ * i40e_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, TRUE);
+}
+
/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
* packet type.
@@ -563,7 +738,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
/* Non Tunneled IPv6 */
I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(91),
I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
@@ -813,6 +988,7 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
switch (hw->mac.type) {
case I40E_MAC_XL710:
+ case I40E_MAC_X722:
break;
default:
return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -832,6 +1008,9 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
else
hw->pf_id = (u8)(func_rid & 0x7);
+ if (hw->mac.type == I40E_MAC_X722)
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+
status = i40e_init_nvm(hw);
return status;
}
@@ -1104,8 +1283,7 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- /* It can take upto 15 secs for GRST steady state */
- grst_del = grst_del * 20; /* bump it to 16 secs max to be safe */
+ grst_del = grst_del * 20;
for (cnt = 0; cnt < grst_del; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
@@ -1452,8 +1630,10 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
status = I40E_ERR_UNKNOWN_PHY;
- if (report_init)
+ if (report_init) {
hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
+ hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
+ }
return status;
}
@@ -1997,15 +2177,45 @@ enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw,
}
/**
+ * i40e_aq_clear_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = CPU_TO_LE16(0);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_unicast_promiscuous
* @hw: pointer to the hw struct
* @seid: vsi number
* @set: set unicast promiscuous enable/disable
* @cmd_details: pointer to command details structure or NULL
+ * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
**/
enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
u16 seid, bool set,
- struct i40e_asq_cmd_details *cmd_details)
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -2018,8 +2228,9 @@ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
- (hw->aq.api_maj_ver > 1))
+ if (rx_only_promisc &&
+ (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+ (hw->aq.api_maj_ver > 1)))
flags |= I40E_AQC_SET_VSI_PROMISC_TX;
}
@@ -2192,7 +2403,7 @@ enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
i40e_aqc_opc_set_vsi_promiscuous_modes);
if (enable)
flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
-
+
cmd->promiscuous_flags = CPU_TO_LE16(flags);
cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_VLAN);
cmd->seid = CPU_TO_LE16(seid);
@@ -2826,10 +3037,7 @@ enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
u16 *rules_used, u16 *rules_free)
{
/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
- if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
- if (!rule_id)
- return I40E_ERR_PARAM;
- } else {
+ if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
/* count and mr_list shall be valid for rule_type INGRESS VLAN
* mirroring. For other rule_type, count and rule_type should
* not matter.
@@ -3026,67 +3234,6 @@ enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
}
/**
- * i40e_aq_get_hmc_resource_profile
- * @hw: pointer to the hw struct
- * @profile: type of profile the HMC is to be set as
- * @pe_vf_enabled_count: the number of PE enabled VFs the system has
- * @cmd_details: pointer to command details structure or NULL
- *
- * query the HMC profile of the device.
- **/
-enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile *profile,
- u8 *pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aq_get_set_hmc_resource_profile *resp =
- (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
- enum i40e_status_code status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_query_hmc_resource_profile);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- *profile = (enum i40e_aq_hmc_profile)(resp->pm_profile &
- I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK);
- *pe_vf_enabled_count = resp->pe_vf_enabled &
- I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK;
-
- return status;
-}
-
-/**
- * i40e_aq_set_hmc_resource_profile
- * @hw: pointer to the hw struct
- * @profile: type of profile the HMC is to be set as
- * @pe_vf_enabled_count: the number of PE enabled VFs the system has
- * @cmd_details: pointer to command details structure or NULL
- *
- * set the HMC profile of the device.
- **/
-enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile profile,
- u8 pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aq_get_set_hmc_resource_profile *cmd =
- (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
- enum i40e_status_code status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_hmc_resource_profile);
-
- cmd->pm_profile = (u8)profile;
- cmd->pe_vf_enabled = pe_vf_enabled_count;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_request_resource
* @hw: pointer to the hw struct
* @resource: resource id
@@ -3603,6 +3750,26 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
"HW Capability: wr_csr_prot = 0x%llX\n\n",
(p->wr_csr_prot & 0xffff));
break;
+ case I40E_AQ_CAP_ID_NVM_MGMT:
+ if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
+ p->sec_rev_disabled = TRUE;
+ if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
+ p->update_disabled = TRUE;
+ break;
+ case I40E_AQ_CAP_ID_WOL_AND_PROXY:
+ hw->num_wol_proxy_filters = (u16)number;
+ hw->wol_proxy_vsi_seid = (u16)logical_id;
+ p->apm_wol_support = phys_id & I40E_WOL_SUPPORT_MASK;
+ if (phys_id & I40E_ACPI_PROGRAMMING_METHOD_MASK)
+ p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK;
+ else
+ p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
+ p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: WOL proxy filters = %d\n",
+ hw->num_wol_proxy_filters);
+ break;
default:
break;
}
@@ -5211,6 +5378,35 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
}
/**
+ * i40e_fix_up_geneve_vni - adjust Geneve VNI for HW issue
+ * @filters: list of cloud filters
+ * @filter_count: length of list
+ *
+ * There's an issue in the device where the Geneve VNI layout needs
+ * to be shifted 1 byte over from the VxLAN VNI
+ **/
+static void i40e_fix_up_geneve_vni(
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aqc_add_remove_cloud_filters_element_data *f = filters;
+ int i;
+
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (LE16_TO_CPU(f[i].flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = LE32_TO_CPU(f[i].tenant_id);
+ f[i].tenant_id = CPU_TO_LE32(ti << 8);
+ }
+ }
+}
+
+/**
* i40e_aq_add_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to add cloud filters from
@@ -5230,8 +5426,8 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- u16 buff_len;
enum i40e_status_code status;
+ u16 buff_len;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
@@ -5242,6 +5438,8 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
cmd->num_filters = filter_count;
cmd->seid = CPU_TO_LE16(seid);
+ i40e_fix_up_geneve_vni(filters, filter_count);
+
status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
return status;
@@ -5279,6 +5477,8 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
cmd->num_filters = filter_count;
cmd->seid = CPU_TO_LE16(seid);
+ i40e_fix_up_geneve_vni(filters, filter_count);
+
status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
return status;
@@ -6263,3 +6463,158 @@ enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
I40E_SUCCESS, NULL, 0, NULL);
}
+
+/**
+ * i40e_aq_set_arp_proxy_config
+ * @hw: pointer to the HW structure
+ * @proxy_config - pointer to proxy config command table struct
+ * @cmd_details: pointer to command details
+ *
+ * Set ARP offload parameters from pre-populated
+ * i40e_aqc_arp_proxy_data struct
+ **/
+enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
+ struct i40e_aqc_arp_proxy_data *proxy_config,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ if (!proxy_config)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
+
+ desc.params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config));
+ desc.params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config));
+
+ status = i40e_asq_send_command(hw, &desc, proxy_config,
+ sizeof(struct i40e_aqc_arp_proxy_data),
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_ns_proxy_table_entry
+ * @hw: pointer to the HW structure
+ * @ns_proxy_table_entry: pointer to NS table entry command struct
+ * @cmd_details: pointer to command details
+ *
+ * Set IPv6 Neighbor Solicitation (NS) protocol offload parameters
+ * from pre-populated i40e_aqc_ns_proxy_data struct
+ **/
+enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
+ struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ if (!ns_proxy_table_entry)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_ns_proxy_table_entry);
+
+ desc.params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry));
+ desc.params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry));
+
+ status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
+ sizeof(struct i40e_aqc_ns_proxy_data),
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_clear_wol_filter
+ * @hw: pointer to the hw struct
+ * @filter_index: index of filter to modify (0-7)
+ * @filter: buffer containing filter to be set
+ * @set_filter: TRUE to set filter, FALSE to clear filter
+ * @no_wol_tco: if TRUE, pass through packets cannot cause wake-up
+ * if FALSE, pass through packets may cause wake-up
+ * @filter_valid: TRUE if filter action is valid
+ * @no_wol_tco_valid: TRUE if no WoL in TCO traffic action valid
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear WoL filter for port attached to the PF
+ **/
+enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
+ u8 filter_index,
+ struct i40e_aqc_set_wol_filter_data *filter,
+ bool set_filter, bool no_wol_tco,
+ bool filter_valid, bool no_wol_tco_valid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_wol_filter *cmd =
+ (struct i40e_aqc_set_wol_filter *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 cmd_flags = 0;
+ u16 valid_flags = 0;
+ u16 buff_len = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_wol_filter);
+
+ if (filter_index >= I40E_AQC_MAX_NUM_WOL_FILTERS)
+ return I40E_ERR_PARAM;
+ cmd->filter_index = CPU_TO_LE16(filter_index);
+
+ if (set_filter) {
+ if (!filter)
+ return I40E_ERR_PARAM;
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER;
+ buff_len = sizeof(*filter);
+ }
+ if (no_wol_tco)
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+
+ if (filter_valid)
+ valid_flags |= I40E_AQC_SET_WOL_FILTER_ACTION_VALID;
+ if (no_wol_tco_valid)
+ valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
+ cmd->valid_flags = CPU_TO_LE16(valid_flags);
+
+ cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter));
+ cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter));
+
+ status = i40e_asq_send_command(hw, &desc, filter,
+ buff_len, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_wake_event_reason
+ * @hw: pointer to the hw struct
+ * @wake_reason: return value, index of matching filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get information for the reason of a Wake Up event
+ **/
+enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
+ u16 *wake_reason,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_wake_reason_completion *resp =
+ (struct i40e_aqc_get_wake_reason_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_wake_reason);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS)
+ *wake_reason = LE16_TO_CPU(resp->wake_reason);
+
+ return status;
+}
+
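The RSS additions above give the driver an AdminQ path for programming the hash key and lookup table rather than writing the PF's hash registers directly. A hedged usage sketch; ixl_set_rss_aq(), the 4-queue spread, and the 128-byte VSI LUT size are illustrative assumptions:

/* Sketch: programming RSS through the new AQ wrappers. The seed
 * must supply at least the 40-byte standard key defined by
 * struct i40e_aqc_get_set_rss_key_data. */
static enum i40e_status_code
ixl_set_rss_aq(struct i40e_hw *hw, u16 vsi_id, const u8 *seed)
{
	struct i40e_aqc_get_set_rss_key_data key;
	u8 lut[128];	/* assumed VSI LUT size */
	enum i40e_status_code status;
	int i;

	memset(&key, 0, sizeof(key));
	memcpy(key.standard_rss_key, seed, sizeof(key.standard_rss_key));
	status = i40e_aq_set_rss_key(hw, vsi_id, &key);
	if (status)
		return (status);

	/* Spread a hypothetical 4 queues evenly across the table. */
	for (i = 0; i < (int)sizeof(lut); i++)
		lut[i] = i % 4;
	return (i40e_aq_set_rss_lut(hw, vsi_id, FALSE, lut, sizeof(lut)));
}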
diff --git a/sys/dev/ixl/i40e_devids.h b/sys/dev/ixl/i40e_devids.h
index a898f6d53d49..5725cb96754c 100644
--- a/sys/dev/ixl/i40e_devids.h
+++ b/sys/dev/ixl/i40e_devids.h
@@ -50,8 +50,20 @@
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_20G_KR2_A 0x1588
#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_25G_B 0x158A
+#define I40E_DEV_ID_25G_SFP28 0x158B
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_X722_A0 0x374C
+#define I40E_DEV_ID_X722_A0_VF 0x374D
+#define I40E_DEV_ID_KX_X722 0x37CE
+#define I40E_DEV_ID_QSFP_X722 0x37CF
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_X722_VF 0x37CD
+#define I40E_DEV_ID_X722_VF_HV 0x37D9
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
diff --git a/sys/dev/ixl/i40e_nvm.c b/sys/dev/ixl/i40e_nvm.c
index 4556d1734f81..151691ec97e2 100644
--- a/sys/dev/ixl/i40e_nvm.c
+++ b/sys/dev/ixl/i40e_nvm.c
@@ -220,7 +220,15 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_word_aq(hw, offset, data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ }
return ret_code;
}
@@ -238,7 +246,10 @@ enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ ret_code = i40e_read_nvm_word_aq(hw, offset, data);
+ else
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
return ret_code;
}
@@ -330,7 +341,10 @@ enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
+ else
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
return ret_code;
}
@@ -350,7 +364,16 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+ data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ }
return ret_code;
}
@@ -834,10 +857,10 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
/* early check for status command and debug msgs */
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
- i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
i40e_nvm_update_state_str[upd_cmd],
hw->nvmupd_state,
- hw->aq.nvm_release_on_done,
+ hw->nvm_release_on_done, hw->nvm_wait_opcode,
cmd->command, cmd->config, cmd->offset, cmd->data_size);
if (upd_cmd == I40E_NVMUPD_INVALID) {
@@ -851,7 +874,18 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
* going into the state machine
*/
if (upd_cmd == I40E_NVMUPD_STATUS) {
+ if (!cmd->data_size) {
+ *perrno = -EFAULT;
+ return I40E_ERR_BUF_TOO_SHORT;
+ }
+
bytes[0] = hw->nvmupd_state;
+
+ if (cmd->data_size >= 4) {
+ bytes[1] = 0;
+ *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ }
+
return I40E_SUCCESS;
}
@@ -870,6 +904,14 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
case I40E_NVMUPD_STATE_INIT_WAIT:
case I40E_NVMUPD_STATE_WRITE_WAIT:
+ /* if we need to stop waiting for an event, clear
+ * the wait info and return before doing anything else
+ */
+ if (cmd->offset == 0xffff) {
+ i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+ return I40E_SUCCESS;
+ }
+
status = I40E_ERR_NOT_READY;
*perrno = -EBUSY;
break;
@@ -942,7 +984,8 @@ static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
if (status) {
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = TRUE;
+ hw->nvm_release_on_done = TRUE;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -958,7 +1001,8 @@ static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
if (status) {
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = TRUE;
+ hw->nvm_release_on_done = TRUE;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -971,10 +1015,12 @@ static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
- if (status)
+ if (status) {
i40e_release_nvm(hw);
- else
+ } else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
}
break;
@@ -992,7 +1038,8 @@ static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
-EIO;
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = TRUE;
+ hw->nvm_release_on_done = TRUE;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -1087,8 +1134,10 @@ retry:
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
- if (!status)
+ if (!status) {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
break;
case I40E_NVMUPD_WRITE_LCB:
@@ -1100,7 +1149,8 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
- hw->aq.nvm_release_on_done = TRUE;
+ hw->nvm_release_on_done = TRUE;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
@@ -1115,6 +1165,7 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
}
break;
@@ -1129,7 +1180,8 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
- hw->aq.nvm_release_on_done = TRUE;
+ hw->nvm_release_on_done = TRUE;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
@@ -1179,6 +1231,38 @@ retry:
}
/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+{
+ if (opcode == hw->nvm_wait_opcode) {
+
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = FALSE;
+ }
+ hw->nvm_wait_opcode = 0;
+
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+/**
* i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -1340,6 +1424,12 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
+ /* should we wait for a followup event? */
+ if (cmd->offset) {
+ hw->nvm_wait_opcode = cmd->offset;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+
return status;
}
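The shape behind the NVM read changes above: on parts that set I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE (the X722 family), word and buffer reads go through the AdminQ and must hold the NVM resource, while older parts keep the shadow-RAM control path. A compact sketch of that pattern (ixl_read_nvm_word_locked() is illustrative; it mirrors i40e_read_nvm_word() above):

/* Sketch: flag-gated NVM word read with explicit ownership. */
static enum i40e_status_code
ixl_read_nvm_word_locked(struct i40e_hw *hw, u16 offset, u16 *data)
{
	enum i40e_status_code ret;

	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
		ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (ret)
			return (ret);
		ret = i40e_read_nvm_word_aq(hw, offset, data);
		i40e_release_nvm(hw);	/* always pair the acquire */
		return (ret);
	}
	return (i40e_read_nvm_word_srctl(hw, offset, data));
}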
diff --git a/sys/dev/ixl/i40e_osdep.c b/sys/dev/ixl/i40e_osdep.c
index 9b77627942e7..2a771515a9ac 100644
--- a/sys/dev/ixl/i40e_osdep.c
+++ b/sys/dev/ixl/i40e_osdep.c
@@ -32,7 +32,7 @@
******************************************************************************/
/*$FreeBSD$*/
-#include <machine/stdarg.h>
+#include <sys/limits.h>
#include "ixl.h"
@@ -137,7 +137,7 @@ void
i40e_init_spinlock(struct i40e_spinlock *lock)
{
mtx_init(&lock->mutex, "mutex",
- MTX_NETWORK_LOCK, MTX_DEF | MTX_DUPOK);
+ "ixl spinlock", MTX_DEF | MTX_DUPOK);
}
void
@@ -159,11 +159,34 @@ i40e_destroy_spinlock(struct i40e_spinlock *lock)
mtx_destroy(&lock->mutex);
}
+void
+i40e_msec_pause(int msecs)
+{
+ int ticks_to_pause = (msecs * hz) / 1000;
+ int start_ticks = ticks;
+
+ if (cold || SCHEDULER_STOPPED()) {
+ i40e_msec_delay(msecs);
+ return;
+ }
+
+ while (1) {
+ kern_yield(PRI_USER);
+ int yielded_ticks = ticks - start_ticks;
+ if (yielded_ticks > ticks_to_pause)
+ break;
+ else if (yielded_ticks < 0
+ && (yielded_ticks + INT_MAX + 1 > ticks_to_pause)) {
+ break;
+ }
+ }
+}
+
/*
* Helper function for debug statement printing
*/
void
-i40e_debug_d(struct i40e_hw *hw, enum i40e_debug_mask mask, char *fmt, ...)
+i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask, char *fmt, ...)
{
va_list args;
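i40e_msec_pause() replaces a busy DELAY() with kern_yield() for sleepable contexts; the second break in its loop handles the ticks counter wrapping past INT_MAX mid-wait. A usage sketch of the kind of poll loop it is meant for (the register read and completion condition are illustrative, not the driver's actual code):

/* Sketch: polling for reset completion without spinning the CPU.
 * i40e_msec_pause() itself falls back to i40e_msec_delay() when
 * cold or when the scheduler is stopped. */
static int
ixl_wait_for_reset_done(struct i40e_hw *hw)
{
	int i;

	for (i = 0; i < 100; i++) {	/* ~1 second budget */
		if ((rd32(hw, I40E_GLGEN_RSTAT) &
		     I40E_GLGEN_RSTAT_DEVSTATE_MASK) == 0)
			return (0);
		i40e_msec_pause(10);
	}
	return (ETIMEDOUT);
}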
diff --git a/sys/dev/ixl/i40e_osdep.h b/sys/dev/ixl/i40e_osdep.h
index f5ca7f461d30..184f125988da 100644
--- a/sys/dev/ixl/i40e_osdep.h
+++ b/sys/dev/ixl/i40e_osdep.h
@@ -55,7 +55,7 @@
#include <dev/pci/pcireg.h>
#define i40e_usec_delay(x) DELAY(x)
-#define i40e_msec_delay(x) DELAY(1000*(x))
+#define i40e_msec_delay(x) DELAY(1000 * (x))
#define DBG 0
#define MSGOUT(S, A, B) printf(S "\n", A, B)
@@ -75,12 +75,13 @@
#define DEBUGOUT7(S,A,B,C,D,E,F,G)
#endif
-#define UNREFERENCED_XPARAMETER
+/* Remove unused shared code macros */
#define UNREFERENCED_PARAMETER(_p)
#define UNREFERENCED_1PARAMETER(_p)
#define UNREFERENCED_2PARAMETER(_p, _q)
#define UNREFERENCED_3PARAMETER(_p, _q, _r)
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t)
#define STATIC static
#define INLINE inline
@@ -110,9 +111,6 @@
#define FIELD_SIZEOF(x, y) (sizeof(((x*)0)->y))
-#define BIT(a) (1UL << (a))
-#define BIT_ULL(a) (1ULL << (a))
-
typedef uint8_t u8;
typedef int8_t s8;
typedef uint16_t u16;
@@ -180,10 +178,13 @@ void i40e_write_pci_cfg(struct i40e_hw *, u32, u16);
** i40e_debug - OS dependent version of shared code debug printing
*/
enum i40e_debug_mask;
-#define i40e_debug(h, m, s, ...) i40e_debug_d(h, m, s, ##__VA_ARGS__)
-extern void i40e_debug_d(struct i40e_hw *hw, enum i40e_debug_mask mask,
+#define i40e_debug(h, m, s, ...) i40e_debug_shared(h, m, s, ##__VA_ARGS__)
+extern void i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask,
char *fmt_str, ...);
+/* Non-busy-wait that uses kern_yield() */
+void i40e_msec_pause(int);
+
/*
** This hardware supports either 16 or 32 byte rx descriptors;
** the driver only uses the 32 byte kind.
diff --git a/sys/dev/ixl/i40e_prototype.h b/sys/dev/ixl/i40e_prototype.h
index c53945fa30c2..01d11d6335b2 100644
--- a/sys/dev/ixl/i40e_prototype.h
+++ b/sys/dev/ixl/i40e_prototype.h
@@ -77,6 +77,17 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+
+enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
@@ -107,6 +118,8 @@ enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
bool qualified_modules, bool report_init,
struct i40e_aq_get_phy_abilities_resp *abilities,
@@ -149,7 +162,8 @@ enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 vsi_id, bool set_filter,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc);
enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
@@ -329,10 +343,6 @@ enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw,
u8 tcmap, bool request, u8 *tcmap_ret,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile *profile,
- u8 *pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
struct i40e_hw *hw, u16 seid,
struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
@@ -343,10 +353,6 @@ enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile profile,
- u8 pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_bw,
struct i40e_asq_cmd_details *cmd_details);
@@ -386,7 +392,6 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
u16 vsi,
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
u8 filter_count);
-
enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1);
@@ -448,6 +453,7 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw);
@@ -496,9 +502,26 @@ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
+ struct i40e_aqc_arp_proxy_data *proxy_config,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
+ struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
+ u8 filter_index,
+ struct i40e_aqc_set_wol_filter_data *filter,
+ bool set_filter, bool no_wol_tco,
+ bool filter_valid, bool no_wol_tco_valid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
+ u16 *wake_reason,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 *value);
enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
+enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
#endif /* _I40E_PROTOTYPE_H_ */
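One API change worth flagging in these prototypes: i40e_aq_set_vsi_unicast_promiscuous() gains an rx_only_promisc argument, which on firmware API 1.5 and later gates the extra I40E_AQC_SET_VSI_PROMISC_TX flag. A hedged example of the updated call shape (ixl_enable_promisc() is illustrative):

/* Sketch: enabling promiscuous reception on a VSI with the revised
 * prototype. The final TRUE is the new rx_only_promisc argument. */
static enum i40e_status_code
ixl_enable_promisc(struct i40e_hw *hw, u16 seid)
{
	enum i40e_status_code status;

	status = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
	    TRUE, NULL, TRUE);
	if (status)
		return (status);
	return (i40e_aq_set_vsi_multicast_promiscuous(hw, seid, TRUE, NULL));
}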
diff --git a/sys/dev/ixl/i40e_register.h b/sys/dev/ixl/i40e_register.h
index 9fbfc7b826c2..fb41ea23a3d0 100644
--- a/sys/dev/ixl/i40e_register.h
+++ b/sys/dev/ixl/i40e_register.h
@@ -3398,4 +3398,1966 @@
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
+
+#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
+#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
+#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
+#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */
+#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2
+#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4
+#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8
+#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
+#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */
+#define I40E_MNGSB_FDS_START_BC_SHIFT 0
+#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
+#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16
+#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
+
+#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
+#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
+
+#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+
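[Editor's note] Decoding a multi-field register such as I40E_GLCM_LAN_CACHESIZE follows directly from these SHIFT/MASK pairs: I40E_MASK() expands to the field mask already shifted into position, so a field is read as (value & MASK) >> SHIFT. A minimal sketch (not part of this commit), assuming the rd32() accessor:

	u32 cachesize = rd32(hw, I40E_GLCM_LAN_CACHESIZE);
	/* Mask first, then shift each field down to bit 0. */
	u32 words = (cachesize & I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK) >>
	    I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT;
	u32 sets = (cachesize & I40E_GLCM_LAN_CACHESIZE_SETS_MASK) >>
	    I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT;
	u32 ways = (cachesize & I40E_GLCM_LAN_CACHESIZE_WAYS_MASK) >>
	    I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT;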
+#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
+#define I40E_GL_FWSTS_FWROWD_SHIFT 8
+#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
+#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
+#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
+#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
+#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
+#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
+#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */
+#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0
+#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
+#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QBASE_MAX_INDEX 127
+#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0
+#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11
+#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
+#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */
+#define I40E_GLNVM_AL_REQ_POR_SHIFT 0
+#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
+#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2
+#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
+#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3
+#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
+#define I40E_GLNVM_AL_REQ_PE_SHIFT 4
+#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
+#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
+#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
+#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
+#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
+#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
+#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
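[Editor's note] Firmware reports reset completion through the *_DONE bits of I40E_GLNVM_ULD, so reset paths typically poll this register until the relevant bits latch. A hedged sketch (not part of this commit; the shared code's own reset routine may differ), assuming rd32() and i40e_msec_delay() from i40e_osdep.h:

	/* Poll until both core and global reset report done, or time out. */
	static enum i40e_status_code
	ixl_wait_reset_done(struct i40e_hw *hw)
	{
		u32 done = I40E_GLNVM_ULD_CORER_DONE_MASK |
			   I40E_GLNVM_ULD_GLOBR_DONE_MASK;
		int i;

		for (i = 0; i < 200; i++) {
			if ((rd32(hw, I40E_GLNVM_ULD) & done) == done)
				return I40E_SUCCESS;
			i40e_msec_delay(10);
		}
		return I40E_ERR_RESET_FAILED;
	}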
+#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
+#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
+#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
+#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
+#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT 31
+#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
+
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
+#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_FD_MSK_MAX_INDEX 1
+#define I40E_GLQF_FD_MSK_MASK_SHIFT 0
+#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT)
+#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HASH_INSET_MAX_INDEX 1
+#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0
+#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT)
+#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HASH_MSK_MAX_INDEX 1
+#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0
+#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT)
+#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16
+#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT)
+#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_ORT_MAX_INDEX 63
+#define I40E_GLQF_ORT_PIT_INDX_SHIFT 0
+#define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT)
+#define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5
+#define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT)
+#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7
+#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT)
+#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */
+#define I40E_GLQF_PIT_MAX_INDEX 23
+#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
+#define I40E_GLQF_PIT_FSIZE_SHIFT 5
+#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT)
+#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10
+#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT)
+#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
+#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
+#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
+/* Redefined for X722 family */
+#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
+#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
+#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HKEY_MAX_INDEX 12
+#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
+#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HLUT_MAX_INDEX 15
+#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
+#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
+#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
+#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
+#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
+#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
+#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
+#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
+#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
+#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+
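/*
 * Illustrative sketch, not part of this change: every register above is
 * paired with SHIFT/MASK defines that extract one field from a 32-bit
 * read. Assumes an initialized struct i40e_hw and the driver's rd32()
 * register-read wrapper from i40e_osdep.h.
 */
static inline u16
i40e_vfpe_cqp_major_code(struct i40e_hw *hw)
{
	u32 reg = rd32(hw, I40E_VFPE_CQPERRCODES1);

	/* Mask off the field, then shift it down to bit 0 */
	return (u16)((reg & I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK) >>
	    I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT);
}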
#endif /* _I40E_REGISTER_H_ */
diff --git a/sys/dev/ixl/i40e_type.h b/sys/dev/ixl/i40e_type.h
index 57351cb0bb04..fa8c7192e99f 100644
--- a/sys/dev/ixl/i40e_type.h
+++ b/sys/dev/ixl/i40e_type.h
@@ -43,7 +43,6 @@
#include "i40e_lan_hmc.h"
#include "i40e_devids.h"
-#define UNREFERENCED_XPARAMETER
#define BIT(a) (1UL << (a))
#define BIT_ULL(a) (1ULL << (a))
@@ -147,8 +146,10 @@ enum i40e_debug_mask {
#define I40E_PCI_LINK_SPEED_5000 0x2
#define I40E_PCI_LINK_SPEED_8000 0x3
-#define I40E_MDIO_STCODE 0
-#define I40E_MDIO_OPCODE_ADDRESS 0
+#define I40E_MDIO_STCODE I40E_MASK(0, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_OPCODE_ADDRESS I40E_MASK(0, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
@@ -177,6 +178,7 @@ enum i40e_memcpy_type {
I40E_DMA_TO_NONDMA
};
+#define I40E_FW_API_VERSION_MINOR_X722 0x0005
#define I40E_FW_API_VERSION_MINOR_X710 0x0005
@@ -193,6 +195,8 @@ enum i40e_mac_type {
I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
+ I40E_MAC_X722,
+ I40E_MAC_X722_VF,
I40E_MAC_GENERIC,
};
@@ -275,50 +279,61 @@ struct i40e_link_status {
#define I40E_MODULE_TYPE_1000BASE_T 0x08
};
-enum i40e_aq_capabilities_phy_type {
- I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
- I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
- I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
- I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
- I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
- I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
- I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
- I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
- I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
- I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
- I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
- I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
- I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
- I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
- I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
- I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
- I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
- I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
- I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
- I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
- I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
- I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
- I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
- I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
-};
-
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
bool get_link_info;
enum i40e_media_type media_type;
/* all the phy types the NVM is capable of */
- u32 phy_types;
-};
-
+ u64 phy_types;
+};
+
+#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII)
+#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX)
+#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4)
+#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR)
+#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4)
+#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI)
+#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI)
+#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI)
+#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI)
+#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR)
+#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR)
+#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4)
+#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX)
+#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
+#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
+#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_KR + 32)
+#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_CR + 32)
+#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_SR + 32)
+#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_LR + 32)
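/*
 * Illustrative sketch, not part of this change: phy_types is now a u64,
 * so the extended 25G entries (offset by 32, per the defines above) are
 * tested the same way as the legacy 32-bit types. hw is assumed to be a
 * fully initialized struct i40e_hw.
 */
static inline bool
i40e_nvm_supports_25g_sr(struct i40e_hw *hw)
{
	return ((hw->phy.phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR) != 0);
}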
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
+enum i40e_acpi_programming_method {
+ I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
+ I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
+};
+
+#define I40E_WOL_SUPPORT_MASK 1
+#define I40E_ACPI_PROGRAMMING_METHOD_MASK (1 << 1)
+#define I40E_PROXY_SUPPORT_MASK (1 << 2)
+
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
u32 switch_mode;
@@ -348,6 +363,11 @@ struct i40e_hw_capabilities {
#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
#define I40E_FLEX10_STATUS_VC_MODE 0x2
+ bool sec_rev_disabled;
+ bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -377,6 +397,9 @@ struct i40e_hw_capabilities {
u32 enabled_tcmap;
u32 maxtc;
u64 wr_csr_prot;
+ bool apm_wol_support;
+ enum i40e_acpi_programming_method acpi_prog_method;
+ bool proxy_support;
};
struct i40e_mac_info {
@@ -622,6 +645,8 @@ struct i40e_hw {
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
struct i40e_virt_mem nvm_buff;
+ bool nvm_release_on_done;
+ u16 nvm_wait_opcode;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -634,6 +659,10 @@ struct i40e_hw {
struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
+ /* WoL and proxy support */
+ u16 num_wol_proxy_filters;
+ u16 wol_proxy_vsi_seid;
+
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
u64 flags;
@@ -644,7 +673,8 @@ struct i40e_hw {
static INLINE bool i40e_is_vf(struct i40e_hw *hw)
{
- return hw->mac.type == I40E_MAC_VF;
+ return (hw->mac.type == I40E_MAC_VF ||
+ hw->mac.type == I40E_MAC_X722_VF);
}
struct i40e_driver_version {
@@ -748,7 +778,7 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
- I40E_RX_DESC_STATUS_RESERVED1_SHIFT = 8,
+ I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
@@ -756,7 +786,7 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
@@ -1134,6 +1164,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
struct i40e_nop_desc {
__le64 rsvd;
__le64 dtype_cmd;
@@ -1170,15 +1202,24 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Values 0-30 are reserved for future use */
+ /* Note: Values 0-28 are reserved for future use.
+	 * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- /* Note: Value 32 is reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-40 are reserved for future use */
+ /* Note: Values 37-38 are reserved for future use.
+	 * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
@@ -1234,6 +1275,10 @@ enum i40e_filter_program_desc_pcmd {
#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
+
#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
@@ -1544,4 +1589,37 @@ struct i40e_lldp_variables {
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_L3_SRC_SHIFT 47
+#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
+#define I40E_L3_V6_SRC_SHIFT 43
+#define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT)
+#define I40E_L3_DST_SHIFT 35
+#define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT)
+#define I40E_L3_V6_DST_SHIFT 35
+#define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT)
+#define I40E_L4_SRC_SHIFT 34
+#define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT)
+#define I40E_L4_DST_SHIFT 33
+#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT)
+#define I40E_VERIFY_TAG_SHIFT 31
+#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT)
+
+#define I40E_FLEX_50_SHIFT 13
+#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT)
+#define I40E_FLEX_51_SHIFT 12
+#define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT)
+#define I40E_FLEX_52_SHIFT 11
+#define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT)
+#define I40E_FLEX_53_SHIFT 10
+#define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT)
+#define I40E_FLEX_54_SHIFT 9
+#define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT)
+#define I40E_FLEX_55_SHIFT 8
+#define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT)
+#define I40E_FLEX_56_SHIFT 7
+#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
+#define I40E_FLEX_57_SHIFT 6
+#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
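/*
 * Illustrative sketch, not part of this change: an input set is a
 * 64-bit field selector built by OR-ing the masks above. For example,
 * selecting the IPv4 source address plus the L4 source and destination
 * ports would look like:
 */
static inline u64
i40e_example_l3l4_input_set(void)
{
	return (I40E_L3_SRC_MASK | I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
}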
#endif /* _I40E_TYPE_H_ */
diff --git a/sys/dev/ixl/i40e_virtchnl.h b/sys/dev/ixl/i40e_virtchnl.h
index 7939a0dc5732..4ebe578d1972 100644
--- a/sys/dev/ixl/i40e_virtchnl.h
+++ b/sys/dev/ixl/i40e_virtchnl.h
@@ -88,7 +88,12 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17,
+ I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
+
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -162,6 +167,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF		0x00080000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
@@ -170,8 +176,8 @@ struct i40e_virtchnl_vf_resource {
u16 max_mtu;
u32 vf_offload_flags;
- u32 max_fcoe_contexts;
- u32 max_fcoe_filters;
+ u32 rss_key_size;
+ u32 rss_lut_size;
struct i40e_virtchnl_vsi_resource vsi_res[1];
};
@@ -330,6 +336,39 @@ struct i40e_virtchnl_promisc_info {
* PF replies with struct i40e_eth_stats in an external buffer.
*/
+/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
+ * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct i40e_virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+struct i40e_virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+	u8 lut[1];	/* RSS lookup table */
+};
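/*
 * Illustrative sketch, not part of this change: building a
 * CONFIG_RSS_KEY message sized for the negotiated key length. The
 * helper name and the malloc/M_DEVBUF allocation style are assumptions
 * for illustration; the trailing key[1] member already accounts for one
 * byte of the variable-length key.
 */
static struct i40e_virtchnl_rss_key *
ixl_build_rss_key_msg(u16 vsi_id, const u8 *key, u16 key_len, int *msg_len)
{
	struct i40e_virtchnl_rss_key *msg;

	*msg_len = sizeof(*msg) + key_len - 1;
	msg = malloc(*msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (msg == NULL)
		return (NULL);
	msg->vsi_id = vsi_id;
	msg->key_len = key_len;
	memcpy(msg->key, key, key_len);
	/* Sent to the PF with opcode I40E_VIRTCHNL_OP_CONFIG_RSS_KEY */
	return (msg);
}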
+
+/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * I40E_VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h.
+ */
+struct i40e_virtchnl_rss_hena {
+ u64 hena;
+};
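/*
 * Illustrative sketch, not part of this change: hena is a bitmask
 * indexed by i40e_filter_pctype, so enabling RSS hashing for IPv4 TCP
 * and UDP traffic only would be expressed as:
 */
static inline u64
i40e_example_rss_hena(void)
{
	return (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
	    BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP));
}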
+
/* I40E_VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
index 8e9ba80987ae..a9221d323a26 100644
--- a/sys/dev/ixl/if_ixl.c
+++ b/sys/dev/ixl/if_ixl.c
@@ -32,23 +32,17 @@
******************************************************************************/
/*$FreeBSD$*/
-#ifndef IXL_STANDALONE_BUILD
-#include "opt_inet.h"
-#include "opt_inet6.h"
-#include "opt_rss.h"
-#endif
-
#include "ixl.h"
#include "ixl_pf.h"
-#ifdef RSS
-#include <net/rss_config.h>
+#ifdef PCI_IOV
+#include "ixl_pf_iov.h"
#endif
/*********************************************************************
* Driver version
*********************************************************************/
-char ixl_driver_version[] = "1.4.27-k";
+char ixl_driver_version[] = "1.6.6-k";
/*********************************************************************
* PCI Device ID Table
@@ -70,6 +64,12 @@ static ixl_vendor_info_t ixl_vendor_info_array[] =
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -79,7 +79,7 @@ static ixl_vendor_info_t ixl_vendor_info_array[] =
*********************************************************************/
static char *ixl_strings[] = {
- "Intel(R) Ethernet Connection XL710 Driver"
+ "Intel(R) Ethernet Connection XL710/X722 Driver"
};
@@ -90,146 +90,9 @@ static int ixl_probe(device_t);
static int ixl_attach(device_t);
static int ixl_detach(device_t);
static int ixl_shutdown(device_t);
-static int ixl_get_hw_capabilities(struct ixl_pf *);
-static void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
-static int ixl_ioctl(struct ifnet *, u_long, caddr_t);
-static void ixl_init(void *);
-static void ixl_init_locked(struct ixl_pf *);
-static void ixl_stop(struct ixl_pf *);
-static void ixl_stop_locked(struct ixl_pf *);
-static void ixl_media_status(struct ifnet *, struct ifmediareq *);
-static int ixl_media_change(struct ifnet *);
-static void ixl_update_link_status(struct ixl_pf *);
-static int ixl_allocate_pci_resources(struct ixl_pf *);
-static u16 ixl_get_bus_info(struct i40e_hw *, device_t);
-static int ixl_setup_stations(struct ixl_pf *);
-static int ixl_switch_config(struct ixl_pf *);
-static int ixl_initialize_vsi(struct ixl_vsi *);
-
-static int ixl_setup_adminq_msix(struct ixl_pf *);
-static int ixl_setup_adminq_tq(struct ixl_pf *);
-static int ixl_setup_queue_msix(struct ixl_vsi *);
-static int ixl_setup_queue_tqs(struct ixl_vsi *);
-static int ixl_teardown_adminq_msix(struct ixl_pf *);
-static int ixl_teardown_queue_msix(struct ixl_vsi *);
-static void ixl_configure_intr0_msix(struct ixl_pf *);
-static void ixl_configure_queue_intr_msix(struct ixl_pf *);
-static void ixl_free_queue_tqs(struct ixl_vsi *);
-static void ixl_free_adminq_tq(struct ixl_pf *);
-
-static int ixl_assign_vsi_legacy(struct ixl_pf *);
-static int ixl_init_msix(struct ixl_pf *);
-static void ixl_configure_itr(struct ixl_pf *);
-static void ixl_configure_legacy(struct ixl_pf *);
-static void ixl_free_pci_resources(struct ixl_pf *);
-static void ixl_local_timer(void *);
-static int ixl_setup_interface(device_t, struct ixl_vsi *);
-static void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
-static void ixl_config_rss(struct ixl_vsi *);
-static void ixl_set_queue_rx_itr(struct ixl_queue *);
-static void ixl_set_queue_tx_itr(struct ixl_queue *);
-static int ixl_set_advertised_speeds(struct ixl_pf *, int);
-static void ixl_get_initial_advertised_speeds(struct ixl_pf *);
-
-static int ixl_enable_rings(struct ixl_vsi *);
-static int ixl_disable_rings(struct ixl_vsi *);
-static void ixl_enable_intr(struct ixl_vsi *);
-static void ixl_disable_intr(struct ixl_vsi *);
-static void ixl_disable_rings_intr(struct ixl_vsi *);
-
-static void ixl_enable_adminq(struct i40e_hw *);
-static void ixl_disable_adminq(struct i40e_hw *);
-static void ixl_enable_queue(struct i40e_hw *, int);
-static void ixl_disable_queue(struct i40e_hw *, int);
-static void ixl_enable_legacy(struct i40e_hw *);
-static void ixl_disable_legacy(struct i40e_hw *);
-
-static void ixl_set_promisc(struct ixl_vsi *);
-static void ixl_add_multi(struct ixl_vsi *);
-static void ixl_del_multi(struct ixl_vsi *);
-static void ixl_register_vlan(void *, struct ifnet *, u16);
-static void ixl_unregister_vlan(void *, struct ifnet *, u16);
-static void ixl_setup_vlan_filters(struct ixl_vsi *);
-
-static void ixl_init_filters(struct ixl_vsi *);
-static void ixl_reconfigure_filters(struct ixl_vsi *vsi);
-static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
-static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
-static void ixl_add_hw_filters(struct ixl_vsi *, int, int);
-static void ixl_del_hw_filters(struct ixl_vsi *, int);
-static struct ixl_mac_filter *
- ixl_find_filter(struct ixl_vsi *, u8 *, s16);
-static void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
-static void ixl_free_mac_filters(struct ixl_vsi *vsi);
-/* Sysctls*/
-static void ixl_add_device_sysctls(struct ixl_pf *);
-
-static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
-static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
-static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
-
-#ifdef IXL_DEBUG_SYSCTL
-static int ixl_debug_info(SYSCTL_HANDLER_ARGS);
-static void ixl_print_debug_info(struct ixl_pf *);
-
-static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
-#endif
-
-/* The MSI/X Interrupt handlers */
-static void ixl_intr(void *);
-static void ixl_msix_que(void *);
-static void ixl_msix_adminq(void *);
-static void ixl_handle_mdd_event(struct ixl_pf *);
-
-/* Deferred interrupt tasklets */
-static void ixl_do_adminq(void *, int);
-
-/* Statistics */
-static void ixl_add_hw_stats(struct ixl_pf *);
-static void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
- struct sysctl_oid_list *, struct i40e_hw_port_stats *);
-static void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
- struct sysctl_oid_list *,
- struct i40e_eth_stats *);
-static void ixl_update_stats_counters(struct ixl_pf *);
-static void ixl_update_eth_stats(struct ixl_vsi *);
-static void ixl_update_vsi_stats(struct ixl_vsi *);
-static void ixl_pf_reset_stats(struct ixl_pf *);
-static void ixl_vsi_reset_stats(struct ixl_vsi *);
-static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
- u64 *, u64 *);
-static void ixl_stat_update32(struct i40e_hw *, u32, bool,
- u64 *, u64 *);
-/* NVM update */
-static int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
-static void ixl_handle_empr_reset(struct ixl_pf *);
-static int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *);
-
-/* Debug helper functions */
-#ifdef IXL_DEBUG
-static void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
-#endif
-
-#ifdef PCI_IOV
-static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
-
-static int ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t*);
-static void ixl_iov_uninit(device_t dev);
-static int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);
-
-static void ixl_handle_vf_msg(struct ixl_pf *,
- struct i40e_arq_event_info *);
-static void ixl_handle_vflr(void *arg, int pending);
-
-static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
-static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
-#endif
+static int ixl_save_pf_tunables(struct ixl_pf *);
+static int ixl_attach_get_link_status(struct ixl_pf *);
/*********************************************************************
* FreeBSD Device Interface Entry Points
@@ -263,11 +126,6 @@ MODULE_DEPEND(ixl, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
/*
-** Global reset mutex
-*/
-static struct mtx ixl_reset_mtx;
-
-/*
** TUNEABLE PARAMETERS:
*/
@@ -287,73 +145,72 @@ SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
** Number of descriptors per ring:
** - TX and RX are the same size
*/
-static int ixl_ringsz = DEFAULT_RING;
-TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
+static int ixl_ring_size = DEFAULT_RING;
+TUNABLE_INT("hw.ixl.ring_size", &ixl_ring_size);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
- &ixl_ringsz, 0, "Descriptor Ring Size");
+ &ixl_ring_size, 0, "Descriptor Ring Size");
/*
** This can be set manually; if left as 0, the
** number of queues will be calculated based
** on CPUs and MSI-X vectors available.
*/
-int ixl_max_queues = 0;
+static int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
&ixl_max_queues, 0, "Number of Queues");
+static int ixl_enable_tx_fc_filter = 1;
+TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
+ &ixl_enable_tx_fc_filter);
+SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
+ &ixl_enable_tx_fc_filter, 0,
+ "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
+
+static int ixl_core_debug_mask = 0;
+TUNABLE_INT("hw.ixl.core_debug_mask",
+ &ixl_core_debug_mask);
+SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
+ &ixl_core_debug_mask, 0,
+ "Display debug statements that are printed in non-shared code");
+
+static int ixl_shared_debug_mask = 0;
+TUNABLE_INT("hw.ixl.shared_debug_mask",
+ &ixl_shared_debug_mask);
+SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
+ &ixl_shared_debug_mask, 0,
+ "Display debug statements that are printed in shared code");
+
/*
** Controls for Interrupt Throttling
** - true/false for dynamic adjustment
** - default values for static ITR
*/
-int ixl_dynamic_rx_itr = 1;
+static int ixl_dynamic_rx_itr = 1;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
&ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
-int ixl_dynamic_tx_itr = 1;
+static int ixl_dynamic_tx_itr = 1;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
&ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
-int ixl_rx_itr = IXL_ITR_8K;
+static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
&ixl_rx_itr, 0, "RX Interrupt Rate");
-int ixl_tx_itr = IXL_ITR_4K;
+static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
&ixl_tx_itr, 0, "TX Interrupt Rate");
-#ifdef IXL_FDIR
-static int ixl_enable_fdir = 1;
-TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
-/* Rate at which we sample */
-int ixl_atr_rate = 20;
-TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
-#endif
-
#ifdef DEV_NETMAP
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */
-static char *ixl_fc_string[6] = {
- "None",
- "Rx",
- "Tx",
- "Full",
- "Priority",
- "Default"
-};
-
-static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
-
-static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
- {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
/*********************************************************************
* Device identification routine
*
@@ -371,7 +228,6 @@ ixl_probe(device_t dev)
u16 pci_vendor_id, pci_device_id;
u16 pci_subvendor_id, pci_subdevice_id;
char device_name[256];
- static bool lock_init = FALSE;
#if 0
INIT_DEBUGOUT("ixl_probe: begin");
@@ -398,13 +254,6 @@ ixl_probe(device_t dev)
ixl_strings[ent->index],
ixl_driver_version);
device_set_desc_copy(dev, device_name);
- /* One shot mutex init */
- if (lock_init == FALSE) {
- lock_init = TRUE;
- mtx_init(&ixl_reset_mtx,
- "ixl_reset",
- "IXL RESET Lock", MTX_DEF);
- }
return (BUS_PROBE_DEFAULT);
}
ent++;
@@ -412,6 +261,64 @@ ixl_probe(device_t dev)
return (ENXIO);
}
+static int
+ixl_attach_get_link_status(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int error = 0;
+
+ if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4)) {
+ i40e_msec_delay(75);
+ error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
+ if (error) {
+ device_printf(dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ return error;
+ }
+ }
+
+ /* Determine link state */
+ hw->phy.get_link_info = TRUE;
+ i40e_get_link_status(hw, &pf->link_up);
+ return (0);
+}
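
The firmware check above applies the link-restart workaround to any image older than 4.33. A minimal standalone sketch of that version comparison (the helper name fw_older_than is hypothetical, not part of the driver):

    #include <stdbool.h>
    #include <stdio.h>

    /* True when firmware fw_maj.fw_min predates version maj.min. */
    static bool
    fw_older_than(unsigned fw_maj, unsigned fw_min, unsigned maj, unsigned min)
    {
    	return ((fw_maj < maj) || (fw_maj == maj && fw_min < min));
    }

    int
    main(void)
    {
    	/* 4.32 needs the link-restart workaround; 4.33 does not. */
    	printf("4.32: %d\n", fw_older_than(4, 32, 4, 33));	/* prints 1 */
    	printf("4.33: %d\n", fw_older_than(4, 33, 4, 33));	/* prints 0 */
    	return (0);
    }
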
+
+/*
+ * Sanity check and save off tunable values.
+ */
+static int
+ixl_save_pf_tunables(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+
+ /* Save tunable information */
+ pf->enable_msix = ixl_enable_msix;
+ pf->max_queues = ixl_max_queues;
+ pf->ringsz = ixl_ring_size;
+ pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
+ pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
+ pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
+ pf->tx_itr = ixl_tx_itr;
+ pf->rx_itr = ixl_rx_itr;
+ pf->dbg_mask = ixl_core_debug_mask;
+ pf->hw.debug_mask = ixl_shared_debug_mask;
+
+ if (ixl_ring_size < IXL_MIN_RING
+ || ixl_ring_size > IXL_MAX_RING
+ || ixl_ring_size % IXL_RING_INCREMENT != 0) {
+ device_printf(dev, "Invalid ring_size value of %d set!\n",
+ ixl_ring_size);
+ device_printf(dev, "ring_size must be between %d and %d, "
+ "inclusive, and must be a multiple of %d\n",
+ IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT);
+ return (EINVAL);
+ }
+
+ return (0);
+}
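
The bounds check above rejects ring sizes outside [IXL_MIN_RING, IXL_MAX_RING] or not a multiple of IXL_RING_INCREMENT. A self-contained sketch of the same test, with placeholder values for the three constants (the real definitions live in ixl.h and may differ):

    #include <stdio.h>

    /* Placeholder bounds; the real constants are defined in ixl.h. */
    #define IXL_MIN_RING		32
    #define IXL_MAX_RING		4096
    #define IXL_RING_INCREMENT	32

    /* Returns 0 when size is a valid descriptor ring size. */
    static int
    ring_size_ok(int size)
    {
    	if (size < IXL_MIN_RING || size > IXL_MAX_RING ||
    	    size % IXL_RING_INCREMENT != 0)
    		return (-1);
    	return (0);
    }

    int
    main(void)
    {
    	printf("1024: %s\n", ring_size_ok(1024) == 0 ? "ok" : "invalid");
    	printf("1000: %s\n", ring_size_ok(1000) == 0 ? "ok" : "invalid");
    	return (0);
    }
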
+
/*********************************************************************
* Device initialization routine
*
@@ -428,12 +335,8 @@ ixl_attach(device_t dev)
struct ixl_pf *pf;
struct i40e_hw *hw;
struct ixl_vsi *vsi;
- u16 bus;
+ enum i40e_status_code status;
int error = 0;
-#ifdef PCI_IOV
- nvlist_t *pf_schema, *vf_schema;
- int iov_error;
-#endif
INIT_DEBUGOUT("ixl_attach: begin");
@@ -449,26 +352,17 @@ ixl_attach(device_t dev)
vsi = &pf->vsi;
vsi->dev = pf->dev;
+ /* Save tunable values */
+ error = ixl_save_pf_tunables(pf);
+ if (error)
+ return (error);
+
/* Core Lock Init*/
IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
/* Set up the timer callout */
callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
- /* Save off the PCI information */
- hw->vendor_id = pci_get_vendor(dev);
- hw->device_id = pci_get_device(dev);
- hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
- hw->subsystem_vendor_id =
- pci_read_config(dev, PCIR_SUBVEND_0, 2);
- hw->subsystem_device_id =
- pci_read_config(dev, PCIR_SUBDEV_0, 2);
-
- hw->bus.device = pci_get_slot(dev);
- hw->bus.func = pci_get_function(dev);
-
- pf->vc_debug_lvl = 1;
-
/* Do PCI setup - map BAR0, etc */
if (ixl_allocate_pci_resources(pf)) {
device_printf(dev, "Allocation of PCI resources failed\n");
@@ -478,42 +372,45 @@ ixl_attach(device_t dev)
/* Establish a clean starting point */
i40e_clear_hw(hw);
- error = i40e_pf_reset(hw);
- if (error) {
- device_printf(dev, "PF reset failure %d\n", error);
+ status = i40e_pf_reset(hw);
+ if (status) {
+ device_printf(dev, "PF reset failure %s\n",
+ i40e_stat_str(hw, status));
error = EIO;
goto err_out;
}
- /* Set admin queue parameters */
- hw->aq.num_arq_entries = IXL_AQ_LEN;
- hw->aq.num_asq_entries = IXL_AQ_LEN;
- hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
- hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
-
- /* Initialize mac filter list for VSI */
- SLIST_INIT(&vsi->ftl);
-
/* Initialize the shared code */
- error = i40e_init_shared_code(hw);
- if (error) {
- device_printf(dev, "Unable to initialize shared code, error %d\n",
- error);
+ status = i40e_init_shared_code(hw);
+ if (status) {
+ device_printf(dev, "Unable to initialize shared code, error %s\n",
+ i40e_stat_str(hw, status));
error = EIO;
goto err_out;
}
+ /*
+ * Allocate interrupts and figure out number of queues to use
+ * for PF interface
+ */
+ pf->msix = ixl_init_msix(pf);
+
/* Set up the admin queue */
- error = i40e_init_adminq(hw);
- if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
- device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
- error);
+ hw->aq.num_arq_entries = IXL_AQ_LEN;
+ hw->aq.num_asq_entries = IXL_AQ_LEN;
+ hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
+ hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
+
+ status = i40e_init_adminq(hw);
+ if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
+ device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
+ i40e_stat_str(hw, status));
error = EIO;
goto err_out;
}
ixl_print_nvm_version(pf);
- if (error == I40E_ERR_FIRMWARE_API_VERSION) {
+ if (status == I40E_ERR_FIRMWARE_API_VERSION) {
device_printf(dev, "The driver for the device stopped "
"because the NVM image is newer than expected.\n"
"You must install the most recent version of "
@@ -544,24 +441,44 @@ ixl_attach(device_t dev)
}
/* Set up host memory cache */
- error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
- if (error) {
- device_printf(dev, "init_lan_hmc failed: %d\n", error);
+ if (status) {
+ device_printf(dev, "init_lan_hmc failed: %s\n",
+ i40e_stat_str(hw, status));
goto err_get_cap;
}
- error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (status) {
+ device_printf(dev, "configure_lan_hmc failed: %s\n",
+ i40e_stat_str(hw, status));
+ goto err_mac_hmc;
+ }
+
+ /* Init queue allocation manager */
+ error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
+ if (error) {
+ device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
+ error);
+ goto err_mac_hmc;
+ }
+ /* reserve a contiguous allocation for the PF's VSI */
+ error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag);
if (error) {
- device_printf(dev, "configure_lan_hmc failed: %d\n", error);
+ device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
+ error);
goto err_mac_hmc;
}
+ device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
+ pf->qtag.num_allocated, pf->qtag.num_active);
/* Disable LLDP from the firmware for certain NVM versions */
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
(pf->hw.aq.fw_maj_ver < 4))
i40e_aq_stop_lldp(hw, TRUE, NULL);
+ /* Get MAC addresses from hardware */
i40e_get_mac_addr(hw, hw->mac.addr);
error = i40e_validate_mac_addr(hw->mac.addr);
if (error) {
@@ -571,35 +488,29 @@ ixl_attach(device_t dev)
bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
i40e_get_port_mac_addr(hw, hw->mac.port_addr);
- /* Set up VSI and queues */
- if (ixl_setup_stations(pf) != 0) {
+ /* Initialize mac filter list for VSI */
+ SLIST_INIT(&vsi->ftl);
+
+ /* Set up SW VSI and allocate queue memory and rings */
+ if (ixl_setup_stations(pf)) {
device_printf(dev, "setup stations failed!\n");
error = ENOMEM;
goto err_mac_hmc;
}
- if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
- (hw->aq.fw_maj_ver < 4)) {
- i40e_msec_delay(75);
- error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
- if (error) {
- device_printf(dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
- goto err_late;
- }
- }
-
- /* Determine link state */
- hw->phy.get_link_info = TRUE;
- i40e_get_link_status(hw, &pf->link_up);
-
/* Setup OS network interface / ifnet */
- if (ixl_setup_interface(dev, vsi) != 0) {
+ if (ixl_setup_interface(dev, vsi)) {
device_printf(dev, "interface setup failed!\n");
error = EIO;
goto err_late;
}
+ /* Determine link state */
+ if (ixl_attach_get_link_status(pf)) {
+ error = EINVAL;
+ goto err_late;
+ }
+
error = ixl_switch_config(pf);
if (error) {
device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
@@ -608,17 +519,17 @@ ixl_attach(device_t dev)
}
/* Limit PHY interrupts to link, autoneg, and modules failure */
- error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
+ status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
NULL);
- if (error) {
- device_printf(dev, "i40e_aq_set_phy_mask() failed: err %d,"
- " aq_err %d\n", error, hw->aq.asq_last_status);
+ if (status) {
+ device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
+ " aq_err %s\n", i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
goto err_late;
}
/* Get the bus configuration and set the shared code's config */
- bus = ixl_get_bus_info(hw, dev);
- i40e_set_pci_config_data(hw, bus);
+ ixl_get_bus_info(hw, dev);
/*
* In MSI-X mode, initialize the Admin Queue interrupt,
@@ -656,26 +567,7 @@ ixl_attach(device_t dev)
ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
#ifdef PCI_IOV
- /* SR-IOV is only supported when MSI-X is in use. */
- if (pf->msix > 1) {
- pf_schema = pci_iov_schema_alloc_node();
- vf_schema = pci_iov_schema_alloc_node();
- pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
- pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
- IOV_SCHEMA_HASDEFAULT, TRUE);
- pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
- IOV_SCHEMA_HASDEFAULT, FALSE);
- pci_iov_schema_add_bool(vf_schema, "allow-promisc",
- IOV_SCHEMA_HASDEFAULT, FALSE);
-
- iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
- if (iov_error != 0) {
- device_printf(dev,
- "Failed to initialize SR-IOV (error=%d)\n",
- iov_error);
- } else
- device_printf(dev, "SR-IOV ready\n");
- }
+ ixl_initialize_sriov(pf);
#endif
#ifdef DEV_NETMAP
@@ -685,8 +577,10 @@ ixl_attach(device_t dev)
return (0);
err_late:
- if (vsi->ifp != NULL)
+ if (vsi->ifp != NULL) {
+ ether_ifdetach(vsi->ifp);
if_free(vsi->ifp);
+ }
err_mac_hmc:
i40e_shutdown_lan_hmc(hw);
err_get_cap:
@@ -766,6 +660,7 @@ ixl_detach(device_t dev)
#ifdef DEV_NETMAP
netmap_detach(vsi->ifp);
#endif /* DEV_NETMAP */
+ ixl_pf_qmgr_destroy(&pf->qmgr);
ixl_free_pci_resources(pf);
bus_generic_detach(dev);
if_free(vsi->ifp);
@@ -788,6692 +683,3 @@ ixl_shutdown(device_t dev)
return (0);
}
-
-/*********************************************************************
- *
- * Get the hardware capabilities
- *
- **********************************************************************/
-
-static int
-ixl_get_hw_capabilities(struct ixl_pf *pf)
-{
- struct i40e_aqc_list_capabilities_element_resp *buf;
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- int error, len;
- u16 needed;
- bool again = TRUE;
-
- len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
-retry:
- if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
- malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate cap memory\n");
- return (ENOMEM);
- }
-
- /* This populates the hw struct */
- error = i40e_aq_discover_capabilities(hw, buf, len,
- &needed, i40e_aqc_opc_list_func_capabilities, NULL);
- free(buf, M_DEVBUF);
- if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
- (again == TRUE)) {
- /* retry once with a larger buffer */
- again = FALSE;
- len = needed;
- goto retry;
- } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
- device_printf(dev, "capability discovery failed: %d\n",
- pf->hw.aq.asq_last_status);
- return (ENODEV);
- }
-
- /* Capture this PF's starting queue pair */
- pf->qbase = hw->func_caps.base_queue;
-
-#ifdef IXL_DEBUG
- device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
- "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
- hw->pf_id, hw->func_caps.num_vfs,
- hw->func_caps.num_msix_vectors,
- hw->func_caps.num_msix_vectors_vf,
- hw->func_caps.fd_filters_guaranteed,
- hw->func_caps.fd_filters_best_effort,
- hw->func_caps.num_tx_qp,
- hw->func_caps.num_rx_qp,
- hw->func_caps.base_queue);
-#endif
- return (error);
-}
-
-static void
-ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
-{
- device_t dev = vsi->dev;
-
- /* Enable/disable TXCSUM/TSO4 */
- if (!(ifp->if_capenable & IFCAP_TXCSUM)
- && !(ifp->if_capenable & IFCAP_TSO4)) {
- if (mask & IFCAP_TXCSUM) {
- ifp->if_capenable |= IFCAP_TXCSUM;
- /* enable TXCSUM, restore TSO if previously enabled */
- if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
- vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
- ifp->if_capenable |= IFCAP_TSO4;
- }
- }
- else if (mask & IFCAP_TSO4) {
- ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
- vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
- device_printf(dev,
- "TSO4 requires txcsum, enabling both...\n");
- }
- } else if((ifp->if_capenable & IFCAP_TXCSUM)
- && !(ifp->if_capenable & IFCAP_TSO4)) {
- if (mask & IFCAP_TXCSUM)
- ifp->if_capenable &= ~IFCAP_TXCSUM;
- else if (mask & IFCAP_TSO4)
- ifp->if_capenable |= IFCAP_TSO4;
- } else if((ifp->if_capenable & IFCAP_TXCSUM)
- && (ifp->if_capenable & IFCAP_TSO4)) {
- if (mask & IFCAP_TXCSUM) {
- vsi->flags |= IXL_FLAGS_KEEP_TSO4;
- ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
- device_printf(dev,
- "TSO4 requires txcsum, disabling both...\n");
- } else if (mask & IFCAP_TSO4)
- ifp->if_capenable &= ~IFCAP_TSO4;
- }
-
- /* Enable/disable TXCSUM_IPV6/TSO6 */
- if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- && !(ifp->if_capenable & IFCAP_TSO6)) {
- if (mask & IFCAP_TXCSUM_IPV6) {
- ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
- if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
- vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
- ifp->if_capenable |= IFCAP_TSO6;
- }
- } else if (mask & IFCAP_TSO6) {
- ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
- vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
- device_printf(dev,
- "TSO6 requires txcsum6, enabling both...\n");
- }
- } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- && !(ifp->if_capenable & IFCAP_TSO6)) {
- if (mask & IFCAP_TXCSUM_IPV6)
- ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
- else if (mask & IFCAP_TSO6)
- ifp->if_capenable |= IFCAP_TSO6;
- } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- && (ifp->if_capenable & IFCAP_TSO6)) {
- if (mask & IFCAP_TXCSUM_IPV6) {
- vsi->flags |= IXL_FLAGS_KEEP_TSO6;
- ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
- device_printf(dev,
- "TSO6 requires txcsum6, disabling both...\n");
- } else if (mask & IFCAP_TSO6)
- ifp->if_capenable &= ~IFCAP_TSO6;
- }
-}
-
-/*********************************************************************
- * Ioctl entry point
- *
- * ixl_ioctl is called when the user wants to configure the
- * interface.
- *
- * return 0 on success, positive on failure
- **********************************************************************/
-
-static int
-ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ixl_pf *pf = vsi->back;
- struct ifreq *ifr = (struct ifreq *)data;
- struct ifdrv *ifd = (struct ifdrv *)data;
-#if defined(INET) || defined(INET6)
- struct ifaddr *ifa = (struct ifaddr *)data;
- bool avoid_reset = FALSE;
-#endif
- int error = 0;
-
- switch (command) {
-
- case SIOCSIFADDR:
-#ifdef INET
- if (ifa->ifa_addr->sa_family == AF_INET)
- avoid_reset = TRUE;
-#endif
-#ifdef INET6
- if (ifa->ifa_addr->sa_family == AF_INET6)
- avoid_reset = TRUE;
-#endif
-#if defined(INET) || defined(INET6)
- /*
- ** Calling init results in link renegotiation,
- ** so we avoid doing it when possible.
- */
- if (avoid_reset) {
- ifp->if_flags |= IFF_UP;
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
- ixl_init(pf);
-#ifdef INET
- if (!(ifp->if_flags & IFF_NOARP))
- arp_ifinit(ifp, ifa);
-#endif
- } else
- error = ether_ioctl(ifp, command, data);
- break;
-#endif
- case SIOCSIFMTU:
- IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
- if (ifr->ifr_mtu > IXL_MAX_FRAME -
- ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
- error = EINVAL;
- } else {
- IXL_PF_LOCK(pf);
- ifp->if_mtu = ifr->ifr_mtu;
- vsi->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
- + ETHER_VLAN_ENCAP_LEN;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixl_init_locked(pf);
- IXL_PF_UNLOCK(pf);
- }
- break;
- case SIOCSIFFLAGS:
- IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
- IXL_PF_LOCK(pf);
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- if ((ifp->if_flags ^ pf->if_flags) &
- (IFF_PROMISC | IFF_ALLMULTI)) {
- ixl_set_promisc(vsi);
- }
- } else {
- IXL_PF_UNLOCK(pf);
- ixl_init(pf);
- IXL_PF_LOCK(pf);
- }
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXL_PF_UNLOCK(pf);
- ixl_stop(pf);
- IXL_PF_LOCK(pf);
- }
- }
- pf->if_flags = ifp->if_flags;
- IXL_PF_UNLOCK(pf);
- break;
- case SIOCSDRVSPEC:
- case SIOCGDRVSPEC:
- IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
- "Info)\n");
-
- /* NVM update command */
- if (ifd->ifd_cmd == I40E_NVM_ACCESS)
- error = ixl_handle_nvmupd_cmd(pf, ifd);
- else
- error = EINVAL;
- break;
- case SIOCADDMULTI:
- IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXL_PF_LOCK(pf);
- ixl_disable_intr(vsi);
- ixl_add_multi(vsi);
- ixl_enable_intr(vsi);
- IXL_PF_UNLOCK(pf);
- }
- break;
- case SIOCDELMULTI:
- IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXL_PF_LOCK(pf);
- ixl_disable_intr(vsi);
- ixl_del_multi(vsi);
- ixl_enable_intr(vsi);
- IXL_PF_UNLOCK(pf);
- }
- break;
- case SIOCSIFMEDIA:
- case SIOCGIFMEDIA:
-#ifdef IFM_ETH_XTYPE
- case SIOCGIFXMEDIA:
-#endif
- IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
- error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
- break;
- case SIOCSIFCAP:
- {
- int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
- IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
-
- ixl_cap_txcsum_tso(vsi, ifp, mask);
-
- if (mask & IFCAP_RXCSUM)
- ifp->if_capenable ^= IFCAP_RXCSUM;
- if (mask & IFCAP_RXCSUM_IPV6)
- ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
- if (mask & IFCAP_LRO)
- ifp->if_capenable ^= IFCAP_LRO;
- if (mask & IFCAP_VLAN_HWTAGGING)
- ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
- if (mask & IFCAP_VLAN_HWFILTER)
- ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
- if (mask & IFCAP_VLAN_HWTSO)
- ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXL_PF_LOCK(pf);
- ixl_init_locked(pf);
- IXL_PF_UNLOCK(pf);
- }
- VLAN_CAPABILITIES(ifp);
-
- break;
- }
-
- default:
- IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
- error = ether_ioctl(ifp, command, data);
- break;
- }
-
- return (error);
-}
-
-
-/*********************************************************************
- * Init entry point
- *
- * This routine is used in two ways. It is used by the stack as
- * init entry point in network interface structure. It is also used
- * by the driver as a hw/sw initialization routine to get to a
- * consistent state.
- *
- * return 0 on success, positive on failure
- **********************************************************************/
-
-static void
-ixl_init_locked(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ifnet *ifp = vsi->ifp;
- device_t dev = pf->dev;
- struct i40e_filter_control_settings filter;
- u8 tmpaddr[ETHER_ADDR_LEN];
- int ret;
-
- mtx_assert(&pf->pf_mtx, MA_OWNED);
- INIT_DEBUGOUT("ixl_init_locked: begin");
-
- ixl_stop_locked(pf);
-
- /* Get the latest mac address... User might use a LAA */
- bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
- I40E_ETH_LENGTH_OF_ADDRESS);
- if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
- (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
- ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
- bcopy(tmpaddr, hw->mac.addr,
- I40E_ETH_LENGTH_OF_ADDRESS);
- ret = i40e_aq_mac_address_write(hw,
- I40E_AQC_WRITE_TYPE_LAA_ONLY,
- hw->mac.addr, NULL);
- if (ret) {
-			device_printf(dev, "LLA address "
-			    "change failed!!\n");
- return;
- }
- }
-
- ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
-
- /* Set the various hardware offload abilities */
- ifp->if_hwassist = 0;
- if (ifp->if_capenable & IFCAP_TSO)
- ifp->if_hwassist |= CSUM_TSO;
- if (ifp->if_capenable & IFCAP_TXCSUM)
- ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
- if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
-
- /* Set up the device filtering */
- bzero(&filter, sizeof(filter));
- filter.enable_ethtype = TRUE;
- filter.enable_macvlan = TRUE;
-#ifdef IXL_FDIR
- filter.enable_fdir = TRUE;
-#endif
- filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
- if (i40e_set_filter_control(hw, &filter))
- device_printf(dev, "i40e_set_filter_control() failed\n");
-
- /* Set up RSS */
- ixl_config_rss(vsi);
-
- /* Prepare the VSI: rings, hmc contexts, etc... */
- if (ixl_initialize_vsi(vsi)) {
- device_printf(dev, "initialize vsi failed!!\n");
- return;
- }
-
- /* Add protocol filters to list */
- ixl_init_filters(vsi);
-
- /* Setup vlan's if needed */
- ixl_setup_vlan_filters(vsi);
-
- /* Set up MSI/X routing and the ITR settings */
- if (ixl_enable_msix) {
- ixl_configure_queue_intr_msix(pf);
- ixl_configure_itr(pf);
- } else
- ixl_configure_legacy(pf);
-
- ixl_enable_rings(vsi);
-
- i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
-
- ixl_reconfigure_filters(vsi);
-
- /* And now turn on interrupts */
- ixl_enable_intr(vsi);
-
- /* Get link info */
- hw->phy.get_link_info = TRUE;
- i40e_get_link_status(hw, &pf->link_up);
- ixl_update_link_status(pf);
-
- /* Set initial advertised speed sysctl value */
- ixl_get_initial_advertised_speeds(pf);
-
- /* Start the local timer */
- callout_reset(&pf->timer, hz, ixl_local_timer, pf);
-
- /* Now inform the stack we're ready */
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
-
- return;
-}
-
-/* For the set_advertise sysctl */
-static void
-ixl_get_initial_advertised_speeds(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- enum i40e_status_code status;
- struct i40e_aq_get_phy_abilities_resp abilities;
-
- /* Set initial sysctl values */
-	status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities,
- NULL);
- if (status) {
- /* Non-fatal error */
- device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n",
- __func__, status);
- return;
- }
-
- if (abilities.link_speed & I40E_LINK_SPEED_40GB)
- pf->advertised_speed |= 0x10;
- if (abilities.link_speed & I40E_LINK_SPEED_20GB)
- pf->advertised_speed |= 0x8;
- if (abilities.link_speed & I40E_LINK_SPEED_10GB)
- pf->advertised_speed |= 0x4;
- if (abilities.link_speed & I40E_LINK_SPEED_1GB)
- pf->advertised_speed |= 0x2;
- if (abilities.link_speed & I40E_LINK_SPEED_100MB)
- pf->advertised_speed |= 0x1;
-}
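
The advertised_speed field built above is a bitmask: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 20G, 0x10 = 40G. A small sketch that decodes such a mask for display (decode_speeds is illustrative only, not a driver function):

    #include <stdio.h>

    /* Bit layout used by the set_advertise sysctl. */
    static const struct {
    	unsigned	bit;
    	const char	*name;
    } speed_bits[] = {
    	{ 0x1,  "100M" },
    	{ 0x2,  "1G" },
    	{ 0x4,  "10G" },
    	{ 0x8,  "20G" },
    	{ 0x10, "40G" },
    };

    static void
    decode_speeds(unsigned mask)
    {
    	for (unsigned i = 0; i < sizeof(speed_bits) / sizeof(speed_bits[0]); i++)
    		if (mask & speed_bits[i].bit)
    			printf("%s ", speed_bits[i].name);
    	printf("\n");
    }

    int
    main(void)
    {
    	decode_speeds(0x6);	/* prints "1G 10G" */
    	return (0);
    }
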
-
-static int
-ixl_teardown_hw_structs(struct ixl_pf *pf)
-{
- enum i40e_status_code status = 0;
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
-
- /* Shutdown LAN HMC */
- if (hw->hmc.hmc_obj) {
- status = i40e_shutdown_lan_hmc(hw);
- if (status) {
- device_printf(dev,
- "init: LAN HMC shutdown failure; status %d\n", status);
- goto err_out;
- }
- }
-
- // XXX: This gets called when we know the adminq is inactive;
-	// so we already know it's set up when we get here.
-
- /* Shutdown admin queue */
- status = i40e_shutdown_adminq(hw);
- if (status)
- device_printf(dev,
- "init: Admin Queue shutdown failure; status %d\n", status);
-
-err_out:
- return (status);
-}
-
-static int
-ixl_reset(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- u8 set_fc_err_mask;
- int error = 0;
-
- // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
- i40e_clear_hw(hw);
- error = i40e_pf_reset(hw);
- if (error) {
- device_printf(dev, "init: PF reset failure");
- error = EIO;
- goto err_out;
- }
-
- error = i40e_init_adminq(hw);
- if (error) {
- device_printf(dev, "init: Admin queue init failure;"
- " status code %d", error);
- error = EIO;
- goto err_out;
- }
-
- i40e_clear_pxe_mode(hw);
-
- error = ixl_get_hw_capabilities(pf);
- if (error) {
- device_printf(dev, "init: Error retrieving HW capabilities;"
- " status code %d\n", error);
- goto err_out;
- }
-
- error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
- hw->func_caps.num_rx_qp, 0, 0);
- if (error) {
- device_printf(dev, "init: LAN HMC init failed; status code %d\n",
- error);
- error = EIO;
- goto err_out;
- }
-
- error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
- if (error) {
- device_printf(dev, "init: LAN HMC config failed; status code %d\n",
- error);
- error = EIO;
- goto err_out;
- }
-
- // XXX: possible fix for panic, but our failure recovery is still broken
- error = ixl_switch_config(pf);
- if (error) {
- device_printf(dev, "init: ixl_switch_config() failed: %d\n",
- error);
- goto err_out;
- }
-
- error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
- NULL);
- if (error) {
- device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
- " aq_err %d\n", error, hw->aq.asq_last_status);
- error = EIO;
- goto err_out;
- }
-
- error = i40e_set_fc(hw, &set_fc_err_mask, true);
- if (error) {
- device_printf(dev, "init: setting link flow control failed; retcode %d,"
- " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
- goto err_out;
- }
-
- // XXX: (Rebuild VSIs?)
-
- /* Firmware delay workaround */
- if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
- (hw->aq.fw_maj_ver < 4)) {
- i40e_msec_delay(75);
- error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
- if (error) {
- device_printf(dev, "init: link restart failed, aq_err %d\n",
- hw->aq.asq_last_status);
- goto err_out;
- }
- }
-
-
-err_out:
- return (error);
-}
-
-static void
-ixl_init(void *arg)
-{
- struct ixl_pf *pf = arg;
- struct ixl_vsi *vsi = &pf->vsi;
- device_t dev = pf->dev;
- int error = 0;
-
- /*
- * If the aq is dead here, it probably means something outside of the driver
- * did something to the adapter, like a PF reset.
- * So rebuild the driver's state here if that occurs.
- */
- if (!i40e_check_asq_alive(&pf->hw)) {
- device_printf(dev, "Admin Queue is down; resetting...\n");
- IXL_PF_LOCK(pf);
- ixl_teardown_hw_structs(pf);
- ixl_reset(pf);
- IXL_PF_UNLOCK(pf);
- }
-
- /*
- * Set up LAN queue interrupts here.
- * Kernel interrupt setup functions cannot be called while holding a lock,
- * so this is done outside of init_locked().
- */
- if (pf->msix > 1) {
- /* Teardown existing interrupts, if they exist */
- ixl_teardown_queue_msix(vsi);
- ixl_free_queue_tqs(vsi);
- /* Then set them up again */
- error = ixl_setup_queue_msix(vsi);
- if (error)
- device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
- error);
- error = ixl_setup_queue_tqs(vsi);
- if (error)
- device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
- error);
- } else
- // possibly broken
- error = ixl_assign_vsi_legacy(pf);
- if (error) {
- device_printf(pf->dev, "assign_vsi_msix/legacy error: %d\n", error);
- return;
- }
-
- IXL_PF_LOCK(pf);
- ixl_init_locked(pf);
- IXL_PF_UNLOCK(pf);
-}
-
-/*
-** MSIX Interrupt Handlers and Tasklets
-*/
-static void
-ixl_handle_que(void *context, int pending)
-{
- struct ixl_queue *que = context;
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- struct ifnet *ifp = vsi->ifp;
- bool more;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- more = ixl_rxeof(que, IXL_RX_LIMIT);
- IXL_TX_LOCK(txr);
- ixl_txeof(que);
- if (!drbr_empty(ifp, txr->br))
- ixl_mq_start_locked(ifp, txr);
- IXL_TX_UNLOCK(txr);
- if (more) {
- taskqueue_enqueue(que->tq, &que->task);
- return;
- }
- }
-
-	/* Re-enable this interrupt */
- ixl_enable_queue(hw, que->me);
- return;
-}
-
-
-/*********************************************************************
- *
- * Legacy Interrupt Service routine
- *
- **********************************************************************/
-void
-ixl_intr(void *arg)
-{
- struct ixl_pf *pf = arg;
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- struct ifnet *ifp = vsi->ifp;
- struct tx_ring *txr = &que->txr;
- u32 reg, icr0, mask;
- bool more_tx, more_rx;
-
- ++que->irqs;
-
- /* Protect against spurious interrupts */
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
-
- icr0 = rd32(hw, I40E_PFINT_ICR0);
-
- reg = rd32(hw, I40E_PFINT_DYN_CTL0);
- reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
- wr32(hw, I40E_PFINT_DYN_CTL0, reg);
-
- mask = rd32(hw, I40E_PFINT_ICR0_ENA);
-
-#ifdef PCI_IOV
- if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
- taskqueue_enqueue(pf->tq, &pf->vflr_task);
-#endif
-
- if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
- taskqueue_enqueue(pf->tq, &pf->adminq);
- return;
- }
-
- more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
-
- IXL_TX_LOCK(txr);
- more_tx = ixl_txeof(que);
- if (!drbr_empty(vsi->ifp, txr->br))
- more_tx = 1;
- IXL_TX_UNLOCK(txr);
-
- /* re-enable other interrupt causes */
- wr32(hw, I40E_PFINT_ICR0_ENA, mask);
-
- /* And now the queues */
- reg = rd32(hw, I40E_QINT_RQCTL(0));
- reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
- wr32(hw, I40E_QINT_RQCTL(0), reg);
-
- reg = rd32(hw, I40E_QINT_TQCTL(0));
- reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
- reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
- wr32(hw, I40E_QINT_TQCTL(0), reg);
-
- ixl_enable_legacy(hw);
-
- return;
-}
-
-
-/*********************************************************************
- *
- * MSIX VSI Interrupt Service routine
- *
- **********************************************************************/
-void
-ixl_msix_que(void *arg)
-{
- struct ixl_queue *que = arg;
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- bool more_tx, more_rx;
-
- /* Protect against spurious interrupts */
- if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
- return;
-
- ++que->irqs;
-
- more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
-
- IXL_TX_LOCK(txr);
- more_tx = ixl_txeof(que);
- /*
- ** Make certain that if the stack
- ** has anything queued the task gets
- ** scheduled to handle it.
- */
- if (!drbr_empty(vsi->ifp, txr->br))
- more_tx = 1;
- IXL_TX_UNLOCK(txr);
-
- ixl_set_queue_rx_itr(que);
- ixl_set_queue_tx_itr(que);
-
- if (more_tx || more_rx)
- taskqueue_enqueue(que->tq, &que->task);
- else
- ixl_enable_queue(hw, que->me);
-
- return;
-}
-
-
-/*********************************************************************
- *
- * MSIX Admin Queue Interrupt Service routine
- *
- **********************************************************************/
-static void
-ixl_msix_adminq(void *arg)
-{
- struct ixl_pf *pf = arg;
- struct i40e_hw *hw = &pf->hw;
- u32 reg, mask, rstat_reg;
- bool do_task = FALSE;
-
- ++pf->admin_irq;
-
- reg = rd32(hw, I40E_PFINT_ICR0);
- mask = rd32(hw, I40E_PFINT_ICR0_ENA);
-
- /* Check on the cause */
- if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
- mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
- do_task = TRUE;
- }
-
- if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
- ixl_handle_mdd_event(pf);
- mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
- }
-
- if (reg & I40E_PFINT_ICR0_GRST_MASK) {
- device_printf(pf->dev, "Reset Requested!\n");
- rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
- rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
- >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
- device_printf(pf->dev, "Reset type: ");
- switch (rstat_reg) {
- /* These others might be handled similarly to an EMPR reset */
- case I40E_RESET_CORER:
- printf("CORER\n");
- break;
- case I40E_RESET_GLOBR:
- printf("GLOBR\n");
- break;
- case I40E_RESET_EMPR:
- printf("EMPR\n");
- atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
- break;
- default:
- printf("?\n");
- break;
- }
- // overload admin queue task to check reset progress?
- do_task = TRUE;
- }
-
- if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
- device_printf(pf->dev, "ECC Error detected!\n");
- }
-
- if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
- device_printf(pf->dev, "HMC Error detected!\n");
- }
-
- if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
- device_printf(pf->dev, "PCI Exception detected!\n");
- }
-
-#ifdef PCI_IOV
- if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
- mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
- taskqueue_enqueue(pf->tq, &pf->vflr_task);
- }
-#endif
-
- reg = rd32(hw, I40E_PFINT_DYN_CTL0);
- reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
- wr32(hw, I40E_PFINT_DYN_CTL0, reg);
-
- if (do_task)
- taskqueue_enqueue(pf->tq, &pf->adminq);
-}
-
-/*********************************************************************
- *
- * Media Ioctl callback
- *
- * This routine is called whenever the user queries the status of
- * the interface using ifconfig.
- *
- **********************************************************************/
-static void
-ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ixl_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
-
- INIT_DEBUGOUT("ixl_media_status: begin");
- IXL_PF_LOCK(pf);
-
- hw->phy.get_link_info = TRUE;
- i40e_get_link_status(hw, &pf->link_up);
- ixl_update_link_status(pf);
-
- ifmr->ifm_status = IFM_AVALID;
- ifmr->ifm_active = IFM_ETHER;
-
- if (!pf->link_up) {
- IXL_PF_UNLOCK(pf);
- return;
- }
-
- ifmr->ifm_status |= IFM_ACTIVE;
-
- /* Hardware always does full-duplex */
- ifmr->ifm_active |= IFM_FDX;
-
- switch (hw->phy.link_info.phy_type) {
- /* 100 M */
- case I40E_PHY_TYPE_100BASE_TX:
- ifmr->ifm_active |= IFM_100_TX;
- break;
- /* 1 G */
- case I40E_PHY_TYPE_1000BASE_T:
- ifmr->ifm_active |= IFM_1000_T;
- break;
- case I40E_PHY_TYPE_1000BASE_SX:
- ifmr->ifm_active |= IFM_1000_SX;
- break;
- case I40E_PHY_TYPE_1000BASE_LX:
- ifmr->ifm_active |= IFM_1000_LX;
- break;
- case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
- ifmr->ifm_active |= IFM_OTHER;
- break;
- /* 10 G */
- case I40E_PHY_TYPE_10GBASE_SFPP_CU:
- ifmr->ifm_active |= IFM_10G_TWINAX;
- break;
- case I40E_PHY_TYPE_10GBASE_SR:
- ifmr->ifm_active |= IFM_10G_SR;
- break;
- case I40E_PHY_TYPE_10GBASE_LR:
- ifmr->ifm_active |= IFM_10G_LR;
- break;
- case I40E_PHY_TYPE_10GBASE_T:
- ifmr->ifm_active |= IFM_10G_T;
- break;
- case I40E_PHY_TYPE_XAUI:
- case I40E_PHY_TYPE_XFI:
- case I40E_PHY_TYPE_10GBASE_AOC:
- ifmr->ifm_active |= IFM_OTHER;
- break;
- /* 40 G */
- case I40E_PHY_TYPE_40GBASE_CR4:
- case I40E_PHY_TYPE_40GBASE_CR4_CU:
- ifmr->ifm_active |= IFM_40G_CR4;
- break;
- case I40E_PHY_TYPE_40GBASE_SR4:
- ifmr->ifm_active |= IFM_40G_SR4;
- break;
- case I40E_PHY_TYPE_40GBASE_LR4:
- ifmr->ifm_active |= IFM_40G_LR4;
- break;
- case I40E_PHY_TYPE_XLAUI:
- ifmr->ifm_active |= IFM_OTHER;
- break;
-#ifndef IFM_ETH_XTYPE
- case I40E_PHY_TYPE_1000BASE_KX:
- ifmr->ifm_active |= IFM_1000_CX;
- break;
- case I40E_PHY_TYPE_SGMII:
- ifmr->ifm_active |= IFM_OTHER;
- break;
- case I40E_PHY_TYPE_10GBASE_CR1_CU:
- case I40E_PHY_TYPE_10GBASE_CR1:
- ifmr->ifm_active |= IFM_10G_TWINAX;
- break;
- case I40E_PHY_TYPE_10GBASE_KX4:
- ifmr->ifm_active |= IFM_10G_CX4;
- break;
- case I40E_PHY_TYPE_10GBASE_KR:
- ifmr->ifm_active |= IFM_10G_SR;
- break;
- case I40E_PHY_TYPE_SFI:
- ifmr->ifm_active |= IFM_OTHER;
- break;
- case I40E_PHY_TYPE_40GBASE_KR4:
- case I40E_PHY_TYPE_XLPPI:
- case I40E_PHY_TYPE_40GBASE_AOC:
- ifmr->ifm_active |= IFM_40G_SR4;
- break;
-#else
- case I40E_PHY_TYPE_1000BASE_KX:
- ifmr->ifm_active |= IFM_1000_KX;
- break;
- case I40E_PHY_TYPE_SGMII:
- ifmr->ifm_active |= IFM_1000_SGMII;
- break;
- /* ERJ: What's the difference between these? */
- case I40E_PHY_TYPE_10GBASE_CR1_CU:
- case I40E_PHY_TYPE_10GBASE_CR1:
- ifmr->ifm_active |= IFM_10G_CR1;
- break;
- case I40E_PHY_TYPE_10GBASE_KX4:
- ifmr->ifm_active |= IFM_10G_KX4;
- break;
- case I40E_PHY_TYPE_10GBASE_KR:
- ifmr->ifm_active |= IFM_10G_KR;
- break;
- case I40E_PHY_TYPE_SFI:
- ifmr->ifm_active |= IFM_10G_SFI;
- break;
- /* Our single 20G media type */
- case I40E_PHY_TYPE_20GBASE_KR2:
- ifmr->ifm_active |= IFM_20G_KR2;
- break;
- case I40E_PHY_TYPE_40GBASE_KR4:
- ifmr->ifm_active |= IFM_40G_KR4;
- break;
- case I40E_PHY_TYPE_XLPPI:
- case I40E_PHY_TYPE_40GBASE_AOC:
- ifmr->ifm_active |= IFM_40G_XLPPI;
- break;
-#endif
- /* Unknown to driver */
- default:
- ifmr->ifm_active |= IFM_UNKNOWN;
- break;
- }
- /* Report flow control status as well */
- if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
- ifmr->ifm_active |= IFM_ETH_TXPAUSE;
- if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
- ifmr->ifm_active |= IFM_ETH_RXPAUSE;
-
- IXL_PF_UNLOCK(pf);
-
- return;
-}
-
-/*
- * NOTE: Fortville does not support forcing media speeds. Instead,
- * use the set_advertise sysctl to set the speeds Fortville
- * will advertise or be allowed to operate at.
- */
-static int
-ixl_media_change(struct ifnet * ifp)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ifmedia *ifm = &vsi->media;
-
- INIT_DEBUGOUT("ixl_media_change: begin");
-
- if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
- return (EINVAL);
-
- if_printf(ifp, "Media change is not supported.\n");
-
- return (ENODEV);
-}
-
-
-#ifdef IXL_FDIR
-/*
-** ATR: Application Targeted Receive - creates a filter
-** based on TX flow info that will keep the receive
-** portion of the flow on the same queue. Based on the
-** implementation this is only available for TCP connections.
-*/
-void
-ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
-{
- struct ixl_vsi *vsi = que->vsi;
- struct tx_ring *txr = &que->txr;
- struct i40e_filter_program_desc *FDIR;
- u32 ptype, dtype;
- int idx;
-
- /* check if ATR is enabled and sample rate */
- if ((!ixl_enable_fdir) || (!txr->atr_rate))
- return;
- /*
-	** We sample all TCP SYN/FIN packets, and
-	** other packets at the selected sample rate.
- */
- txr->atr_count++;
- if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
- (txr->atr_count < txr->atr_rate))
- return;
- txr->atr_count = 0;
-
- /* Get a descriptor to use */
- idx = txr->next_avail;
- FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
- if (++idx == que->num_desc)
- idx = 0;
- txr->avail--;
- txr->next_avail = idx;
-
- ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
- I40E_TXD_FLTR_QW0_QINDEX_MASK;
-
- ptype |= (etype == ETHERTYPE_IP) ?
- (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
- I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
- (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
- I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
-
- ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
-
- dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
-
- /*
- ** We use the TCP TH_FIN as a trigger to remove
-	** the filter; otherwise it's an update.
- */
- dtype |= (th->th_flags & TH_FIN) ?
- (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
- I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
- (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
- I40E_TXD_FLTR_QW1_PCMD_SHIFT);
-
- dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
- I40E_TXD_FLTR_QW1_DEST_SHIFT;
-
- dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
- I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
-
- FDIR->qindex_flex_ptype_vsi = htole32(ptype);
- FDIR->dtype_cmd_cntindex = htole32(dtype);
- return;
-}
-#endif
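
The sampling gate at the top of ixl_atr() fires on every TCP SYN/FIN and otherwise once per atr_rate packets. A self-contained sketch of just that gating logic (TH_FIN/TH_SYN values as defined in netinet/tcp.h; atr_should_sample is a hypothetical helper):

    #include <stdbool.h>
    #include <stdio.h>

    #define TH_FIN 0x01	/* from netinet/tcp.h */
    #define TH_SYN 0x02

    /* True when this packet should program an ATR filter. */
    static bool
    atr_should_sample(unsigned *count, unsigned rate, unsigned char th_flags)
    {
    	(*count)++;
    	if ((th_flags & (TH_FIN | TH_SYN)) == 0 && *count < rate)
    		return (false);
    	*count = 0;
    	return (true);
    }

    int
    main(void)
    {
    	unsigned count = 0;

    	for (int i = 0; i < 45; i++)
    		if (atr_should_sample(&count, 20, 0))
    			printf("sampled data packet %d\n", i);	/* 19, 39 */
    	printf("SYN: %d\n", atr_should_sample(&count, 20, TH_SYN)); /* 1 */
    	return (0);
    }
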
-
-
-static void
-ixl_set_promisc(struct ixl_vsi *vsi)
-{
- struct ifnet *ifp = vsi->ifp;
- struct i40e_hw *hw = vsi->hw;
- int err, mcnt = 0;
- bool uni = FALSE, multi = FALSE;
-
- if (ifp->if_flags & IFF_ALLMULTI)
- multi = TRUE;
- else { /* Need to count the multicast addresses */
- struct ifmultiaddr *ifma;
- if_maddr_rlock(ifp);
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- if (mcnt == MAX_MULTICAST_ADDR)
- break;
- mcnt++;
- }
- if_maddr_runlock(ifp);
- }
-
- if (mcnt >= MAX_MULTICAST_ADDR)
- multi = TRUE;
- if (ifp->if_flags & IFF_PROMISC)
- uni = TRUE;
-
- err = i40e_aq_set_vsi_unicast_promiscuous(hw,
- vsi->seid, uni, NULL);
- err = i40e_aq_set_vsi_multicast_promiscuous(hw,
- vsi->seid, multi, NULL);
- return;
-}
-
-/*********************************************************************
- * Filter Routines
- *
- * Routines for multicast and vlan filter management.
- *
- *********************************************************************/
-static void
-ixl_add_multi(struct ixl_vsi *vsi)
-{
- struct ifmultiaddr *ifma;
- struct ifnet *ifp = vsi->ifp;
- struct i40e_hw *hw = vsi->hw;
- int mcnt = 0, flags;
-
- IOCTL_DEBUGOUT("ixl_add_multi: begin");
-
- if_maddr_rlock(ifp);
- /*
-	** First just get a count, to decide if we
-	** should simply use multicast promiscuous.
- */
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- mcnt++;
- }
- if_maddr_runlock(ifp);
-
- if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
- /* delete existing MC filters */
- ixl_del_hw_filters(vsi, mcnt);
- i40e_aq_set_vsi_multicast_promiscuous(hw,
- vsi->seid, TRUE, NULL);
- return;
- }
-
- mcnt = 0;
- if_maddr_rlock(ifp);
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- ixl_add_mc_filter(vsi,
- (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
- mcnt++;
- }
- if_maddr_runlock(ifp);
- if (mcnt > 0) {
- flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
- ixl_add_hw_filters(vsi, flags, mcnt);
- }
-
- IOCTL_DEBUGOUT("ixl_add_multi: end");
- return;
-}
-
-static void
-ixl_del_multi(struct ixl_vsi *vsi)
-{
- struct ifnet *ifp = vsi->ifp;
- struct ifmultiaddr *ifma;
- struct ixl_mac_filter *f;
- int mcnt = 0;
- bool match = FALSE;
-
- IOCTL_DEBUGOUT("ixl_del_multi: begin");
-
- /* Search for removed multicast addresses */
- if_maddr_rlock(ifp);
- SLIST_FOREACH(f, &vsi->ftl, next) {
- if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
- match = FALSE;
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
- if (cmp_etheraddr(f->macaddr, mc_addr)) {
- match = TRUE;
- break;
- }
- }
- if (match == FALSE) {
- f->flags |= IXL_FILTER_DEL;
- mcnt++;
- }
- }
- }
- if_maddr_runlock(ifp);
-
- if (mcnt > 0)
- ixl_del_hw_filters(vsi, mcnt);
-}
-
-
-/*********************************************************************
- * Timer routine
- *
- * This routine checks for link status, updates statistics,
- * and runs the watchdog check.
- *
- * Only runs when the driver is configured UP and RUNNING.
- *
- **********************************************************************/
-
-static void
-ixl_local_timer(void *arg)
-{
- struct ixl_pf *pf = arg;
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- device_t dev = pf->dev;
- int hung = 0;
- u32 mask;
-
- mtx_assert(&pf->pf_mtx, MA_OWNED);
-
- /* Fire off the adminq task */
- taskqueue_enqueue(pf->tq, &pf->adminq);
-
- /* Update stats */
- ixl_update_stats_counters(pf);
-
- /*
- ** Check status of the queues
- */
- mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- /* Any queues with outstanding work get a sw irq */
- if (que->busy)
- wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
- /*
-		** Each time txeof runs without cleaning while there
-		** are uncleaned descriptors, it increments busy. If
-		** busy reaches the limit, we declare the queue hung.
- */
- if (que->busy == IXL_QUEUE_HUNG) {
- ++hung;
- /* Mark the queue as inactive */
- vsi->active_queues &= ~((u64)1 << que->me);
- continue;
- } else {
- /* Check if we've come back from hung */
- if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
- vsi->active_queues |= ((u64)1 << que->me);
- }
- if (que->busy >= IXL_MAX_TX_BUSY) {
-#ifdef IXL_DEBUG
- device_printf(dev,"Warning queue %d "
- "appears to be hung!\n", i);
-#endif
- que->busy = IXL_QUEUE_HUNG;
- ++hung;
- }
- }
- /* Only reinit if all queues show hung */
- if (hung == vsi->num_queues)
- goto hung;
-
- callout_reset(&pf->timer, hz, ixl_local_timer, pf);
- return;
-
-hung:
- device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
- ixl_init_locked(pf);
-}
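
The watchdog above escalates a per-queue busy counter and only reinitializes once every queue reports hung. A tiny sketch of the escalation for one queue, with placeholder values for the two constants (the real IXL_MAX_TX_BUSY and IXL_QUEUE_HUNG are in ixl.h):

    #include <stdio.h>

    #define IXL_MAX_TX_BUSY	10		/* placeholder thresholds */
    #define IXL_QUEUE_HUNG	0x80000000

    /* One timer tick of watchdog accounting; returns 1 when hung. */
    static int
    watchdog_tick(unsigned *busy)
    {
    	if (*busy == IXL_QUEUE_HUNG)
    		return (1);
    	if (*busy >= IXL_MAX_TX_BUSY) {
    		*busy = IXL_QUEUE_HUNG;
    		return (1);
    	}
    	return (0);
    }

    int
    main(void)
    {
    	unsigned busy = 0;

    	for (int tick = 0; tick < 12; tick++) {
    		busy++;		/* stand-in for txeof seeing stuck work */
    		if (watchdog_tick(&busy))
    			printf("hung at tick %d\n", tick);
    	}
    	return (0);
    }
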
-
-/*
-** Note: this routine updates the OS on the link state;
-** the real check of the hardware only happens with
-** a link interrupt.
-*/
-static void
-ixl_update_link_status(struct ixl_pf *pf)
-{
- struct ixl_vsi *vsi = &pf->vsi;
- struct i40e_hw *hw = &pf->hw;
- struct ifnet *ifp = vsi->ifp;
- device_t dev = pf->dev;
-
- if (pf->link_up) {
- if (vsi->link_active == FALSE) {
- pf->fc = hw->fc.current_mode;
- if (bootverbose) {
- device_printf(dev,"Link is up %d Gbps %s,"
- " Flow Control: %s\n",
- ((pf->link_speed ==
- I40E_LINK_SPEED_40GB)? 40:10),
- "Full Duplex", ixl_fc_string[pf->fc]);
- }
- vsi->link_active = TRUE;
- /*
-			** Warn the user if the link speed on an
-			** NPAR-enabled partition is not at least 10Gbps.
- */
- if (hw->func_caps.npar_enable &&
- (hw->phy.link_info.link_speed ==
- I40E_LINK_SPEED_1GB ||
- hw->phy.link_info.link_speed ==
- I40E_LINK_SPEED_100MB))
-				device_printf(dev, "The partition detected a "
-				    "link speed that is less than 10Gbps\n");
- if_link_state_change(ifp, LINK_STATE_UP);
- }
- } else { /* Link down */
- if (vsi->link_active == TRUE) {
- if (bootverbose)
- device_printf(dev, "Link is Down\n");
- if_link_state_change(ifp, LINK_STATE_DOWN);
- vsi->link_active = FALSE;
- }
- }
-
- return;
-}
-
-static void
-ixl_stop(struct ixl_pf *pf)
-{
- IXL_PF_LOCK(pf);
- ixl_stop_locked(pf);
- IXL_PF_UNLOCK(pf);
-
- ixl_teardown_queue_msix(&pf->vsi);
- ixl_free_queue_tqs(&pf->vsi);
-}
-
-/*********************************************************************
- *
- * This routine disables all traffic on the adapter by issuing a
- * global reset on the MAC and deallocates TX/RX buffers.
- *
- **********************************************************************/
-
-static void
-ixl_stop_locked(struct ixl_pf *pf)
-{
- struct ixl_vsi *vsi = &pf->vsi;
- struct ifnet *ifp = vsi->ifp;
-
- INIT_DEBUGOUT("ixl_stop: begin\n");
-
- IXL_PF_LOCK_ASSERT(pf);
-
- /* Stop the local timer */
- callout_stop(&pf->timer);
-
- ixl_disable_rings_intr(vsi);
- ixl_disable_rings(vsi);
-
- /* Tell the stack that the interface is no longer active */
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
-}
-
-
-/*********************************************************************
- *
- * Setup MSIX Interrupt resources and handlers for the VSI
- *
- **********************************************************************/
-static int
-ixl_assign_vsi_legacy(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- int error, rid = 0;
-
- if (pf->msix == 1)
- rid = 1;
- pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
- &rid, RF_SHAREABLE | RF_ACTIVE);
- if (pf->res == NULL) {
- device_printf(dev, "Unable to allocate"
- " bus resource: vsi legacy/msi interrupt\n");
- return (ENXIO);
- }
-
- /* Set the handler function */
- error = bus_setup_intr(dev, pf->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixl_intr, pf, &pf->tag);
- if (error) {
- pf->res = NULL;
- device_printf(dev, "Failed to register legacy/msi handler\n");
- return (error);
- }
- bus_describe_intr(dev, pf->res, pf->tag, "irq0");
- TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
- TASK_INIT(&que->task, 0, ixl_handle_que, que);
- que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
- taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
- device_get_nameunit(dev));
- TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
-
- pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
- taskqueue_thread_enqueue, &pf->tq);
- taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
- device_get_nameunit(dev));
-
- return (0);
-}
-
-static int
-ixl_setup_adminq_tq(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- int error = 0;
-
- /* Tasklet for Admin Queue interrupts */
- TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
-#ifdef PCI_IOV
- /* VFLR Tasklet */
- TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
-#endif
- /* Create and start Admin Queue taskqueue */
- pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
- taskqueue_thread_enqueue, &pf->tq);
- if (!pf->tq) {
- device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");
- return (ENOMEM);
- }
- error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
- device_get_nameunit(dev));
- if (error) {
- device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
- error);
- taskqueue_free(pf->tq);
- return (error);
- }
- return (0);
-}
-
-static int
-ixl_setup_queue_tqs(struct ixl_vsi *vsi)
-{
- struct ixl_queue *que = vsi->queues;
- device_t dev = vsi->dev;
-#ifdef RSS
- cpuset_t cpu_mask;
- int cpu_id;
-#endif
-
- /* Create queue tasks and start queue taskqueues */
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
- TASK_INIT(&que->task, 0, ixl_handle_que, que);
- que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
-#ifdef RSS
- cpu_id = rss_getcpu(i % rss_getnumbuckets());
- CPU_SETOF(cpu_id, &cpu_mask);
- taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
- &cpu_mask, "%s (bucket %d)",
- device_get_nameunit(dev), cpu_id);
-#else
- taskqueue_start_threads(&que->tq, 1, PI_NET,
- "%s (que %d)", device_get_nameunit(dev), que->me);
-#endif
- }
-
- return (0);
-}
-
-static void
-ixl_free_adminq_tq(struct ixl_pf *pf)
-{
- if (pf->tq) {
- taskqueue_free(pf->tq);
- pf->tq = NULL;
- }
-}
-
-static void
-ixl_free_queue_tqs(struct ixl_vsi *vsi)
-{
- struct ixl_queue *que = vsi->queues;
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- if (que->tq) {
- taskqueue_free(que->tq);
- que->tq = NULL;
- }
- }
-}
-
-static int
-ixl_setup_adminq_msix(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- int rid, error = 0;
-
- /* Admin IRQ rid is 1, vector is 0 */
- rid = 1;
- /* Get interrupt resource from bus */
- pf->res = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
- if (!pf->res) {
- device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
- " interrupt failed [rid=%d]\n", rid);
- return (ENXIO);
- }
- /* Then associate interrupt with handler */
- error = bus_setup_intr(dev, pf->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixl_msix_adminq, pf, &pf->tag);
- if (error) {
- pf->res = NULL;
- device_printf(dev, "bus_setup_intr() for Admin Queue"
- " interrupt handler failed, error %d\n", error);
- return (ENXIO);
- }
- error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
- if (error) {
- /* Probably non-fatal? */
- device_printf(dev, "bus_describe_intr() for Admin Queue"
- " interrupt name failed, error %d\n", error);
- }
- pf->admvec = 0;
-
- return (0);
-}
-
-/*
- * Allocate interrupt resources from bus and associate an interrupt handler
- * to those for the VSI's queues.
- */
-static int
-ixl_setup_queue_msix(struct ixl_vsi *vsi)
-{
- device_t dev = vsi->dev;
- struct ixl_queue *que = vsi->queues;
- struct tx_ring *txr;
- int error, rid, vector = 1;
-
- /* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
- for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
- int cpu_id = i;
- rid = vector + 1;
- txr = &que->txr;
- que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
- if (!que->res) {
- device_printf(dev, "bus_alloc_resource_any() for"
- " Queue %d interrupt failed [rid=%d]\n",
- que->me, rid);
- return (ENXIO);
- }
- /* Set the handler function */
- error = bus_setup_intr(dev, que->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixl_msix_que, que, &que->tag);
- if (error) {
- device_printf(dev, "bus_setup_intr() for Queue %d"
- " interrupt handler failed, error %d\n",
- que->me, error);
- // TODO: Check for error from this?
- bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
- return (error);
- }
- error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
- if (error) {
- device_printf(dev, "bus_describe_intr() for Queue %d"
- " interrupt name failed, error %d\n",
- que->me, error);
- }
- /* Bind the vector to a CPU */
-#ifdef RSS
- cpu_id = rss_getcpu(i % rss_getnumbuckets());
-#endif
- error = bus_bind_intr(dev, que->res, cpu_id);
- if (error) {
- device_printf(dev, "bus_bind_intr() for Queue %d"
- " to CPU %d failed, error %d\n",
- que->me, cpu_id, error);
- }
- que->msix = vector;
- }
-
- return (0);
-}
-
-
-/*
- * Allocate MSI/X vectors
- */
-static int
-ixl_init_msix(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- int rid, want, vectors, queues, available;
-
- /* Override by tuneable */
- if (ixl_enable_msix == 0)
- goto no_msix;
-
- /*
-	** When used in a virtualized environment,
-	** the PCI BUSMASTER capability may not be set,
-	** so explicitly set it here and rewrite the
-	** ENABLE bit in the MSI-X control register at
-	** this point so the host can successfully
-	** initialize us.
- */
- {
- u16 pci_cmd_word;
- int msix_ctrl;
- pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
- pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
- pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
- pci_find_cap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
- }
-
- /* First try MSI/X */
- rid = PCIR_BAR(IXL_BAR);
- pf->msix_mem = bus_alloc_resource_any(dev,
- SYS_RES_MEMORY, &rid, RF_ACTIVE);
- if (!pf->msix_mem) {
- /* May not be enabled */
- device_printf(pf->dev,
- "Unable to map MSIX table\n");
- goto no_msix;
- }
-
- available = pci_msix_count(dev);
- if (available == 0) { /* system has msix disabled */
- bus_release_resource(dev, SYS_RES_MEMORY,
- rid, pf->msix_mem);
- pf->msix_mem = NULL;
- goto no_msix;
- }
-
- /* Figure out a reasonable auto config value */
- queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
-
- /* Override with tunable value if tunable is less than autoconfig count */
- if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
- queues = ixl_max_queues;
- else if ((ixl_max_queues != 0) && (ixl_max_queues > queues))
- device_printf(dev, "ixl_max_queues > # of cpus, using "
- "autoconfig amount...\n");
- /* Or limit maximum auto-configured queues to 8 */
- else if ((ixl_max_queues == 0) && (queues > 8))
- queues = 8;
-
-#ifdef RSS
- /* If we're doing RSS, clamp at the number of RSS buckets */
- if (queues > rss_getnumbuckets())
- queues = rss_getnumbuckets();
-#endif
-
- /*
- ** Want one vector (RX/TX pair) per queue
-	** plus an additional one for the admin queue.
- */
- want = queues + 1;
- if (want <= available) /* Have enough */
- vectors = want;
- else {
- device_printf(pf->dev,
- "MSIX Configuration Problem, "
- "%d vectors available but %d wanted!\n",
- available, want);
- return (0); /* Will go to Legacy setup */
- }
-
- if (pci_alloc_msix(dev, &vectors) == 0) {
- device_printf(pf->dev,
- "Using MSIX interrupts with %d vectors\n", vectors);
- pf->msix = vectors;
- pf->vsi.num_queues = queues;
-#ifdef RSS
- /*
- * If we're doing RSS, the number of queues needs to
- * match the number of RSS buckets that are configured.
- *
- * + If there's more queues than RSS buckets, we'll end
- * up with queues that get no traffic.
- *
- * + If there's more RSS buckets than queues, we'll end
- * up having multiple RSS buckets map to the same queue,
- * so there'll be some contention.
- */
- if (queues != rss_getnumbuckets()) {
- device_printf(dev,
- "%s: queues (%d) != RSS buckets (%d)"
- "; performance will be impacted.\n",
- __func__, queues, rss_getnumbuckets());
- }
-#endif
- return (vectors);
- }
-no_msix:
- vectors = pci_msi_count(dev);
- pf->vsi.num_queues = 1;
- ixl_max_queues = 1;
- ixl_enable_msix = 0;
- if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
- device_printf(pf->dev, "Using an MSI interrupt\n");
- else {
- vectors = 0;
- device_printf(pf->dev, "Using a Legacy interrupt\n");
- }
- return (vectors);
-}
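
The auto-configuration above reduces to: take min(ncpus, available - 1), let the tunable lower it, and otherwise cap at 8. A standalone sketch of that arithmetic (msix_queue_count is a hypothetical name):

    #include <stdio.h>

    /* Mirror of the auto-config logic: one vector per queue plus the adminq. */
    static int
    msix_queue_count(int ncpus, int available, int tunable_max)
    {
    	int queues = (ncpus > available - 1) ? available - 1 : ncpus;

    	if (tunable_max != 0 && tunable_max <= queues)
    		queues = tunable_max;		/* tunable override */
    	else if (tunable_max == 0 && queues > 8)
    		queues = 8;			/* default cap */
    	return (queues);
    }

    int
    main(void)
    {
    	printf("%d\n", msix_queue_count(16, 64, 0));	/* prints 8 */
    	printf("%d\n", msix_queue_count(16, 64, 4));	/* prints 4 */
    	printf("%d\n", msix_queue_count(4, 3, 0));	/* prints 2 */
    	return (0);
    }
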
-
-/*
- * Configure admin queue/misc interrupt cause registers in hardware.
- */
-static void
-ixl_configure_intr0_msix(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- u32 reg;
-
- /* First set up the adminq - vector 0 */
- wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
- rd32(hw, I40E_PFINT_ICR0); /* read to clear */
-
- reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
- I40E_PFINT_ICR0_ENA_GRST_MASK |
- I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
- I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
- I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
- I40E_PFINT_ICR0_ENA_VFLR_MASK |
- I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
- wr32(hw, I40E_PFINT_ICR0_ENA, reg);
-
- /*
- * 0x7FF is the end of the queue list.
- * This means we won't use MSI-X vector 0 for a queue interrupt
- * in MSIX mode.
- */
- wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
- /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
- wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
-
- wr32(hw, I40E_PFINT_DYN_CTL0,
- I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
- I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
-
- wr32(hw, I40E_PFINT_STAT_CTL0, 0);
-}
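
ITR registers count in 2-microsecond units, so the 0x3E written above means 62 * 2 = 124 usecs. A trivial conversion sketch (macro names are illustrative, not from the driver):

    #include <stdio.h>

    #define ITR_TO_USEC(reg)	((reg) * 2)	/* ITR granularity is 2 us */
    #define USEC_TO_ITR(us)	((us) / 2)

    int
    main(void)
    {
    	printf("0x3E -> %d usec\n", ITR_TO_USEC(0x3E));	/* 124 */
    	printf("100 usec -> 0x%X\n", USEC_TO_ITR(100));	/* 0x32 */
    	return (0);
    }
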
-
-/*
- * Configure queue interrupt cause registers in hardware.
- */
-static void
-ixl_configure_queue_intr_msix(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- u32 reg;
- u16 vector = 1;
-
- for (int i = 0; i < vsi->num_queues; i++, vector++) {
- wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
- /* First queue type is RX / 0 */
- wr32(hw, I40E_PFINT_LNKLSTN(i), i);
-
- reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
- (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
- (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
- (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
- wr32(hw, I40E_QINT_RQCTL(i), reg);
-
- reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
- (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
- (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
- (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
- wr32(hw, I40E_QINT_TQCTL(i), reg);
- }
-}
-
-/*
- * Configure for MSI single vector operation
- */
-static void
-ixl_configure_legacy(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- u32 reg;
-
- wr32(hw, I40E_PFINT_ITR0(0), 0);
- wr32(hw, I40E_PFINT_ITR0(1), 0);
-
- /* Setup "other" causes */
- reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
- | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
- | I40E_PFINT_ICR0_ENA_GRST_MASK
- | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
- | I40E_PFINT_ICR0_ENA_GPIO_MASK
- | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
- | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
- | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
- | I40E_PFINT_ICR0_ENA_VFLR_MASK
- | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
- ;
- wr32(hw, I40E_PFINT_ICR0_ENA, reg);
-
- /* SW_ITR_IDX = 0, but don't change INTENA */
- wr32(hw, I40E_PFINT_DYN_CTL0,
- I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
- I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
- /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
- wr32(hw, I40E_PFINT_STAT_CTL0, 0);
-
- /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
- wr32(hw, I40E_PFINT_LNKLST0, 0);
-
- /* Associate the queue pair to the vector and enable the q int */
- reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
- | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
- | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
- wr32(hw, I40E_QINT_RQCTL(0), reg);
-
- reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
- | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
- | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
- wr32(hw, I40E_QINT_TQCTL(0), reg);
-}
-
-
-/*
- * Get initial ITR values from tunable values.
- */
-static void
-ixl_configure_itr(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
-
- vsi->rx_itr_setting = ixl_rx_itr;
- vsi->tx_itr_setting = ixl_tx_itr;
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct tx_ring *txr = &que->txr;
- struct rx_ring *rxr = &que->rxr;
-
- wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
- vsi->rx_itr_setting);
- rxr->itr = vsi->rx_itr_setting;
- rxr->latency = IXL_AVE_LATENCY;
-
- wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
- vsi->tx_itr_setting);
- txr->itr = vsi->tx_itr_setting;
- txr->latency = IXL_AVE_LATENCY;
- }
-}
-
-
-static int
-ixl_allocate_pci_resources(struct ixl_pf *pf)
-{
- int rid;
- device_t dev = pf->dev;
-
- rid = PCIR_BAR(0);
- pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &rid, RF_ACTIVE);
-
- if (!(pf->pci_mem)) {
- device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
- return (ENXIO);
- }
-
- pf->osdep.mem_bus_space_tag =
- rman_get_bustag(pf->pci_mem);
- pf->osdep.mem_bus_space_handle =
- rman_get_bushandle(pf->pci_mem);
- pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
- pf->osdep.flush_reg = I40E_GLGEN_STAT;
- pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
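-	/*
-	 * Note: hw_addr stores a pointer to the bus-space handle, not a
-	 * direct register mapping; the rd32()/wr32() osdep wrappers go
-	 * through bus_space using this handle.
-	 */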
-
- pf->hw.back = &pf->osdep;
-
- /*
-	** Now set up MSI or MSI-X; this should
-	** return the number of supported
-	** vectors. (Will be 1 for MSI)
- */
- pf->msix = ixl_init_msix(pf);
- return (0);
-}
-
-/*
- * Teardown and release the admin queue/misc vector
- * interrupt.
- */
-static int
-ixl_teardown_adminq_msix(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- int rid;
-
- if (pf->admvec) /* we are doing MSIX */
- rid = pf->admvec + 1;
- else
-		rid = (pf->msix != 0) ? 1 : 0;
-
- // TODO: Check for errors from bus_teardown_intr
- // TODO: Check for errors from bus_release_resource
- if (pf->tag != NULL) {
- bus_teardown_intr(dev, pf->res, pf->tag);
- pf->tag = NULL;
- }
- if (pf->res != NULL) {
- bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
- pf->res = NULL;
- }
-
- return (0);
-}
-
-static int
-ixl_teardown_queue_msix(struct ixl_vsi *vsi)
-{
- struct ixl_queue *que = vsi->queues;
- device_t dev = vsi->dev;
- int rid, error = 0;
-
-	/* We may get here before stations are set up */
- if ((!ixl_enable_msix) || (que == NULL))
- return (0);
-
- /* Release all MSIX queue resources */
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- rid = que->msix + 1;
- if (que->tag != NULL) {
- error = bus_teardown_intr(dev, que->res, que->tag);
- if (error) {
- device_printf(dev, "bus_teardown_intr() for"
- " Queue %d interrupt failed\n",
- que->me);
- // return (ENXIO);
- }
- que->tag = NULL;
- }
- if (que->res != NULL) {
- error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
- if (error) {
- device_printf(dev, "bus_release_resource() for"
- " Queue %d interrupt failed [rid=%d]\n",
- que->me, rid);
- // return (ENXIO);
- }
- que->res = NULL;
- }
- }
-
- return (0);
-}
-
-static void
-ixl_free_pci_resources(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- int memrid;
-
- ixl_teardown_queue_msix(&pf->vsi);
- ixl_teardown_adminq_msix(pf);
-
- if (pf->msix)
- pci_release_msi(dev);
-
- memrid = PCIR_BAR(IXL_BAR);
-
- if (pf->msix_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- memrid, pf->msix_mem);
-
- if (pf->pci_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- PCIR_BAR(0), pf->pci_mem);
-
- return;
-}
-
-static void
-ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
-{
- /* Display supported media types */
- if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
- phy_type & (1 << I40E_PHY_TYPE_XFI) ||
- phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
- phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
- phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
- phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
- phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
-
-#ifndef IFM_ETH_XTYPE
- if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
- phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
- phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
- phy_type & (1 << I40E_PHY_TYPE_SFI))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
-#else
- if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
- || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_SFI))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
-#endif
-}
-
-/*********************************************************************
- *
- * Setup networking device structure and register an interface.
- *
- **********************************************************************/
-static int
-ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
-{
- struct ifnet *ifp;
- struct i40e_hw *hw = vsi->hw;
- struct ixl_queue *que = vsi->queues;
- struct i40e_aq_get_phy_abilities_resp abilities;
- enum i40e_status_code aq_error = 0;
-
- INIT_DEBUGOUT("ixl_setup_interface: begin");
-
- ifp = vsi->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not allocate ifnet structure\n");
- return (-1);
- }
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_mtu = ETHERMTU;
- ifp->if_baudrate = IF_Gbps(40);
- ifp->if_init = ixl_init;
- ifp->if_softc = vsi;
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_ioctl = ixl_ioctl;
-
-#if __FreeBSD_version >= 1100036
- if_setgetcounterfn(ifp, ixl_get_counter);
-#endif
-
- ifp->if_transmit = ixl_mq_start;
-
- ifp->if_qflush = ixl_qflush;
-
- ifp->if_snd.ifq_maxlen = que->num_desc - 2;
-
- vsi->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
- + ETHER_VLAN_ENCAP_LEN;
-
- /*
- * Tell the upper layer(s) we support long frames.
- */
- ifp->if_hdrlen = sizeof(struct ether_vlan_header);
-
- ifp->if_capabilities |= IFCAP_HWCSUM;
- ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
- ifp->if_capabilities |= IFCAP_TSO;
- ifp->if_capabilities |= IFCAP_JUMBO_MTU;
- ifp->if_capabilities |= IFCAP_LRO;
-
-	/* VLAN capabilities */
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
- | IFCAP_VLAN_HWTSO
- | IFCAP_VLAN_MTU
- | IFCAP_VLAN_HWCSUM;
- ifp->if_capenable = ifp->if_capabilities;
-
- /*
-	** Don't turn this on by default: if vlans are
-	** created on another pseudo device (e.g. lagg),
-	** vlan events are not passed through, breaking
-	** operation, but with HW FILTER off it works. If
-	** using vlans directly on the ixl driver you can
-	** enable this and get full hardware tag filtering.
- */
- ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
-
- /*
- * Specify the media types supported by this adapter and register
- * callbacks to update media and link information
- */
- ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
- ixl_media_status);
-
- aq_error = i40e_aq_get_phy_capabilities(hw,
- FALSE, TRUE, &abilities, NULL);
-	/* May need a delay to detect fiber correctly */
- if (aq_error == I40E_ERR_UNKNOWN_PHY) {
- i40e_msec_delay(200);
- aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
- TRUE, &abilities, NULL);
- }
- if (aq_error) {
- if (aq_error == I40E_ERR_UNKNOWN_PHY)
- device_printf(dev, "Unknown PHY type detected!\n");
- else
- device_printf(dev,
- "Error getting supported media types, err %d,"
- " AQ error %d\n", aq_error, hw->aq.asq_last_status);
- return (0);
- }
-
- ixl_add_ifmedia(vsi, abilities.phy_type);
-
- /* Use autoselect media by default */
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
- ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
-
- ether_ifattach(ifp, hw->mac.addr);
-
- return (0);
-}
-
-/*
-** Run when the Admin Queue gets a link state change interrupt.
-*/
-static void
-ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- struct i40e_aqc_get_link_status *status =
- (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
-
- /* Request link status from adapter */
- hw->phy.get_link_info = TRUE;
- i40e_get_link_status(hw, &pf->link_up);
-
- /* Print out message if an unqualified module is found */
- if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
- (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
- (!(status->link_info & I40E_AQ_LINK_UP)))
- device_printf(dev, "Link failed because "
- "an unqualified module was detected!\n");
-
- /* Update OS link info */
- ixl_update_link_status(pf);
-}
-
-/*********************************************************************
- *
- * Get Firmware Switch configuration
- * - this will need to be more robust when more complex
- * switch configurations are enabled.
- *
- **********************************************************************/
-static int
-ixl_switch_config(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- device_t dev = vsi->dev;
- struct i40e_aqc_get_switch_config_resp *sw_config;
- u8 aq_buf[I40E_AQ_LARGE_BUF];
- int ret;
- u16 next = 0;
-
- memset(&aq_buf, 0, sizeof(aq_buf));
- sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
- ret = i40e_aq_get_switch_config(hw, sw_config,
- sizeof(aq_buf), &next, NULL);
- if (ret) {
- device_printf(dev, "aq_get_switch_config() failed, error %d,"
- " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
- return (ret);
- }
-#ifdef IXL_DEBUG
- device_printf(dev,
- "Switch config: header reported: %d in structure, %d total\n",
- sw_config->header.num_reported, sw_config->header.num_total);
- for (int i = 0; i < sw_config->header.num_reported; i++) {
- device_printf(dev,
- "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
- sw_config->element[i].element_type,
- sw_config->element[i].seid,
- sw_config->element[i].uplink_seid,
- sw_config->element[i].downlink_seid);
- }
-#endif
- /* Simplified due to a single VSI at the moment */
- vsi->uplink_seid = sw_config->element[0].uplink_seid;
- vsi->downlink_seid = sw_config->element[0].downlink_seid;
- vsi->seid = sw_config->element[0].seid;
- return (ret);
-}
-
-/*********************************************************************
- *
- * Initialize the VSI: this handles contexts, which means things
- *  like the number of descriptors and buffer size; the rings
- *  are also initialized through this function.
- *
- **********************************************************************/
-static int
-ixl_initialize_vsi(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = vsi->back;
- struct ixl_queue *que = vsi->queues;
- device_t dev = vsi->dev;
- struct i40e_hw *hw = vsi->hw;
- struct i40e_vsi_context ctxt;
- int err = 0;
-
- memset(&ctxt, 0, sizeof(ctxt));
- ctxt.seid = vsi->seid;
- if (pf->veb_seid != 0)
- ctxt.uplink_seid = pf->veb_seid;
- ctxt.pf_num = hw->pf_id;
- err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
- if (err) {
- device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
- " aq_error %d\n", err, hw->aq.asq_last_status);
- return (err);
- }
-#ifdef IXL_DEBUG
- device_printf(dev, "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
- "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
- "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
- ctxt.uplink_seid, ctxt.vsi_number,
- ctxt.vsis_allocated, ctxt.vsis_unallocated,
- ctxt.flags, ctxt.pf_num, ctxt.vf_num,
- ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
-#endif
- /*
- ** Set the queue and traffic class bits
- ** - when multiple traffic classes are supported
- ** this will need to be more robust.
- */
- ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
- ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
- /* In contig mode, que_mapping[0] is first queue index used by this VSI */
- ctxt.info.queue_mapping[0] = 0;
- /*
- * This VSI will only use traffic class 0; start traffic class 0's
- * queue allocation at queue 0, and assign it 64 (2^6) queues (though
- * the driver may not use all of them).
- */
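-	/* (The "6" below is log2(64): the hardware takes the queue count as a power of two.) */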
- ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
- & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
- ((6 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
- & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
-
- /* Set VLAN receive stripping mode */
- ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
- ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
- if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
- ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
- else
- ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
-
- /* Keep copy of VSI info in VSI for statistic counters */
- memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
-
- /* Reset VSI statistics */
- ixl_vsi_reset_stats(vsi);
- vsi->hw_filters_add = 0;
- vsi->hw_filters_del = 0;
-
- ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
-
- err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
- if (err) {
- device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d, aq_error %d\n",
- err, hw->aq.asq_last_status);
- return (err);
- }
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct tx_ring *txr = &que->txr;
- struct rx_ring *rxr = &que->rxr;
- struct i40e_hmc_obj_txq tctx;
- struct i40e_hmc_obj_rxq rctx;
- u32 txctl;
- u16 size;
-
- /* Setup the HMC TX Context */
- size = que->num_desc * sizeof(struct i40e_tx_desc);
- memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
- tctx.new_context = 1;
- tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
- tctx.qlen = que->num_desc;
- tctx.fc_ena = 0;
- tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
- /* Enable HEAD writeback */
- tctx.head_wb_ena = 1;
- tctx.head_wb_addr = txr->dma.pa +
- (que->num_desc * sizeof(struct i40e_tx_desc));
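-		/*
-		 * The head-writeback word lives in the extra u32 allocated
-		 * just past the descriptor ring (see the tsize computation
-		 * in ixl_setup_stations).
-		 */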
- tctx.rdylist_act = 0;
- err = i40e_clear_lan_tx_queue_context(hw, i);
- if (err) {
- device_printf(dev, "Unable to clear TX context\n");
- break;
- }
- err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
- if (err) {
- device_printf(dev, "Unable to set TX context\n");
- break;
- }
- /* Associate the ring with this PF */
- txctl = I40E_QTX_CTL_PF_QUEUE;
- txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
- I40E_QTX_CTL_PF_INDX_MASK);
- wr32(hw, I40E_QTX_CTL(i), txctl);
- ixl_flush(hw);
-
- /* Do ring (re)init */
- ixl_init_tx_ring(que);
-
- /* Next setup the HMC RX Context */
- if (vsi->max_frame_size <= MCLBYTES)
- rxr->mbuf_sz = MCLBYTES;
- else
- rxr->mbuf_sz = MJUMPAGESIZE;
-
- u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
-
- /* Set up an RX context for the HMC */
- memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
- rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
- /* ignore header split for now */
- rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
- rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
- vsi->max_frame_size : max_rxmax;
- rctx.dtype = 0;
- rctx.dsize = 1; /* do 32byte descriptors */
- rctx.hsplit_0 = 0; /* no HDR split initially */
- rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
- rctx.qlen = que->num_desc;
- rctx.tphrdesc_ena = 1;
- rctx.tphwdesc_ena = 1;
- rctx.tphdata_ena = 0;
- rctx.tphhead_ena = 0;
- rctx.lrxqthresh = 2;
- rctx.crcstrip = 1;
- rctx.l2tsel = 1;
- rctx.showiv = 1;
- rctx.fc_ena = 0;
- rctx.prefena = 1;
-
- err = i40e_clear_lan_rx_queue_context(hw, i);
- if (err) {
- device_printf(dev,
- "Unable to clear RX context %d\n", i);
- break;
- }
- err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
- if (err) {
- device_printf(dev, "Unable to set RX context %d\n", i);
- break;
- }
- err = ixl_init_rx_ring(que);
- if (err) {
- device_printf(dev, "Fail in init_rx_ring %d\n", i);
- break;
- }
-#ifdef DEV_NETMAP
- /* preserve queue */
- if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
- struct netmap_adapter *na = NA(vsi->ifp);
- struct netmap_kring *kring = &na->rx_rings[i];
- int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
- wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
- } else
-#endif /* DEV_NETMAP */
- wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
- }
- return (err);
-}
-
-
-/*********************************************************************
- *
- * Free all VSI structs.
- *
- **********************************************************************/
-void
-ixl_free_vsi(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
- struct ixl_queue *que = vsi->queues;
-
- /* Free station queues */
- if (!vsi->queues)
- goto free_filters;
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct tx_ring *txr = &que->txr;
- struct rx_ring *rxr = &que->rxr;
-
- if (!mtx_initialized(&txr->mtx)) /* uninitialized */
- continue;
- IXL_TX_LOCK(txr);
- ixl_free_que_tx(que);
- if (txr->base)
- i40e_free_dma_mem(&pf->hw, &txr->dma);
- IXL_TX_UNLOCK(txr);
- IXL_TX_LOCK_DESTROY(txr);
-
- if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
- continue;
- IXL_RX_LOCK(rxr);
- ixl_free_que_rx(que);
- if (rxr->base)
- i40e_free_dma_mem(&pf->hw, &rxr->dma);
- IXL_RX_UNLOCK(rxr);
- IXL_RX_LOCK_DESTROY(rxr);
-
- }
- free(vsi->queues, M_DEVBUF);
-
-free_filters:
- /* Free VSI filter list */
- ixl_free_mac_filters(vsi);
-}
-
-static void
-ixl_free_mac_filters(struct ixl_vsi *vsi)
-{
- struct ixl_mac_filter *f;
-
- while (!SLIST_EMPTY(&vsi->ftl)) {
- f = SLIST_FIRST(&vsi->ftl);
- SLIST_REMOVE_HEAD(&vsi->ftl, next);
- free(f, M_DEVBUF);
- }
-}
-
-
-/*********************************************************************
- *
- *  Allocate memory for the VSI (virtual station interface) and its
- *  associated queues, rings and the descriptors associated with each;
- *  called only once at attach.
- *
- **********************************************************************/
-static int
-ixl_setup_stations(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- struct ixl_vsi *vsi;
- struct ixl_queue *que;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- int rsize, tsize;
- int error = I40E_SUCCESS;
-
- vsi = &pf->vsi;
-	vsi->back = pf;
-	vsi->hw = &pf->hw;
-	vsi->id = 0;
-	vsi->num_vlans = 0;
-
- /* Get memory for the station queues */
- if (!(vsi->queues =
- (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
- vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate queue memory\n");
- error = ENOMEM;
- goto early;
- }
-
- for (int i = 0; i < vsi->num_queues; i++) {
- que = &vsi->queues[i];
- que->num_desc = ixl_ringsz;
- que->me = i;
- que->vsi = vsi;
- /* mark the queue as active */
- vsi->active_queues |= (u64)1 << que->me;
- txr = &que->txr;
- txr->que = que;
- txr->tail = I40E_QTX_TAIL(que->me);
-
- /* Initialize the TX lock */
- snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
- device_get_nameunit(dev), que->me);
- mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
- /* Create the TX descriptor ring */
- tsize = roundup2((que->num_desc *
- sizeof(struct i40e_tx_desc)) +
- sizeof(u32), DBA_ALIGN);
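-		/* The extra u32 is the TX head-writeback slot used by the HMC TX context. */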
- if (i40e_allocate_dma_mem(&pf->hw,
- &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
- device_printf(dev,
- "Unable to allocate TX Descriptor memory\n");
- error = ENOMEM;
- goto fail;
- }
- txr->base = (struct i40e_tx_desc *)txr->dma.va;
- bzero((void *)txr->base, tsize);
- /* Now allocate transmit soft structs for the ring */
- if (ixl_allocate_tx_data(que)) {
- device_printf(dev,
- "Critical Failure setting up TX structures\n");
- error = ENOMEM;
- goto fail;
- }
- /* Allocate a buf ring */
- txr->br = buf_ring_alloc(4096, M_DEVBUF,
- M_NOWAIT, &txr->mtx);
- if (txr->br == NULL) {
- device_printf(dev,
- "Critical Failure setting up TX buf ring\n");
- error = ENOMEM;
- goto fail;
- }
-
- /*
- * Next the RX queues...
- */
- rsize = roundup2(que->num_desc *
- sizeof(union i40e_rx_desc), DBA_ALIGN);
- rxr = &que->rxr;
- rxr->que = que;
- rxr->tail = I40E_QRX_TAIL(que->me);
-
- /* Initialize the RX side lock */
- snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
- device_get_nameunit(dev), que->me);
- mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
-
- if (i40e_allocate_dma_mem(&pf->hw,
- &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
- device_printf(dev,
- "Unable to allocate RX Descriptor memory\n");
- error = ENOMEM;
- goto fail;
- }
- rxr->base = (union i40e_rx_desc *)rxr->dma.va;
- bzero((void *)rxr->base, rsize);
-
-		/* Allocate receive soft structs for the ring */
- if (ixl_allocate_rx_data(que)) {
- device_printf(dev,
- "Critical Failure setting up receive structs\n");
- error = ENOMEM;
- goto fail;
- }
- }
-
- return (0);
-
-fail:
- for (int i = 0; i < vsi->num_queues; i++) {
- que = &vsi->queues[i];
- rxr = &que->rxr;
- txr = &que->txr;
- if (rxr->base)
- i40e_free_dma_mem(&pf->hw, &rxr->dma);
- if (txr->base)
- i40e_free_dma_mem(&pf->hw, &txr->dma);
- }
-
-early:
- return (error);
-}
-
-/*
-** Provide an update to the queue RX
-** interrupt moderation value.
-*/
-static void
-ixl_set_queue_rx_itr(struct ixl_queue *que)
-{
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct rx_ring *rxr = &que->rxr;
- u16 rx_itr;
- u16 rx_latency = 0;
- int rx_bytes;
-
- /* Idle, do nothing */
- if (rxr->bytes == 0)
- return;
-
- if (ixl_dynamic_rx_itr) {
- rx_bytes = rxr->bytes/rxr->itr;
- rx_itr = rxr->itr;
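-		/*
-		 * bytes/itr is a rough proxy for traffic seen per ITR
-		 * interval; it drives the latency-class transitions below.
-		 */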
-
- /* Adjust latency range */
- switch (rxr->latency) {
- case IXL_LOW_LATENCY:
- if (rx_bytes > 10) {
- rx_latency = IXL_AVE_LATENCY;
- rx_itr = IXL_ITR_20K;
- }
- break;
- case IXL_AVE_LATENCY:
- if (rx_bytes > 20) {
- rx_latency = IXL_BULK_LATENCY;
- rx_itr = IXL_ITR_8K;
- } else if (rx_bytes <= 10) {
- rx_latency = IXL_LOW_LATENCY;
- rx_itr = IXL_ITR_100K;
- }
- break;
- case IXL_BULK_LATENCY:
- if (rx_bytes <= 20) {
- rx_latency = IXL_AVE_LATENCY;
- rx_itr = IXL_ITR_20K;
- }
- break;
- }
-
- rxr->latency = rx_latency;
-
- if (rx_itr != rxr->itr) {
-			/* Smooth toward the new value: weighted harmonic mean, 9:1 in favor of the current ITR */
- rx_itr = (10 * rx_itr * rxr->itr) /
- ((9 * rx_itr) + rxr->itr);
- rxr->itr = rx_itr & IXL_MAX_ITR;
- wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
- que->me), rxr->itr);
- }
-	} else { /* We may have toggled to non-dynamic */
- if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
- vsi->rx_itr_setting = ixl_rx_itr;
- /* Update the hardware if needed */
- if (rxr->itr != vsi->rx_itr_setting) {
- rxr->itr = vsi->rx_itr_setting;
- wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
- que->me), rxr->itr);
- }
- }
- rxr->bytes = 0;
- rxr->packets = 0;
- return;
-}
-
-
-/*
-** Provide an update to the queue TX
-** interrupt moderation value.
-*/
-static void
-ixl_set_queue_tx_itr(struct ixl_queue *que)
-{
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- u16 tx_itr;
- u16 tx_latency = 0;
- int tx_bytes;
-
-
- /* Idle, do nothing */
- if (txr->bytes == 0)
- return;
-
- if (ixl_dynamic_tx_itr) {
- tx_bytes = txr->bytes/txr->itr;
- tx_itr = txr->itr;
-
- switch (txr->latency) {
- case IXL_LOW_LATENCY:
- if (tx_bytes > 10) {
- tx_latency = IXL_AVE_LATENCY;
- tx_itr = IXL_ITR_20K;
- }
- break;
- case IXL_AVE_LATENCY:
- if (tx_bytes > 20) {
- tx_latency = IXL_BULK_LATENCY;
- tx_itr = IXL_ITR_8K;
- } else if (tx_bytes <= 10) {
- tx_latency = IXL_LOW_LATENCY;
- tx_itr = IXL_ITR_100K;
- }
- break;
- case IXL_BULK_LATENCY:
- if (tx_bytes <= 20) {
- tx_latency = IXL_AVE_LATENCY;
- tx_itr = IXL_ITR_20K;
- }
- break;
- }
-
- txr->latency = tx_latency;
-
- if (tx_itr != txr->itr) {
-			/* Smooth toward the new value: weighted harmonic mean, 9:1 in favor of the current ITR */
- tx_itr = (10 * tx_itr * txr->itr) /
- ((9 * tx_itr) + txr->itr);
- txr->itr = tx_itr & IXL_MAX_ITR;
- wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
- que->me), txr->itr);
- }
-
-	} else { /* We may have toggled to non-dynamic */
- if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
- vsi->tx_itr_setting = ixl_tx_itr;
- /* Update the hardware if needed */
- if (txr->itr != vsi->tx_itr_setting) {
- txr->itr = vsi->tx_itr_setting;
- wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
- que->me), txr->itr);
- }
- }
- txr->bytes = 0;
- txr->packets = 0;
- return;
-}
-
-#define QUEUE_NAME_LEN 32
-
-static void
-ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
- struct sysctl_ctx_list *ctx, const char *sysctl_name)
-{
- struct sysctl_oid *tree;
- struct sysctl_oid_list *child;
- struct sysctl_oid_list *vsi_list;
-
- tree = device_get_sysctl_tree(pf->dev);
- child = SYSCTL_CHILDREN(tree);
- vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
- CTLFLAG_RD, NULL, "VSI Number");
- vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
-
- ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
-}
-
-#ifdef IXL_DEBUG
-/**
- * ixl_sysctl_qtx_tail_handler
- * Retrieves I40E_QTX_TAIL value from hardware
- * for a sysctl.
- */
-static int
-ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_queue *que;
- int error;
- u32 val;
-
- que = ((struct ixl_queue *)oidp->oid_arg1);
- if (!que) return 0;
-
- val = rd32(que->vsi->hw, que->txr.tail);
- error = sysctl_handle_int(oidp, &val, 0, req);
- if (error || !req->newptr)
- return error;
- return (0);
-}
-
-/**
- * ixl_sysctl_qrx_tail_handler
- * Retrieves I40E_QRX_TAIL value from hardware
- * for a sysctl.
- */
-static int
-ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_queue *que;
- int error;
- u32 val;
-
- que = ((struct ixl_queue *)oidp->oid_arg1);
- if (!que) return 0;
-
- val = rd32(que->vsi->hw, que->rxr.tail);
- error = sysctl_handle_int(oidp, &val, 0, req);
- if (error || !req->newptr)
- return error;
- return (0);
-}
-#endif
-
-static void
-ixl_add_hw_stats(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *queues = vsi->queues;
- struct i40e_hw_port_stats *pf_stats = &pf->stats;
-
- struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
- struct sysctl_oid *tree = device_get_sysctl_tree(dev);
- struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
- struct sysctl_oid_list *vsi_list;
-
- struct sysctl_oid *queue_node;
- struct sysctl_oid_list *queue_list;
-
- struct tx_ring *txr;
- struct rx_ring *rxr;
- char queue_namebuf[QUEUE_NAME_LEN];
-
- /* Driver statistics */
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
- CTLFLAG_RD, &pf->watchdog_events,
- "Watchdog timeouts");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
- CTLFLAG_RD, &pf->admin_irq,
- "Admin Queue IRQ Handled");
-
- ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
- vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
-
- /* Queue statistics */
- for (int q = 0; q < vsi->num_queues; q++) {
- snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
- queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
- OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
- queue_list = SYSCTL_CHILDREN(queue_node);
-
- txr = &(queues[q].txr);
- rxr = &(queues[q].rxr);
-
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
- CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
- "m_defrag() failed");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
- CTLFLAG_RD, &(queues[q].irqs),
- "irqs on this queue");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
- CTLFLAG_RD, &(queues[q].tso),
- "TSO");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
- CTLFLAG_RD, &(queues[q].tx_dma_setup),
- "Driver tx dma failure in xmit");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
- CTLFLAG_RD, &(txr->no_desc),
- "Queue No Descriptor Available");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
- CTLFLAG_RD, &(txr->total_packets),
- "Queue Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
- CTLFLAG_RD, &(txr->tx_bytes),
- "Queue Bytes Transmitted");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
- CTLFLAG_RD, &(rxr->rx_packets),
- "Queue Packets Received");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
- CTLFLAG_RD, &(rxr->rx_bytes),
- "Queue Bytes Received");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
- CTLFLAG_RD, &(rxr->desc_errs),
- "Queue Rx Descriptor Errors");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
- CTLFLAG_RD, &(rxr->itr), 0,
- "Queue Rx ITR Interval");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
- CTLFLAG_RD, &(txr->itr), 0,
- "Queue Tx ITR Interval");
- // Not actual latency; just a calculated value to put in a register
- // TODO: Put in better descriptions here
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_latency",
- CTLFLAG_RD, &(rxr->latency), 0,
- "Queue Rx ITRL Average Interval");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_latency",
- CTLFLAG_RD, &(txr->latency), 0,
- "Queue Tx ITRL Average Interval");
-
-#ifdef IXL_DEBUG
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
- CTLFLAG_RD, &(rxr->not_done),
- "Queue Rx Descriptors not Done");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
- CTLFLAG_RD, &(rxr->next_refresh), 0,
-				"Queue Rx next descriptor to refresh");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
- CTLFLAG_RD, &(rxr->next_check), 0,
-				"Queue Rx next descriptor to check");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
- CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
- sizeof(struct ixl_queue),
- ixl_sysctl_qtx_tail_handler, "IU",
- "Queue Transmit Descriptor Tail");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
- CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
- sizeof(struct ixl_queue),
- ixl_sysctl_qrx_tail_handler, "IU",
- "Queue Receive Descriptor Tail");
-#endif
- }
-
- /* MAC stats */
- ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
-}
-
-static void
-ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
- struct sysctl_oid_list *child,
- struct i40e_eth_stats *eth_stats)
-{
- struct ixl_sysctl_info ctls[] =
- {
- {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
- {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
- "Unicast Packets Received"},
- {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
- "Multicast Packets Received"},
- {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
- "Broadcast Packets Received"},
- {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
- {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
- {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
- {&eth_stats->tx_multicast, "mcast_pkts_txd",
- "Multicast Packets Transmitted"},
- {&eth_stats->tx_broadcast, "bcast_pkts_txd",
- "Broadcast Packets Transmitted"},
- // end
- {0,0,0}
- };
-
- struct ixl_sysctl_info *entry = ctls;
- while (entry->stat != NULL)
- {
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
- CTLFLAG_RD, entry->stat,
- entry->description);
- entry++;
- }
-}
-
-static void
-ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
- struct sysctl_oid_list *child,
- struct i40e_hw_port_stats *stats)
-{
- struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
-	    CTLFLAG_RD, NULL, "MAC Statistics");
- struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
-
- struct i40e_eth_stats *eth_stats = &stats->eth;
- ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
-
- struct ixl_sysctl_info ctls[] =
- {
- {&stats->crc_errors, "crc_errors", "CRC Errors"},
- {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
- {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
- {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
- {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
- /* Packet Reception Stats */
- {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
- {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
- {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
- {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
- {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
- {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
- {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
- {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
- {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
- {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
- {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
- {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
- /* Packet Transmission Stats */
- {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
- {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
- {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
- {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
- {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
- {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
- {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
- /* Flow control */
- {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
- {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
- {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
- {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
- /* End */
- {0,0,0}
- };
-
- struct ixl_sysctl_info *entry = ctls;
- while (entry->stat != NULL)
- {
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
- CTLFLAG_RD, entry->stat,
- entry->description);
- entry++;
- }
-}
-
-
-/*
-** ixl_config_rss - set up RSS
-** - note this is done for the single vsi
-*/
-static void
-ixl_config_rss(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
- struct i40e_hw *hw = vsi->hw;
- u32 lut = 0;
- u64 set_hena = 0, hena;
- int i, j, que_id;
-#ifdef RSS
- u32 rss_hash_config;
- u32 rss_seed[IXL_KEYSZ];
-#else
- u32 rss_seed[IXL_KEYSZ] = {0x41b01687,
- 0x183cfd8c, 0xce880440, 0x580cbc3c,
- 0x35897377, 0x328b25e1, 0x4fa98922,
- 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
-#endif
-
-#ifdef RSS
- /* Fetch the configured RSS key */
- rss_getkey((uint8_t *) &rss_seed);
-#endif
-
- /* Fill out hash function seed */
- for (i = 0; i < IXL_KEYSZ; i++)
- i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
-
- /* Enable PCTYPES for RSS: */
-#ifdef RSS
- rss_hash_config = rss_gethashconfig();
- if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
- if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
- if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
- if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
- if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
-#else
- set_hena =
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
- ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
-#endif
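-	/*
-	 * HENA is a 64-bit enable mask split across two 32-bit registers;
-	 * merge the new PCTYPE bits into the current value and write both
-	 * halves back.
-	 */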
- hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
- ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
- hena |= set_hena;
- i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
- i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
-
-	/* Populate the LUT, assigning queue indices in round-robin fashion */
- for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
- if (j == vsi->num_queues)
- j = 0;
-#ifdef RSS
- /*
- * Fetch the RSS bucket id for the given indirection entry.
- * Cap it at the number of configured buckets (which is
-		 * num_queues).
- */
- que_id = rss_get_indirection_to_bucket(i);
- que_id = que_id % vsi->num_queues;
-#else
- que_id = j;
-#endif
- /* lut = 4-byte sliding window of 4 lut entries */
- lut = (lut << 8) | (que_id &
- ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
- /* On i = 3, we have 4 entries in lut; write to the register */
- if ((i & 3) == 3)
- wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
- }
- ixl_flush(hw);
-}
-
-
-/*
-** This routine is run via a vlan config EVENT;
-** it enables us to use the HW Filter table since
-** we can get the vlan id. This just creates the
-** entry in the soft version of the VFTA; init will
-** repopulate the real table.
-*/
-static void
-ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct i40e_hw *hw = vsi->hw;
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
-
- if (ifp->if_softc != arg) /* Not our event */
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXL_PF_LOCK(pf);
- ++vsi->num_vlans;
- ixl_add_filter(vsi, hw->mac.addr, vtag);
- IXL_PF_UNLOCK(pf);
-}
-
-/*
-** This routine is run via a vlan
-** unconfig EVENT; it removes our entry
-** from the soft vfta.
-*/
-static void
-ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct i40e_hw *hw = vsi->hw;
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
-
- if (ifp->if_softc != arg)
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXL_PF_LOCK(pf);
- --vsi->num_vlans;
- ixl_del_filter(vsi, hw->mac.addr, vtag);
- IXL_PF_UNLOCK(pf);
-}
-
-/*
-** This routine updates vlan filters. Called by init,
-** it scans the filter table and then updates the hw
-** after a soft reset.
-*/
-static void
-ixl_setup_vlan_filters(struct ixl_vsi *vsi)
-{
- struct ixl_mac_filter *f;
- int cnt = 0, flags;
-
- if (vsi->num_vlans == 0)
- return;
- /*
- ** Scan the filter list for vlan entries,
- ** mark them for addition and then call
- ** for the AQ update.
- */
- SLIST_FOREACH(f, &vsi->ftl, next) {
- if (f->flags & IXL_FILTER_VLAN) {
- f->flags |=
- (IXL_FILTER_ADD |
- IXL_FILTER_USED);
- cnt++;
- }
- }
- if (cnt == 0) {
- printf("setup vlan: no filters found!\n");
- return;
- }
- flags = IXL_FILTER_VLAN;
- flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
- ixl_add_hw_filters(vsi, flags, cnt);
- return;
-}
-
-/*
-** Initialize filter list and add filters that the hardware
-** needs to know about.
-**
-** Requires VSI's filter list & seid to be set before calling.
-*/
-static void
-ixl_init_filters(struct ixl_vsi *vsi)
-{
- /* Add broadcast address */
- ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
-
- /*
- * Prevent Tx flow control frames from being sent out by
- * non-firmware transmitters.
- */
- i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
-}
-
-/*
-** This routine adds multicast filters
-*/
-static void
-ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
-{
- struct ixl_mac_filter *f;
-
-	/* Does one already exist? */
- f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
- if (f != NULL)
- return;
-
- f = ixl_get_filter(vsi);
- if (f == NULL) {
- printf("WARNING: no filter available!!\n");
- return;
- }
- bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
- f->vlan = IXL_VLAN_ANY;
- f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
- | IXL_FILTER_MC);
-
- return;
-}
-
-static void
-ixl_reconfigure_filters(struct ixl_vsi *vsi)
-{
-
- ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
-}
-
-/*
-** This routine adds macvlan filters
-*/
-static void
-ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
-{
- struct ixl_mac_filter *f, *tmp;
- struct ixl_pf *pf;
- device_t dev;
-
- DEBUGOUT("ixl_add_filter: begin");
-
- pf = vsi->back;
- dev = pf->dev;
-
-	/* Does one already exist? */
- f = ixl_find_filter(vsi, macaddr, vlan);
- if (f != NULL)
- return;
- /*
-	** Is this the first vlan being registered? If so, we
- ** need to remove the ANY filter that indicates we are
- ** not in a vlan, and replace that with a 0 filter.
- */
- if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
- tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
- if (tmp != NULL) {
- ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
- ixl_add_filter(vsi, macaddr, 0);
- }
- }
-
- f = ixl_get_filter(vsi);
- if (f == NULL) {
- device_printf(dev, "WARNING: no filter available!!\n");
- return;
- }
- bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
- f->vlan = vlan;
- f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
- if (f->vlan != IXL_VLAN_ANY)
- f->flags |= IXL_FILTER_VLAN;
- else
- vsi->num_macs++;
-
- ixl_add_hw_filters(vsi, f->flags, 1);
- return;
-}
-
-static void
-ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
-{
- struct ixl_mac_filter *f;
-
- f = ixl_find_filter(vsi, macaddr, vlan);
- if (f == NULL)
- return;
-
- f->flags |= IXL_FILTER_DEL;
- ixl_del_hw_filters(vsi, 1);
- vsi->num_macs--;
-
- /* Check if this is the last vlan removal */
- if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
- /* Switch back to a non-vlan filter */
- ixl_del_filter(vsi, macaddr, 0);
- ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
- }
- return;
-}
-
-/*
-** Find the filter with both matching mac addr and vlan id
-*/
-static struct ixl_mac_filter *
-ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
-{
- struct ixl_mac_filter *f;
- bool match = FALSE;
-
- SLIST_FOREACH(f, &vsi->ftl, next) {
- if (!cmp_etheraddr(f->macaddr, macaddr))
- continue;
- if (f->vlan == vlan) {
- match = TRUE;
- break;
- }
- }
-
- if (!match)
- f = NULL;
- return (f);
-}
-
-/*
-** This routine takes additions to the vsi filter
-** table and creates an Admin Queue call to create
-** the filters in the hardware.
-*/
-static void
-ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
-{
- struct i40e_aqc_add_macvlan_element_data *a, *b;
- struct ixl_mac_filter *f;
- struct ixl_pf *pf;
- struct i40e_hw *hw;
- device_t dev;
- int err, j = 0;
-
- pf = vsi->back;
- dev = pf->dev;
- hw = &pf->hw;
- IXL_PF_LOCK_ASSERT(pf);
-
- a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
- M_DEVBUF, M_NOWAIT | M_ZERO);
- if (a == NULL) {
- device_printf(dev, "add_hw_filters failed to get memory\n");
- return;
- }
-
- /*
- ** Scan the filter list, each time we find one
- ** we add it to the admin queue array and turn off
- ** the add bit.
- */
- SLIST_FOREACH(f, &vsi->ftl, next) {
- if (f->flags == flags) {
- b = &a[j]; // a pox on fvl long names :)
- bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
- if (f->vlan == IXL_VLAN_ANY) {
- b->vlan_tag = 0;
- b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
- } else {
- b->vlan_tag = f->vlan;
- b->flags = 0;
- }
- b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
- f->flags &= ~IXL_FILTER_ADD;
- j++;
- }
- if (j == cnt)
- break;
- }
- if (j > 0) {
- err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
- if (err)
- device_printf(dev, "aq_add_macvlan err %d, "
- "aq_error %d\n", err, hw->aq.asq_last_status);
- else
- vsi->hw_filters_add += j;
- }
- free(a, M_DEVBUF);
- return;
-}
-
-/*
-** This routine takes removals in the vsi filter
-** table and creates an Admin Queue call to delete
-** the filters in the hardware.
-*/
-static void
-ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
-{
- struct i40e_aqc_remove_macvlan_element_data *d, *e;
- struct ixl_pf *pf;
- struct i40e_hw *hw;
- device_t dev;
- struct ixl_mac_filter *f, *f_temp;
- int err, j = 0;
-
- DEBUGOUT("ixl_del_hw_filters: begin\n");
-
- pf = vsi->back;
- hw = &pf->hw;
- dev = pf->dev;
-
- d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
- M_DEVBUF, M_NOWAIT | M_ZERO);
- if (d == NULL) {
-		device_printf(dev, "del_hw_filters failed to get memory\n");
- return;
- }
-
- SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
- if (f->flags & IXL_FILTER_DEL) {
- e = &d[j]; // a pox on fvl long names :)
- bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
- e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
- e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
- /* delete entry from vsi list */
- SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
- free(f, M_DEVBUF);
- j++;
- }
- if (j == cnt)
- break;
- }
- if (j > 0) {
- err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
- /* NOTE: returns ENOENT every time but seems to work fine,
- so we'll ignore that specific error. */
- // TODO: Does this still occur on current firmwares?
- if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
- int sc = 0;
- for (int i = 0; i < j; i++)
- sc += (!d[i].error_code);
- vsi->hw_filters_del += sc;
- device_printf(dev,
- "Failed to remove %d/%d filters, aq error %d\n",
- j - sc, j, hw->aq.asq_last_status);
- } else
- vsi->hw_filters_del += j;
- }
- free(d, M_DEVBUF);
-
- DEBUGOUT("ixl_del_hw_filters: end\n");
- return;
-}
-
-static int
-ixl_enable_rings(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- int index, error;
- u32 reg;
-
- error = 0;
- for (int i = 0; i < vsi->num_queues; i++) {
- index = vsi->first_queue + i;
- i40e_pre_tx_queue_cfg(hw, index, TRUE);
-
- reg = rd32(hw, I40E_QTX_ENA(index));
- reg |= I40E_QTX_ENA_QENA_REQ_MASK |
- I40E_QTX_ENA_QENA_STAT_MASK;
- wr32(hw, I40E_QTX_ENA(index), reg);
- /* Verify the enable took */
- for (int j = 0; j < 10; j++) {
- reg = rd32(hw, I40E_QTX_ENA(index));
- if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
- break;
- i40e_msec_delay(10);
- }
- if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
- device_printf(pf->dev, "TX queue %d disabled!\n",
- index);
- error = ETIMEDOUT;
- }
-
- reg = rd32(hw, I40E_QRX_ENA(index));
- reg |= I40E_QRX_ENA_QENA_REQ_MASK |
- I40E_QRX_ENA_QENA_STAT_MASK;
- wr32(hw, I40E_QRX_ENA(index), reg);
- /* Verify the enable took */
- for (int j = 0; j < 10; j++) {
- reg = rd32(hw, I40E_QRX_ENA(index));
- if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
- break;
- i40e_msec_delay(10);
- }
- if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
- device_printf(pf->dev, "RX queue %d disabled!\n",
- index);
- error = ETIMEDOUT;
- }
- }
-
- return (error);
-}
-
-static int
-ixl_disable_rings(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- int index, error;
- u32 reg;
-
- error = 0;
- for (int i = 0; i < vsi->num_queues; i++) {
- index = vsi->first_queue + i;
-
- i40e_pre_tx_queue_cfg(hw, index, FALSE);
- i40e_usec_delay(500);
-
- reg = rd32(hw, I40E_QTX_ENA(index));
- reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
- wr32(hw, I40E_QTX_ENA(index), reg);
- /* Verify the disable took */
- for (int j = 0; j < 10; j++) {
- reg = rd32(hw, I40E_QTX_ENA(index));
- if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
- break;
- i40e_msec_delay(10);
- }
- if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
- device_printf(pf->dev, "TX queue %d still enabled!\n",
- index);
- error = ETIMEDOUT;
- }
-
- reg = rd32(hw, I40E_QRX_ENA(index));
- reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
- wr32(hw, I40E_QRX_ENA(index), reg);
- /* Verify the disable took */
- for (int j = 0; j < 10; j++) {
- reg = rd32(hw, I40E_QRX_ENA(index));
- if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
- break;
- i40e_msec_delay(10);
- }
- if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
- device_printf(pf->dev, "RX queue %d still enabled!\n",
- index);
- error = ETIMEDOUT;
- }
- }
-
- return (error);
-}
-
-/**
- * ixl_handle_mdd_event
- *
- * Called from the interrupt handler to identify possibly malicious VFs
- * (it also detects events from the PF)
- **/
-static void
-ixl_handle_mdd_event(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- bool mdd_detected = false;
- bool pf_mdd_detected = false;
- u32 reg;
-
- /* find what triggered the MDD event */
- reg = rd32(hw, I40E_GL_MDET_TX);
- if (reg & I40E_GL_MDET_TX_VALID_MASK) {
- u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
- I40E_GL_MDET_TX_PF_NUM_SHIFT;
- u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
- I40E_GL_MDET_TX_EVENT_SHIFT;
- u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
- I40E_GL_MDET_TX_QUEUE_SHIFT;
- device_printf(dev,
- "Malicious Driver Detection event 0x%02x"
- " on TX queue %d pf number 0x%02x\n",
- event, queue, pf_num);
- wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
- mdd_detected = true;
- }
- reg = rd32(hw, I40E_GL_MDET_RX);
- if (reg & I40E_GL_MDET_RX_VALID_MASK) {
- u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
- I40E_GL_MDET_RX_FUNCTION_SHIFT;
- u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
- I40E_GL_MDET_RX_EVENT_SHIFT;
- u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
- I40E_GL_MDET_RX_QUEUE_SHIFT;
- device_printf(dev,
- "Malicious Driver Detection event 0x%02x"
- " on RX queue %d of function 0x%02x\n",
- event, queue, func);
- wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
- mdd_detected = true;
- }
-
- if (mdd_detected) {
- reg = rd32(hw, I40E_PF_MDET_TX);
- if (reg & I40E_PF_MDET_TX_VALID_MASK) {
- wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
- device_printf(dev,
-			    "MDD TX event is for this function 0x%08x\n",
- reg);
- pf_mdd_detected = true;
- }
- reg = rd32(hw, I40E_PF_MDET_RX);
- if (reg & I40E_PF_MDET_RX_VALID_MASK) {
- wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
- device_printf(dev,
-			    "MDD RX event is for this function 0x%08x\n",
- reg);
- pf_mdd_detected = true;
- }
- }
-
- /* re-enable mdd interrupt cause */
- reg = rd32(hw, I40E_PFINT_ICR0_ENA);
- reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
- wr32(hw, I40E_PFINT_ICR0_ENA, reg);
- ixl_flush(hw);
-}
-
-static void
-ixl_enable_intr(struct ixl_vsi *vsi)
-{
- struct i40e_hw *hw = vsi->hw;
- struct ixl_queue *que = vsi->queues;
-
- if (ixl_enable_msix) {
- for (int i = 0; i < vsi->num_queues; i++, que++)
- ixl_enable_queue(hw, que->me);
- } else
- ixl_enable_legacy(hw);
-}
-
-static void
-ixl_disable_rings_intr(struct ixl_vsi *vsi)
-{
- struct i40e_hw *hw = vsi->hw;
- struct ixl_queue *que = vsi->queues;
-
- for (int i = 0; i < vsi->num_queues; i++, que++)
- ixl_disable_queue(hw, que->me);
-}
-
-static void
-ixl_disable_intr(struct ixl_vsi *vsi)
-{
- struct i40e_hw *hw = vsi->hw;
-
- if (ixl_enable_msix)
- ixl_disable_adminq(hw);
- else
- ixl_disable_legacy(hw);
-}
-
-static void
-ixl_enable_adminq(struct i40e_hw *hw)
-{
- u32 reg;
-
- reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
- (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
- wr32(hw, I40E_PFINT_DYN_CTL0, reg);
- ixl_flush(hw);
-}
-
-static void
-ixl_disable_adminq(struct i40e_hw *hw)
-{
- u32 reg;
-
- reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
- wr32(hw, I40E_PFINT_DYN_CTL0, reg);
- ixl_flush(hw);
-}
-
-static void
-ixl_enable_queue(struct i40e_hw *hw, int id)
-{
- u32 reg;
-
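-	/*
-	 * INTENA re-arms the vector and CLEARPBA clears its pending-bit-array
-	 * entry; IXL_ITR_NONE selects the no-ITR index so this write leaves
-	 * the throttle timers untouched.
-	 */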
- reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
- wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
-}
-
-static void
-ixl_disable_queue(struct i40e_hw *hw, int id)
-{
- u32 reg;
-
- reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
- wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
-}
-
-static void
-ixl_enable_legacy(struct i40e_hw *hw)
-{
- u32 reg;
- reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
- (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
- wr32(hw, I40E_PFINT_DYN_CTL0, reg);
-}
-
-static void
-ixl_disable_legacy(struct i40e_hw *hw)
-{
- u32 reg;
-
- reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
- wr32(hw, I40E_PFINT_DYN_CTL0, reg);
-}
-
-static void
-ixl_update_stats_counters(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_vf *vf;
-
- struct i40e_hw_port_stats *nsd = &pf->stats;
- struct i40e_hw_port_stats *osd = &pf->stats_offsets;
-
- /* Update hw stats */
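-	/*
-	 * Most port counters are 48 bits wide, split across an H/L register
-	 * pair; ixl_stat_update48() merges the pair and counts relative to
-	 * the offset snapshot taken when the driver loaded.
-	 */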
- ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
- pf->stat_offsets_loaded,
- &osd->crc_errors, &nsd->crc_errors);
- ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
- pf->stat_offsets_loaded,
- &osd->illegal_bytes, &nsd->illegal_bytes);
- ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
- I40E_GLPRT_GORCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
- ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
- I40E_GLPRT_GOTCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
- ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_discards,
- &nsd->eth.rx_discards);
- ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
- I40E_GLPRT_UPRCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_unicast,
- &nsd->eth.rx_unicast);
- ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
- I40E_GLPRT_UPTCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_unicast,
- &nsd->eth.tx_unicast);
- ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
- I40E_GLPRT_MPRCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_multicast,
- &nsd->eth.rx_multicast);
- ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
- I40E_GLPRT_MPTCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_multicast,
- &nsd->eth.tx_multicast);
- ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
- I40E_GLPRT_BPRCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_broadcast,
- &nsd->eth.rx_broadcast);
- ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
- I40E_GLPRT_BPTCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_broadcast,
- &nsd->eth.tx_broadcast);
-
- ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_dropped_link_down,
- &nsd->tx_dropped_link_down);
- ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
- pf->stat_offsets_loaded,
- &osd->mac_local_faults,
- &nsd->mac_local_faults);
- ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
- pf->stat_offsets_loaded,
- &osd->mac_remote_faults,
- &nsd->mac_remote_faults);
- ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_length_errors,
- &nsd->rx_length_errors);
-
- /* Flow control (LFC) stats */
- ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xon_rx, &nsd->link_xon_rx);
- ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xon_tx, &nsd->link_xon_tx);
- ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xoff_rx, &nsd->link_xoff_rx);
- ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xoff_tx, &nsd->link_xoff_tx);
-
- /* Packet size stats rx */
- ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
- I40E_GLPRT_PRC64L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_64, &nsd->rx_size_64);
- ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
- I40E_GLPRT_PRC127L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_127, &nsd->rx_size_127);
- ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
- I40E_GLPRT_PRC255L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_255, &nsd->rx_size_255);
- ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
- I40E_GLPRT_PRC511L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_511, &nsd->rx_size_511);
- ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
- I40E_GLPRT_PRC1023L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_1023, &nsd->rx_size_1023);
- ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
- I40E_GLPRT_PRC1522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_1522, &nsd->rx_size_1522);
- ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
- I40E_GLPRT_PRC9522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_big, &nsd->rx_size_big);
-
- /* Packet size stats tx */
- ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
- I40E_GLPRT_PTC64L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_64, &nsd->tx_size_64);
- ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
- I40E_GLPRT_PTC127L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_127, &nsd->tx_size_127);
- ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
- I40E_GLPRT_PTC255L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_255, &nsd->tx_size_255);
- ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
- I40E_GLPRT_PTC511L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_511, &nsd->tx_size_511);
- ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
- I40E_GLPRT_PTC1023L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_1023, &nsd->tx_size_1023);
- ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
- I40E_GLPRT_PTC1522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_1522, &nsd->tx_size_1522);
- ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
- I40E_GLPRT_PTC9522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_big, &nsd->tx_size_big);
-
- ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_undersize, &nsd->rx_undersize);
- ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_fragments, &nsd->rx_fragments);
- ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_oversize, &nsd->rx_oversize);
- ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_jabber, &nsd->rx_jabber);
- pf->stat_offsets_loaded = true;
- /* End hw stats */
-
- /* Update vsi stats */
- ixl_update_vsi_stats(vsi);
-
- for (int i = 0; i < pf->num_vfs; i++) {
- vf = &pf->vfs[i];
- if (vf->vf_flags & VF_FLAG_ENABLED)
- ixl_update_eth_stats(&pf->vfs[i].vsi);
- }
-}
-
-static int
-ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- device_t dev = pf->dev;
- bool is_up = false;
- int error = 0;
-
- is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
-
- /* Teardown */
- if (is_up)
- ixl_stop(pf);
- error = i40e_shutdown_lan_hmc(hw);
- if (error)
- device_printf(dev,
- "Shutdown LAN HMC failed with code %d\n", error);
- ixl_disable_adminq(hw);
- ixl_teardown_adminq_msix(pf);
- error = i40e_shutdown_adminq(hw);
- if (error)
- device_printf(dev,
- "Shutdown Admin queue failed with code %d\n", error);
-
- /* Setup */
- error = i40e_init_adminq(hw);
- if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
- device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
- error);
- }
- error = ixl_setup_adminq_msix(pf);
- if (error) {
- device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
- error);
- }
- ixl_configure_intr0_msix(pf);
- ixl_enable_adminq(hw);
- error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
- hw->func_caps.num_rx_qp, 0, 0);
- if (error) {
- device_printf(dev, "init_lan_hmc failed: %d\n", error);
- }
- error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
- if (error) {
- device_printf(dev, "configure_lan_hmc failed: %d\n", error);
- }
- if (is_up)
- ixl_init(pf);
-
- return (0);
-}
-
-static void
-ixl_handle_empr_reset(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- int count = 0;
- u32 reg;
-
- /* Typically finishes within 3-4 seconds */
- while (count++ < 100) {
- reg = rd32(hw, I40E_GLGEN_RSTAT)
- & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
- if (reg)
- i40e_msec_delay(100);
- else
- break;
- }
-#ifdef IXL_DEBUG
- // Reset-related
- device_printf(dev, "EMPR reset wait count: %d\n", count);
-#endif
-
- device_printf(dev, "Rebuilding driver state...\n");
- ixl_rebuild_hw_structs_after_reset(pf);
- device_printf(dev, "Rebuilding driver state done.\n");
-
- atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
-}
-
-/*
-** Task handler for MSI-X Admin Queue interrupts
-** - runs in taskqueue context rather than in the interrupt itself,
-**   since it might sleep
-*/
-static void
-ixl_do_adminq(void *context, int pending)
-{
- struct ixl_pf *pf = context;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_arq_event_info event;
- i40e_status ret;
- device_t dev = pf->dev;
- u32 loop = 0;
- u16 opcode, result;
-
- if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
- /* Flag cleared at end of this function */
- ixl_handle_empr_reset(pf);
- return;
- }
-
- /* Admin Queue handling */
- event.buf_len = IXL_AQ_BUF_SZ;
- event.msg_buf = malloc(event.buf_len,
- M_DEVBUF, M_NOWAIT | M_ZERO);
- if (!event.msg_buf) {
- device_printf(dev, "%s: Unable to allocate memory for Admin"
- " Queue event!\n", __func__);
- return;
- }
-
- IXL_PF_LOCK(pf);
- /* clean and process any events */
- do {
- ret = i40e_clean_arq_element(hw, &event, &result);
- if (ret)
- break;
- opcode = LE16_TO_CPU(event.desc.opcode);
-#ifdef IXL_DEBUG
- device_printf(dev, "%s: Admin Queue event: %#06x\n", __func__,
- opcode);
-#endif
- switch (opcode) {
- case i40e_aqc_opc_get_link_status:
- ixl_link_event(pf, &event);
- break;
- case i40e_aqc_opc_send_msg_to_pf:
-#ifdef PCI_IOV
- ixl_handle_vf_msg(pf, &event);
-#endif
- break;
- case i40e_aqc_opc_event_lan_overflow:
- default:
- break;
- }
-
- } while (result && (loop++ < IXL_ADM_LIMIT));
-
- free(event.msg_buf, M_DEVBUF);
-
- /*
- * If there are still messages to process, reschedule ourselves.
- * Otherwise, re-enable our interrupt and go to sleep.
- */
- if (result > 0)
- taskqueue_enqueue(pf->tq, &pf->adminq);
- else
- ixl_enable_adminq(hw);
-
- IXL_PF_UNLOCK(pf);
-}
-
-/**
- * Update VSI-specific ethernet statistics counters.
- **/
-void
-ixl_update_eth_stats(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_eth_stats *es;
- struct i40e_eth_stats *oes;
- struct i40e_hw_port_stats *nsd;
- u16 stat_idx = vsi->info.stat_counter_idx;
-
- es = &vsi->eth_stats;
- oes = &vsi->eth_stats_offsets;
- nsd = &pf->stats;
-
- /* Gather up the stats that the hw collects */
- ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->tx_errors, &es->tx_errors);
- ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->rx_discards, &es->rx_discards);
-
- ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
- I40E_GLV_GORCL(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->rx_bytes, &es->rx_bytes);
- ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
- I40E_GLV_UPRCL(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->rx_unicast, &es->rx_unicast);
- ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
- I40E_GLV_MPRCL(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->rx_multicast, &es->rx_multicast);
- ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
- I40E_GLV_BPRCL(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->rx_broadcast, &es->rx_broadcast);
-
- ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
- I40E_GLV_GOTCL(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->tx_bytes, &es->tx_bytes);
- ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
- I40E_GLV_UPTCL(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->tx_unicast, &es->tx_unicast);
- ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
- I40E_GLV_MPTCL(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->tx_multicast, &es->tx_multicast);
- ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
- I40E_GLV_BPTCL(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->tx_broadcast, &es->tx_broadcast);
- vsi->stat_offsets_loaded = true;
-}
-
-static void
-ixl_update_vsi_stats(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf;
- struct ifnet *ifp;
- struct i40e_eth_stats *es;
- u64 tx_discards;
-
- struct i40e_hw_port_stats *nsd;
-
- pf = vsi->back;
- ifp = vsi->ifp;
- es = &vsi->eth_stats;
- nsd = &pf->stats;
-
- ixl_update_eth_stats(vsi);
-
- tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
- for (int i = 0; i < vsi->num_queues; i++)
- tx_discards += vsi->queues[i].txr.br->br_drops;
-
- /* Update ifnet stats */
- IXL_SET_IPACKETS(vsi, es->rx_unicast +
- es->rx_multicast +
- es->rx_broadcast);
- IXL_SET_OPACKETS(vsi, es->tx_unicast +
- es->tx_multicast +
- es->tx_broadcast);
- IXL_SET_IBYTES(vsi, es->rx_bytes);
- IXL_SET_OBYTES(vsi, es->tx_bytes);
- IXL_SET_IMCASTS(vsi, es->rx_multicast);
- IXL_SET_OMCASTS(vsi, es->tx_multicast);
-
- IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
- nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
- nsd->rx_jabber);
- IXL_SET_OERRORS(vsi, es->tx_errors);
- IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
- IXL_SET_OQDROPS(vsi, tx_discards);
- IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
- IXL_SET_COLLISIONS(vsi, 0);
-}
-
-/**
- * Reset all of the stats for the given pf
- **/
-void ixl_pf_reset_stats(struct ixl_pf *pf)
-{
- bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
- bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
- pf->stat_offsets_loaded = false;
-}
-
-/**
- * Resets all stats of the given vsi
- **/
-void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
-{
- bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
- bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
- vsi->stat_offsets_loaded = false;
-}
-
-/**
- * Read and update a 48 bit stat from the hw
- *
- * Since the device stats are not reset at PFReset, they likely will not
- * be zeroed when the driver starts. We'll save the first values read
- * and use them as offsets to be subtracted from the raw values in order
- * to report stats that count from zero.
- **/
-static void
-ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
- bool offset_loaded, u64 *offset, u64 *stat)
-{
- u64 new_data;
-
-#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
- new_data = rd64(hw, loreg);
-#else
-	/*
-	 * Use two rd32()s instead of one rd64(); FreeBSD versions before
-	 * 10 (and 32-bit platforms) don't support 8-byte bus reads/writes.
-	 */
- new_data = rd32(hw, loreg);
- new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
-#endif
-
- if (!offset_loaded)
- *offset = new_data;
- if (new_data >= *offset)
- *stat = new_data - *offset;
- else
- *stat = (new_data + ((u64)1 << 48)) - *offset;
- *stat &= 0xFFFFFFFFFFFFULL;
-}
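
For illustration, the wrap handling in ixl_stat_update48() can be exercised on its own. The sketch below is a standalone userspace program with made-up register readings, not driver code; it only mirrors the offset-and-mask arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	/* Compute a rollover-safe delta for a free-running 48-bit counter,
	 * mirroring the offset logic in ixl_stat_update48(). */
	static uint64_t
	stat_delta48(uint64_t offset, uint64_t new_data)
	{
		uint64_t stat;

		if (new_data >= offset)
			stat = new_data - offset;
		else	/* counter wrapped past 2^48 */
			stat = (new_data + ((uint64_t)1 << 48)) - offset;
		return (stat & 0xFFFFFFFFFFFFULL);
	}

	int
	main(void)
	{
		/* Hypothetical raw readings: an offset near the top of the
		 * 48-bit range, then a reading taken after the wrap. */
		uint64_t offset = 0xFFFFFFFFFF00ULL;
		uint64_t wrapped = 0x000000000080ULL;

		printf("delta = %llu\n",
		    (unsigned long long)stat_delta48(offset, wrapped));
		return (0);
	}

With these readings it prints delta = 384, showing that a sample taken after the counter wraps past 2^48 still yields the correct increment.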
-
-/**
- * Read and update a 32 bit stat from the hw
- **/
-static void
-ixl_stat_update32(struct i40e_hw *hw, u32 reg,
- bool offset_loaded, u64 *offset, u64 *stat)
-{
- u32 new_data;
-
- new_data = rd32(hw, reg);
- if (!offset_loaded)
- *offset = new_data;
- if (new_data >= *offset)
- *stat = (u32)(new_data - *offset);
- else
- *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
-}
-
-static void
-ixl_add_device_sysctls(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
-
- /* Set up sysctls */
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
- pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
- pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_current_speed, "A", "Current Port Speed");
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
-
-#if 0
- SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "rx_itr", CTLFLAG_RW,
- &ixl_rx_itr, 0, "RX ITR");
-
- SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
- &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
-
- SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "tx_itr", CTLFLAG_RW,
- &ixl_tx_itr, 0, "TX ITR");
-
- SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
- &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
-#endif
-
-#ifdef IXL_DEBUG_SYSCTL
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
- ixl_debug_info, "I", "Debug Information");
-
- /* Shared-code debug message level */
- SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "debug_mask", CTLFLAG_RW,
- &pf->hw.debug_mask, 0, "Debug Message Level");
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
-
-#ifdef PCI_IOV
- SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
- 0, "PF/VF Virtual Channel debug level");
-#endif
-#endif
-}
-
-/*
-** Set flow control using sysctl:
-** 0 - off
-** 1 - rx pause
-** 2 - tx pause
-** 3 - full
-*/
-static int
-ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
-{
-	/*
-	 * TODO: Ensure hardware TX CRC is enabled when TX flow control
-	 * is enabled. (Not applicable to 40G ports.)
-	 */
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- int requested_fc, error = 0;
- enum i40e_status_code aq_error = 0;
- u8 fc_aq_err = 0;
-
- /* Get request */
- requested_fc = pf->fc;
- error = sysctl_handle_int(oidp, &requested_fc, 0, req);
- if ((error) || (req->newptr == NULL))
- return (error);
- if (requested_fc < 0 || requested_fc > 3) {
- device_printf(dev,
- "Invalid fc mode; valid modes are 0 through 3\n");
- return (EINVAL);
- }
-
- /* Set fc ability for port */
- hw->fc.requested_mode = requested_fc;
- aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
- if (aq_error) {
- device_printf(dev,
- "%s: Error setting new fc mode %d; fc_err %#x\n",
- __func__, aq_error, fc_aq_err);
- return (EIO);
- }
- pf->fc = requested_fc;
-
- /* Get new link state */
- i40e_msec_delay(250);
- hw->phy.get_link_info = TRUE;
- i40e_get_link_status(hw, &pf->link_up);
-
- return (0);
-}
-
-static int
-ixl_current_speed(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- int error = 0, index = 0;
-
- char *speeds[] = {
- "Unknown",
- "100M",
- "1G",
- "10G",
- "40G",
- "20G"
- };
-
- ixl_update_link_status(pf);
-
- switch (hw->phy.link_info.link_speed) {
- case I40E_LINK_SPEED_100MB:
- index = 1;
- break;
- case I40E_LINK_SPEED_1GB:
- index = 2;
- break;
- case I40E_LINK_SPEED_10GB:
- index = 3;
- break;
- case I40E_LINK_SPEED_40GB:
- index = 4;
- break;
- case I40E_LINK_SPEED_20GB:
- index = 5;
- break;
- case I40E_LINK_SPEED_UNKNOWN:
- default:
- index = 0;
- break;
- }
-
- error = sysctl_handle_string(oidp, speeds[index],
- strlen(speeds[index]), req);
- return (error);
-}
-
-static int
-ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- struct i40e_aq_get_phy_abilities_resp abilities;
- struct i40e_aq_set_phy_config config;
- enum i40e_status_code aq_error = 0;
-
- /* Get current capability information */
- aq_error = i40e_aq_get_phy_capabilities(hw,
- FALSE, FALSE, &abilities, NULL);
- if (aq_error) {
- device_printf(dev,
- "%s: Error getting phy capabilities %d,"
- " aq error: %d\n", __func__, aq_error,
- hw->aq.asq_last_status);
- return (EAGAIN);
- }
-
- /* Prepare new config */
- bzero(&config, sizeof(config));
- config.phy_type = abilities.phy_type;
- config.abilities = abilities.abilities
- | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- config.eee_capability = abilities.eee_capability;
- config.eeer = abilities.eeer_val;
- config.low_power_ctrl = abilities.d3_lpan;
- /* Translate into aq cmd link_speed */
- if (speeds & 0x10)
- config.link_speed |= I40E_LINK_SPEED_40GB;
- if (speeds & 0x8)
- config.link_speed |= I40E_LINK_SPEED_20GB;
- if (speeds & 0x4)
- config.link_speed |= I40E_LINK_SPEED_10GB;
- if (speeds & 0x2)
- config.link_speed |= I40E_LINK_SPEED_1GB;
- if (speeds & 0x1)
- config.link_speed |= I40E_LINK_SPEED_100MB;
-
- /* Do aq command & restart link */
- aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
- if (aq_error) {
- device_printf(dev,
- "%s: Error setting new phy config %d,"
- " aq error: %d\n", __func__, aq_error,
- hw->aq.asq_last_status);
- return (EAGAIN);
- }
-
-	/*
-	** This seems a bit heavy-handed, but we
-	** need to get a reinit on some devices
-	*/
- IXL_PF_LOCK(pf);
- ixl_stop_locked(pf);
- ixl_init_locked(pf);
- IXL_PF_UNLOCK(pf);
-
- return (0);
-}
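
The flag-to-mask translation above is simple enough to check in isolation. A minimal sketch, assuming placeholder values for the I40E_LINK_SPEED_* constants (the real definitions live in i40e_type.h and may differ):

	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder values; assumptions for this sketch only. */
	#define LINK_SPEED_100MB	0x02
	#define LINK_SPEED_1GB		0x04
	#define LINK_SPEED_10GB		0x08
	#define LINK_SPEED_40GB		0x10
	#define LINK_SPEED_20GB		0x20

	/* Translate the sysctl flag mask (0x1=100M ... 0x10=40G) into an
	 * admin-queue link_speed mask, as ixl_set_advertised_speeds() does. */
	static uint8_t
	speeds_to_link_speed(int speeds)
	{
		uint8_t link_speed = 0;

		if (speeds & 0x10)
			link_speed |= LINK_SPEED_40GB;
		if (speeds & 0x8)
			link_speed |= LINK_SPEED_20GB;
		if (speeds & 0x4)
			link_speed |= LINK_SPEED_10GB;
		if (speeds & 0x2)
			link_speed |= LINK_SPEED_1GB;
		if (speeds & 0x1)
			link_speed |= LINK_SPEED_100MB;
		return (link_speed);
	}

	int
	main(void)
	{
		/* Advertise 1G and 10G (sysctl value 0x6). */
		printf("link_speed mask = %#x\n", speeds_to_link_speed(0x6));
		return (0);
	}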
-
-/*
-** Control link advertise speed:
-** Flags:
-** 0x1 - advertise 100 Mb
-** 0x2 - advertise 1G
-** 0x4 - advertise 10G
-** 0x8 - advertise 20G
-** 0x10 - advertise 40G
-**
-** Set to 0 to disable link
-*/
-static int
-ixl_set_advertise(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- int requested_ls = 0;
- int error = 0;
-
- /* Read in new mode */
- requested_ls = pf->advertised_speed;
- error = sysctl_handle_int(oidp, &requested_ls, 0, req);
- if ((error) || (req->newptr == NULL))
- return (error);
- /* Check for sane value */
- if (requested_ls > 0x10) {
- device_printf(dev, "Invalid advertised speed; "
- "valid modes are 0x1 through 0x10\n");
- return (EINVAL);
- }
- /* Then check for validity based on adapter type */
- switch (hw->device_id) {
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- /* BaseT */
- if (requested_ls & ~(0x7)) {
- device_printf(dev,
- "Only 100M/1G/10G speeds supported on this device.\n");
- return (EINVAL);
- }
- break;
- case I40E_DEV_ID_20G_KR2:
- case I40E_DEV_ID_20G_KR2_A:
- /* 20G */
- if (requested_ls & ~(0xE)) {
- device_printf(dev,
- "Only 1G/10G/20G speeds supported on this device.\n");
- return (EINVAL);
- }
- break;
- case I40E_DEV_ID_KX_B:
- case I40E_DEV_ID_QSFP_A:
- case I40E_DEV_ID_QSFP_B:
- /* 40G */
- if (requested_ls & ~(0x10)) {
- device_printf(dev,
- "Only 40G speeds supported on this device.\n");
- return (EINVAL);
- }
- break;
- default:
- /* 10G (1G) */
- if (requested_ls & ~(0x6)) {
- device_printf(dev,
-				"Only 1G/10G speeds supported on this device.\n");
- return (EINVAL);
- }
- break;
- }
-
- /* Exit if no change */
- if (pf->advertised_speed == requested_ls)
- return (0);
-
- error = ixl_set_advertised_speeds(pf, requested_ls);
- if (error)
- return (error);
-
- pf->advertised_speed = requested_ls;
- ixl_update_link_status(pf);
- return (0);
-}
-
-/*
-** Get the width and transaction speed of
-** the bus this adapter is plugged into.
-*/
-static u16
-ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
-{
- u16 link;
- u32 offset;
-
- /* Get the PCI Express Capabilities offset */
- pci_find_cap(dev, PCIY_EXPRESS, &offset);
-
- /* ...and read the Link Status Register */
- link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
-
- switch (link & I40E_PCI_LINK_WIDTH) {
- case I40E_PCI_LINK_WIDTH_1:
- hw->bus.width = i40e_bus_width_pcie_x1;
- break;
- case I40E_PCI_LINK_WIDTH_2:
- hw->bus.width = i40e_bus_width_pcie_x2;
- break;
- case I40E_PCI_LINK_WIDTH_4:
- hw->bus.width = i40e_bus_width_pcie_x4;
- break;
- case I40E_PCI_LINK_WIDTH_8:
- hw->bus.width = i40e_bus_width_pcie_x8;
- break;
- default:
- hw->bus.width = i40e_bus_width_unknown;
- break;
- }
-
- switch (link & I40E_PCI_LINK_SPEED) {
- case I40E_PCI_LINK_SPEED_2500:
- hw->bus.speed = i40e_bus_speed_2500;
- break;
- case I40E_PCI_LINK_SPEED_5000:
- hw->bus.speed = i40e_bus_speed_5000;
- break;
- case I40E_PCI_LINK_SPEED_8000:
- hw->bus.speed = i40e_bus_speed_8000;
- break;
- default:
- hw->bus.speed = i40e_bus_speed_unknown;
- break;
- }
-
-	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
- ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
- (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
- (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
- (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
- (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
- (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
- ("Unknown"));
-
- if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
- (hw->bus.speed < i40e_bus_speed_8000)) {
-		device_printf(dev, "PCI-Express bandwidth available"
-		    " for this device may be insufficient for"
-		    " optimal performance.\n");
-		device_printf(dev, "For expected performance an x8 "
-		    "PCIe Gen3 slot is required.\n");
- }
-
- return (link);
-}
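
The width and speed decoding follows the PCIe Link Status register layout: current speed code in bits [3:0], negotiated width in bits [9:4]. A standalone sketch with assumed mask values (the driver's I40E_PCI_LINK_WIDTH and I40E_PCI_LINK_SPEED encode the same fields):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative assumptions per the PCIe spec field layout. */
	#define PCI_LINK_SPEED_MASK	0x000F
	#define PCI_LINK_WIDTH_MASK	0x03F0
	#define PCI_LINK_WIDTH_SHIFT	4

	int
	main(void)
	{
		/* Hypothetical reading: x8 width, speed code 1 (2.5 GT/s). */
		uint16_t link = 0x0081;

		printf("width x%u, speed code %u\n",
		    (link & PCI_LINK_WIDTH_MASK) >> PCI_LINK_WIDTH_SHIFT,
		    link & PCI_LINK_SPEED_MASK);
		return (0);
	}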
-
-static int
-ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- struct sbuf *sbuf;
-
- sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
- ixl_nvm_version_str(hw, sbuf);
- sbuf_finish(sbuf);
- sbuf_delete(sbuf);
-
- return 0;
-}
-
-#ifdef IXL_DEBUG
-static void
-ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
-{
- if ((nvma->command == I40E_NVM_READ) &&
- ((nvma->config & 0xFF) == 0xF) &&
- (((nvma->config & 0xF00) >> 8) == 0xF) &&
- (nvma->offset == 0) &&
- (nvma->data_size == 1)) {
-		/* Get Driver Status command; not logged to cut down noise */
-	}
-	else if (nvma->command == I40E_NVM_READ) {
-		/* Plain NVM reads are likewise not logged */
-	}
- else {
- switch (nvma->command) {
- case 0xB:
- device_printf(dev, "- command: I40E_NVM_READ\n");
- break;
- case 0xC:
- device_printf(dev, "- command: I40E_NVM_WRITE\n");
- break;
- default:
- device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
- break;
- }
-
- device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF);
- device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
- device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
- device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
- }
-}
-#endif
-
-static int
-ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
-{
- struct i40e_hw *hw = &pf->hw;
- struct i40e_nvm_access *nvma;
- device_t dev = pf->dev;
- enum i40e_status_code status = 0;
- int perrno;
-
- DEBUGFUNC("ixl_handle_nvmupd_cmd");
-
- /* Sanity checks */
- if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
- ifd->ifd_data == NULL) {
- device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
- __func__);
- device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n",
- __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
- device_printf(dev, "%s: data pointer: %p\n", __func__,
- ifd->ifd_data);
- return (EINVAL);
- }
-
- nvma = (struct i40e_nvm_access *)ifd->ifd_data;
-
-#ifdef IXL_DEBUG
- ixl_print_nvm_cmd(dev, nvma);
-#endif
-
- if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
- int count = 0;
- while (count++ < 100) {
- i40e_msec_delay(100);
- if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
- break;
- }
- }
-
- if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
- IXL_PF_LOCK(pf);
- status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
- IXL_PF_UNLOCK(pf);
- } else {
- perrno = -EBUSY;
- }
-
- if (status)
- device_printf(dev, "i40e_nvmupd_command status %d, perrno %d\n",
- status, perrno);
-
-	/*
-	 * -EPERM is actually ERESTART, which the kernel interprets as a
-	 * request to re-run this ioctl. So return -EACCES in place of -EPERM.
-	 */
- if (perrno == -EPERM)
- return (-EACCES);
- else
- return (perrno);
-}
-
-#ifdef IXL_DEBUG_SYSCTL
-static int
-ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_link_status link_status;
- char buf[512];
-
- enum i40e_status_code aq_error = 0;
-
- aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
- if (aq_error) {
- printf("i40e_aq_get_link_info() error %d\n", aq_error);
- return (EPERM);
- }
-
- sprintf(buf, "\n"
- "PHY Type : %#04x\n"
- "Speed : %#04x\n"
- "Link info: %#04x\n"
- "AN info : %#04x\n"
- "Ext info : %#04x\n"
- "Max Frame: %d\n"
- "Pacing : %#04x\n"
- "CRC En? : %d",
- link_status.phy_type, link_status.link_speed,
- link_status.link_info, link_status.an_info,
- link_status.ext_info, link_status.max_frame_size,
- link_status.pacing, link_status.crc_enable);
-
- return (sysctl_handle_string(oidp, buf, strlen(buf), req));
-}
-
-static int
-ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- char buf[512];
- enum i40e_status_code aq_error = 0;
-
- struct i40e_aq_get_phy_abilities_resp abilities;
-
- aq_error = i40e_aq_get_phy_capabilities(hw,
- TRUE, FALSE, &abilities, NULL);
- if (aq_error) {
- printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
- return (EPERM);
- }
-
- sprintf(buf, "\n"
- "PHY Type : %#010x\n"
- "Speed : %#04x\n"
- "Abilities: %#04x\n"
- "EEE cap : %#06x\n"
- "EEER reg : %#010x\n"
- "D3 Lpan : %#04x",
- abilities.phy_type, abilities.link_speed,
- abilities.abilities, abilities.eee_capability,
- abilities.eeer_val, abilities.d3_lpan);
-
- return (sysctl_handle_string(oidp, buf, strlen(buf), req));
-}
-
-static int
-ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_mac_filter *f;
- char *buf, *buf_i;
-
- int error = 0;
- int ftl_len = 0;
- int ftl_counter = 0;
- int buf_len = 0;
- int entry_len = 42;
-
- SLIST_FOREACH(f, &vsi->ftl, next) {
- ftl_len++;
- }
-
- if (ftl_len < 1) {
- sysctl_handle_string(oidp, "(none)", 6, req);
- return (0);
- }
-
- buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
- buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
-
- sprintf(buf_i++, "\n");
- SLIST_FOREACH(f, &vsi->ftl, next) {
- sprintf(buf_i,
- MAC_FORMAT ", vlan %4d, flags %#06x",
- MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
- buf_i += entry_len;
- /* don't print '\n' for last entry */
- if (++ftl_counter != ftl_len) {
- sprintf(buf_i, "\n");
- buf_i++;
- }
- }
-
- error = sysctl_handle_string(oidp, buf, strlen(buf), req);
- if (error)
- printf("sysctl error: %d\n", error);
- free(buf, M_DEVBUF);
- return error;
-}
-
-#define IXL_SW_RES_SIZE 0x14
-static int
-ixl_res_alloc_cmp(const void *a, const void *b)
-{
- const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
- one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
- two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
-
- return ((int)one->resource_type - (int)two->resource_type);
-}
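
The comparator plugs straight into qsort(3). The sketch below uses a trimmed stand-in struct to stay self-contained; the real admin-queue response element has more fields:

	#include <stdio.h>
	#include <stdlib.h>

	/* Trimmed stand-in for the switch resource response element. */
	struct res_entry {
		unsigned char resource_type;
		unsigned short used;
	};

	/* Same ordering rule as ixl_res_alloc_cmp(): ascending by type. */
	static int
	res_cmp(const void *a, const void *b)
	{
		const struct res_entry *one = a, *two = b;

		return ((int)one->resource_type - (int)two->resource_type);
	}

	int
	main(void)
	{
		struct res_entry resp[] = { {7, 12}, {1, 3}, {3, 9} };

		qsort(resp, 3, sizeof(resp[0]), res_cmp);
		for (int i = 0; i < 3; i++)
			printf("type %u used %u\n", resp[i].resource_type,
			    resp[i].used);
		return (0);
	}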
-
-/*
- * Longest string length: 25
- */
-static char *
-ixl_switch_res_type_string(u8 type)
-{
- static char * ixl_switch_res_type_strings[0x14] = {
- "VEB",
- "VSI",
- "Perfect Match MAC address",
- "S-tag",
- "(Reserved)",
- "Multicast hash entry",
- "Unicast hash entry",
- "VLAN",
- "VSI List entry",
- "(Reserved)",
- "VLAN Statistic Pool",
- "Mirror Rule",
- "Queue Set",
- "Inner VLAN Forward filter",
- "(Reserved)",
- "Inner MAC",
- "IP",
- "GRE/VN1 Key",
- "VN2 Key",
- "Tunneling Port"
- };
-
- if (type < 0x14)
- return ixl_switch_res_type_strings[type];
- else
- return "(Reserved)";
-}
-
-static int
-ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- struct sbuf *buf;
- int error = 0;
-
- u8 num_entries;
- struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
-
- buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
- if (!buf) {
- device_printf(dev, "Could not allocate sbuf for output.\n");
- return (ENOMEM);
- }
-
- bzero(resp, sizeof(resp));
- error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
- resp,
- IXL_SW_RES_SIZE,
- NULL);
- if (error) {
- device_printf(dev,
- "%s: get_switch_resource_alloc() error %d, aq error %d\n",
- __func__, error, hw->aq.asq_last_status);
- sbuf_delete(buf);
- return error;
- }
-
- /* Sort entries by type for display */
- qsort(resp, num_entries,
- sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
- &ixl_res_alloc_cmp);
-
- sbuf_cat(buf, "\n");
- sbuf_printf(buf, "# of entries: %d\n", num_entries);
- sbuf_printf(buf,
- " Type | Guaranteed | Total | Used | Un-allocated\n"
- " | (this) | (all) | (this) | (all) \n");
- for (int i = 0; i < num_entries; i++) {
- sbuf_printf(buf,
- "%25s | %10d %5d %6d %12d",
- ixl_switch_res_type_string(resp[i].resource_type),
- resp[i].guaranteed,
- resp[i].total,
- resp[i].used,
- resp[i].total_unalloced);
- if (i < num_entries - 1)
- sbuf_cat(buf, "\n");
- }
-
- error = sbuf_finish(buf);
- if (error)
- device_printf(dev, "Error finishing sbuf: %d\n", error);
-
- sbuf_delete(buf);
- return error;
-}
-
-/*
-** Caller must init and delete sbuf; this function will clear and
-** finish it for caller.
-**
-** XXX: Cannot use the SEID for this, since there is no longer a
-** fixed mapping between SEID and element type.
-*/
-static char *
-ixl_switch_element_string(struct sbuf *s,
- struct i40e_aqc_switch_config_element_resp *element)
-{
- sbuf_clear(s);
-
- switch (element->element_type) {
- case I40E_AQ_SW_ELEM_TYPE_MAC:
- sbuf_printf(s, "MAC %3d", element->element_info);
- break;
- case I40E_AQ_SW_ELEM_TYPE_PF:
- sbuf_printf(s, "PF %3d", element->element_info);
- break;
- case I40E_AQ_SW_ELEM_TYPE_VF:
- sbuf_printf(s, "VF %3d", element->element_info);
- break;
- case I40E_AQ_SW_ELEM_TYPE_EMP:
- sbuf_cat(s, "EMP");
- break;
- case I40E_AQ_SW_ELEM_TYPE_BMC:
- sbuf_cat(s, "BMC");
- break;
- case I40E_AQ_SW_ELEM_TYPE_PV:
- sbuf_cat(s, "PV");
- break;
- case I40E_AQ_SW_ELEM_TYPE_VEB:
- sbuf_cat(s, "VEB");
- break;
- case I40E_AQ_SW_ELEM_TYPE_PA:
- sbuf_cat(s, "PA");
- break;
- case I40E_AQ_SW_ELEM_TYPE_VSI:
- sbuf_printf(s, "VSI %3d", element->element_info);
- break;
- default:
- sbuf_cat(s, "?");
- break;
- }
-
- sbuf_finish(s);
- return sbuf_data(s);
-}
-
-static int
-ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- struct sbuf *buf;
- struct sbuf *nmbuf;
- int error = 0;
- u16 next = 0;
- u8 aq_buf[I40E_AQ_LARGE_BUF];
-
- struct i40e_aqc_get_switch_config_resp *sw_config;
- sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
-
- buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
- if (!buf) {
- device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
- return (ENOMEM);
- }
-
- error = i40e_aq_get_switch_config(hw, sw_config,
- sizeof(aq_buf), &next, NULL);
- if (error) {
- device_printf(dev,
- "%s: aq_get_switch_config() error %d, aq error %d\n",
- __func__, error, hw->aq.asq_last_status);
- sbuf_delete(buf);
- return error;
- }
- if (next)
- device_printf(dev, "%s: TODO: get more config with SEID %d\n",
- __func__, next);
-
- nmbuf = sbuf_new_auto();
- if (!nmbuf) {
- device_printf(dev, "Could not allocate sbuf for name output.\n");
- sbuf_delete(buf);
- return (ENOMEM);
- }
-
- sbuf_cat(buf, "\n");
- // Assuming <= 255 elements in switch
- sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
- sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
- /* Exclude:
- ** Revision -- all elements are revision 1 for now
- */
- sbuf_printf(buf,
- "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
- " | | | (uplink)\n");
- for (int i = 0; i < sw_config->header.num_reported; i++) {
- // "%4d (%8s) | %8s %8s %#8x",
- sbuf_printf(buf, "%4d", sw_config->element[i].seid);
- sbuf_cat(buf, " ");
- sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
- &sw_config->element[i]));
- sbuf_cat(buf, " | ");
- sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
- sbuf_cat(buf, " ");
- sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
- sbuf_cat(buf, " ");
- sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
- if (i < sw_config->header.num_reported - 1)
- sbuf_cat(buf, "\n");
- }
- sbuf_delete(nmbuf);
-
- error = sbuf_finish(buf);
- if (error)
- device_printf(dev, "Error finishing sbuf: %d\n", error);
-
- sbuf_delete(buf);
-
- return (error);
-}
-
-static int
-ixl_debug_info(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf;
- int error, input = 0;
-
- error = sysctl_handle_int(oidp, &input, 0, req);
-
- if (error || !req->newptr)
- return (error);
-
- if (input == 1) {
- pf = (struct ixl_pf *)arg1;
- ixl_print_debug_info(pf);
- }
-
- return (error);
-}
-
-static void
-ixl_print_debug_info(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- struct rx_ring *rxr = &que->rxr;
- struct tx_ring *txr = &que->txr;
- u32 reg;
-
-
- printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
- printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
- printf("RX next check = %x\n", rxr->next_check);
- printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
- printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
- printf("TX desc avail = %x\n", txr->avail);
-
- reg = rd32(hw, I40E_GLV_GORCL(0xc));
- printf("RX Bytes = %x\n", reg);
- reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
- printf("Port RX Bytes = %x\n", reg);
- reg = rd32(hw, I40E_GLV_RDPC(0xc));
- printf("RX discard = %x\n", reg);
- reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
- printf("Port RX discard = %x\n", reg);
-
- reg = rd32(hw, I40E_GLV_TEPC(0xc));
- printf("TX errors = %x\n", reg);
- reg = rd32(hw, I40E_GLV_GOTCL(0xc));
- printf("TX Bytes = %x\n", reg);
-
- reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
- printf("RX undersize = %x\n", reg);
- reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
- printf("RX fragments = %x\n", reg);
- reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
- printf("RX oversize = %x\n", reg);
- reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
- printf("RX length error = %x\n", reg);
- reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
- printf("mac remote fault = %x\n", reg);
- reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
- printf("mac local fault = %x\n", reg);
-}
-
-#endif /* IXL_DEBUG_SYSCTL */
-
-#ifdef PCI_IOV
-static int
-ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
-{
- struct i40e_hw *hw;
- struct ixl_vsi *vsi;
- struct i40e_vsi_context vsi_ctx;
- int i;
- uint16_t first_queue;
- enum i40e_status_code code;
-
- hw = &pf->hw;
- vsi = &pf->vsi;
-
- vsi_ctx.pf_num = hw->pf_id;
- vsi_ctx.uplink_seid = pf->veb_seid;
- vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
- vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
- vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
-
- bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
-
- vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
- vsi_ctx.info.switch_id = htole16(0);
-
- vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
- vsi_ctx.info.sec_flags = 0;
- if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
- vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
-
- /* TODO: If a port VLAN is set, then this needs to be changed */
- vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
- vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
- I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
-
- vsi_ctx.info.valid_sections |=
- htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
- vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
- first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
- for (i = 0; i < IXLV_MAX_QUEUES; i++)
- vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
- for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
- vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
-
- vsi_ctx.info.tc_mapping[0] = htole16(
- (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
- (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
-
- code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
- if (code != I40E_SUCCESS)
- return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
- vf->vsi.seid = vsi_ctx.seid;
- vf->vsi.vsi_num = vsi_ctx.vsi_number;
- vf->vsi.first_queue = first_queue;
- vf->vsi.num_queues = IXLV_MAX_QUEUES;
-
- code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
- if (code != I40E_SUCCESS)
- return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
-
- code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
- if (code != I40E_SUCCESS) {
- device_printf(pf->dev, "Failed to disable BW limit: %d\n",
- ixl_adminq_err_to_errno(hw->aq.asq_last_status));
- return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
- }
-
- memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
- return (0);
-}
-
-static int
-ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
-{
- struct i40e_hw *hw;
- int error;
-
- hw = &pf->hw;
-
- error = ixl_vf_alloc_vsi(pf, vf);
- if (error != 0)
- return (error);
-
- vf->vsi.hw_filters_add = 0;
- vf->vsi.hw_filters_del = 0;
- ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
- ixl_reconfigure_filters(&vf->vsi);
-
- return (0);
-}
-
-static void
-ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
- uint32_t val)
-{
- uint32_t qtable;
- int index, shift;
-
- /*
- * Two queues are mapped in a single register, so we have to do some
- * gymnastics to convert the queue number into a register index and
- * shift.
- */
- index = qnum / 2;
- shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
-
- qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
- qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
- qtable |= val << shift;
- i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
-}
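
The index/shift gymnastics are easy to sanity-check in isolation. A minimal sketch, assuming a shift of 16 for the second entry (the real value comes from I40E_VSILAN_QTABLE_QINDEX_1_SHIFT):

	#include <stdio.h>

	/* Two VSI queue-table entries share one 32-bit register; entry 1
	 * sits at a fixed shift within it. */
	#define QINDEX_1_SHIFT	16	/* assumed for this sketch */

	int
	main(void)
	{
		for (int qnum = 0; qnum < 4; qnum++) {
			int index = qnum / 2;
			int shift = (qnum % 2) * QINDEX_1_SHIFT;
			printf("queue %d -> register %d, shift %d\n",
			    qnum, index, shift);
		}
		return (0);
	}

Queues 0 and 1 land in register 0 at shifts 0 and 16, queues 2 and 3 in register 1, and so on.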
-
-static void
-ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
-{
- struct i40e_hw *hw;
- uint32_t qtable;
- int i;
-
- hw = &pf->hw;
-
- /*
- * Contiguous mappings aren't actually supported by the hardware,
- * so we have to use non-contiguous mappings.
- */
- i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
- I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
-
- wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
- I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
-
- for (i = 0; i < vf->vsi.num_queues; i++) {
- qtable = (vf->vsi.first_queue + i) <<
- I40E_VPLAN_QTABLE_QINDEX_SHIFT;
-
- wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
- }
-
- /* Map queues allocated to VF to its VSI. */
- for (i = 0; i < vf->vsi.num_queues; i++)
- ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
-
- /* Set rest of VSI queues as unused. */
- for (; i < IXL_MAX_VSI_QUEUES; i++)
- ixl_vf_map_vsi_queue(hw, vf, i,
- I40E_VSILAN_QTABLE_QINDEX_0_MASK);
-
- ixl_flush(hw);
-}
-
-static void
-ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
-{
- struct i40e_hw *hw;
-
- hw = &pf->hw;
-
- if (vsi->seid == 0)
- return;
-
- i40e_aq_delete_element(hw, vsi->seid, NULL);
-}
-
-static void
-ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
-{
-
- wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
- ixl_flush(hw);
-}
-
-static void
-ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
-{
-
- wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
- I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
- ixl_flush(hw);
-}
-
-static void
-ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
-{
- struct i40e_hw *hw;
- uint32_t vfint_reg, vpint_reg;
- int i;
-
- hw = &pf->hw;
-
- ixl_vf_vsi_release(pf, &vf->vsi);
-
- /* Index 0 has a special register. */
- ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
-
- for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
-		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
- ixl_vf_disable_queue_intr(hw, vfint_reg);
- }
-
- /* Index 0 has a special register. */
- ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
-
- for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
- vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
- ixl_vf_unregister_intr(hw, vpint_reg);
- }
-
- vf->vsi.num_queues = 0;
-}
-
-static int
-ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
-{
- struct i40e_hw *hw;
- int i;
- uint16_t global_vf_num;
- uint32_t ciad;
-
- hw = &pf->hw;
- global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
-
- wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
- (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
- for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
- ciad = rd32(hw, I40E_PF_PCI_CIAD);
- if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
- return (0);
- DELAY(1);
- }
-
- return (ETIMEDOUT);
-}
-
-static void
-ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
-{
- struct i40e_hw *hw;
- uint32_t vfrtrig;
-
- hw = &pf->hw;
-
- vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
- vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
- ixl_flush(hw);
-
- ixl_reinit_vf(pf, vf);
-}
-
-static void
-ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
-{
- struct i40e_hw *hw;
- uint32_t vfrstat, vfrtrig;
- int i, error;
-
- hw = &pf->hw;
-
- error = ixl_flush_pcie(pf, vf);
- if (error != 0)
- device_printf(pf->dev,
- "Timed out waiting for PCIe activity to stop on VF-%d\n",
- vf->vf_num);
-
- for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
- DELAY(10);
-
- vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
- if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
- break;
- }
-
- if (i == IXL_VF_RESET_TIMEOUT)
- device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
-
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
-
- vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
- vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
-
- if (vf->vsi.seid != 0)
- ixl_disable_rings(&vf->vsi);
-
- ixl_vf_release_resources(pf, vf);
- ixl_vf_setup_vsi(pf, vf);
- ixl_vf_map_queues(pf, vf);
-
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
- ixl_flush(hw);
-}
-
-static const char *
-ixl_vc_opcode_str(uint16_t op)
-{
-
- switch (op) {
- case I40E_VIRTCHNL_OP_VERSION:
- return ("VERSION");
- case I40E_VIRTCHNL_OP_RESET_VF:
- return ("RESET_VF");
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
- return ("GET_VF_RESOURCES");
- case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
- return ("CONFIG_TX_QUEUE");
- case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
- return ("CONFIG_RX_QUEUE");
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- return ("CONFIG_VSI_QUEUES");
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
- return ("CONFIG_IRQ_MAP");
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
- return ("ENABLE_QUEUES");
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
- return ("DISABLE_QUEUES");
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
- return ("ADD_ETHER_ADDRESS");
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
- return ("DEL_ETHER_ADDRESS");
- case I40E_VIRTCHNL_OP_ADD_VLAN:
- return ("ADD_VLAN");
- case I40E_VIRTCHNL_OP_DEL_VLAN:
- return ("DEL_VLAN");
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- return ("CONFIG_PROMISCUOUS_MODE");
- case I40E_VIRTCHNL_OP_GET_STATS:
- return ("GET_STATS");
- case I40E_VIRTCHNL_OP_FCOE:
- return ("FCOE");
- case I40E_VIRTCHNL_OP_EVENT:
- return ("EVENT");
- default:
- return ("UNKNOWN");
- }
-}
-
-static int
-ixl_vc_opcode_level(uint16_t opcode)
-{
- switch (opcode) {
- case I40E_VIRTCHNL_OP_GET_STATS:
- return (10);
- default:
- return (5);
- }
-}
-
-static void
-ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
- enum i40e_status_code status, void *msg, uint16_t len)
-{
- struct i40e_hw *hw;
- int global_vf_id;
-
- hw = &pf->hw;
- global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
-
- I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
- "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
- ixl_vc_opcode_str(op), op, status, vf->vf_num);
-
- i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
-}
-
-static void
-ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
-{
-
- ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
-}
-
-static void
-ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
- enum i40e_status_code status, const char *file, int line)
-{
-
- I40E_VC_DEBUG(pf, 1,
- "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
- ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
- ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
-}
-
-static void
-ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_version_info reply;
-
- if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
- I40E_ERR_PARAM);
- return;
- }
-
- vf->version = ((struct i40e_virtchnl_version_info *)msg)->minor;
-
- reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
- reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
- ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
- sizeof(reply));
-}
-
-static void
-ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
-
- if (msg_size != 0) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
- I40E_ERR_PARAM);
- return;
- }
-
- ixl_reset_vf(pf, vf);
-
- /* No response to a reset message. */
-}
-
-static void
-ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_vf_resource reply;
-
- if ((vf->version == 0 && msg_size != 0) ||
- (vf->version == 1 && msg_size != 4)) {
-		device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size"
-		    " for VF version %d.%d\n", I40E_VIRTCHNL_VERSION_MAJOR,
- vf->version);
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
- I40E_ERR_PARAM);
- return;
- }
-
- bzero(&reply, sizeof(reply));
-
- if (vf->version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
- reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
- else
- reply.vf_offload_flags = *(u32 *)msg;
-
- reply.num_vsis = 1;
- reply.num_queue_pairs = vf->vsi.num_queues;
- reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
- reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
- reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
- reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
- memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
-
- ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
- I40E_SUCCESS, &reply, sizeof(reply));
-}
-
-static int
-ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
- struct i40e_virtchnl_txq_info *info)
-{
- struct i40e_hw *hw;
- struct i40e_hmc_obj_txq txq;
- uint16_t global_queue_num, global_vf_num;
- enum i40e_status_code status;
- uint32_t qtx_ctl;
-
- hw = &pf->hw;
- global_queue_num = vf->vsi.first_queue + info->queue_id;
- global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
- bzero(&txq, sizeof(txq));
-
- status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
- if (status != I40E_SUCCESS)
- return (EINVAL);
-
- txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
-
- txq.head_wb_ena = info->headwb_enabled;
- txq.head_wb_addr = info->dma_headwb_addr;
- txq.qlen = info->ring_len;
- txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
- txq.rdylist_act = 0;
-
- status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
- if (status != I40E_SUCCESS)
- return (EINVAL);
-
- qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
- (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
- (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
- wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
- ixl_flush(hw);
-
- return (0);
-}
-
-static int
-ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
- struct i40e_virtchnl_rxq_info *info)
-{
- struct i40e_hw *hw;
- struct i40e_hmc_obj_rxq rxq;
- uint16_t global_queue_num;
- enum i40e_status_code status;
-
- hw = &pf->hw;
- global_queue_num = vf->vsi.first_queue + info->queue_id;
- bzero(&rxq, sizeof(rxq));
-
- if (info->databuffer_size > IXL_VF_MAX_BUFFER)
- return (EINVAL);
-
- if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
- info->max_pkt_size < ETHER_MIN_LEN)
- return (EINVAL);
-
- if (info->splithdr_enabled) {
- if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
- return (EINVAL);
-
- rxq.hsplit_0 = info->rx_split_pos &
- (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
- I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
- I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
- I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
- rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
-
- rxq.dtype = 2;
- }
-
- status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
- if (status != I40E_SUCCESS)
- return (EINVAL);
-
- rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
- rxq.qlen = info->ring_len;
-
- rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
-
- rxq.dsize = 1;
- rxq.crcstrip = 1;
- rxq.l2tsel = 1;
-
- rxq.rxmax = info->max_pkt_size;
- rxq.tphrdesc_ena = 1;
- rxq.tphwdesc_ena = 1;
- rxq.tphdata_ena = 1;
- rxq.tphhead_ena = 1;
- rxq.lrxqthresh = 2;
- rxq.prefena = 1;
-
- status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
- if (status != I40E_SUCCESS)
- return (EINVAL);
-
- return (0);
-}
-
-static void
-ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_vsi_queue_config_info *info;
- struct i40e_virtchnl_queue_pair_info *pair;
- int i;
-
- if (msg_size < sizeof(*info)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
- info = msg;
- if (info->num_queue_pairs == 0) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
- if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
- if (info->vsi_id != vf->vsi.vsi_num) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
- for (i = 0; i < info->num_queue_pairs; i++) {
- pair = &info->qpair[i];
-
- if (pair->txq.vsi_id != vf->vsi.vsi_num ||
- pair->rxq.vsi_id != vf->vsi.vsi_num ||
- pair->txq.queue_id != pair->rxq.queue_id ||
- pair->txq.queue_id >= vf->vsi.num_queues) {
-
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
- return;
- }
-
- if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
- return;
- }
-
- if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
- return;
- }
- }
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
-}
-
-static void
-ixl_vf_set_qctl(struct ixl_pf *pf,
- const struct i40e_virtchnl_vector_map *vector,
- enum i40e_queue_type cur_type, uint16_t cur_queue,
- enum i40e_queue_type *last_type, uint16_t *last_queue)
-{
- uint32_t offset, qctl;
- uint16_t itr_indx;
-
- if (cur_type == I40E_QUEUE_TYPE_RX) {
- offset = I40E_QINT_RQCTL(cur_queue);
- itr_indx = vector->rxitr_idx;
- } else {
- offset = I40E_QINT_TQCTL(cur_queue);
- itr_indx = vector->txitr_idx;
- }
-
- qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
- (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
- (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
- I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
-
- wr32(&pf->hw, offset, qctl);
-
- *last_type = cur_type;
- *last_queue = cur_queue;
-}
-
-static void
-ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
- const struct i40e_virtchnl_vector_map *vector)
-{
- struct i40e_hw *hw;
- u_int qindex;
- enum i40e_queue_type type, last_type;
- uint32_t lnklst_reg;
- uint16_t rxq_map, txq_map, cur_queue, last_queue;
-
- hw = &pf->hw;
-
- rxq_map = vector->rxq_map;
- txq_map = vector->txq_map;
-
- last_queue = IXL_END_OF_INTR_LNKLST;
- last_type = I40E_QUEUE_TYPE_RX;
-
-	/*
-	 * The datasheet says that to optimize performance, RX queues and TX
-	 * queues should be interleaved in the interrupt linked list, so we
-	 * process both at once here.
-	 */
- while ((rxq_map != 0) || (txq_map != 0)) {
- if (txq_map != 0) {
- qindex = ffs(txq_map) - 1;
- type = I40E_QUEUE_TYPE_TX;
- cur_queue = vf->vsi.first_queue + qindex;
- ixl_vf_set_qctl(pf, vector, type, cur_queue,
- &last_type, &last_queue);
- txq_map &= ~(1 << qindex);
- }
-
- if (rxq_map != 0) {
- qindex = ffs(rxq_map) - 1;
- type = I40E_QUEUE_TYPE_RX;
- cur_queue = vf->vsi.first_queue + qindex;
- ixl_vf_set_qctl(pf, vector, type, cur_queue,
- &last_type, &last_queue);
- rxq_map &= ~(1 << qindex);
- }
- }
-
- if (vector->vector_id == 0)
- lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
- else
- lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
- vf->vf_num);
- wr32(hw, lnklst_reg,
- (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
- (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
-
- ixl_flush(hw);
-}
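
The interleaving loop above walks both queue bitmaps with ffs(). A standalone sketch with hypothetical bitmaps shows the resulting order:

	#include <stdio.h>
	#include <strings.h>

	/* Walk TX and RX queue bitmaps in lockstep, emitting one queue of
	 * each kind per iteration -- the same interleaving order
	 * ixl_vf_config_vector() uses to build the interrupt linked list. */
	int
	main(void)
	{
		unsigned int txq_map = 0x0D;	/* hypothetical: TX queues 0, 2, 3 */
		unsigned int rxq_map = 0x06;	/* hypothetical: RX queues 1, 2 */

		while (txq_map != 0 || rxq_map != 0) {
			if (txq_map != 0) {
				int q = ffs(txq_map) - 1;
				printf("TX queue %d\n", q);
				txq_map &= ~(1u << q);
			}
			if (rxq_map != 0) {
				int q = ffs(rxq_map) - 1;
				printf("RX queue %d\n", q);
				rxq_map &= ~(1u << q);
			}
		}
		return (0);
	}

This prints TX 0, RX 1, TX 2, RX 2, TX 3: alternating queue types until both maps are drained.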
-
-static void
-ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_irq_map_info *map;
- struct i40e_virtchnl_vector_map *vector;
- struct i40e_hw *hw;
- int i, largest_txq, largest_rxq;
-
- hw = &pf->hw;
-
- if (msg_size < sizeof(*map)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_ERR_PARAM);
- return;
- }
-
- map = msg;
- if (map->num_vectors == 0) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_ERR_PARAM);
- return;
- }
-
- if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_ERR_PARAM);
- return;
- }
-
- for (i = 0; i < map->num_vectors; i++) {
- vector = &map->vecmap[i];
-
- if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
- vector->vsi_id != vf->vsi.vsi_num) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
- return;
- }
-
- if (vector->rxq_map != 0) {
- largest_rxq = fls(vector->rxq_map) - 1;
- if (largest_rxq >= vf->vsi.num_queues) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_ERR_PARAM);
- return;
- }
- }
-
- if (vector->txq_map != 0) {
- largest_txq = fls(vector->txq_map) - 1;
- if (largest_txq >= vf->vsi.num_queues) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_ERR_PARAM);
- return;
- }
- }
-
- if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
- vector->txitr_idx > IXL_MAX_ITR_IDX) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_ERR_PARAM);
- return;
- }
-
- ixl_vf_config_vector(pf, vf, vector);
- }
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
-}
-
-static void
-ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_queue_select *select;
- int error;
-
- if (msg_size != sizeof(*select)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
- select = msg;
- if (select->vsi_id != vf->vsi.vsi_num ||
- select->rx_queues == 0 || select->tx_queues == 0) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
- error = ixl_enable_rings(&vf->vsi);
- if (error) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
- I40E_ERR_TIMEOUT);
- return;
- }
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
-}
-
-static void
-ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
- void *msg, uint16_t msg_size)
-{
- struct i40e_virtchnl_queue_select *select;
- int error;
-
- if (msg_size != sizeof(*select)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
- select = msg;
- if (select->vsi_id != vf->vsi.vsi_num ||
- select->rx_queues == 0 || select->tx_queues == 0) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
- error = ixl_disable_rings(&vf->vsi);
- if (error) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
- I40E_ERR_TIMEOUT);
- return;
- }
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
-}
-
-static boolean_t
-ixl_zero_mac(const uint8_t *addr)
-{
- uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
-
- return (cmp_etheraddr(addr, zero));
-}
-
-static boolean_t
-ixl_bcast_mac(const uint8_t *addr)
-{
-
- return (cmp_etheraddr(addr, ixl_bcast_addr));
-}
-
-static int
-ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
-{
-
- if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
- return (EINVAL);
-
- /*
- * If the VF is not allowed to change its MAC address, don't let it
- * set a MAC filter for an address that is not a multicast address and
- * is not its assigned MAC.
- */
- if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
- !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
- return (EPERM);
-
- return (0);
-}
-
-static void
-ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_ether_addr_list *addr_list;
- struct i40e_virtchnl_ether_addr *addr;
- struct ixl_vsi *vsi;
- int i;
- size_t expected_size;
-
- vsi = &vf->vsi;
-
- if (msg_size < sizeof(*addr_list)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
- I40E_ERR_PARAM);
- return;
- }
-
- addr_list = msg;
- expected_size = sizeof(*addr_list) +
- addr_list->num_elements * sizeof(*addr);
-
- if (addr_list->num_elements == 0 ||
- addr_list->vsi_id != vsi->vsi_num ||
- msg_size != expected_size) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
- I40E_ERR_PARAM);
- return;
- }
-
- for (i = 0; i < addr_list->num_elements; i++) {
- if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
- return;
- }
- }
-
- for (i = 0; i < addr_list->num_elements; i++) {
- addr = &addr_list->list[i];
- ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
- }
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
-}
-
-static void
-ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_ether_addr_list *addr_list;
- struct i40e_virtchnl_ether_addr *addr;
- size_t expected_size;
- int i;
-
- if (msg_size < sizeof(*addr_list)) {
-		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
- I40E_ERR_PARAM);
- return;
- }
-
- addr_list = msg;
- expected_size = sizeof(*addr_list) +
- addr_list->num_elements * sizeof(*addr);
-
- if (addr_list->num_elements == 0 ||
- addr_list->vsi_id != vf->vsi.vsi_num ||
- msg_size != expected_size) {
-		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
- I40E_ERR_PARAM);
- return;
- }
-
- for (i = 0; i < addr_list->num_elements; i++) {
- addr = &addr_list->list[i];
- if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
- i40e_send_vf_nack(pf, vf,
-			    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM);
- return;
- }
- }
-
- for (i = 0; i < addr_list->num_elements; i++) {
- addr = &addr_list->list[i];
- ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
- }
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
-}
-
-static enum i40e_status_code
-ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
-{
- struct i40e_vsi_context vsi_ctx;
-
- vsi_ctx.seid = vf->vsi.seid;
-
- bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
- vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
- vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
- I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
- return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
-}
-
-static void
-ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_vlan_filter_list *filter_list;
- enum i40e_status_code code;
- size_t expected_size;
- int i;
-
- if (msg_size < sizeof(*filter_list)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
- I40E_ERR_PARAM);
- return;
- }
-
- filter_list = msg;
- expected_size = sizeof(*filter_list) +
- filter_list->num_elements * sizeof(uint16_t);
- if (filter_list->num_elements == 0 ||
- filter_list->vsi_id != vf->vsi.vsi_num ||
- msg_size != expected_size) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
- I40E_ERR_PARAM);
- return;
- }
-
- if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
- I40E_ERR_PARAM);
- return;
- }
-
- for (i = 0; i < filter_list->num_elements; i++) {
- if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
- I40E_ERR_PARAM);
- return;
- }
- }
-
- code = ixl_vf_enable_vlan_strip(pf, vf);
- if (code != I40E_SUCCESS) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
- I40E_ERR_PARAM);
- }
-
- for (i = 0; i < filter_list->num_elements; i++)
- ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
-}
-
-static void
-ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_vlan_filter_list *filter_list;
- int i;
- size_t expected_size;
-
- if (msg_size < sizeof(*filter_list)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
- I40E_ERR_PARAM);
- return;
- }
-
- filter_list = msg;
- expected_size = sizeof(*filter_list) +
- filter_list->num_elements * sizeof(uint16_t);
- if (filter_list->num_elements == 0 ||
- filter_list->vsi_id != vf->vsi.vsi_num ||
- msg_size != expected_size) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
- I40E_ERR_PARAM);
- return;
- }
-
- for (i = 0; i < filter_list->num_elements; i++) {
- if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
-			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
- I40E_ERR_PARAM);
- return;
- }
- }
-
- if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
-		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
- I40E_ERR_PARAM);
- return;
- }
-
- for (i = 0; i < filter_list->num_elements; i++)
- ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
-}
-
-static void
-ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
- void *msg, uint16_t msg_size)
-{
- struct i40e_virtchnl_promisc_info *info;
- enum i40e_status_code code;
-
- if (msg_size != sizeof(*info)) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
- return;
- }
-
- if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
- return;
- }
-
- info = msg;
- if (info->vsi_id != vf->vsi.vsi_num) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
- return;
- }
-
- code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
- info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
- if (code != I40E_SUCCESS) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
- return;
- }
-
- code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
- info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
- if (code != I40E_SUCCESS) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
- return;
- }
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
-}
-
-static void
-ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_queue_select *queue;
-
- if (msg_size != sizeof(*queue)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
- I40E_ERR_PARAM);
- return;
- }
-
- queue = msg;
- if (queue->vsi_id != vf->vsi.vsi_num) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
- I40E_ERR_PARAM);
- return;
- }
-
- ixl_update_eth_stats(&vf->vsi);
-
- ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
- I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
-}
-
-static void
-ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
-{
- struct ixl_vf *vf;
- void *msg;
- uint16_t vf_num, msg_size;
- uint32_t opcode;
-
- vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
- opcode = le32toh(event->desc.cookie_high);
-
- if (vf_num >= pf->num_vfs) {
- device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
- return;
- }
-
- vf = &pf->vfs[vf_num];
- msg = event->msg_buf;
- msg_size = event->msg_len;
-
- I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
- "Got msg %s(%d) from VF-%d of size %d\n",
- ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
-
- switch (opcode) {
- case I40E_VIRTCHNL_OP_VERSION:
- ixl_vf_version_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_RESET_VF:
- ixl_vf_reset_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
- ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
- ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
- ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
- ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
- ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
- ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_ADD_VLAN:
- ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_DEL_VLAN:
- ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_GET_STATS:
- ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
- break;
-
- /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
- case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
- case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
- default:
- i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
- break;
- }
-}
-
-/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
-static void
-ixl_handle_vflr(void *arg, int pending)
-{
- struct ixl_pf *pf;
- struct i40e_hw *hw;
- uint16_t global_vf_num;
- uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
- int i;
-
- pf = arg;
- hw = &pf->hw;
-
- IXL_PF_LOCK(pf);
- for (i = 0; i < pf->num_vfs; i++) {
- global_vf_num = hw->func_caps.vf_base_id + i;
-
- vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
- vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
- vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
- if (vflrstat & vflrstat_mask) {
- wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
- vflrstat_mask);
-
- ixl_reinit_vf(pf, &pf->vfs[i]);
- }
- }
-
- icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
- icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
- wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
- ixl_flush(hw);
-
- IXL_PF_UNLOCK(pf);
-}
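
For reference, VFLR status is spread across 32-bit GLGEN_VFLRSTAT registers,
one bit per global VF, so the index/mask helpers used above presumably reduce
to a divide/modulo pair -- a sketch, not the verbatim ixl_pf.h definitions:

	#define IXL_GLGEN_VFLRSTAT_INDEX(glb_vf)	((glb_vf) / 32)
	#define IXL_GLGEN_VFLRSTAT_MASK(glb_vf)		(1 << ((glb_vf) % 32))

e.g. global VF number 40 lands in GLGEN_VFLRSTAT(1), bit 8. Writing the mask
back clears the sticky FLR bit before the VF is reinitialized.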
-
-static int
-ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
-{
-
- switch (err) {
- case I40E_AQ_RC_EPERM:
- return (EPERM);
- case I40E_AQ_RC_ENOENT:
- return (ENOENT);
- case I40E_AQ_RC_ESRCH:
- return (ESRCH);
- case I40E_AQ_RC_EINTR:
- return (EINTR);
- case I40E_AQ_RC_EIO:
- return (EIO);
- case I40E_AQ_RC_ENXIO:
- return (ENXIO);
- case I40E_AQ_RC_E2BIG:
- return (E2BIG);
- case I40E_AQ_RC_EAGAIN:
- return (EAGAIN);
- case I40E_AQ_RC_ENOMEM:
- return (ENOMEM);
- case I40E_AQ_RC_EACCES:
- return (EACCES);
- case I40E_AQ_RC_EFAULT:
- return (EFAULT);
- case I40E_AQ_RC_EBUSY:
- return (EBUSY);
- case I40E_AQ_RC_EEXIST:
- return (EEXIST);
- case I40E_AQ_RC_EINVAL:
- return (EINVAL);
- case I40E_AQ_RC_ENOTTY:
- return (ENOTTY);
- case I40E_AQ_RC_ENOSPC:
- return (ENOSPC);
- case I40E_AQ_RC_ENOSYS:
- return (ENOSYS);
- case I40E_AQ_RC_ERANGE:
- return (ERANGE);
- case I40E_AQ_RC_EFLUSHED:
- return (EINVAL); /* No exact equivalent in errno.h */
- case I40E_AQ_RC_BAD_ADDR:
- return (EFAULT);
- case I40E_AQ_RC_EMODE:
- return (EPERM);
- case I40E_AQ_RC_EFBIG:
- return (EFBIG);
- default:
- return (EINVAL);
- }
-}
-
-static int
-ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
-{
- struct ixl_pf *pf;
- struct i40e_hw *hw;
- struct ixl_vsi *pf_vsi;
- enum i40e_status_code ret;
- int i, error;
-
- pf = device_get_softc(dev);
- hw = &pf->hw;
- pf_vsi = &pf->vsi;
-
- IXL_PF_LOCK(pf);
- pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
- M_ZERO);
-
- if (pf->vfs == NULL) {
- error = ENOMEM;
- goto fail;
- }
-
- for (i = 0; i < num_vfs; i++)
- sysctl_ctx_init(&pf->vfs[i].ctx);
-
- ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
- 1, FALSE, &pf->veb_seid, FALSE, NULL);
- if (ret != I40E_SUCCESS) {
- error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
- device_printf(dev, "add_veb failed; code=%d error=%d", ret,
- error);
- goto fail;
- }
-
- // TODO: [Configure MSI-X here]
- ixl_enable_adminq(hw);
-
- pf->num_vfs = num_vfs;
- IXL_PF_UNLOCK(pf);
- return (0);
-
-fail:
- free(pf->vfs, M_IXL);
- pf->vfs = NULL;
- IXL_PF_UNLOCK(pf);
- return (error);
-}
-
-static void
-ixl_iov_uninit(device_t dev)
-{
- struct ixl_pf *pf;
- struct i40e_hw *hw;
- struct ixl_vsi *vsi;
- struct ifnet *ifp;
- struct ixl_vf *vfs;
- int i, num_vfs;
-
- pf = device_get_softc(dev);
- hw = &pf->hw;
- vsi = &pf->vsi;
- ifp = vsi->ifp;
-
- IXL_PF_LOCK(pf);
- for (i = 0; i < pf->num_vfs; i++) {
- if (pf->vfs[i].vsi.seid != 0)
- i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
- }
-
- if (pf->veb_seid != 0) {
- i40e_aq_delete_element(hw, pf->veb_seid, NULL);
- pf->veb_seid = 0;
- }
-
- if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
- ixl_disable_intr(vsi);
-
- vfs = pf->vfs;
- num_vfs = pf->num_vfs;
-
- pf->vfs = NULL;
- pf->num_vfs = 0;
- IXL_PF_UNLOCK(pf);
-
- /* Do this after the unlock as sysctl_ctx_free might sleep. */
- for (i = 0; i < num_vfs; i++)
- sysctl_ctx_free(&vfs[i].ctx);
- free(vfs, M_IXL);
-}
-
-static int
-ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
-{
- char sysctl_name[QUEUE_NAME_LEN];
- struct ixl_pf *pf;
- struct ixl_vf *vf;
- const void *mac;
- size_t size;
- int error;
-
- pf = device_get_softc(dev);
- vf = &pf->vfs[vfnum];
-
- IXL_PF_LOCK(pf);
- vf->vf_num = vfnum;
-
- vf->vsi.back = pf;
- vf->vf_flags = VF_FLAG_ENABLED;
- SLIST_INIT(&vf->vsi.ftl);
-
- error = ixl_vf_setup_vsi(pf, vf);
- if (error != 0)
- goto out;
-
- if (nvlist_exists_binary(params, "mac-addr")) {
- mac = nvlist_get_binary(params, "mac-addr", &size);
- bcopy(mac, vf->mac, ETHER_ADDR_LEN);
-
- if (nvlist_get_bool(params, "allow-set-mac"))
- vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
- } else
- /*
- * If the administrator has not specified a MAC address then
- * we must allow the VF to choose one.
- */
- vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
-
- if (nvlist_get_bool(params, "mac-anti-spoof"))
- vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
-
- if (nvlist_get_bool(params, "allow-promisc"))
- vf->vf_flags |= VF_FLAG_PROMISC_CAP;
-
- /* TODO: Get VLAN that PF has set for the VF */
-
- vf->vf_flags |= VF_FLAG_VLAN_CAP;
-
- ixl_reset_vf(pf, vf);
-out:
- IXL_PF_UNLOCK(pf);
- if (error == 0) {
- snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
- ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
- }
-
- return (error);
-}
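
The nvlist parameters consumed above arrive from the host's SR-IOV
configuration. A hypothetical iovctl.conf(5) fragment exercising this path
(device name and values are illustrative only):

	PF {
		device : "ixl0";
		num_vfs : 2;
	}

	VF-0 {
		mac-addr : "02:00:00:00:00:01";
		allow-set-mac : false;
		mac-anti-spoof : true;
		allow-promisc : false;
	}

When no "mac-addr" is supplied, the code above grants VF_FLAG_SET_MAC_CAP so
the VF may choose its own address.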
-#endif /* PCI_IOV */
diff --git a/sys/dev/ixl/if_ixlv.c b/sys/dev/ixl/if_ixlv.c
index 9be8a36e8816..9c70d79816c0 100644
--- a/sys/dev/ixl/if_ixlv.c
+++ b/sys/dev/ixl/if_ixlv.c
@@ -32,23 +32,13 @@
******************************************************************************/
/*$FreeBSD$*/
-#ifndef IXL_STANDALONE_BUILD
-#include "opt_inet.h"
-#include "opt_inet6.h"
-#include "opt_rss.h"
-#endif
-
#include "ixl.h"
#include "ixlv.h"
-#ifdef RSS
-#include <net/rss_config.h>
-#endif
-
/*********************************************************************
* Driver version
*********************************************************************/
-char ixlv_driver_version[] = "1.2.11-k";
+char ixlv_driver_version[] = "1.4.6-k";
/*********************************************************************
* PCI Device ID Table
@@ -64,6 +54,9 @@ static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF_HV, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -73,7 +66,7 @@ static ixl_vendor_info_t ixlv_vendor_info_array[] =
*********************************************************************/
static char *ixlv_strings[] = {
- "Intel(R) Ethernet Connection XL710 VF Driver"
+ "Intel(R) Ethernet Connection XL710/X722 VF Driver"
};
@@ -119,6 +112,7 @@ static void ixlv_set_queue_rx_itr(struct ixl_queue *);
static void ixlv_set_queue_tx_itr(struct ixl_queue *);
static void ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
enum i40e_status_code);
+static void ixlv_configure_itr(struct ixlv_sc *);
static void ixlv_enable_adminq_irq(struct i40e_hw *);
static void ixlv_disable_adminq_irq(struct i40e_hw *);
@@ -137,8 +131,10 @@ static void ixlv_cap_txcsum_tso(struct ixl_vsi *,
struct ifnet *, int);
static void ixlv_add_sysctls(struct ixlv_sc *);
+#ifdef IXL_DEBUG
static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
+#endif
/*********************************************************************
* FreeBSD Device Interface Entry Points
@@ -306,7 +302,7 @@ ixlv_attach(device_t dev)
/* Allocate filter lists */
ixlv_init_filters(sc);
- /* Core Lock Init*/
+ /* Core Lock Init */
mtx_init(&sc->mtx, device_get_nameunit(dev),
"IXL SC Lock", MTX_DEF);
@@ -365,14 +361,16 @@ ixlv_attach(device_t dev)
goto err_aq;
}
- INIT_DBG_DEV(dev, "VF config from PF:");
- INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
+ device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n",
sc->vf_res->num_vsis,
sc->vf_res->num_queue_pairs,
sc->vf_res->max_vectors,
- sc->vf_res->max_mtu);
- INIT_DBG_DEV(dev, "Offload flags: %#010x",
- sc->vf_res->vf_offload_flags);
+ sc->vf_res->rss_key_size,
+ sc->vf_res->rss_lut_size);
+#ifdef IXL_DEBUG
+ device_printf(dev, "Offload flags: 0x%b\n",
+ sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS);
+#endif
/* got VF config message back from PF, now we can parse it */
for (int i = 0; i < sc->vf_res->num_vsis; i++) {
@@ -396,6 +394,14 @@ ixlv_attach(device_t dev)
bcopy(addr, hw->mac.addr, sizeof(addr));
}
+ /* Now that the number of queues for this VF is known, set up interrupts */
+ sc->msix = ixlv_init_msix(sc);
+ /* We fail without MSIX support */
+ if (sc->msix == 0) {
+ error = ENXIO;
+ goto err_res_buf;
+ }
+
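
Ordering note: ixlv_init_msix() now runs here, after the PF's resource reply
has been parsed, because the vector count is clamped against
sc->vf_res->num_queue_pairs (see ixlv_init_msix() below); previously it was
called from ixlv_allocate_pci_resources(), before that value was known.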
vsi->id = sc->vsi_res->vsi_id;
vsi->back = (void *)sc;
sc->link_up = TRUE;
@@ -419,7 +425,12 @@ ixlv_attach(device_t dev)
INIT_DBG_DEV(dev, "Queue memory and interface setup");
/* Do queue interrupt setup */
- ixlv_assign_msix(sc);
+ if (ixlv_assign_msix(sc) != 0) {
+ device_printf(dev, "%s: allocating queue interrupts failed!\n",
+ __func__);
+ error = ENXIO;
+ goto out;
+ }
/* Start AdminQ taskqueue */
ixlv_init_taskqueue(sc);
@@ -829,8 +840,8 @@ ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
*/
if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
if_printf(sc->vsi.ifp,
- "Error %d waiting for PF to complete operation %d\n",
- code, cmd->request);
+ "Error %s waiting for PF to complete operation %d\n",
+ i40e_stat_str(&sc->hw, code), cmd->request);
}
}
@@ -901,6 +912,9 @@ ixlv_init_locked(struct ixlv_sc *sc)
ixl_init_rx_ring(que);
}
+ /* Set initial ITR values */
+ ixlv_configure_itr(sc);
+
/* Configure queues */
ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);
@@ -936,18 +950,31 @@ ixlv_init(void *arg)
struct ixlv_sc *sc = vsi->back;
int retries = 0;
+ /* Prevent init from running again while waiting for AQ calls
+ * made in init_locked() to complete. */
mtx_lock(&sc->mtx);
+ if (sc->init_in_progress) {
+ mtx_unlock(&sc->mtx);
+ return;
+ } else
+ sc->init_in_progress = true;
+
ixlv_init_locked(sc);
mtx_unlock(&sc->mtx);
/* Wait for init_locked to finish */
while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
&& ++retries < IXLV_AQ_MAX_ERR) {
- i40e_msec_delay(25);
+ i40e_msec_pause(25);
}
- if (retries >= IXLV_AQ_MAX_ERR)
+ if (retries >= IXLV_AQ_MAX_ERR) {
if_printf(vsi->ifp,
"Init failed to complete in allotted time!\n");
+ }
+
+ mtx_lock(&sc->mtx);
+ sc->init_in_progress = false;
+ mtx_unlock(&sc->mtx);
}
/*
@@ -990,8 +1017,8 @@ ixlv_setup_vc(struct ixlv_sc *sc)
 	/* Need to set these AQ parameters before initializing AQ */
hw->aq.num_arq_entries = IXL_AQ_LEN;
hw->aq.num_asq_entries = IXL_AQ_LEN;
- hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
- hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
+ hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
+ hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
/* Initialize admin queue */
@@ -1021,13 +1048,13 @@ retry_send:
while (!i40e_asq_done(hw)) {
if (++asq_retries > IXLV_AQ_MAX_ERR) {
i40e_shutdown_adminq(hw);
- DDPRINTF(dev, "Admin Queue timeout "
- "(waiting for send_api_ver), %d more retries...",
+ device_printf(dev, "Admin Queue timeout "
+ "(waiting for send_api_ver), %d more tries...\n",
IXLV_AQ_MAX_ERR - (i + 1));
ret_error = 3;
break;
}
- i40e_msec_delay(10);
+ i40e_msec_pause(10);
}
if (asq_retries > IXLV_AQ_MAX_ERR)
continue;
@@ -1055,7 +1082,7 @@ retry_send:
if (error) {
device_printf(dev,
"%s: Unable to verify API version,"
- " error %d\n", __func__, error);
+ " error %s\n", __func__, i40e_stat_str(hw, error));
ret_error = 5;
}
break;
@@ -1096,7 +1123,7 @@ retry_config:
ret_error = 3;
goto fail;
}
- i40e_msec_delay(10);
+ i40e_msec_pause(10);
}
INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
@@ -1149,6 +1176,7 @@ ixlv_init_msix(struct ixlv_sc *sc)
{
device_t dev = sc->dev;
int rid, want, vectors, queues, available;
+ int auto_max_queues;
rid = PCIR_BAR(IXL_BAR);
sc->msix_mem = bus_alloc_resource_any(dev,
@@ -1156,7 +1184,7 @@ ixlv_init_msix(struct ixlv_sc *sc)
if (!sc->msix_mem) {
/* May not be enabled */
device_printf(sc->dev,
- "Unable to map MSIX table \n");
+ "Unable to map MSIX table\n");
goto fail;
}
@@ -1168,20 +1196,30 @@ ixlv_init_msix(struct ixlv_sc *sc)
goto fail;
}
- /* Figure out a reasonable auto config value */
- queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
+ /* Clamp queues to number of CPUs and # of MSI-X vectors available */
+ auto_max_queues = min(mp_ncpus, available - 1);
+ /* Clamp queues to # assigned to VF by PF */
+ auto_max_queues = min(auto_max_queues, sc->vf_res->num_queue_pairs);
- /* Override with hardcoded value if sane */
- if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
+ /* Override with tunable value if tunable is less than autoconfig count */
+ if ((ixlv_max_queues != 0) && (ixlv_max_queues <= auto_max_queues))
queues = ixlv_max_queues;
+ /* Use autoconfig amount if that's lower */
+ else if ((ixlv_max_queues != 0) && (ixlv_max_queues > auto_max_queues)) {
+ device_printf(dev, "ixlv_max_queues (%d) is too large, using "
+ "autoconfig amount (%d)...\n",
+ ixlv_max_queues, auto_max_queues);
+ queues = auto_max_queues;
+ }
+ /* Limit maximum auto-configured queues to 8 if no user value is set */
+ else
+ queues = min(auto_max_queues, 8);
+
#ifdef RSS
/* If we're doing RSS, clamp at the number of RSS buckets */
if (queues > rss_getnumbuckets())
queues = rss_getnumbuckets();
#endif
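
To make the clamping concrete: on a hypothetical host with 16 CPUs and 9
MSI-X vectors available, where the PF assigned this VF 4 queue pairs and
ixlv_max_queues is unset, auto_max_queues = min(16, 9 - 1) = 8, then
min(8, 4) = 4, and the final count is min(4, 8) = 4 queues. The "- 1"
reserves one vector for the admin queue.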
- /* Enforce the VF max value */
- if (queues > IXLV_MAX_QUEUES)
- queues = IXLV_MAX_QUEUES;
/*
** Want one vector (RX/TX pair) per queue
@@ -1225,25 +1263,6 @@ ixlv_init_msix(struct ixlv_sc *sc)
sc->vsi.num_queues = queues;
}
- /*
- ** Explicitly set the guest PCI BUSMASTER capability
- ** and we must rewrite the ENABLE in the MSIX control
- ** register again at this point to cause the host to
- ** successfully initialize us.
- */
- {
- u16 pci_cmd_word;
- int msix_ctrl;
- pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
- pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
- pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
- pci_find_cap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
- }
-
/* Next we need to setup the vector for the Admin Queue */
rid = 1; // zero vector + 1
sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
@@ -1280,7 +1299,7 @@ ixlv_allocate_pci_resources(struct ixlv_sc *sc)
&rid, RF_ACTIVE);
if (!(sc->pci_mem)) {
- device_printf(dev,"Unable to allocate bus resource: memory\n");
+ device_printf(dev, "Unable to allocate bus resource: memory\n");
return (ENXIO);
}
@@ -1294,18 +1313,29 @@ ixlv_allocate_pci_resources(struct ixlv_sc *sc)
sc->hw.back = &sc->osdep;
- /* Disable adminq interrupts */
- ixlv_disable_adminq_irq(&sc->hw);
-
/*
- ** Now setup MSI/X, it will return
- ** us the number of supported vectors
+	** Explicitly set the guest PCI BUSMASTER capability,
+	** and rewrite the ENABLE bit in the MSIX control
+	** register again at this point so that the host can
+	** successfully initialize us.
+ **
+ ** This must be set before accessing any registers.
*/
- sc->msix = ixlv_init_msix(sc);
+ {
+ u16 pci_cmd_word;
+ int msix_ctrl;
+ pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+ pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
+ pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
+ pci_find_cap(dev, PCIY_MSIX, &rid);
+ rid += PCIR_MSIX_CTRL;
+ msix_ctrl = pci_read_config(dev, rid, 2);
+ msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
+ pci_write_config(dev, rid, msix_ctrl, 2);
+ }
- /* We fail without MSIX support */
- if (sc->msix == 0)
- return (ENXIO);
+ /* Disable adminq interrupts (just in case) */
+ ixlv_disable_adminq_irq(&sc->hw);
return (0);
}
@@ -1330,8 +1360,10 @@ ixlv_free_pci_resources(struct ixlv_sc *sc)
bus_teardown_intr(dev, que->res, que->tag);
que->tag = NULL;
}
- if (que->res != NULL)
+ if (que->res != NULL) {
bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
+ que->res = NULL;
+ }
}
early:
@@ -1340,8 +1372,10 @@ early:
bus_teardown_intr(dev, sc->res, sc->tag);
sc->tag = NULL;
}
- if (sc->res != NULL)
+ if (sc->res != NULL) {
bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
+ sc->res = NULL;
+ }
pci_release_msi(dev);
@@ -1352,8 +1386,6 @@ early:
if (sc->pci_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
PCIR_BAR(0), sc->pci_mem);
-
- return;
}
/*
@@ -1418,7 +1450,6 @@ ixlv_assign_msix(struct ixlv_sc *sc)
#endif
bus_bind_intr(dev, que->res, cpu_id);
que->msix = vector;
- vsi->que_mask |= (u64)(1 << que->msix);
TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
TASK_INIT(&que->task, 0, ixlv_handle_que, que);
que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
@@ -1454,7 +1485,7 @@ ixlv_reset(struct ixlv_sc *sc)
if (sc->init_state != IXLV_RESET_PENDING)
ixlv_request_reset(sc);
- i40e_msec_delay(100);
+ i40e_msec_pause(100);
error = ixlv_reset_complete(hw);
if (error) {
device_printf(dev, "%s: VF reset failed\n",
@@ -1484,6 +1515,7 @@ ixlv_reset_complete(struct i40e_hw *hw)
{
u32 reg;
+ /* Wait up to ~10 seconds */
for (int i = 0; i < 100; i++) {
reg = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
@@ -1491,7 +1523,7 @@ ixlv_reset_complete(struct i40e_hw *hw)
if ((reg == I40E_VFR_VFACTIVE) ||
(reg == I40E_VFR_COMPLETED))
return (0);
- i40e_msec_delay(100);
+ i40e_msec_pause(100);
}
return (EBUSY);
@@ -1522,7 +1554,7 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_mtu = ETHERMTU;
- ifp->if_baudrate = 4000000000; // ??
+ ifp->if_baudrate = IF_Gbps(40);
ifp->if_init = ixlv_init;
ifp->if_softc = vsi;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
@@ -1686,7 +1718,7 @@ ixlv_setup_queues(struct ixlv_sc *sc)
rxr->base = (union i40e_rx_desc *)rxr->dma.va;
bzero((void *)rxr->base, rsize);
- /* Allocate receive soft structs for the ring*/
+ /* Allocate receive soft structs for the ring */
if (ixl_allocate_rx_data(que)) {
device_printf(dev,
"Critical Failure setting up receive structs\n");
@@ -1896,18 +1928,48 @@ ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
u32 reg;
reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
}
static void
ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
{
- wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
+ wr32(hw, I40E_VFINT_DYN_CTLN1(id),
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
rd32(hw, I40E_VFGEN_RSTAT);
return;
}
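
In both paths above, the added I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK sets the
ITR_INDX field to 0x3 (the "no ITR update" encoding), so enabling or
disabling a queue interrupt no longer clobbers the interval programmed by
ixlv_configure_itr() below.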
+/*
+ * Get initial ITR values from tunable values.
+ */
+static void
+ixlv_configure_itr(struct ixlv_sc *sc)
+{
+ struct i40e_hw *hw = &sc->hw;
+ struct ixl_vsi *vsi = &sc->vsi;
+ struct ixl_queue *que = vsi->queues;
+
+ vsi->rx_itr_setting = ixlv_rx_itr;
+ vsi->tx_itr_setting = ixlv_tx_itr;
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+ struct rx_ring *rxr = &que->rxr;
+
+ wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
+ vsi->rx_itr_setting);
+ rxr->itr = vsi->rx_itr_setting;
+ rxr->latency = IXL_AVE_LATENCY;
+
+ wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
+ vsi->tx_itr_setting);
+ txr->itr = vsi->tx_itr_setting;
+ txr->latency = IXL_AVE_LATENCY;
+ }
+}
/*
** Provide an update to the queue RX
@@ -2251,7 +2313,7 @@ ixlv_add_multi(struct ixl_vsi *vsi)
}
if_maddr_runlock(ifp);
- // TODO: Remove -- cannot set promiscuous mode in a VF
+ /* TODO: Remove -- cannot set promiscuous mode in a VF */
if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
/* delete all multicast filters */
ixlv_init_multi(vsi);
@@ -2381,7 +2443,8 @@ ixlv_local_timer(void *arg)
** Check status on the queues for a hang
*/
mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK);
+ I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
for (int i = 0; i < vsi->num_queues; i++,que++) {
/* Any queues with outstanding work get a sw irq */
@@ -2522,28 +2585,17 @@ ixlv_free_queues(struct ixl_vsi *vsi)
free(vsi->queues, M_DEVBUF);
}
-
-/*
-** ixlv_config_rss - setup RSS
-**
-** RSS keys and table are cleared on VF reset.
-*/
static void
-ixlv_config_rss(struct ixlv_sc *sc)
+ixlv_config_rss_reg(struct ixlv_sc *sc)
{
struct i40e_hw *hw = &sc->hw;
struct ixl_vsi *vsi = &sc->vsi;
u32 lut = 0;
u64 set_hena = 0, hena;
int i, j, que_id;
+ u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
#ifdef RSS
u32 rss_hash_config;
- u32 rss_seed[IXL_KEYSZ];
-#else
- u32 rss_seed[IXL_KEYSZ] = {0x41b01687,
- 0x183cfd8c, 0xce880440, 0x580cbc3c,
- 0x35897377, 0x328b25e1, 0x4fa98922,
- 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
#endif
/* Don't set up RSS if using a single queue */
@@ -2557,9 +2609,12 @@ ixlv_config_rss(struct ixlv_sc *sc)
#ifdef RSS
/* Fetch the configured RSS key */
rss_getkey((uint8_t *) &rss_seed);
+#else
+ ixl_get_default_rss_key(rss_seed);
#endif
+
/* Fill out hash function seed */
- for (i = 0; i <= IXL_KEYSZ; i++)
+ for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
/* Enable PCTYPES for RSS: */
@@ -2580,18 +2635,7 @@ ixlv_config_rss(struct ixlv_sc *sc)
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
- set_hena =
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
- ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
+ set_hena = IXL_DEFAULT_RSS_HENA;
#endif
hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
@@ -2599,9 +2643,8 @@ ixlv_config_rss(struct ixlv_sc *sc)
wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
- // TODO: Fix -- only 3,7,11,15 are filled out, instead of all 16 registers
/* Populate the LUT with max no. of queues in round robin fashion */
- for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
+ for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
if (j == vsi->num_queues)
j = 0;
#ifdef RSS
@@ -2616,16 +2659,46 @@ ixlv_config_rss(struct ixlv_sc *sc)
que_id = j;
#endif
/* lut = 4-byte sliding window of 4 lut entries */
- lut = (lut << 8) | (que_id & 0xF);
+ lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
/* On i = 3, we have 4 entries in lut; write to the register */
if ((i & 3) == 3) {
- wr32(hw, I40E_VFQF_HLUT(i), lut);
+ wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
}
}
ixl_flush(hw);
}
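
A worked pass of the LUT loop above, assuming num_queues == 2: for i = 0..3
the sliding window packs entries {0, 1, 0, 1} into one 32-bit word, which at
i == 3 is written to I40E_VFQF_HLUT(3 >> 2) = HLUT(0); i = 4..7 then fills
HLUT(1), and so on through all 16 registers. The old wr32(hw,
I40E_VFQF_HLUT(i), lut) stored the word at HLUT(3), HLUT(7), HLUT(11) and
HLUT(15) only, leaving the other twelve registers stale -- exactly the TODO
this hunk resolves.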
+static void
+ixlv_config_rss_pf(struct ixlv_sc *sc)
+{
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd,
+ IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc);
+
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd,
+ IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc);
+
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd,
+ IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc);
+}
+
+/*
+** ixlv_config_rss - setup RSS
+**
+** RSS keys and table are cleared on VF reset.
+*/
+static void
+ixlv_config_rss(struct ixlv_sc *sc)
+{
+ if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG) {
+ DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
+ ixlv_config_rss_reg(sc);
+ } else if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
+ ixlv_config_rss_pf(sc);
+ } else
+ device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
+}
/*
** This routine refreshes vlan filters, called by init
@@ -2868,8 +2941,8 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
CTLFLAG_RD, &(queues[q].tso),
"TSO");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
- CTLFLAG_RD, &(queues[q].tx_dma_setup),
+ SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
+ CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
"Driver tx dma failure in xmit");
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
CTLFLAG_RD, &(txr->no_desc),
@@ -2886,7 +2959,14 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
CTLFLAG_RD, &(rxr->rx_bytes),
"Queue Bytes Received");
-
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
+ CTLFLAG_RD, &(rxr->itr), 0,
+ "Queue Rx ITR Interval");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
+ CTLFLAG_RD, &(txr->itr), 0,
+ "Queue Tx ITR Interval");
+
+#ifdef IXL_DEBUG
/* Examine queue state */
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head",
CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
@@ -2898,6 +2978,7 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
sizeof(struct ixl_queue),
ixlv_sysctl_qrx_tail_handler, "IU",
"Queue Receive Descriptor Tail");
+#endif
}
}
@@ -2932,6 +3013,7 @@ ixlv_free_filters(struct ixlv_sc *sc)
return;
}
+#ifdef IXL_DEBUG
/**
* ixlv_sysctl_qtx_tail_handler
* Retrieves I40E_QTX_TAIL1 value from hardware
@@ -2975,4 +3057,5 @@ ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
return error;
return (0);
}
+#endif
diff --git a/sys/dev/ixl/ixl.h b/sys/dev/ixl/ixl.h
index 68fd3f18c4b6..4c6b8b837205 100644
--- a/sys/dev/ixl/ixl.h
+++ b/sys/dev/ixl/ixl.h
@@ -36,7 +36,6 @@
#ifndef _IXL_H_
#define _IXL_H_
-
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf_ring.h>
@@ -90,29 +89,26 @@
#include <sys/smp.h>
#include <sys/sbuf.h>
#include <machine/smp.h>
+#include <machine/stdarg.h>
-#ifdef PCI_IOV
-#include <sys/nv.h>
-#include <sys/iov_schema.h>
-#include <dev/pci/pci_iov.h>
+#ifdef RSS
+#include <net/rss_config.h>
#endif
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_rss.h"
+
#include "i40e_type.h"
#include "i40e_prototype.h"
-#if defined(IXL_DEBUG) || defined(IXL_DEBUG_SYSCTL)
#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_FORMAT_ARGS(mac_addr) \
(mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \
(mac_addr)[4], (mac_addr)[5]
#define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off")
-#endif /* IXL_DEBUG || IXL_DEBUG_SYSCTL */
#ifdef IXL_DEBUG
-/* Enable debug sysctls */
-#ifndef IXL_DEBUG_SYSCTL
-#define IXL_DEBUG_SYSCTL 1
-#endif
#define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__)
#define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__)
@@ -157,6 +153,26 @@
#define HW_DEBUGOUT(...)
#endif /* IXL_DEBUG */
+enum ixl_dbg_mask {
+ IXL_DBG_INFO = 0x00000001,
+ IXL_DBG_EN_DIS = 0x00000002,
+ IXL_DBG_AQ = 0x00000004,
+ IXL_DBG_NVMUPD = 0x00000008,
+
+ IXL_DBG_IOCTL_KNOWN = 0x00000010,
+ IXL_DBG_IOCTL_UNKNOWN = 0x00000020,
+ IXL_DBG_IOCTL_ALL = 0x00000030,
+
+ I40E_DEBUG_RSS = 0x00000100,
+
+ IXL_DBG_IOV = 0x00001000,
+ IXL_DBG_IOV_VC = 0x00002000,
+
+ IXL_DBG_SWITCH_INFO = 0x00010000,
+
+ IXL_DBG_ALL = 0xFFFFFFFF
+};
+
/* Tunables */
/*
@@ -167,27 +183,28 @@
* Tx descriptors are always 16 bytes, but Rx descriptors can be 32 bytes.
* The driver currently always uses 32 byte Rx descriptors.
*/
-#define DEFAULT_RING 1024
-#define PERFORM_RING 2048
-#define MAX_RING 4096
-#define MIN_RING 32
+#define DEFAULT_RING 1024
+#define IXL_MAX_RING 8160
+#define IXL_MIN_RING 32
+#define IXL_RING_INCREMENT 32
+
+#define IXL_AQ_LEN 256
+#define IXL_AQ_LEN_MAX 1024
/*
** Default number of entries in Tx queue buf_ring.
*/
-#define SMALL_TXBRSZ 4096
-/* This may require mbuf cluster tuning */
-#define DEFAULT_TXBRSZ (SMALL_TXBRSZ * SMALL_TXBRSZ)
+#define DEFAULT_TXBRSZ 4096
/* Alignment for rings */
-#define DBA_ALIGN 128
+#define DBA_ALIGN 128
/*
 * This is the max watchdog interval, i.e. the time that can
 * pass between any two TX clean operations; those only happen
 * when the TX hardware is functioning.
*/
-#define IXL_WATCHDOG (10 * hz)
+#define IXL_WATCHDOG (10 * hz)
/*
 * These parameters control when the driver calls the routine to reclaim
@@ -196,11 +213,6 @@
#define IXL_TX_CLEANUP_THRESHOLD (que->num_desc / 8)
#define IXL_TX_OP_THRESHOLD (que->num_desc / 32)
-/* Flow control constants */
-#define IXL_FC_PAUSE 0xFFFF
-#define IXL_FC_HI 0x20000
-#define IXL_FC_LO 0x10000
-
#define MAX_MULTICAST_ADDR 128
#define IXL_BAR 3
@@ -208,10 +220,6 @@
#define IXL_TSO_SIZE 65535
#define IXL_AQ_BUF_SZ ((u32) 4096)
#define IXL_RX_HDR 128
-/* Controls the length of the Admin Queue */
-#define IXL_AQ_LEN 256
-#define IXL_AQ_LEN_MAX 1024
-#define IXL_AQ_BUFSZ 4096
#define IXL_RX_LIMIT 512
#define IXL_RX_ITR 0
#define IXL_TX_ITR 1
@@ -219,25 +227,29 @@
#define IXL_QUEUE_EOL 0x7FF
#define IXL_MAX_FRAME 9728
#define IXL_MAX_TX_SEGS 8
-#define IXL_MAX_TSO_SEGS 66
+#define IXL_MAX_TSO_SEGS 128
#define IXL_SPARSE_CHAIN 6
#define IXL_QUEUE_HUNG 0x80000000
-#define IXL_KEYSZ 10
+
+#define IXL_RSS_KEY_SIZE_REG 13
+#define IXL_RSS_KEY_SIZE (IXL_RSS_KEY_SIZE_REG * 4)
+#define IXL_RSS_VSI_LUT_SIZE 64 /* X722 -> VSI, X710 -> VF */
+#define IXL_RSS_VSI_LUT_ENTRY_MASK 0x3F
+#define IXL_RSS_VF_LUT_ENTRY_MASK 0xF
#define IXL_VF_MAX_BUFFER 0x3F80
#define IXL_VF_MAX_HDR_BUFFER 0x840
#define IXL_VF_MAX_FRAME 0x3FFF
-/* ERJ: hardware can support ~1.5k filters between all functions */
-#define IXL_MAX_FILTERS 256
-#define IXL_MAX_TX_BUSY 10
+/* ERJ: hardware can support ~2k (SW5+) filters between all functions */
+#define IXL_MAX_FILTERS 256
+#define IXL_MAX_TX_BUSY 10
#define IXL_NVM_VERSION_LO_SHIFT 0
#define IXL_NVM_VERSION_LO_MASK (0xff << IXL_NVM_VERSION_LO_SHIFT)
#define IXL_NVM_VERSION_HI_SHIFT 12
#define IXL_NVM_VERSION_HI_MASK (0xf << IXL_NVM_VERSION_HI_SHIFT)
-
/*
* Interrupt Moderation parameters
*/
@@ -298,6 +310,19 @@
#define IXL_END_OF_INTR_LNKLST 0x7FF
+#define IXL_DEFAULT_RSS_HENA (\
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
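
BIT_ULL() here is presumably the usual 64-bit shift helper -- something like:

	#define BIT_ULL(a)	(1ULL << (a))	/* assumed definition */

so IXL_DEFAULT_RSS_HENA collapses the eleven per-packet-type shifts the VF
driver used to build inline (see the replaced set_hena block above) into one
shared constant.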
+
#define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
#define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
@@ -337,11 +362,6 @@
#define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count)
#endif
-/* Pre-10.2 media type compatibility */
-#if __FreeBSD_version < 1002000
-#define IFM_OTHER IFM_UNKNOWN
-#endif
-
/*
*****************************************************************************
* vendor_info_array
@@ -480,15 +500,12 @@ struct ixl_queue {
u64 mbuf_defrag_failed;
u64 mbuf_hdr_failed;
u64 mbuf_pkt_failed;
- u64 tx_map_avail;
- u64 tx_dma_setup;
+ u64 tx_dmamap_failed;
u64 dropped_pkts;
};
/*
-** Virtual Station interface:
-** there would be one of these per traffic class/type
-** for now just one, and its embedded in the pf
+** Virtual Station Interface
*/
SLIST_HEAD(ixl_ftl_head, ixl_mac_filter);
struct ixl_vsi {
@@ -498,27 +515,25 @@ struct ixl_vsi {
struct i40e_hw *hw;
struct ifmedia media;
enum i40e_vsi_type type;
- u64 que_mask;
int id;
- u16 vsi_num;
- u16 msix_base; /* station base MSIX vector */
- u16 first_queue;
u16 num_queues;
u32 rx_itr_setting;
u32 tx_itr_setting;
+ u16 max_frame_size;
+
struct ixl_queue *queues; /* head of queues */
+
+ u16 vsi_num;
bool link_active;
u16 seid;
u16 uplink_seid;
u16 downlink_seid;
- u16 max_frame_size;
- u16 rss_table_size;
- u16 rss_size;
/* MAC/VLAN Filter list */
struct ixl_ftl_head ftl;
u16 num_macs;
+ /* Contains readylist & stat counter id */
struct i40e_aqc_vsi_properties_data info;
eventhandler_tag vlan_attach;
@@ -565,7 +580,7 @@ ixl_rx_unrefreshed(struct ixl_queue *que)
else
return ((que->num_desc + rxr->next_check) -
rxr->next_refresh - 1);
-}
+}
/*
** Find the next available unused filter
@@ -601,6 +616,28 @@ cmp_etheraddr(const u8 *ea1, const u8 *ea2)
}
/*
+ * Return the smallest power of two >= n, unsigned
+ *
+ * Public domain, from Bit Twiddling Hacks
+ */
+static inline u32
+next_power_of_two(u32 n)
+{
+ n--;
+ n |= n >> 1;
+ n |= n >> 2;
+ n |= n >> 4;
+ n |= n >> 8;
+ n |= n >> 16;
+ n++;
+
+ /* Next power of two > 0 is 1 */
+ n += (n == 0);
+
+ return (n);
+}
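
A few sample values for the helper above:

	next_power_of_two(5) == 8
	next_power_of_two(8) == 8	/* the initial n-- keeps exact powers fixed */
	next_power_of_two(0) == 1	/* via the n += (n == 0) fixup */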
+
+/*
* Info for stats sysctls
*/
struct ixl_sysctl_info {
@@ -609,7 +646,8 @@ struct ixl_sysctl_info {
char *description;
};
-extern int ixl_atr_rate;
+static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/*********************************************************************
* TXRX Function prototypes
@@ -620,18 +658,18 @@ void ixl_init_tx_ring(struct ixl_queue *);
int ixl_init_rx_ring(struct ixl_queue *);
bool ixl_rxeof(struct ixl_queue *, int);
bool ixl_txeof(struct ixl_queue *);
+void ixl_free_que_tx(struct ixl_queue *);
+void ixl_free_que_rx(struct ixl_queue *);
+
int ixl_mq_start(struct ifnet *, struct mbuf *);
int ixl_mq_start_locked(struct ifnet *, struct tx_ring *);
void ixl_deferred_mq_start(void *, int);
-void ixl_qflush(struct ifnet *);
void ixl_free_vsi(struct ixl_vsi *);
-void ixl_free_que_tx(struct ixl_queue *);
-void ixl_free_que_rx(struct ixl_queue *);
-#ifdef IXL_FDIR
-void ixl_atr(struct ixl_queue *, struct tcphdr *, int);
-#endif
+void ixl_qflush(struct ifnet *);
+
+/* Common function prototypes between PF/VF driver */
#if __FreeBSD_version >= 1100000
uint64_t ixl_get_counter(if_t ifp, ift_counter cnt);
#endif
-
+void ixl_get_default_rss_key(u32 *);
#endif /* _IXL_H_ */
diff --git a/sys/dev/ixl/ixl_pf.h b/sys/dev/ixl/ixl_pf.h
index 65915bea1544..1c31a9466579 100644
--- a/sys/dev/ixl/ixl_pf.h
+++ b/sys/dev/ixl/ixl_pf.h
@@ -36,6 +36,9 @@
#ifndef _IXL_PF_H_
#define _IXL_PF_H_
+#include "ixl.h"
+#include "ixl_pf_qmgr.h"
+
#define VF_FLAG_ENABLED 0x01
#define VF_FLAG_SET_MAC_CAP 0x02
#define VF_FLAG_VLAN_CAP 0x04
@@ -52,6 +55,7 @@ struct ixl_vf {
uint16_t vf_num;
uint32_t version;
+ struct ixl_pf_qtag qtag;
struct sysctl_ctx_list ctx;
};
@@ -60,6 +64,7 @@ struct ixl_pf {
struct i40e_hw hw;
struct i40e_osdep osdep;
struct device *dev;
+ struct ixl_vsi vsi;
struct resource *pci_mem;
struct resource *msix_mem;
@@ -77,6 +82,19 @@ struct ixl_pf {
int if_flags;
int state;
+ struct ixl_pf_qmgr qmgr;
+ struct ixl_pf_qtag qtag;
+
+ /* Tunable values */
+ bool enable_msix;
+ int max_queues;
+ int ringsz;
+ bool enable_tx_fc_filter;
+ int dynamic_rx_itr;
+ int dynamic_tx_itr;
+ int tx_itr;
+ int rx_itr;
+
struct mtx pf_mtx;
u32 qbase;
@@ -87,17 +105,8 @@ struct ixl_pf {
bool link_up;
u32 link_speed;
int advertised_speed;
- int fc; /* local flow ctrl setting */
-
- /*
- ** Network interfaces
- ** These are the traffic class holders, and
- ** will have a stack interface and queues
- ** associated with them.
- ** NOTE: The PF has only a single interface,
- ** so it is embedded in the PF struct.
- */
- struct ixl_vsi vsi;
+ int fc; /* link flow ctrl setting */
+ enum ixl_dbg_mask dbg_mask;
/* Misc stats maintained by the driver */
u64 watchdog_events;
@@ -108,6 +117,7 @@ struct ixl_pf {
struct i40e_hw_port_stats stats_offsets;
bool stat_offsets_loaded;
+ /* SR-IOV */
struct ixl_vf *vfs;
int num_vfs;
uint16_t veb_seid;
@@ -149,42 +159,18 @@ struct ixl_pf {
"\nExecutes a \"Get Link Status\" command on the Admin Queue, and displays" \
" the response." \
-/*** Functions / Macros ***/
+static char *ixl_fc_string[6] = {
+ "None",
+ "Rx",
+ "Tx",
+ "Full",
+ "Priority",
+ "Default"
+};
-/*
-** Put the NVM, EEtrackID, and OEM version information into a string
-*/
-static void
-ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
-{
- u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
- u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
- u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
-
- sbuf_printf(buf,
- "nvm %x.%02x etid %08x oem %d.%d.%d",
- (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
- IXL_NVM_VERSION_HI_SHIFT,
- (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
- IXL_NVM_VERSION_LO_SHIFT,
- hw->nvm.eetrack,
- oem_ver, oem_build, oem_patch);
-}
-
-static void
-ixl_print_nvm_version(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- struct sbuf *sbuf;
-
- sbuf = sbuf_new_auto();
- ixl_nvm_version_str(hw, sbuf);
- sbuf_finish(sbuf);
- device_printf(dev, "%s\n", sbuf_data(sbuf));
- sbuf_delete(sbuf);
-}
+static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
+/*** Functions / Macros ***/
#define I40E_VC_DEBUG(pf, level, ...) \
do { \
if ((pf)->vc_debug_lvl >= (level)) \
@@ -201,4 +187,136 @@ ixl_print_nvm_version(struct ixl_pf *pf)
#define IXL_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx)
#define IXL_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED)
+/* For stats sysctl naming */
+#define QUEUE_NAME_LEN 32
+
+/*
+ * PF-only function declarations
+ */
+
+void ixl_set_busmaster(device_t);
+int ixl_setup_interface(device_t, struct ixl_vsi *);
+void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
+
+void ixl_handle_que(void *context, int pending);
+
+void ixl_init(void *);
+void ixl_local_timer(void *);
+void ixl_register_vlan(void *, struct ifnet *, u16);
+void ixl_unregister_vlan(void *, struct ifnet *, u16);
+void ixl_intr(void *);
+void ixl_msix_que(void *);
+void ixl_msix_adminq(void *);
+void ixl_do_adminq(void *, int);
+
+int ixl_res_alloc_cmp(const void *, const void *);
+char * ixl_switch_res_type_string(u8);
+char * ixl_switch_element_string(struct sbuf *,
+ struct i40e_aqc_switch_config_element_resp *);
+void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
+ struct sysctl_oid_list *, struct i40e_hw_port_stats *);
+void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
+ struct sysctl_oid_list *,
+ struct i40e_eth_stats *);
+
+void ixl_media_status(struct ifnet *, struct ifmediareq *);
+int ixl_media_change(struct ifnet *);
+int ixl_ioctl(struct ifnet *, u_long, caddr_t);
+
+void ixl_enable_adminq(struct i40e_hw *);
+void ixl_get_bus_info(struct i40e_hw *, device_t);
+void ixl_disable_adminq(struct i40e_hw *);
+void ixl_enable_queue(struct i40e_hw *, int);
+void ixl_disable_queue(struct i40e_hw *, int);
+void ixl_enable_legacy(struct i40e_hw *);
+void ixl_disable_legacy(struct i40e_hw *);
+void ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf);
+void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
+ u64 *, u64 *);
+void ixl_stat_update32(struct i40e_hw *, u32, bool,
+ u64 *, u64 *);
+
+void ixl_stop(struct ixl_pf *);
+void ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, struct sysctl_ctx_list *ctx, const char *sysctl_name);
+int ixl_get_hw_capabilities(struct ixl_pf *);
+void ixl_update_link_status(struct ixl_pf *);
+int ixl_allocate_pci_resources(struct ixl_pf *);
+int ixl_setup_stations(struct ixl_pf *);
+int ixl_switch_config(struct ixl_pf *);
+void ixl_stop_locked(struct ixl_pf *);
+int ixl_teardown_hw_structs(struct ixl_pf *);
+int ixl_reset(struct ixl_pf *);
+void ixl_init_locked(struct ixl_pf *);
+void ixl_set_rss_key(struct ixl_pf *);
+void ixl_set_rss_pctypes(struct ixl_pf *);
+void ixl_set_rss_hlut(struct ixl_pf *);
+int ixl_setup_adminq_msix(struct ixl_pf *);
+int ixl_setup_adminq_tq(struct ixl_pf *);
+int ixl_teardown_adminq_msix(struct ixl_pf *);
+void ixl_configure_intr0_msix(struct ixl_pf *);
+void ixl_configure_queue_intr_msix(struct ixl_pf *);
+void ixl_free_adminq_tq(struct ixl_pf *);
+int ixl_assign_vsi_legacy(struct ixl_pf *);
+int ixl_init_msix(struct ixl_pf *);
+void ixl_configure_itr(struct ixl_pf *);
+void ixl_configure_legacy(struct ixl_pf *);
+void ixl_free_pci_resources(struct ixl_pf *);
+void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
+void ixl_config_rss(struct ixl_pf *);
+int ixl_set_advertised_speeds(struct ixl_pf *, int);
+void ixl_get_initial_advertised_speeds(struct ixl_pf *);
+void ixl_print_nvm_version(struct ixl_pf *pf);
+void ixl_add_device_sysctls(struct ixl_pf *);
+void ixl_handle_mdd_event(struct ixl_pf *);
+void ixl_add_hw_stats(struct ixl_pf *);
+void ixl_update_stats_counters(struct ixl_pf *);
+void ixl_pf_reset_stats(struct ixl_pf *);
+void ixl_dbg(struct ixl_pf *, enum ixl_dbg_mask, char *, ...);
+
+int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
+void ixl_handle_empr_reset(struct ixl_pf *);
+int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *);
+
+void ixl_set_queue_rx_itr(struct ixl_queue *);
+void ixl_set_queue_tx_itr(struct ixl_queue *);
+
+void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
+void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
+void ixl_reconfigure_filters(struct ixl_vsi *vsi);
+
+int ixl_disable_rings(struct ixl_vsi *);
+int ixl_disable_tx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16);
+int ixl_disable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16);
+int ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16);
+
+int ixl_enable_rings(struct ixl_vsi *);
+int ixl_enable_tx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16);
+int ixl_enable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16);
+int ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16);
+
+void ixl_update_eth_stats(struct ixl_vsi *);
+void ixl_disable_intr(struct ixl_vsi *);
+void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
+int ixl_initialize_vsi(struct ixl_vsi *);
+void ixl_add_ifmedia(struct ixl_vsi *, u32);
+int ixl_setup_queue_msix(struct ixl_vsi *);
+int ixl_setup_queue_tqs(struct ixl_vsi *);
+int ixl_teardown_queue_msix(struct ixl_vsi *);
+void ixl_free_queue_tqs(struct ixl_vsi *);
+void ixl_enable_intr(struct ixl_vsi *);
+void ixl_disable_rings_intr(struct ixl_vsi *);
+void ixl_set_promisc(struct ixl_vsi *);
+void ixl_add_multi(struct ixl_vsi *);
+void ixl_del_multi(struct ixl_vsi *);
+void ixl_setup_vlan_filters(struct ixl_vsi *);
+void ixl_init_filters(struct ixl_vsi *);
+void ixl_add_hw_filters(struct ixl_vsi *, int, int);
+void ixl_del_hw_filters(struct ixl_vsi *, int);
+struct ixl_mac_filter *
+ ixl_find_filter(struct ixl_vsi *, u8 *, s16);
+void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
+void ixl_free_mac_filters(struct ixl_vsi *vsi);
+void ixl_update_vsi_stats(struct ixl_vsi *);
+void ixl_vsi_reset_stats(struct ixl_vsi *);
+
#endif /* _IXL_PF_H_ */
diff --git a/sys/dev/ixl/ixl_pf_iov.c b/sys/dev/ixl/ixl_pf_iov.c
new file mode 100644
index 000000000000..a8c8b29cc605
--- /dev/null
+++ b/sys/dev/ixl/ixl_pf_iov.c
@@ -0,0 +1,1925 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "ixl_pf_iov.h"
+
+/* Private functions */
+static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
+static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
+static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
+
+static bool ixl_zero_mac(const uint8_t *addr);
+static bool ixl_bcast_mac(const uint8_t *addr);
+
+static const char * ixl_vc_opcode_str(uint16_t op);
+static int ixl_vc_opcode_level(uint16_t opcode);
+
+static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
+
+static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
+static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
+static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
+static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
+static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
+static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
+static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
+static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
+static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
+static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
+static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
+static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_txq_info *info);
+static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_rxq_info *info);
+static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct i40e_virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
+ enum i40e_queue_type *last_type, uint16_t *last_queue);
+static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct i40e_virtchnl_vector_map *vector);
+static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
+static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
+
+static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
+
+void
+ixl_initialize_sriov(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ struct i40e_hw *hw = &pf->hw;
+ nvlist_t *pf_schema, *vf_schema;
+ int iov_error;
+
+ /* SR-IOV is only supported when MSI-X is in use. */
+ if (pf->msix <= 1)
+ return;
+
+ pf_schema = pci_iov_schema_alloc_node();
+ vf_schema = pci_iov_schema_alloc_node();
+ pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
+ pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
+ IOV_SCHEMA_HASDEFAULT, TRUE);
+ pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
+ IOV_SCHEMA_HASDEFAULT, FALSE);
+ pci_iov_schema_add_bool(vf_schema, "allow-promisc",
+ IOV_SCHEMA_HASDEFAULT, FALSE);
+ pci_iov_schema_add_uint16(vf_schema, "num-queues",
+ IOV_SCHEMA_HASDEFAULT,
+	    max(1, min(hw->func_caps.num_msix_vectors_vf - 1, IXLV_MAX_QUEUES)));
+
+ iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
+ if (iov_error != 0) {
+ device_printf(dev,
+ "Failed to initialize SR-IOV (error=%d)\n",
+ iov_error);
+ } else
+ device_printf(dev, "SR-IOV ready\n");
+
+ pf->vc_debug_lvl = 1;
+}
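+
+/*
+ * Illustrative iovctl(8) configuration exercising the schema above;
+ * the device unit and values here are hypothetical:
+ *
+ *	PF { device = "ixl0"; num_vfs = 2; }
+ *	DEFAULT { num-queues = 4; mac-anti-spoof = true; }
+ *	VF-0 { mac-addr = "02:aa:bb:cc:dd:00"; allow-set-mac = true; }
+ */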
+
+/*
+ * Allocate the VSI for a VF.
+ */
+static int
+ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ device_t dev;
+ struct i40e_hw *hw;
+ struct ixl_vsi *vsi;
+ struct i40e_vsi_context vsi_ctx;
+ int i;
+ enum i40e_status_code code;
+
+ hw = &pf->hw;
+ vsi = &pf->vsi;
+ dev = pf->dev;
+
+ vsi_ctx.pf_num = hw->pf_id;
+ vsi_ctx.uplink_seid = pf->veb_seid;
+ vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
+ vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
+ vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
+
+ bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
+
+ vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ vsi_ctx.info.switch_id = htole16(0);
+
+ vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ vsi_ctx.info.sec_flags = 0;
+ if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
+ vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+
+ vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+
+ vsi_ctx.info.valid_sections |=
+ htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
+ vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
+
+ /* ERJ: Only scattered allocation is supported for VFs right now */
+ for (i = 0; i < vf->qtag.num_active; i++)
+ vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
+ for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
+ vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
+
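+	/*
+	 * The TC queue-count field is log2-encoded: bsrl() returns
+	 * floor(log2(num_allocated)), e.g. 4 allocated queues -> 2.
+	 */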
+ vsi_ctx.info.tc_mapping[0] = htole16(
+ (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (bsrl(vf->qtag.num_allocated) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+
+ code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
+ if (code != I40E_SUCCESS)
+ return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
+ vf->vsi.seid = vsi_ctx.seid;
+ vf->vsi.vsi_num = vsi_ctx.vsi_number;
+ // vf->vsi.first_queue = vf->qtag.qidx[0];
+ vf->vsi.num_queues = vf->qtag.num_active;
+
+ code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
+ if (code != I40E_SUCCESS)
+ return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
+
+ code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
+ if (code != I40E_SUCCESS) {
+ device_printf(dev, "Failed to disable BW limit: %d\n",
+ ixl_adminq_err_to_errno(hw->aq.asq_last_status));
+ return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
+ }
+
+ memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
+ return (0);
+}
+
+static int
+ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ int error;
+
+ hw = &pf->hw;
+
+ error = ixl_vf_alloc_vsi(pf, vf);
+ if (error != 0)
+ return (error);
+
+ vf->vsi.hw_filters_add = 0;
+ vf->vsi.hw_filters_del = 0;
+ ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
+ ixl_reconfigure_filters(&vf->vsi);
+
+ return (0);
+}
+
+static void
+ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
+ uint32_t val)
+{
+ uint32_t qtable;
+ int index, shift;
+
+ /*
+ * Two queues are mapped in a single register, so we have to do some
+ * gymnastics to convert the queue number into a register index and
+ * shift.
+ */
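+	/*
+	 * For example, qnum 5 lands in register index 2 (5 / 2) at the
+	 * odd-queue shift (I40E_VSILAN_QTABLE_QINDEX_1_SHIFT), while
+	 * qnum 4 shares that register at shift 0.
+	 */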
+ index = qnum / 2;
+ shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
+
+ qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
+ qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
+ qtable |= val << shift;
+ i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
+}
+
+static void
+ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ uint32_t qtable;
+ int i;
+
+ hw = &pf->hw;
+
+ /*
+ * Contiguous mappings aren't actually supported by the hardware,
+ * so we have to use non-contiguous mappings.
+ */
+ i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
+ I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+
+ /* Enable LAN traffic on this VF */
+ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
+ I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
+
+ /* Program index of each VF queue into PF queue space
+ * (This is only needed if QTABLE is enabled) */
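+	/*
+	 * Illustrative: a VF holding scattered PF queues {34, 7, 12} would
+	 * program VPLAN_QTABLE[0] = 34, [1] = 7, [2] = 12 below, with the
+	 * remaining entries marked unused.
+	 */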
+ for (i = 0; i < vf->vsi.num_queues; i++) {
+ qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
+ I40E_VPLAN_QTABLE_QINDEX_SHIFT;
+
+ wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
+ }
+ for (; i < IXL_MAX_VSI_QUEUES; i++)
+ wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
+ I40E_VPLAN_QTABLE_QINDEX_MASK);
+
+ /* Map queues allocated to VF to its VSI;
+ * This mapping matches the VF-wide mapping since the VF
+ * is only given a single VSI */
+ for (i = 0; i < vf->vsi.num_queues; i++)
+ ixl_vf_map_vsi_queue(hw, vf, i,
+ ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
+
+ /* Set rest of VSI queues as unused. */
+ for (; i < IXL_MAX_VSI_QUEUES; i++)
+ ixl_vf_map_vsi_queue(hw, vf, i,
+ I40E_VSILAN_QTABLE_QINDEX_0_MASK);
+
+ ixl_flush(hw);
+}
+
+static void
+ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
+{
+ struct i40e_hw *hw;
+
+ hw = &pf->hw;
+
+ if (vsi->seid == 0)
+ return;
+
+ i40e_aq_delete_element(hw, vsi->seid, NULL);
+}
+
+static void
+ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
+{
+
+ wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ ixl_flush(hw);
+}
+
+static void
+ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
+{
+
+ wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ ixl_flush(hw);
+}
+
+static void
+ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ uint32_t vfint_reg, vpint_reg;
+ int i;
+
+ hw = &pf->hw;
+
+ ixl_vf_vsi_release(pf, &vf->vsi);
+
+ /* Index 0 has a special register. */
+ ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
+
+ for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
+		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
+ ixl_vf_disable_queue_intr(hw, vfint_reg);
+ }
+
+ /* Index 0 has a special register. */
+ ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
+
+ for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
+ vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
+ ixl_vf_unregister_intr(hw, vpint_reg);
+ }
+
+ vf->vsi.num_queues = 0;
+}
+
+static int
+ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ int i;
+ uint16_t global_vf_num;
+ uint32_t ciad;
+
+ hw = &pf->hw;
+ global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
+
+ wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
+ (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
+ for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
+ ciad = rd32(hw, I40E_PF_PCI_CIAD);
+ if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
+ return (0);
+ DELAY(1);
+ }
+
+ return (ETIMEDOUT);
+}
+
+static void
+ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ uint32_t vfrtrig;
+
+ hw = &pf->hw;
+
+ vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
+ vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
+ ixl_flush(hw);
+
+ ixl_reinit_vf(pf, vf);
+}
+
+static void
+ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ uint32_t vfrstat, vfrtrig;
+ int i, error;
+
+ hw = &pf->hw;
+
+ error = ixl_flush_pcie(pf, vf);
+ if (error != 0)
+ device_printf(pf->dev,
+ "Timed out waiting for PCIe activity to stop on VF-%d\n",
+ vf->vf_num);
+
+ for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
+ DELAY(10);
+
+ vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
+ if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
+ break;
+ }
+
+ if (i == IXL_VF_RESET_TIMEOUT)
+ device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
+
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
+
+ vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
+ vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
+
+ if (vf->vsi.seid != 0)
+ ixl_disable_rings(&vf->vsi);
+
+ ixl_vf_release_resources(pf, vf);
+ ixl_vf_setup_vsi(pf, vf);
+ ixl_vf_map_queues(pf, vf);
+
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
+ ixl_flush(hw);
+}
+
+static const char *
+ixl_vc_opcode_str(uint16_t op)
+{
+
+ switch (op) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ return ("VERSION");
+ case I40E_VIRTCHNL_OP_RESET_VF:
+ return ("RESET_VF");
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ return ("GET_VF_RESOURCES");
+ case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ return ("CONFIG_TX_QUEUE");
+ case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ return ("CONFIG_RX_QUEUE");
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ return ("CONFIG_VSI_QUEUES");
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ return ("CONFIG_IRQ_MAP");
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ return ("ENABLE_QUEUES");
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ return ("DISABLE_QUEUES");
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ return ("ADD_ETHER_ADDRESS");
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ return ("DEL_ETHER_ADDRESS");
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ return ("ADD_VLAN");
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ return ("DEL_VLAN");
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ return ("CONFIG_PROMISCUOUS_MODE");
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ return ("GET_STATS");
+ case I40E_VIRTCHNL_OP_FCOE:
+ return ("FCOE");
+ case I40E_VIRTCHNL_OP_EVENT:
+ return ("EVENT");
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ return ("CONFIG_RSS_KEY");
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ return ("CONFIG_RSS_LUT");
+ case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ return ("GET_RSS_HENA_CAPS");
+ case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ return ("SET_RSS_HENA");
+ default:
+ return ("UNKNOWN");
+ }
+}
+
+static int
+ixl_vc_opcode_level(uint16_t opcode)
+{
+ switch (opcode) {
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ return (10);
+ default:
+ return (5);
+ }
+}
+
+static void
+ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
+ enum i40e_status_code status, void *msg, uint16_t len)
+{
+ struct i40e_hw *hw;
+ int global_vf_id;
+
+ hw = &pf->hw;
+ global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
+
+ I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
+ "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
+ ixl_vc_opcode_str(op), op, status, vf->vf_num);
+
+ i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
+}
+
+static void
+ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
+{
+
+ ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
+}
+
+static void
+ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
+ enum i40e_status_code status, const char *file, int line)
+{
+
+ I40E_VC_DEBUG(pf, 1,
+ "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
+ ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
+ status, vf->vf_num, file, line);
+ ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
+}
+
+static void
+ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_version_info reply;
+
+ if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ vf->version = ((struct i40e_virtchnl_version_info *)msg)->minor;
+
+ reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
+ reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
+ ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
+ sizeof(reply));
+}
+
+static void
+ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+
+ if (msg_size != 0) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ ixl_reset_vf(pf, vf);
+
+ /* No response to a reset message. */
+}
+
+static void
+ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_vf_resource reply;
+
+ if ((vf->version == 0 && msg_size != 0) ||
+ (vf->version == 1 && msg_size != 4)) {
+ device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
+ " for VF version %d.%d\n", I40E_VIRTCHNL_VERSION_MAJOR,
+ vf->version);
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ bzero(&reply, sizeof(reply));
+
+ if (vf->version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
+ reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ else
+ /* Force VF RSS setup by PF in 1.1+ VFs */
+ reply.vf_offload_flags = *(u32 *)msg & (
+ I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN);
+
+ reply.num_vsis = 1;
+ reply.num_queue_pairs = vf->vsi.num_queues;
+ reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+ reply.rss_key_size = 52;
+ reply.rss_lut_size = 64;
+ reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
+ reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+ reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
+ memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
+
+ ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_SUCCESS, &reply, sizeof(reply));
+}
+
+static int
+ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
+ struct i40e_virtchnl_txq_info *info)
+{
+ struct i40e_hw *hw;
+ struct i40e_hmc_obj_txq txq;
+ uint16_t global_queue_num, global_vf_num;
+ enum i40e_status_code status;
+ uint32_t qtx_ctl;
+
+ hw = &pf->hw;
+ global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
+ global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
+ bzero(&txq, sizeof(txq));
+
+ DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
+ vf->vf_num, global_queue_num, info->queue_id, global_vf_num);
+
+ status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
+ if (status != I40E_SUCCESS)
+ return (EINVAL);
+
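+	/* HMC queue-context base addresses are expressed in 128-byte units */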
+ txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
+
+ txq.head_wb_ena = info->headwb_enabled;
+ txq.head_wb_addr = info->dma_headwb_addr;
+ txq.qlen = info->ring_len;
+ txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
+ txq.rdylist_act = 0;
+
+ status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
+ if (status != I40E_SUCCESS)
+ return (EINVAL);
+
+ qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
+ (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
+ (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
+ wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
+ ixl_flush(hw);
+
+ ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);
+
+ return (0);
+}
+
+static int
+ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
+ struct i40e_virtchnl_rxq_info *info)
+{
+ struct i40e_hw *hw;
+ struct i40e_hmc_obj_rxq rxq;
+ uint16_t global_queue_num;
+ enum i40e_status_code status;
+
+ hw = &pf->hw;
+ global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
+ bzero(&rxq, sizeof(rxq));
+
+ DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
+ vf->vf_num, global_queue_num, info->queue_id);
+
+ if (info->databuffer_size > IXL_VF_MAX_BUFFER)
+ return (EINVAL);
+
+ if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
+ info->max_pkt_size < ETHER_MIN_LEN)
+ return (EINVAL);
+
+ if (info->splithdr_enabled) {
+ if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
+ return (EINVAL);
+
+ rxq.hsplit_0 = info->rx_split_pos &
+ (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
+ rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+ rxq.dtype = 2;
+ }
+
+ status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
+ if (status != I40E_SUCCESS)
+ return (EINVAL);
+
+ rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
+ rxq.qlen = info->ring_len;
+
+ rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
+
+ rxq.dsize = 1;
+ rxq.crcstrip = 1;
+ rxq.l2tsel = 1;
+
+ rxq.rxmax = info->max_pkt_size;
+ rxq.tphrdesc_ena = 1;
+ rxq.tphwdesc_ena = 1;
+ rxq.tphdata_ena = 1;
+ rxq.tphhead_ena = 1;
+ rxq.lrxqthresh = 2;
+ rxq.prefena = 1;
+
+ status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
+ if (status != I40E_SUCCESS)
+ return (EINVAL);
+
+ ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);
+
+ return (0);
+}
+
+static void
+ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_vsi_queue_config_info *info;
+ struct i40e_virtchnl_queue_pair_info *pair;
+ uint16_t expected_msg_size;
+ int i;
+
+ if (msg_size < sizeof(*info)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ info = msg;
+ if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) {
+ device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
+ vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues);
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
+ if (msg_size != expected_msg_size) {
+ device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
+ vf->vf_num, msg_size, expected_msg_size);
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ if (info->vsi_id != vf->vsi.vsi_num) {
+ device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
+ vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < info->num_queue_pairs; i++) {
+ pair = &info->qpair[i];
+
+ if (pair->txq.vsi_id != vf->vsi.vsi_num ||
+ pair->rxq.vsi_id != vf->vsi.vsi_num ||
+ pair->txq.queue_id != pair->rxq.queue_id ||
+ pair->txq.queue_id >= vf->vsi.num_queues) {
+
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
+ return;
+ }
+
+ if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
+ return;
+ }
+
+ if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
+}
+
+static void
+ixl_vf_set_qctl(struct ixl_pf *pf,
+ const struct i40e_virtchnl_vector_map *vector,
+ enum i40e_queue_type cur_type, uint16_t cur_queue,
+ enum i40e_queue_type *last_type, uint16_t *last_queue)
+{
+ uint32_t offset, qctl;
+ uint16_t itr_indx;
+
+ if (cur_type == I40E_QUEUE_TYPE_RX) {
+ offset = I40E_QINT_RQCTL(cur_queue);
+ itr_indx = vector->rxitr_idx;
+ } else {
+ offset = I40E_QINT_TQCTL(cur_queue);
+ itr_indx = vector->txitr_idx;
+ }
+
+ qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
+
+ wr32(&pf->hw, offset, qctl);
+
+ *last_type = cur_type;
+ *last_queue = cur_queue;
+}
+
+static void
+ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
+ const struct i40e_virtchnl_vector_map *vector)
+{
+ struct i40e_hw *hw;
+ u_int qindex;
+ enum i40e_queue_type type, last_type;
+ uint32_t lnklst_reg;
+ uint16_t rxq_map, txq_map, cur_queue, last_queue;
+
+ hw = &pf->hw;
+
+ rxq_map = vector->rxq_map;
+ txq_map = vector->txq_map;
+
+ last_queue = IXL_END_OF_INTR_LNKLST;
+ last_type = I40E_QUEUE_TYPE_RX;
+
+ /*
+ * The datasheet says to optimize performance, RX queues and TX queues
+ * should be interleaved in the interrupt linked list, so we process
+ * both at once here.
+ */
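+	/*
+	 * Illustrative: with TX and RX queues 0-1 mapped to one vector, the
+	 * chain is written back-to-front, so traversal from the head register
+	 * visits RX1 -> TX1 -> RX0 -> TX0 -> end of list.
+	 */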
+ while ((rxq_map != 0) || (txq_map != 0)) {
+ if (txq_map != 0) {
+ qindex = ffs(txq_map) - 1;
+ type = I40E_QUEUE_TYPE_TX;
+ cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
+ ixl_vf_set_qctl(pf, vector, type, cur_queue,
+ &last_type, &last_queue);
+ txq_map &= ~(1 << qindex);
+ }
+
+ if (rxq_map != 0) {
+ qindex = ffs(rxq_map) - 1;
+ type = I40E_QUEUE_TYPE_RX;
+ cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
+ ixl_vf_set_qctl(pf, vector, type, cur_queue,
+ &last_type, &last_queue);
+ rxq_map &= ~(1 << qindex);
+ }
+ }
+
+ if (vector->vector_id == 0)
+ lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
+ else
+ lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
+ vf->vf_num);
+ wr32(hw, lnklst_reg,
+ (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+ (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
+
+ ixl_flush(hw);
+}
+
+static void
+ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_irq_map_info *map;
+ struct i40e_virtchnl_vector_map *vector;
+ struct i40e_hw *hw;
+ int i, largest_txq, largest_rxq;
+
+ hw = &pf->hw;
+
+ if (msg_size < sizeof(*map)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ map = msg;
+ if (map->num_vectors == 0) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < map->num_vectors; i++) {
+ vector = &map->vecmap[i];
+
+ if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
+ vector->vsi_id != vf->vsi.vsi_num) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
+ return;
+ }
+
+ if (vector->rxq_map != 0) {
+ largest_rxq = fls(vector->rxq_map) - 1;
+ if (largest_rxq >= vf->vsi.num_queues) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ if (vector->txq_map != 0) {
+ largest_txq = fls(vector->txq_map) - 1;
+ if (largest_txq >= vf->vsi.num_queues) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
+ vector->txitr_idx > IXL_MAX_ITR_IDX) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ ixl_vf_config_vector(pf, vf, vector);
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
+}
+
+static void
+ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_queue_select *select;
+ int error = 0;
+
+ if (msg_size != sizeof(*select)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ select = msg;
+ if (select->vsi_id != vf->vsi.vsi_num ||
+ select->rx_queues == 0 || select->tx_queues == 0) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ /* Enable TX rings selected by the VF */
+ for (int i = 0; i < 32; i++) {
+ if ((1 << i) & select->tx_queues) {
+ /* Warn if queue is out of VF allocation range */
+ if (i >= vf->vsi.num_queues) {
+ device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
+ vf->vf_num, i);
+ break;
+ }
+ /* Skip this queue if it hasn't been configured */
+ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
+ continue;
+ /* Warn if this queue is already marked as enabled */
+ if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
+ device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n",
+ vf->vf_num, i);
+
+ error = ixl_enable_tx_ring(pf, &vf->qtag, i);
+ if (error)
+ break;
+ else
+ ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
+ }
+ }
+
+ /* Enable RX rings selected by the VF */
+ for (int i = 0; i < 32; i++) {
+ if ((1 << i) & select->rx_queues) {
+ /* Warn if queue is out of VF allocation range */
+ if (i >= vf->vsi.num_queues) {
+ device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
+ vf->vf_num, i);
+ break;
+ }
+ /* Skip this queue if it hasn't been configured */
+ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
+ continue;
+ /* Warn if this queue is already marked as enabled */
+ if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
+ device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n",
+ vf->vf_num, i);
+ error = ixl_enable_rx_ring(pf, &vf->qtag, i);
+ if (error)
+ break;
+ else
+ ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
+ }
+ }
+
+ if (error) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_ERR_TIMEOUT);
+ return;
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
+}
+
+static void
+ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
+ void *msg, uint16_t msg_size)
+{
+ struct i40e_virtchnl_queue_select *select;
+ int error = 0;
+
+ if (msg_size != sizeof(*select)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ select = msg;
+ if (select->vsi_id != vf->vsi.vsi_num ||
+ select->rx_queues == 0 || select->tx_queues == 0) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ /* Disable TX rings selected by the VF */
+ for (int i = 0; i < 32; i++) {
+ if ((1 << i) & select->tx_queues) {
+ /* Warn if queue is out of VF allocation range */
+ if (i >= vf->vsi.num_queues) {
+ device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
+ vf->vf_num, i);
+ break;
+ }
+ /* Skip this queue if it hasn't been configured */
+ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
+ continue;
+ /* Warn if this queue is already marked as disabled */
+ if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
+ device_printf(pf->dev, "VF %d: TX ring %d is already disabled!\n",
+ vf->vf_num, i);
+ continue;
+ }
+ error = ixl_disable_tx_ring(pf, &vf->qtag, i);
+ if (error)
+ break;
+ else
+ ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
+ }
+ }
+
+	/* Disable RX rings selected by the VF */
+ for (int i = 0; i < 32; i++) {
+ if ((1 << i) & select->rx_queues) {
+ /* Warn if queue is out of VF allocation range */
+ if (i >= vf->vsi.num_queues) {
+ device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
+ vf->vf_num, i);
+ break;
+ }
+ /* Skip this queue if it hasn't been configured */
+ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
+ continue;
+ /* Warn if this queue is already marked as disabled */
+ if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
+ device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n",
+ vf->vf_num, i);
+ continue;
+ }
+ error = ixl_disable_rx_ring(pf, &vf->qtag, i);
+ if (error)
+ break;
+ else
+ ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
+ }
+ }
+
+ if (error) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_ERR_TIMEOUT);
+ return;
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
+}
+
+static bool
+ixl_zero_mac(const uint8_t *addr)
+{
+ uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
+
+ return (cmp_etheraddr(addr, zero));
+}
+
+static bool
+ixl_bcast_mac(const uint8_t *addr)
+{
+
+ return (cmp_etheraddr(addr, ixl_bcast_addr));
+}
+
+static int
+ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
+{
+
+ if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
+ return (EINVAL);
+
+ /*
+ * If the VF is not allowed to change its MAC address, don't let it
+ * set a MAC filter for an address that is not a multicast address and
+ * is not its assigned MAC.
+ */
+ if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
+ !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
+ return (EPERM);
+
+ return (0);
+}
+
+static void
+ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_ether_addr_list *addr_list;
+ struct i40e_virtchnl_ether_addr *addr;
+ struct ixl_vsi *vsi;
+ int i;
+ size_t expected_size;
+
+ vsi = &vf->vsi;
+
+ if (msg_size < sizeof(*addr_list)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ addr_list = msg;
+ expected_size = sizeof(*addr_list) +
+ addr_list->num_elements * sizeof(*addr);
+
+ if (addr_list->num_elements == 0 ||
+ addr_list->vsi_id != vsi->vsi_num ||
+ msg_size != expected_size) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ addr = &addr_list->list[i];
+ ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
+}
+
+static void
+ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_ether_addr_list *addr_list;
+ struct i40e_virtchnl_ether_addr *addr;
+ size_t expected_size;
+ int i;
+
+ if (msg_size < sizeof(*addr_list)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ addr_list = msg;
+ expected_size = sizeof(*addr_list) +
+ addr_list->num_elements * sizeof(*addr);
+
+ if (addr_list->num_elements == 0 ||
+ addr_list->vsi_id != vf->vsi.vsi_num ||
+ msg_size != expected_size) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ addr = &addr_list->list[i];
+ if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
+ i40e_send_vf_nack(pf, vf,
+			    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ addr = &addr_list->list[i];
+ ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
+}
+
+static enum i40e_status_code
+ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_vsi_context vsi_ctx;
+
+ vsi_ctx.seid = vf->vsi.seid;
+
+ bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
+ vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
+}
+
+static void
+ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_vlan_filter_list *filter_list;
+ enum i40e_status_code code;
+ size_t expected_size;
+ int i;
+
+ if (msg_size < sizeof(*filter_list)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ filter_list = msg;
+ expected_size = sizeof(*filter_list) +
+ filter_list->num_elements * sizeof(uint16_t);
+ if (filter_list->num_elements == 0 ||
+ filter_list->vsi_id != vf->vsi.vsi_num ||
+ msg_size != expected_size) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < filter_list->num_elements; i++) {
+ if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ code = ixl_vf_enable_vlan_strip(pf, vf);
+ if (code != I40E_SUCCESS) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_ERR_PARAM);
+		return;
+	}
+
+ for (i = 0; i < filter_list->num_elements; i++)
+ ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
+}
+
+static void
+ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_vlan_filter_list *filter_list;
+ int i;
+ size_t expected_size;
+
+ if (msg_size < sizeof(*filter_list)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ filter_list = msg;
+ expected_size = sizeof(*filter_list) +
+ filter_list->num_elements * sizeof(uint16_t);
+ if (filter_list->num_elements == 0 ||
+ filter_list->vsi_id != vf->vsi.vsi_num ||
+ msg_size != expected_size) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < filter_list->num_elements; i++) {
+ if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
+			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < filter_list->num_elements; i++)
+ ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
+}
+
+static void
+ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
+ void *msg, uint16_t msg_size)
+{
+ struct i40e_virtchnl_promisc_info *info;
+ enum i40e_status_code code;
+
+ if (msg_size != sizeof(*info)) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
+ return;
+ }
+
+ if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
+ return;
+ }
+
+ info = msg;
+ if (info->vsi_id != vf->vsi.vsi_num) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
+ return;
+ }
+
+ code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
+ info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
+ if (code != I40E_SUCCESS) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
+ return;
+ }
+
+ code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
+ info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
+ if (code != I40E_SUCCESS) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
+ return;
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
+}
+
+static void
+ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_queue_select *queue;
+
+ if (msg_size != sizeof(*queue)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ queue = msg;
+ if (queue->vsi_id != vf->vsi.vsi_num) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ ixl_update_eth_stats(&vf->vsi);
+
+ ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
+}
+
+static void
+ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_hw *hw;
+ struct i40e_virtchnl_rss_key *key;
+ struct i40e_aqc_get_set_rss_key_data key_data;
+ enum i40e_status_code status;
+
+ hw = &pf->hw;
+
+ if (msg_size < sizeof(*key)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ key = msg;
+
+ if (key->key_len > 52) {
+ device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
+ vf->vf_num, key->key_len, 52);
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ if (key->vsi_id != vf->vsi.vsi_num) {
+ device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
+ vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ /* Fill out hash using MAC-dependent method */
+ if (hw->mac.type == I40E_MAC_X722) {
+ bzero(&key_data, sizeof(key_data));
+ if (key->key_len <= 40)
+ bcopy(key->key, key_data.standard_rss_key, key->key_len);
+ else {
+ bcopy(key->key, key_data.standard_rss_key, 40);
+ bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
+ }
+ status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
+ if (status) {
+ device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
+ i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ I40E_ERR_ADMIN_QUEUE_ERROR);
+ return;
+ }
+ } else {
+ for (int i = 0; i < (key->key_len / 4); i++)
+ i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
+ }
+
+ DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
+ vf->vf_num, key->key[0]);
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY);
+}
+
+static void
+ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_hw *hw;
+ struct i40e_virtchnl_rss_lut *lut;
+ enum i40e_status_code status;
+
+ hw = &pf->hw;
+
+ if (msg_size < sizeof(*lut)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ lut = msg;
+
+ if (lut->lut_entries > 64) {
+ device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
+ vf->vf_num, lut->lut_entries, 64);
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ if (lut->vsi_id != vf->vsi.vsi_num) {
+ device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
+ vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ /* Fill out LUT using MAC-dependent method */
+ if (hw->mac.type == I40E_MAC_X722) {
+ status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
+ if (status) {
+ device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
+ i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ I40E_ERR_ADMIN_QUEUE_ERROR);
+ return;
+ }
+ } else {
+ for (int i = 0; i < (lut->lut_entries / 4); i++)
+ i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
+ }
+
+ DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
+ vf->vf_num, lut->lut[0], lut->lut_entries);
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT);
+}
+
+static void
+ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_hw *hw;
+ struct i40e_virtchnl_rss_hena *hena;
+
+ hw = &pf->hw;
+
+ if (msg_size < sizeof(*hena)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ hena = msg;
+
+ /* Set HENA */
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
+
+ DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
+ vf->vf_num, hena->hena);
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA);
+}
+
+void
+ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
+{
+ struct ixl_vf *vf;
+ void *msg;
+ uint16_t vf_num, msg_size;
+ uint32_t opcode;
+
+ vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
+ opcode = le32toh(event->desc.cookie_high);
+
+ if (vf_num >= pf->num_vfs) {
+ device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
+ return;
+ }
+
+ vf = &pf->vfs[vf_num];
+ msg = event->msg_buf;
+ msg_size = event->msg_len;
+
+ I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
+ "Got msg %s(%d) from%sVF-%d of size %d\n",
+ ixl_vc_opcode_str(opcode), opcode,
+ (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
+ vf_num, msg_size);
+
+ /* This must be a stray msg from a previously destroyed VF. */
+ if (!(vf->vf_flags & VF_FLAG_ENABLED))
+ return;
+
+ switch (opcode) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ ixl_vf_version_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_RESET_VF:
+ ixl_vf_reset_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
+ break;
+
+ /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
+ case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ default:
+ i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
+ break;
+ }
+}
+
+/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
+void
+ixl_handle_vflr(void *arg, int pending)
+{
+ struct ixl_pf *pf;
+ struct ixl_vf *vf;
+ struct i40e_hw *hw;
+ uint16_t global_vf_num;
+ uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
+ int i;
+
+ pf = arg;
+ hw = &pf->hw;
+
+ IXL_PF_LOCK(pf);
+ for (i = 0; i < pf->num_vfs; i++) {
+ global_vf_num = hw->func_caps.vf_base_id + i;
+
+ vf = &pf->vfs[i];
+ if (!(vf->vf_flags & VF_FLAG_ENABLED))
+ continue;
+
+ vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
+ vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
+ vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
+ if (vflrstat & vflrstat_mask) {
+ wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
+ vflrstat_mask);
+
+ ixl_reinit_vf(pf, vf);
+ }
+ }
+
+ icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
+ icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
+ ixl_flush(hw);
+
+ IXL_PF_UNLOCK(pf);
+}
+
+static int
+ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
+{
+
+ switch (err) {
+ case I40E_AQ_RC_EPERM:
+ return (EPERM);
+ case I40E_AQ_RC_ENOENT:
+ return (ENOENT);
+ case I40E_AQ_RC_ESRCH:
+ return (ESRCH);
+ case I40E_AQ_RC_EINTR:
+ return (EINTR);
+ case I40E_AQ_RC_EIO:
+ return (EIO);
+ case I40E_AQ_RC_ENXIO:
+ return (ENXIO);
+ case I40E_AQ_RC_E2BIG:
+ return (E2BIG);
+ case I40E_AQ_RC_EAGAIN:
+ return (EAGAIN);
+ case I40E_AQ_RC_ENOMEM:
+ return (ENOMEM);
+ case I40E_AQ_RC_EACCES:
+ return (EACCES);
+ case I40E_AQ_RC_EFAULT:
+ return (EFAULT);
+ case I40E_AQ_RC_EBUSY:
+ return (EBUSY);
+ case I40E_AQ_RC_EEXIST:
+ return (EEXIST);
+ case I40E_AQ_RC_EINVAL:
+ return (EINVAL);
+ case I40E_AQ_RC_ENOTTY:
+ return (ENOTTY);
+ case I40E_AQ_RC_ENOSPC:
+ return (ENOSPC);
+ case I40E_AQ_RC_ENOSYS:
+ return (ENOSYS);
+ case I40E_AQ_RC_ERANGE:
+ return (ERANGE);
+ case I40E_AQ_RC_EFLUSHED:
+ return (EINVAL); /* No exact equivalent in errno.h */
+ case I40E_AQ_RC_BAD_ADDR:
+ return (EFAULT);
+ case I40E_AQ_RC_EMODE:
+ return (EPERM);
+ case I40E_AQ_RC_EFBIG:
+ return (EFBIG);
+ default:
+ return (EINVAL);
+ }
+}
+
+int
+ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
+{
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ struct ixl_vsi *pf_vsi;
+ enum i40e_status_code ret;
+ int i, error;
+
+ pf = device_get_softc(dev);
+ hw = &pf->hw;
+ pf_vsi = &pf->vsi;
+
+ IXL_PF_LOCK(pf);
+ pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
+ M_ZERO);
+
+ if (pf->vfs == NULL) {
+ error = ENOMEM;
+ goto fail;
+ }
+
+ for (i = 0; i < num_vfs; i++)
+ sysctl_ctx_init(&pf->vfs[i].ctx);
+
+ ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
+ 1, FALSE, &pf->veb_seid, FALSE, NULL);
+ if (ret != I40E_SUCCESS) {
+ error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
+ device_printf(dev, "add_veb failed; code=%d error=%d", ret,
+ error);
+ goto fail;
+ }
+
+ ixl_enable_adminq(hw);
+
+ pf->num_vfs = num_vfs;
+ IXL_PF_UNLOCK(pf);
+ return (0);
+
+fail:
+ free(pf->vfs, M_IXL);
+ pf->vfs = NULL;
+ IXL_PF_UNLOCK(pf);
+ return (error);
+}
+
+void
+ixl_iov_uninit(device_t dev)
+{
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ struct ixl_vsi *vsi;
+ struct ifnet *ifp;
+ struct ixl_vf *vfs;
+ int i, num_vfs;
+
+ pf = device_get_softc(dev);
+ hw = &pf->hw;
+ vsi = &pf->vsi;
+ ifp = vsi->ifp;
+
+ IXL_PF_LOCK(pf);
+ for (i = 0; i < pf->num_vfs; i++) {
+ if (pf->vfs[i].vsi.seid != 0)
+ i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
+ ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
+ DDPRINTF(dev, "VF %d: %d released\n",
+ i, pf->vfs[i].qtag.num_allocated);
+ DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
+ }
+
+ if (pf->veb_seid != 0) {
+ i40e_aq_delete_element(hw, pf->veb_seid, NULL);
+ pf->veb_seid = 0;
+ }
+
+ if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
+ ixl_disable_intr(vsi);
+ ixl_flush(hw);
+ }
+
+ vfs = pf->vfs;
+ num_vfs = pf->num_vfs;
+
+ pf->vfs = NULL;
+ pf->num_vfs = 0;
+ IXL_PF_UNLOCK(pf);
+
+ /* Do this after the unlock as sysctl_ctx_free might sleep. */
+ for (i = 0; i < num_vfs; i++)
+ sysctl_ctx_free(&vfs[i].ctx);
+ free(vfs, M_IXL);
+}
+
+static int
+ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
+{
+ device_t dev = pf->dev;
+ int error;
+
+	/* Validate num-queues, clamping it to the supported range if needed */
+	if (num_queues < 1) {
+		device_printf(dev, "Invalid num-queues (%d) for VF %d; using 1\n",
+		    num_queues, vf->vf_num);
+		num_queues = 1;
+	} else if (num_queues > 16) {
+		device_printf(dev, "Invalid num-queues (%d) for VF %d; using 16\n",
+		    num_queues, vf->vf_num);
+		num_queues = 16;
+	}
+ error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
+ if (error) {
+ device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
+ num_queues, vf->vf_num);
+ return (ENOSPC);
+ }
+
+ DDPRINTF(dev, "VF %d: %d allocated, %d active",
+ vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
+ DDPRINTF(dev, "Unallocated total: %d", ixl_pf_qmgr_get_num_free(&pf->qmgr));
+
+ return (0);
+}
+
+int
+ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
+{
+ char sysctl_name[QUEUE_NAME_LEN];
+ struct ixl_pf *pf;
+ struct ixl_vf *vf;
+ const void *mac;
+ size_t size;
+ int error;
+ int vf_num_queues;
+
+ pf = device_get_softc(dev);
+ vf = &pf->vfs[vfnum];
+
+ IXL_PF_LOCK(pf);
+ vf->vf_num = vfnum;
+
+ vf->vsi.back = pf;
+ vf->vf_flags = VF_FLAG_ENABLED;
+ SLIST_INIT(&vf->vsi.ftl);
+
+ /* Reserve queue allocation from PF */
+ vf_num_queues = nvlist_get_number(params, "num-queues");
+ error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
+ if (error != 0)
+ goto out;
+
+ error = ixl_vf_setup_vsi(pf, vf);
+ if (error != 0)
+ goto out;
+
+ if (nvlist_exists_binary(params, "mac-addr")) {
+ mac = nvlist_get_binary(params, "mac-addr", &size);
+ bcopy(mac, vf->mac, ETHER_ADDR_LEN);
+
+ if (nvlist_get_bool(params, "allow-set-mac"))
+ vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
+ } else
+ /*
+ * If the administrator has not specified a MAC address then
+ * we must allow the VF to choose one.
+ */
+ vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
+
+ if (nvlist_get_bool(params, "mac-anti-spoof"))
+ vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
+
+ if (nvlist_get_bool(params, "allow-promisc"))
+ vf->vf_flags |= VF_FLAG_PROMISC_CAP;
+
+ vf->vf_flags |= VF_FLAG_VLAN_CAP;
+
+ ixl_reset_vf(pf, vf);
+out:
+ IXL_PF_UNLOCK(pf);
+ if (error == 0) {
+ snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
+ ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
+ }
+
+ return (error);
+}
+
diff --git a/sys/dev/ixl/ixl_pf_iov.h b/sys/dev/ixl/ixl_pf_iov.h
new file mode 100644
index 000000000000..ae8abc208d4b
--- /dev/null
+++ b/sys/dev/ixl/ixl_pf_iov.h
@@ -0,0 +1,62 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#ifndef _IXL_PF_IOV_H_
+#define _IXL_PF_IOV_H_
+
+#include "ixl_pf.h"
+
+#include <sys/nv.h>
+#include <sys/iov_schema.h>
+#include <dev/pci/pci_iov.h>
+
+/* Public functions */
+
+/*
+ * These three are DEVMETHODs required for SR-IOV PF support.
+ */
+int ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
+void ixl_iov_uninit(device_t dev);
+int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
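+
+/*
+ * Illustrative wiring in the PF driver's method table (sketch):
+ *
+ *	DEVMETHOD(pci_iov_init, ixl_iov_init),
+ *	DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
+ *	DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
+ */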
+
+/*
+ * The standard PF driver needs to call these during normal execution when
+ * SR-IOV mode is active.
+ */
+void ixl_initialize_sriov(struct ixl_pf *pf);
+void ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event);
+void ixl_handle_vflr(void *arg, int pending);
+
+#endif /* _IXL_PF_IOV_H_ */
diff --git a/sys/dev/ixl/ixl_pf_main.c b/sys/dev/ixl/ixl_pf_main.c
new file mode 100644
index 000000000000..fabc8b7bfbc2
--- /dev/null
+++ b/sys/dev/ixl/ixl_pf_main.c
@@ -0,0 +1,5556 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#include "ixl_pf.h"
+
+#ifdef PCI_IOV
+#include "ixl_pf_iov.h"
+#endif
+
+#ifdef DEV_NETMAP
+#include <net/netmap.h>
+#include <sys/selinfo.h>
+#include <dev/netmap/netmap_kern.h>
+#endif /* DEV_NETMAP */
+
+static int ixl_setup_queue(struct ixl_queue *, struct ixl_pf *, int);
+
+/* Sysctls */
+static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
+static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
+static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
+
+/* Debug Sysctls */
+static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
+
+void
+ixl_dbg(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
+{
+ va_list args;
+
+ if (!(mask & pf->dbg_mask))
+ return;
+
+	/* Re-implement device_printf() so the va_list is consumed correctly */
+	device_print_prettyname(pf->dev);
+	va_start(args, fmt);
+	vprintf(fmt, args);
+	va_end(args);
+}
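+
+/*
+ * Example usage (illustrative only): with IXL_DBG_SWITCH_INFO set in
+ * pf->dbg_mask, the following prints with the device name prefixed:
+ *
+ *	ixl_dbg(pf, IXL_DBG_SWITCH_INFO, "seid: %d\n", vsi->seid);
+ */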
+
+/*
+** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
+*/
+void
+ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
+{
+ u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
+ u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
+ u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
+
+ sbuf_printf(buf,
+ "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
+ IXL_NVM_VERSION_HI_SHIFT,
+ (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
+ IXL_NVM_VERSION_LO_SHIFT,
+ hw->nvm.eetrack,
+ oem_ver, oem_build, oem_patch);
+}
+
+void
+ixl_print_nvm_version(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct sbuf *sbuf;
+
+ sbuf = sbuf_new_auto();
+ ixl_nvm_version_str(hw, sbuf);
+ sbuf_finish(sbuf);
+ device_printf(dev, "%s\n", sbuf_data(sbuf));
+ sbuf_delete(sbuf);
+}
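+
+/*
+ * The resulting log line looks like (hypothetical values):
+ *
+ *	ixl0: fw 5.0.40043 api 1.5 nvm 5.04 etid 80002537 oem 1.38.0
+ */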
+
+static void
+ixl_configure_tx_itr(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+
+ vsi->tx_itr_setting = pf->tx_itr;
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+
+ wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
+ vsi->tx_itr_setting);
+ txr->itr = vsi->tx_itr_setting;
+ txr->latency = IXL_AVE_LATENCY;
+ }
+}
+
+static void
+ixl_configure_rx_itr(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+
+ vsi->rx_itr_setting = pf->rx_itr;
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ struct rx_ring *rxr = &que->rxr;
+
+ wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
+ vsi->rx_itr_setting);
+ rxr->itr = vsi->rx_itr_setting;
+ rxr->latency = IXL_AVE_LATENCY;
+ }
+}
+
+/*
+ * Write PF ITR values to queue ITR registers.
+ */
+void
+ixl_configure_itr(struct ixl_pf *pf)
+{
+ ixl_configure_tx_itr(pf);
+ ixl_configure_rx_itr(pf);
+}
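+
+/*
+ * Note (illustrative): the ITR registers written above take values in
+ * 2-usec units, so a setting of 0x3E (62) yields 62 * 2 = 124 us
+ * between interrupts, i.e. roughly 8000 interrupts/sec per queue at
+ * full load.
+ */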
+
+
+/*********************************************************************
+ * Init entry point
+ *
+ * This routine is used in two ways: by the stack as the init entry
+ * point in the network interface structure, and by the driver as a
+ * hw/sw initialization routine to bring the device to a consistent
+ * state.
+ *
+ **********************************************************************/
+void
+ixl_init_locked(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ifnet *ifp = vsi->ifp;
+ device_t dev = pf->dev;
+ struct i40e_filter_control_settings filter;
+ u8 tmpaddr[ETHER_ADDR_LEN];
+ int ret;
+
+ mtx_assert(&pf->pf_mtx, MA_OWNED);
+ INIT_DEBUGOUT("ixl_init_locked: begin");
+
+ ixl_stop_locked(pf);
+
+ /* Get the latest mac address... User might use a LAA */
+ bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
+ I40E_ETH_LENGTH_OF_ADDRESS);
+ if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
+ (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
+ ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
+ bcopy(tmpaddr, hw->mac.addr,
+ I40E_ETH_LENGTH_OF_ADDRESS);
+ ret = i40e_aq_mac_address_write(hw,
+ I40E_AQC_WRITE_TYPE_LAA_ONLY,
+ hw->mac.addr, NULL);
+ if (ret) {
+			device_printf(dev,
+			    "LAA address change failed!\n");
+ return;
+ }
+ }
+
+ ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
+
+ /* Set the various hardware offload abilities */
+ ifp->if_hwassist = 0;
+ if (ifp->if_capenable & IFCAP_TSO)
+ ifp->if_hwassist |= CSUM_TSO;
+ if (ifp->if_capenable & IFCAP_TXCSUM)
+ ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
+ if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
+
+ /* Set up the device filtering */
+ bzero(&filter, sizeof(filter));
+ filter.enable_ethtype = TRUE;
+ filter.enable_macvlan = TRUE;
+ filter.enable_fdir = FALSE;
+ filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
+ if (i40e_set_filter_control(hw, &filter))
+ device_printf(dev, "i40e_set_filter_control() failed\n");
+
+ /* Prepare the VSI: rings, hmc contexts, etc... */
+ if (ixl_initialize_vsi(vsi)) {
+ device_printf(dev, "initialize vsi failed!!\n");
+ return;
+ }
+
+ /* Set up RSS */
+ ixl_config_rss(pf);
+
+ /* Add protocol filters to list */
+ ixl_init_filters(vsi);
+
+ /* Setup vlan's if needed */
+ ixl_setup_vlan_filters(vsi);
+
+ /* Set up MSI/X routing and the ITR settings */
+ if (pf->enable_msix) {
+ ixl_configure_queue_intr_msix(pf);
+ ixl_configure_itr(pf);
+ } else
+ ixl_configure_legacy(pf);
+
+ ixl_enable_rings(vsi);
+
+ i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
+
+ ixl_reconfigure_filters(vsi);
+
+ /* And now turn on interrupts */
+ ixl_enable_intr(vsi);
+
+ /* Get link info */
+ hw->phy.get_link_info = TRUE;
+ i40e_get_link_status(hw, &pf->link_up);
+ ixl_update_link_status(pf);
+
+ /* Set initial advertised speed sysctl value */
+ ixl_get_initial_advertised_speeds(pf);
+
+ /* Start the local timer */
+ callout_reset(&pf->timer, hz, ixl_local_timer, pf);
+
+ /* Now inform the stack we're ready */
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+}
+
+
+/*********************************************************************
+ *
+ * Get the hardware capabilities
+ *
+ **********************************************************************/
+
+int
+ixl_get_hw_capabilities(struct ixl_pf *pf)
+{
+ struct i40e_aqc_list_capabilities_element_resp *buf;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int error, len;
+ u16 needed;
+ bool again = TRUE;
+
+ len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+retry:
+ if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
+ malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate cap memory\n");
+ return (ENOMEM);
+ }
+
+ /* This populates the hw struct */
+ error = i40e_aq_discover_capabilities(hw, buf, len,
+ &needed, i40e_aqc_opc_list_func_capabilities, NULL);
+ free(buf, M_DEVBUF);
+ if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
+ (again == TRUE)) {
+ /* retry once with a larger buffer */
+ again = FALSE;
+ len = needed;
+ goto retry;
+ } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
+ device_printf(dev, "capability discovery failed: %d\n",
+ pf->hw.aq.asq_last_status);
+ return (ENODEV);
+ }
+
+ /* Capture this PF's starting queue pair */
+ pf->qbase = hw->func_caps.base_queue;
+
+#ifdef IXL_DEBUG
+ device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
+ "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
+ hw->pf_id, hw->func_caps.num_vfs,
+ hw->func_caps.num_msix_vectors,
+ hw->func_caps.num_msix_vectors_vf,
+ hw->func_caps.fd_filters_guaranteed,
+ hw->func_caps.fd_filters_best_effort,
+ hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp,
+ hw->func_caps.base_queue);
+#endif
+ /* Print a subset of the capability information. */
+ device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
+ hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
+ hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
+ (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
+ (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
+ "MDIO shared");
+
+ return (error);
+}
+
+void
+ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
+{
+ device_t dev = vsi->dev;
+
+ /* Enable/disable TXCSUM/TSO4 */
+ if (!(ifp->if_capenable & IFCAP_TXCSUM)
+ && !(ifp->if_capenable & IFCAP_TSO4)) {
+ if (mask & IFCAP_TXCSUM) {
+ ifp->if_capenable |= IFCAP_TXCSUM;
+ /* enable TXCSUM, restore TSO if previously enabled */
+ if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
+ vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
+ ifp->if_capenable |= IFCAP_TSO4;
+ }
+ }
+ else if (mask & IFCAP_TSO4) {
+ ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
+ vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
+ device_printf(dev,
+ "TSO4 requires txcsum, enabling both...\n");
+ }
+ } else if((ifp->if_capenable & IFCAP_TXCSUM)
+ && !(ifp->if_capenable & IFCAP_TSO4)) {
+ if (mask & IFCAP_TXCSUM)
+ ifp->if_capenable &= ~IFCAP_TXCSUM;
+ else if (mask & IFCAP_TSO4)
+ ifp->if_capenable |= IFCAP_TSO4;
+ } else if((ifp->if_capenable & IFCAP_TXCSUM)
+ && (ifp->if_capenable & IFCAP_TSO4)) {
+ if (mask & IFCAP_TXCSUM) {
+ vsi->flags |= IXL_FLAGS_KEEP_TSO4;
+ ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
+ device_printf(dev,
+ "TSO4 requires txcsum, disabling both...\n");
+ } else if (mask & IFCAP_TSO4)
+ ifp->if_capenable &= ~IFCAP_TSO4;
+ }
+
+ /* Enable/disable TXCSUM_IPV6/TSO6 */
+ if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ && !(ifp->if_capenable & IFCAP_TSO6)) {
+ if (mask & IFCAP_TXCSUM_IPV6) {
+ ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
+ if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
+ vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
+ ifp->if_capenable |= IFCAP_TSO6;
+ }
+ } else if (mask & IFCAP_TSO6) {
+ ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
+ vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
+ device_printf(dev,
+ "TSO6 requires txcsum6, enabling both...\n");
+ }
+ } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ && !(ifp->if_capenable & IFCAP_TSO6)) {
+ if (mask & IFCAP_TXCSUM_IPV6)
+ ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
+ else if (mask & IFCAP_TSO6)
+ ifp->if_capenable |= IFCAP_TSO6;
+ } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ && (ifp->if_capenable & IFCAP_TSO6)) {
+ if (mask & IFCAP_TXCSUM_IPV6) {
+ vsi->flags |= IXL_FLAGS_KEEP_TSO6;
+ ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
+ device_printf(dev,
+ "TSO6 requires txcsum6, disabling both...\n");
+ } else if (mask & IFCAP_TSO6)
+ ifp->if_capenable &= ~IFCAP_TSO6;
+ }
+}
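+
+/*
+ * Summary of the dependency enforced above (TSO requires TXCSUM);
+ * the same table applies to the IPv6 pair:
+ *
+ *	toggled		csum=0,tso=0	csum=1,tso=0	csum=1,tso=1
+ *	TXCSUM		enable csum	disable csum	disable both
+ *	TSO		enable both	enable tso	disable tso
+ *
+ * When both are force-disabled, IXL_FLAGS_KEEP_TSO* records that TSO
+ * was on, so re-enabling TXCSUM restores it.
+ */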
+
+/* For the set_advertise sysctl */
+void
+ixl_get_initial_advertised_speeds(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ enum i40e_status_code status;
+ struct i40e_aq_get_phy_abilities_resp abilities;
+
+ /* Set initial sysctl values */
+	status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities,
+	    NULL);
+ if (status) {
+ /* Non-fatal error */
+ device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n",
+ __func__, status);
+ return;
+ }
+
+ if (abilities.link_speed & I40E_LINK_SPEED_40GB)
+ pf->advertised_speed |= 0x10;
+ if (abilities.link_speed & I40E_LINK_SPEED_20GB)
+ pf->advertised_speed |= 0x8;
+ if (abilities.link_speed & I40E_LINK_SPEED_10GB)
+ pf->advertised_speed |= 0x4;
+ if (abilities.link_speed & I40E_LINK_SPEED_1GB)
+ pf->advertised_speed |= 0x2;
+ if (abilities.link_speed & I40E_LINK_SPEED_100MB)
+ pf->advertised_speed |= 0x1;
+}
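+
+/*
+ * advertised_speed bit layout used above and by the set_advertise
+ * sysctl:
+ *
+ *	0x1 - 100 Mb	0x2 - 1 Gb	0x4 - 10 Gb
+ *	0x8 - 20 Gb	0x10 - 40 Gb
+ *
+ * e.g. advertised_speed = 0x6 means 1 Gb and 10 Gb may be advertised.
+ */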
+
+int
+ixl_teardown_hw_structs(struct ixl_pf *pf)
+{
+ enum i40e_status_code status = 0;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+
+ /* Shutdown LAN HMC */
+ if (hw->hmc.hmc_obj) {
+ status = i40e_shutdown_lan_hmc(hw);
+ if (status) {
+ device_printf(dev,
+ "init: LAN HMC shutdown failure; status %d\n", status);
+ goto err_out;
+ }
+ }
+
+ // XXX: This gets called when we know the adminq is inactive;
+ // so we already know it's setup when we get here.
+
+ /* Shutdown admin queue */
+ status = i40e_shutdown_adminq(hw);
+ if (status)
+ device_printf(dev,
+ "init: Admin Queue shutdown failure; status %d\n", status);
+
+err_out:
+ return (status);
+}
+
+int
+ixl_reset(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ u8 set_fc_err_mask;
+ int error = 0;
+
+ // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
+ i40e_clear_hw(hw);
+ error = i40e_pf_reset(hw);
+ if (error) {
+		device_printf(dev, "init: PF reset failure\n");
+ error = EIO;
+ goto err_out;
+ }
+
+ error = i40e_init_adminq(hw);
+ if (error) {
+		device_printf(dev, "init: Admin queue init failure;"
+		    " status code %d\n", error);
+ error = EIO;
+ goto err_out;
+ }
+
+ i40e_clear_pxe_mode(hw);
+
+ error = ixl_get_hw_capabilities(pf);
+ if (error) {
+ device_printf(dev, "init: Error retrieving HW capabilities;"
+ " status code %d\n", error);
+ goto err_out;
+ }
+
+ error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp, 0, 0);
+ if (error) {
+ device_printf(dev, "init: LAN HMC init failed; status code %d\n",
+ error);
+ error = EIO;
+ goto err_out;
+ }
+
+ error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (error) {
+ device_printf(dev, "init: LAN HMC config failed; status code %d\n",
+ error);
+ error = EIO;
+ goto err_out;
+ }
+
+ // XXX: possible fix for panic, but our failure recovery is still broken
+ error = ixl_switch_config(pf);
+ if (error) {
+ device_printf(dev, "init: ixl_switch_config() failed: %d\n",
+ error);
+ goto err_out;
+ }
+
+ error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
+ NULL);
+ if (error) {
+ device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
+ " aq_err %d\n", error, hw->aq.asq_last_status);
+ error = EIO;
+ goto err_out;
+ }
+
+ error = i40e_set_fc(hw, &set_fc_err_mask, true);
+ if (error) {
+ device_printf(dev, "init: setting link flow control failed; retcode %d,"
+ " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
+ goto err_out;
+ }
+
+ // XXX: (Rebuild VSIs?)
+
+ /* Firmware delay workaround */
+ if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4)) {
+ i40e_msec_delay(75);
+ error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
+ if (error) {
+ device_printf(dev, "init: link restart failed, aq_err %d\n",
+ hw->aq.asq_last_status);
+ goto err_out;
+ }
+ }
+
+
+err_out:
+ return (error);
+}
+
+/*
+** MSIX Interrupt Handlers and Tasklets
+*/
+void
+ixl_handle_que(void *context, int pending)
+{
+ struct ixl_queue *que = context;
+ struct ixl_vsi *vsi = que->vsi;
+ struct i40e_hw *hw = vsi->hw;
+ struct tx_ring *txr = &que->txr;
+ struct ifnet *ifp = vsi->ifp;
+ bool more;
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ more = ixl_rxeof(que, IXL_RX_LIMIT);
+ IXL_TX_LOCK(txr);
+ ixl_txeof(que);
+ if (!drbr_empty(ifp, txr->br))
+ ixl_mq_start_locked(ifp, txr);
+ IXL_TX_UNLOCK(txr);
+ if (more) {
+ taskqueue_enqueue(que->tq, &que->task);
+ return;
+ }
+ }
+
+	/* Re-enable this interrupt */
+ ixl_enable_queue(hw, que->me);
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * Legacy Interrupt Service routine
+ *
+ **********************************************************************/
+void
+ixl_intr(void *arg)
+{
+ struct ixl_pf *pf = arg;
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+ struct ifnet *ifp = vsi->ifp;
+ struct tx_ring *txr = &que->txr;
+ u32 reg, icr0, mask;
+ bool more_tx, more_rx;
+
+ ++que->irqs;
+
+ /* Protect against spurious interrupts */
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ icr0 = rd32(hw, I40E_PFINT_ICR0);
+
+ reg = rd32(hw, I40E_PFINT_DYN_CTL0);
+ reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+
+ mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+#ifdef PCI_IOV
+ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
+ taskqueue_enqueue(pf->tq, &pf->vflr_task);
+#endif
+
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ taskqueue_enqueue(pf->tq, &pf->adminq);
+ return;
+ }
+
+ more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
+
+ IXL_TX_LOCK(txr);
+ more_tx = ixl_txeof(que);
+ if (!drbr_empty(vsi->ifp, txr->br))
+ more_tx = 1;
+ IXL_TX_UNLOCK(txr);
+
+ /* re-enable other interrupt causes */
+ wr32(hw, I40E_PFINT_ICR0_ENA, mask);
+
+ /* And now the queues */
+ reg = rd32(hw, I40E_QINT_RQCTL(0));
+ reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_RQCTL(0), reg);
+
+ reg = rd32(hw, I40E_QINT_TQCTL(0));
+ reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
+ wr32(hw, I40E_QINT_TQCTL(0), reg);
+
+ ixl_enable_legacy(hw);
+
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * MSIX VSI Interrupt Service routine
+ *
+ **********************************************************************/
+void
+ixl_msix_que(void *arg)
+{
+ struct ixl_queue *que = arg;
+ struct ixl_vsi *vsi = que->vsi;
+ struct i40e_hw *hw = vsi->hw;
+ struct tx_ring *txr = &que->txr;
+ bool more_tx, more_rx;
+
+ /* Protect against spurious interrupts */
+ if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
+ return;
+
+ ++que->irqs;
+
+ more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
+
+ IXL_TX_LOCK(txr);
+ more_tx = ixl_txeof(que);
+ /*
+ ** Make certain that if the stack
+ ** has anything queued the task gets
+ ** scheduled to handle it.
+ */
+ if (!drbr_empty(vsi->ifp, txr->br))
+ more_tx = 1;
+ IXL_TX_UNLOCK(txr);
+
+ ixl_set_queue_rx_itr(que);
+ ixl_set_queue_tx_itr(que);
+
+ if (more_tx || more_rx)
+ taskqueue_enqueue(que->tq, &que->task);
+ else
+ ixl_enable_queue(hw, que->me);
+
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * MSIX Admin Queue Interrupt Service routine
+ *
+ **********************************************************************/
+void
+ixl_msix_adminq(void *arg)
+{
+ struct ixl_pf *pf = arg;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ u32 reg, mask, rstat_reg;
+ bool do_task = FALSE;
+
+ ++pf->admin_irq;
+
+ reg = rd32(hw, I40E_PFINT_ICR0);
+ mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+ /* Check on the cause */
+ if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
+ do_task = TRUE;
+ }
+
+ if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
+ ixl_handle_mdd_event(pf);
+ mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
+ }
+
+ if (reg & I40E_PFINT_ICR0_GRST_MASK) {
+ device_printf(dev, "Reset Requested!\n");
+ rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
+ rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
+ >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
+ device_printf(dev, "Reset type: ");
+ switch (rstat_reg) {
+ /* These others might be handled similarly to an EMPR reset */
+ case I40E_RESET_CORER:
+ printf("CORER\n");
+ break;
+ case I40E_RESET_GLOBR:
+ printf("GLOBR\n");
+ break;
+ case I40E_RESET_EMPR:
+ printf("EMPR\n");
+ atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
+ break;
+ default:
+ printf("POR\n");
+ break;
+ }
+ /* overload admin queue task to check reset progress */
+ do_task = TRUE;
+ }
+
+ if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
+ device_printf(dev, "ECC Error detected!\n");
+ }
+
+ if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
+ reg = rd32(hw, I40E_PFHMC_ERRORINFO);
+ if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
+ device_printf(dev, "HMC Error detected!\n");
+ device_printf(dev, "INFO 0x%08x\n", reg);
+ reg = rd32(hw, I40E_PFHMC_ERRORDATA);
+ device_printf(dev, "DATA 0x%08x\n", reg);
+ wr32(hw, I40E_PFHMC_ERRORINFO, 0);
+ }
+ }
+
+ if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
+ device_printf(dev, "PCI Exception detected!\n");
+ }
+
+#ifdef PCI_IOV
+ if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
+ mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ taskqueue_enqueue(pf->tq, &pf->vflr_task);
+ }
+#endif
+
+ if (do_task)
+ taskqueue_enqueue(pf->tq, &pf->adminq);
+ else
+ ixl_enable_adminq(hw);
+}
+
+void
+ixl_set_promisc(struct ixl_vsi *vsi)
+{
+ struct ifnet *ifp = vsi->ifp;
+ struct i40e_hw *hw = vsi->hw;
+ int err, mcnt = 0;
+ bool uni = FALSE, multi = FALSE;
+
+ if (ifp->if_flags & IFF_ALLMULTI)
+ multi = TRUE;
+ else { /* Need to count the multicast addresses */
+ struct ifmultiaddr *ifma;
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ if (mcnt == MAX_MULTICAST_ADDR)
+ break;
+ mcnt++;
+ }
+ if_maddr_runlock(ifp);
+ }
+
+ if (mcnt >= MAX_MULTICAST_ADDR)
+ multi = TRUE;
+ if (ifp->if_flags & IFF_PROMISC)
+ uni = TRUE;
+
+ err = i40e_aq_set_vsi_unicast_promiscuous(hw,
+ vsi->seid, uni, NULL, TRUE);
+ err = i40e_aq_set_vsi_multicast_promiscuous(hw,
+ vsi->seid, multi, NULL);
+ return;
+}
+
+/*********************************************************************
+ * Filter Routines
+ *
+ * Routines for multicast and vlan filter management.
+ *
+ *********************************************************************/
+void
+ixl_add_multi(struct ixl_vsi *vsi)
+{
+ struct ifmultiaddr *ifma;
+ struct ifnet *ifp = vsi->ifp;
+ struct i40e_hw *hw = vsi->hw;
+ int mcnt = 0, flags;
+
+ IOCTL_DEBUGOUT("ixl_add_multi: begin");
+
+ if_maddr_rlock(ifp);
+ /*
+	** First just get a count, to decide if
+	** we simply use multicast promiscuous.
+ */
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ mcnt++;
+ }
+ if_maddr_runlock(ifp);
+
+ if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
+ /* delete existing MC filters */
+ ixl_del_hw_filters(vsi, mcnt);
+ i40e_aq_set_vsi_multicast_promiscuous(hw,
+ vsi->seid, TRUE, NULL);
+ return;
+ }
+
+ mcnt = 0;
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ ixl_add_mc_filter(vsi,
+ (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
+ mcnt++;
+ }
+ if_maddr_runlock(ifp);
+ if (mcnt > 0) {
+ flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
+ ixl_add_hw_filters(vsi, flags, mcnt);
+ }
+
+ IOCTL_DEBUGOUT("ixl_add_multi: end");
+ return;
+}
+
+void
+ixl_del_multi(struct ixl_vsi *vsi)
+{
+ struct ifnet *ifp = vsi->ifp;
+ struct ifmultiaddr *ifma;
+ struct ixl_mac_filter *f;
+ int mcnt = 0;
+ bool match = FALSE;
+
+ IOCTL_DEBUGOUT("ixl_del_multi: begin");
+
+ /* Search for removed multicast addresses */
+ if_maddr_rlock(ifp);
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
+ match = FALSE;
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
+ if (cmp_etheraddr(f->macaddr, mc_addr)) {
+ match = TRUE;
+ break;
+ }
+ }
+ if (match == FALSE) {
+ f->flags |= IXL_FILTER_DEL;
+ mcnt++;
+ }
+ }
+ }
+ if_maddr_runlock(ifp);
+
+ if (mcnt > 0)
+ ixl_del_hw_filters(vsi, mcnt);
+}
+
+
+/*********************************************************************
+ * Timer routine
+ *
+ * This routine checks for link status, updates statistics,
+ * and runs the watchdog check.
+ *
+ * Only runs when the driver is configured UP and RUNNING.
+ *
+ **********************************************************************/
+
+void
+ixl_local_timer(void *arg)
+{
+ struct ixl_pf *pf = arg;
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = pf->dev;
+ int hung = 0;
+ u32 mask;
+
+ mtx_assert(&pf->pf_mtx, MA_OWNED);
+
+ /* Fire off the adminq task */
+ taskqueue_enqueue(pf->tq, &pf->adminq);
+
+ /* Update stats */
+ ixl_update_stats_counters(pf);
+
+ /* Check status of the queues */
+ mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ /* Any queues with outstanding work get a sw irq */
+ if (que->busy)
+ wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
+ /*
+		** Each time txeof runs but finds uncleaned
+		** descriptors, it increments busy; once busy
+		** reaches IXL_MAX_TX_BUSY we declare the queue hung.
+ */
+ if (que->busy == IXL_QUEUE_HUNG) {
+ ++hung;
+ continue;
+ }
+ if (que->busy >= IXL_MAX_TX_BUSY) {
+#ifdef IXL_DEBUG
+ device_printf(dev, "Warning queue %d "
+ "appears to be hung!\n", i);
+#endif
+ que->busy = IXL_QUEUE_HUNG;
+ ++hung;
+ }
+ }
+ /* Only reinit if all queues show hung */
+ if (hung == vsi->num_queues)
+ goto hung;
+
+ callout_reset(&pf->timer, hz, ixl_local_timer, pf);
+ return;
+
+hung:
+ device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
+ ixl_init_locked(pf);
+}
+
+/*
+** Note: this routine updates the OS on the link state
+** the real check of the hardware only happens with
+** a link interrupt.
+*/
+void
+ixl_update_link_status(struct ixl_pf *pf)
+{
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct i40e_hw *hw = &pf->hw;
+ struct ifnet *ifp = vsi->ifp;
+ device_t dev = pf->dev;
+
+ if (pf->link_up) {
+ if (vsi->link_active == FALSE) {
+ pf->fc = hw->fc.current_mode;
+ if (bootverbose) {
+ device_printf(dev, "Link is up %d Gbps %s,"
+ " Flow Control: %s\n",
+ ((pf->link_speed ==
+ I40E_LINK_SPEED_40GB)? 40:10),
+ "Full Duplex", ixl_fc_string[pf->fc]);
+ }
+ vsi->link_active = TRUE;
+ if_link_state_change(ifp, LINK_STATE_UP);
+ }
+ } else { /* Link down */
+ if (vsi->link_active == TRUE) {
+ if (bootverbose)
+ device_printf(dev, "Link is Down\n");
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ vsi->link_active = FALSE;
+ }
+ }
+
+ return;
+}
+
+/*********************************************************************
+ *
+ * This routine disables all traffic on the adapter by issuing a
+ * global reset on the MAC and deallocates TX/RX buffers.
+ *
+ **********************************************************************/
+
+void
+ixl_stop_locked(struct ixl_pf *pf)
+{
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ifnet *ifp = vsi->ifp;
+
+ INIT_DEBUGOUT("ixl_stop: begin\n");
+
+ IXL_PF_LOCK_ASSERT(pf);
+
+ /* Stop the local timer */
+ callout_stop(&pf->timer);
+
+ ixl_disable_rings_intr(vsi);
+ ixl_disable_rings(vsi);
+
+ /* Tell the stack that the interface is no longer active */
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
+}
+
+void
+ixl_stop(struct ixl_pf *pf)
+{
+ IXL_PF_LOCK(pf);
+ ixl_stop_locked(pf);
+ IXL_PF_UNLOCK(pf);
+
+ ixl_teardown_queue_msix(&pf->vsi);
+ ixl_free_queue_tqs(&pf->vsi);
+}
+
+/*********************************************************************
+ *
+ * Setup MSIX Interrupt resources and handlers for the VSI
+ *
+ **********************************************************************/
+int
+ixl_assign_vsi_legacy(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+ int error, rid = 0;
+
+ if (pf->msix == 1)
+ rid = 1;
+ pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+ &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (pf->res == NULL) {
+ device_printf(dev, "Unable to allocate"
+ " bus resource: vsi legacy/msi interrupt\n");
+ return (ENXIO);
+ }
+
+ /* Set the handler function */
+ error = bus_setup_intr(dev, pf->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixl_intr, pf, &pf->tag);
+ if (error) {
+ pf->res = NULL;
+ device_printf(dev, "Failed to register legacy/msi handler\n");
+ return (error);
+ }
+ bus_describe_intr(dev, pf->res, pf->tag, "irq0");
+ TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
+ TASK_INIT(&que->task, 0, ixl_handle_que, que);
+ que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
+ taskqueue_thread_enqueue, &que->tq);
+ taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
+ device_get_nameunit(dev));
+ TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
+
+ pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
+ taskqueue_thread_enqueue, &pf->tq);
+ taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
+ device_get_nameunit(dev));
+
+ return (0);
+}
+
+int
+ixl_setup_adminq_tq(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ int error = 0;
+
+ /* Tasklet for Admin Queue interrupts */
+ TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
+#ifdef PCI_IOV
+ /* VFLR Tasklet */
+ TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
+#endif
+ /* Create and start Admin Queue taskqueue */
+ pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
+ taskqueue_thread_enqueue, &pf->tq);
+ if (!pf->tq) {
+ device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");
+ return (ENOMEM);
+ }
+ error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
+ device_get_nameunit(dev));
+ if (error) {
+ device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
+ error);
+ taskqueue_free(pf->tq);
+ return (error);
+ }
+ return (0);
+}
+
+int
+ixl_setup_queue_tqs(struct ixl_vsi *vsi)
+{
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = vsi->dev;
+
+ /* Create queue tasks and start queue taskqueues */
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
+ TASK_INIT(&que->task, 0, ixl_handle_que, que);
+ que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
+ taskqueue_thread_enqueue, &que->tq);
+#ifdef RSS
+		cpuset_t cpu_mask;
+		int cpu_id = rss_getcpu(i % rss_getnumbuckets());
+
+		CPU_SETOF(cpu_id, &cpu_mask);
+		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
+		    &cpu_mask, "%s (bucket %d)",
+		    device_get_nameunit(dev), cpu_id);
+#else
+ taskqueue_start_threads(&que->tq, 1, PI_NET,
+ "%s (que %d)", device_get_nameunit(dev), que->me);
+#endif
+ }
+
+ return (0);
+}
+
+void
+ixl_free_adminq_tq(struct ixl_pf *pf)
+{
+ if (pf->tq) {
+ taskqueue_free(pf->tq);
+ pf->tq = NULL;
+ }
+}
+
+void
+ixl_free_queue_tqs(struct ixl_vsi *vsi)
+{
+ struct ixl_queue *que = vsi->queues;
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ if (que->tq) {
+ taskqueue_free(que->tq);
+ que->tq = NULL;
+ }
+ }
+}
+
+int
+ixl_setup_adminq_msix(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ int rid, error = 0;
+
+ /* Admin IRQ rid is 1, vector is 0 */
+ rid = 1;
+ /* Get interrupt resource from bus */
+ pf->res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (!pf->res) {
+ device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
+ " interrupt failed [rid=%d]\n", rid);
+ return (ENXIO);
+ }
+ /* Then associate interrupt with handler */
+ error = bus_setup_intr(dev, pf->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixl_msix_adminq, pf, &pf->tag);
+ if (error) {
+ pf->res = NULL;
+ device_printf(dev, "bus_setup_intr() for Admin Queue"
+ " interrupt handler failed, error %d\n", error);
+ return (ENXIO);
+ }
+ error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
+ if (error) {
+ /* Probably non-fatal? */
+ device_printf(dev, "bus_describe_intr() for Admin Queue"
+ " interrupt name failed, error %d\n", error);
+ }
+ pf->admvec = 0;
+
+ return (0);
+}
+
+/*
+ * Allocate interrupt resources from bus and associate an interrupt handler
+ * to those for the VSI's queues.
+ */
+int
+ixl_setup_queue_msix(struct ixl_vsi *vsi)
+{
+ device_t dev = vsi->dev;
+ struct ixl_queue *que = vsi->queues;
+ struct tx_ring *txr;
+ int error, rid, vector = 1;
+#ifdef RSS
+ cpuset_t cpu_mask;
+#endif
+
+ /* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
+ for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
+ int cpu_id = i;
+ rid = vector + 1;
+ txr = &que->txr;
+ que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_SHAREABLE | RF_ACTIVE);
+ if (!que->res) {
+ device_printf(dev, "bus_alloc_resource_any() for"
+ " Queue %d interrupt failed [rid=%d]\n",
+ que->me, rid);
+ return (ENXIO);
+ }
+ /* Set the handler function */
+ error = bus_setup_intr(dev, que->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixl_msix_que, que, &que->tag);
+ if (error) {
+ device_printf(dev, "bus_setup_intr() for Queue %d"
+ " interrupt handler failed, error %d\n",
+ que->me, error);
+ return (error);
+ }
+ error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
+ if (error) {
+ device_printf(dev, "bus_describe_intr() for Queue %d"
+ " interrupt name failed, error %d\n",
+ que->me, error);
+ }
+ /* Bind the vector to a CPU */
+#ifdef RSS
+ cpu_id = rss_getcpu(i % rss_getnumbuckets());
+#endif
+ error = bus_bind_intr(dev, que->res, cpu_id);
+ if (error) {
+ device_printf(dev, "bus_bind_intr() for Queue %d"
+ " to CPU %d failed, error %d\n",
+ que->me, cpu_id, error);
+ }
+ que->msix = vector;
+ }
+
+ return (0);
+}
+
+/*
+ * In a virtualized environment, the PCI BUSMASTER capability may not be
+ * set, so explicitly set it here and rewrite the ENABLE bit in the MSIX
+ * control register at this point so the host can initialize us successfully.
+ */
+void
+ixl_set_busmaster(device_t dev)
+{
+ u16 pci_cmd_word;
+ int msix_ctrl, rid;
+
+ pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+ pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
+ pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
+
+	if (pci_find_cap(dev, PCIY_MSIX, &rid) == 0) {
+		rid += PCIR_MSIX_CTRL;
+		msix_ctrl = pci_read_config(dev, rid, 2);
+		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
+		pci_write_config(dev, rid, msix_ctrl, 2);
+	}
+}
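+
+/*
+ * For reference (standard PCI MSI-X layout, not ixl-specific):
+ * PCIR_MSIX_CTRL is the 16-bit message control word at offset 2 of
+ * the MSI-X capability, and PCIM_MSIXCTRL_MSIX_ENABLE is its top bit,
+ * so the read-modify-write above simply sets MSI-X Enable.
+ */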
+
+/*
+ * Allocate MSI/X vectors from the OS.
+ * Returns 0 for legacy, 1 for MSI, >1 for MSIX.
+ */
+int
+ixl_init_msix(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ struct i40e_hw *hw = &pf->hw;
+ int auto_max_queues;
+ int rid, want, vectors, queues, available;
+
+ /* Override by tuneable */
+ if (!pf->enable_msix)
+ goto no_msix;
+
+ /* Ensure proper operation in virtualized environment */
+ ixl_set_busmaster(dev);
+
+ /* First try MSI/X */
+ rid = PCIR_BAR(IXL_BAR);
+ pf->msix_mem = bus_alloc_resource_any(dev,
+ SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ if (!pf->msix_mem) {
+ /* May not be enabled */
+ device_printf(pf->dev,
+ "Unable to map MSIX table\n");
+ goto no_msix;
+ }
+
+ available = pci_msix_count(dev);
+ if (available < 2) {
+ /* system has msix disabled (0), or only one vector (1) */
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ rid, pf->msix_mem);
+ pf->msix_mem = NULL;
+ goto no_msix;
+ }
+
+ /* Clamp max number of queues based on:
+ * - # of MSI-X vectors available
+ * - # of cpus available
+ * - # of queues that can be assigned to the LAN VSI
+ */
+ auto_max_queues = min(mp_ncpus, available - 1);
+ if (hw->mac.type == I40E_MAC_X722)
+ auto_max_queues = min(auto_max_queues, 128);
+ else
+ auto_max_queues = min(auto_max_queues, 64);
+
+ /* Override with tunable value if tunable is less than autoconfig count */
+ if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues))
+ queues = pf->max_queues;
+ /* Use autoconfig amount if that's lower */
+ else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) {
+ device_printf(dev, "ixl_max_queues (%d) is too large, using "
+ "autoconfig amount (%d)...\n",
+ pf->max_queues, auto_max_queues);
+ queues = auto_max_queues;
+ }
+ /* Limit maximum auto-configured queues to 8 if no user value is set */
+ else
+ queues = min(auto_max_queues, 8);
+
+#ifdef RSS
+ /* If we're doing RSS, clamp at the number of RSS buckets */
+ if (queues > rss_getnumbuckets())
+ queues = rss_getnumbuckets();
+#endif
+
+ /*
+ ** Want one vector (RX/TX pair) per queue
+ ** plus an additional for the admin queue.
+ */
+ want = queues + 1;
+ if (want <= available) /* Have enough */
+ vectors = want;
+ else {
+ device_printf(pf->dev,
+ "MSIX Configuration Problem, "
+ "%d vectors available but %d wanted!\n",
+ available, want);
+ return (0); /* Will go to Legacy setup */
+ }
+
+ if (pci_alloc_msix(dev, &vectors) == 0) {
+ device_printf(pf->dev,
+ "Using MSIX interrupts with %d vectors\n", vectors);
+ pf->msix = vectors;
+ pf->vsi.num_queues = queues;
+#ifdef RSS
+ /*
+ * If we're doing RSS, the number of queues needs to
+ * match the number of RSS buckets that are configured.
+ *
+ * + If there's more queues than RSS buckets, we'll end
+ * up with queues that get no traffic.
+ *
+ * + If there's more RSS buckets than queues, we'll end
+ * up having multiple RSS buckets map to the same queue,
+ * so there'll be some contention.
+ */
+ if (queues != rss_getnumbuckets()) {
+ device_printf(dev,
+ "%s: queues (%d) != RSS buckets (%d)"
+ "; performance will be impacted.\n",
+ __func__, queues, rss_getnumbuckets());
+ }
+#endif
+ return (vectors);
+ }
+no_msix:
+ vectors = pci_msi_count(dev);
+ pf->vsi.num_queues = 1;
+ pf->max_queues = 1;
+ pf->enable_msix = 0;
+ if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
+ device_printf(pf->dev, "Using an MSI interrupt\n");
+ else {
+ vectors = 0;
+ device_printf(pf->dev, "Using a Legacy interrupt\n");
+ }
+ return (vectors);
+}
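+
+/*
+ * Worked example (hypothetical system): with 8 CPUs, 16 MSI-X vectors
+ * available and no tunables set, auto_max_queues = min(8, 15) = 8,
+ * queues = min(8, 8) = 8, and want = queues + 1 = 9 vectors: one per
+ * queue pair plus one for the admin queue.
+ */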
+
+/*
+ * Configure admin queue/misc interrupt cause registers in hardware.
+ */
+void
+ixl_configure_intr0_msix(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+
+ /* First set up the adminq - vector 0 */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
+ rd32(hw, I40E_PFINT_ICR0); /* read to clear */
+
+ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
+ I40E_PFINT_ICR0_ENA_GRST_MASK |
+ I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
+ I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
+ I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
+ I40E_PFINT_ICR0_ENA_VFLR_MASK |
+ I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+
+ /*
+ * 0x7FF is the end of the queue list.
+ * This means we won't use MSI-X vector 0 for a queue interrupt
+ * in MSIX mode.
+ */
+ wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
+ /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
+ wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
+
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
+
+ wr32(hw, I40E_PFINT_STAT_CTL0, 0);
+}
+
+/*
+ * Configure queue interrupt cause registers in hardware.
+ */
+void
+ixl_configure_queue_intr_msix(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ u32 reg;
+ u16 vector = 1;
+
+ for (int i = 0; i < vsi->num_queues; i++, vector++) {
+ wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
+ /* First queue type is RX / 0 */
+ wr32(hw, I40E_PFINT_LNKLSTN(i), i);
+
+ reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+ wr32(hw, I40E_QINT_RQCTL(i), reg);
+
+ reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+ wr32(hw, I40E_QINT_TQCTL(i), reg);
+ }
+}
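+
+/*
+ * The writes above build, per queue pair i (serviced by MSI-X vector
+ * i + 1), a linked list of interrupt causes:
+ *
+ *	PFINT_LNKLSTN(i) -> RX queue i -> TX queue i -> 0x7FF (EOL)
+ *
+ * while vector 0 (admin queue) keeps an empty list (LNKLST0 = 0x7FF).
+ */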
+
+/*
+ * Configure for MSI single vector operation
+ */
+void
+ixl_configure_legacy(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+
+ wr32(hw, I40E_PFINT_ITR0(0), 0);
+ wr32(hw, I40E_PFINT_ITR0(1), 0);
+
+ /* Setup "other" causes */
+ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
+ | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
+ | I40E_PFINT_ICR0_ENA_GRST_MASK
+ | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
+ | I40E_PFINT_ICR0_ENA_GPIO_MASK
+ | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
+ | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
+ | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
+ | I40E_PFINT_ICR0_ENA_VFLR_MASK
+ | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
+ ;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+
+ /* SW_ITR_IDX = 0, but don't change INTENA */
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
+ /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
+ wr32(hw, I40E_PFINT_STAT_CTL0, 0);
+
+ /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
+ wr32(hw, I40E_PFINT_LNKLST0, 0);
+
+ /* Associate the queue pair to the vector and enable the q int */
+ reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
+ | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+ | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+ wr32(hw, I40E_QINT_RQCTL(0), reg);
+
+ reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
+ | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+ | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+ wr32(hw, I40E_QINT_TQCTL(0), reg);
+}
+
+int
+ixl_allocate_pci_resources(struct ixl_pf *pf)
+{
+ int rid;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+
+ /* Map BAR0 */
+ rid = PCIR_BAR(0);
+ pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &rid, RF_ACTIVE);
+
+ if (!(pf->pci_mem)) {
+ device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
+ return (ENXIO);
+ }
+
+ /* Save off the PCI information */
+ hw->vendor_id = pci_get_vendor(dev);
+ hw->device_id = pci_get_device(dev);
+ hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
+ hw->subsystem_vendor_id =
+ pci_read_config(dev, PCIR_SUBVEND_0, 2);
+ hw->subsystem_device_id =
+ pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+ hw->bus.device = pci_get_slot(dev);
+ hw->bus.func = pci_get_function(dev);
+
+ /* Save off register access information */
+ pf->osdep.mem_bus_space_tag =
+ rman_get_bustag(pf->pci_mem);
+ pf->osdep.mem_bus_space_handle =
+ rman_get_bushandle(pf->pci_mem);
+ pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
+ pf->osdep.flush_reg = I40E_GLGEN_STAT;
+ pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
+
+ pf->hw.back = &pf->osdep;
+
+ return (0);
+}
+
+/*
+ * Teardown and release the admin queue/misc vector
+ * interrupt.
+ */
+int
+ixl_teardown_adminq_msix(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ int rid;
+
+ if (pf->admvec) /* we are doing MSIX */
+ rid = pf->admvec + 1;
+ else
+		rid = (pf->msix != 0) ? 1 : 0;
+
+ if (pf->tag != NULL) {
+ bus_teardown_intr(dev, pf->res, pf->tag);
+ pf->tag = NULL;
+ }
+ if (pf->res != NULL) {
+ bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
+ pf->res = NULL;
+ }
+
+ return (0);
+}
+
+int
+ixl_teardown_queue_msix(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = vsi->dev;
+ int rid, error = 0;
+
+ /* We may get here before stations are setup */
+ if ((!pf->enable_msix) || (que == NULL))
+ return (0);
+
+ /* Release all MSIX queue resources */
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ rid = que->msix + 1;
+ if (que->tag != NULL) {
+ error = bus_teardown_intr(dev, que->res, que->tag);
+ if (error) {
+ device_printf(dev, "bus_teardown_intr() for"
+ " Queue %d interrupt failed\n",
+ que->me);
+ // return (ENXIO);
+ }
+ que->tag = NULL;
+ }
+ if (que->res != NULL) {
+ error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
+ if (error) {
+ device_printf(dev, "bus_release_resource() for"
+ " Queue %d interrupt failed [rid=%d]\n",
+ que->me, rid);
+ // return (ENXIO);
+ }
+ que->res = NULL;
+ }
+ }
+
+ return (0);
+}
+
+void
+ixl_free_pci_resources(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ int memrid;
+
+ ixl_teardown_queue_msix(&pf->vsi);
+ ixl_teardown_adminq_msix(pf);
+
+ if (pf->msix)
+ pci_release_msi(dev);
+
+ memrid = PCIR_BAR(IXL_BAR);
+
+ if (pf->msix_mem != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ memrid, pf->msix_mem);
+
+ if (pf->pci_mem != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ PCIR_BAR(0), pf->pci_mem);
+
+ return;
+}
+
+void
+ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
+{
+ /* Display supported media types */
+ if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
+ phy_type & (1 << I40E_PHY_TYPE_XFI) ||
+ phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
+ phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
+ phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
+ phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
+ phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
+ || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_SFI))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
+}
+
+/*********************************************************************
+ *
+ * Setup networking device structure and register an interface.
+ *
+ **********************************************************************/
+int
+ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
+{
+ struct ifnet *ifp;
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_queue *que = vsi->queues;
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ enum i40e_status_code aq_error = 0;
+
+ INIT_DEBUGOUT("ixl_setup_interface: begin");
+
+ ifp = vsi->ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ device_printf(dev, "can not allocate ifnet structure\n");
+ return (-1);
+ }
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_baudrate = IF_Gbps(40);
+ ifp->if_init = ixl_init;
+ ifp->if_softc = vsi;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = ixl_ioctl;
+
+#if __FreeBSD_version >= 1100036
+ if_setgetcounterfn(ifp, ixl_get_counter);
+#endif
+
+ ifp->if_transmit = ixl_mq_start;
+
+ ifp->if_qflush = ixl_qflush;
+
+ ifp->if_snd.ifq_maxlen = que->num_desc - 2;
+
+ vsi->max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + ETHER_VLAN_ENCAP_LEN;
+
+ /* Set TSO limits */
+ ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
+ ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
+ ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
+
+ /*
+ * Tell the upper layer(s) we support long frames.
+ */
+ ifp->if_hdrlen = sizeof(struct ether_vlan_header);
+
+ ifp->if_capabilities |= IFCAP_HWCSUM;
+ ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
+ ifp->if_capabilities |= IFCAP_TSO;
+ ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+ ifp->if_capabilities |= IFCAP_LRO;
+
+ /* VLAN capabilties */
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
+ | IFCAP_VLAN_HWTSO
+ | IFCAP_VLAN_MTU
+ | IFCAP_VLAN_HWCSUM;
+ ifp->if_capenable = ifp->if_capabilities;
+
+ /*
+ ** Don't turn this on by default, if vlans are
+ ** created on another pseudo device (eg. lagg)
+ ** then vlan events are not passed thru, breaking
+ ** operation, but with HW FILTER off it works. If
+ ** using vlans directly on the ixl driver you can
+ ** enable this and get full hardware tag filtering.
+ */
+ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+
+ /*
+ * Specify the media types supported by this adapter and register
+ * callbacks to update media and link information
+ */
+ ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
+ ixl_media_status);
+
+ aq_error = i40e_aq_get_phy_capabilities(hw,
+ FALSE, TRUE, &abilities, NULL);
+ /* May need delay to detect fiber correctly */
+ if (aq_error == I40E_ERR_UNKNOWN_PHY) {
+ i40e_msec_delay(200);
+ aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
+ TRUE, &abilities, NULL);
+ }
+ if (aq_error) {
+ if (aq_error == I40E_ERR_UNKNOWN_PHY)
+ device_printf(dev, "Unknown PHY type detected!\n");
+ else
+ device_printf(dev,
+ "Error getting supported media types, err %d,"
+ " AQ error %d\n", aq_error, hw->aq.asq_last_status);
+ return (0);
+ }
+
+ ixl_add_ifmedia(vsi, abilities.phy_type);
+
+ /* Use autoselect media by default */
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
+
+ ether_ifattach(ifp, hw->mac.addr);
+
+ return (0);
+}
+
+/*
+** Run when the Admin Queue gets a link state change interrupt.
+*/
+void
+ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct i40e_aqc_get_link_status *status =
+ (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
+
+ /* Request link status from adapter */
+ hw->phy.get_link_info = TRUE;
+ i40e_get_link_status(hw, &pf->link_up);
+
+ /* Print out message if an unqualified module is found */
+ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
+ (!(status->link_info & I40E_AQ_LINK_UP)))
+ device_printf(dev, "Link failed because "
+ "an unqualified module was detected!\n");
+
+ /* Update OS link info */
+ ixl_update_link_status(pf);
+}
+
+/*********************************************************************
+ *
+ * Get Firmware Switch configuration
+ * - this will need to be more robust when more complex
+ * switch configurations are enabled.
+ *
+ **********************************************************************/
+int
+ixl_switch_config(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = vsi->dev;
+ struct i40e_aqc_get_switch_config_resp *sw_config;
+ u8 aq_buf[I40E_AQ_LARGE_BUF];
+ int ret;
+ u16 next = 0;
+
+ memset(&aq_buf, 0, sizeof(aq_buf));
+ sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
+ ret = i40e_aq_get_switch_config(hw, sw_config,
+ sizeof(aq_buf), &next, NULL);
+ if (ret) {
+ device_printf(dev, "aq_get_switch_config() failed, error %d,"
+ " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
+ return (ret);
+ }
+ if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
+ device_printf(dev,
+ "Switch config: header reported: %d in structure, %d total\n",
+ sw_config->header.num_reported, sw_config->header.num_total);
+ for (int i = 0; i < sw_config->header.num_reported; i++) {
+ device_printf(dev,
+ "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
+ sw_config->element[i].element_type,
+ sw_config->element[i].seid,
+ sw_config->element[i].uplink_seid,
+ sw_config->element[i].downlink_seid);
+ }
+ }
+ /* Simplified due to a single VSI */
+ vsi->uplink_seid = sw_config->element[0].uplink_seid;
+ vsi->downlink_seid = sw_config->element[0].downlink_seid;
+ vsi->seid = sw_config->element[0].seid;
+ return (ret);
+}
+
+/*********************************************************************
+ *
+ * Initialize the VSI: this handles contexts, which means things
+ * like the number of descriptors, buffer size,
+ * plus we init the rings thru this function.
+ *
+ **********************************************************************/
+int
+ixl_initialize_vsi(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = vsi->back;
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = vsi->dev;
+ struct i40e_hw *hw = vsi->hw;
+ struct i40e_vsi_context ctxt;
+ int tc_queues;
+ int err = 0;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = vsi->seid;
+ if (pf->veb_seid != 0)
+ ctxt.uplink_seid = pf->veb_seid;
+ ctxt.pf_num = hw->pf_id;
+ err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
+ if (err) {
+ device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
+ " aq_error %d\n", err, hw->aq.asq_last_status);
+ return (err);
+ }
+ ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
+ "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
+ "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
+ "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
+ ctxt.uplink_seid, ctxt.vsi_number,
+ ctxt.vsis_allocated, ctxt.vsis_unallocated,
+ ctxt.flags, ctxt.pf_num, ctxt.vf_num,
+ ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
+ /*
+ ** Set the queue and traffic class bits
+ ** - when multiple traffic classes are supported
+ ** this will need to be more robust.
+ */
+ ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+ ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
+ /* In contig mode, que_mapping[0] is first queue index used by this VSI */
+ ctxt.info.queue_mapping[0] = 0;
+ /*
+ * This VSI will only use traffic class 0; start traffic class 0's
+ * queue allocation at queue 0, and assign it 2^tc_queues queues (though
+ * the driver may not use all of them).
+ */
+ tc_queues = bsrl(pf->qtag.num_allocated);
+ ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+ & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
+ ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
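+	/*
+	 * Example (illustrative): if the queue manager allocated 8 queues
+	 * to this VSI, bsrl(8) = 3, so TC0 is assigned 2^3 = 8 queues
+	 * starting at offset 0.
+	 */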
+
+ /* Set VLAN receive stripping mode */
+ ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
+ ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
+ if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ else
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+
+ /* Save VSI number and info for use later */
+ vsi->vsi_num = ctxt.vsi_number;
+ bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+
+ /* Reset VSI statistics */
+ ixl_vsi_reset_stats(vsi);
+ vsi->hw_filters_add = 0;
+ vsi->hw_filters_del = 0;
+
+ ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
+
+ err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (err) {
+ device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
+ " aq_error %d\n", err, hw->aq.asq_last_status);
+ return (err);
+ }
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+ struct rx_ring *rxr = &que->rxr;
+ struct i40e_hmc_obj_txq tctx;
+ struct i40e_hmc_obj_rxq rctx;
+ u32 txctl;
+ u16 size;
+
+ /* Setup the HMC TX Context */
+ size = que->num_desc * sizeof(struct i40e_tx_desc);
+ memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
+ tctx.new_context = 1;
+ tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
+ tctx.qlen = que->num_desc;
+ tctx.fc_ena = 0;
+ tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
+ /* Enable HEAD writeback */
+ tctx.head_wb_ena = 1;
+ tctx.head_wb_addr = txr->dma.pa +
+ (que->num_desc * sizeof(struct i40e_tx_desc));
+ tctx.rdylist_act = 0;
+ err = i40e_clear_lan_tx_queue_context(hw, i);
+ if (err) {
+ device_printf(dev, "Unable to clear TX context\n");
+ break;
+ }
+ err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
+ if (err) {
+ device_printf(dev, "Unable to set TX context\n");
+ break;
+ }
+ /* Associate the ring with this PF */
+ txctl = I40E_QTX_CTL_PF_QUEUE;
+ txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+ I40E_QTX_CTL_PF_INDX_MASK);
+ wr32(hw, I40E_QTX_CTL(i), txctl);
+ ixl_flush(hw);
+
+ /* Do ring (re)init */
+ ixl_init_tx_ring(que);
+
+ /* Next setup the HMC RX Context */
+ if (vsi->max_frame_size <= MCLBYTES)
+ rxr->mbuf_sz = MCLBYTES;
+ else
+ rxr->mbuf_sz = MJUMPAGESIZE;
+
+ u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
+
+ /* Set up an RX context for the HMC */
+ memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+ rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ /* ignore header split for now */
+ rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
+ rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
+ vsi->max_frame_size : max_rxmax;
+ rctx.dtype = 0;
+ rctx.dsize = 1; /* do 32byte descriptors */
+ rctx.hsplit_0 = 0; /* no HDR split initially */
+ rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
+ rctx.qlen = que->num_desc;
+ rctx.tphrdesc_ena = 1;
+ rctx.tphwdesc_ena = 1;
+ rctx.tphdata_ena = 0;
+ rctx.tphhead_ena = 0;
+ rctx.lrxqthresh = 2;
+ rctx.crcstrip = 1;
+ rctx.l2tsel = 1;
+ rctx.showiv = 1;
+ rctx.fc_ena = 0;
+ rctx.prefena = 1;
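+		/*
+		 * Note (illustrative): dbuff is in 128-byte units
+		 * (I40E_RXQ_CTX_DBUFF_SHIFT), so a 2048-byte mbuf gives
+		 * dbuff = 16; rxmax caps received frames at the lesser
+		 * of max_frame_size and mbuf_sz * rx_buf_chain_len.
+		 */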
+
+ err = i40e_clear_lan_rx_queue_context(hw, i);
+ if (err) {
+ device_printf(dev,
+ "Unable to clear RX context %d\n", i);
+ break;
+ }
+ err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
+ if (err) {
+ device_printf(dev, "Unable to set RX context %d\n", i);
+ break;
+ }
+ err = ixl_init_rx_ring(que);
+ if (err) {
+ device_printf(dev, "Fail in init_rx_ring %d\n", i);
+ break;
+ }
+#ifdef DEV_NETMAP
+ /* preserve queue */
+ if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
+ struct netmap_adapter *na = NA(vsi->ifp);
+ struct netmap_kring *kring = &na->rx_rings[i];
+ int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
+ wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
+ } else
+#endif /* DEV_NETMAP */
+ wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
+ }
+ return (err);
+}
+
+
+/*********************************************************************
+ *
+ * Free all VSI structs.
+ *
+ **********************************************************************/
+void
+ixl_free_vsi(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct ixl_queue *que = vsi->queues;
+
+ /* Free station queues */
+ if (!vsi->queues)
+ goto free_filters;
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+ struct rx_ring *rxr = &que->rxr;
+
+ if (!mtx_initialized(&txr->mtx)) /* uninitialized */
+ continue;
+ IXL_TX_LOCK(txr);
+ ixl_free_que_tx(que);
+ if (txr->base)
+ i40e_free_dma_mem(&pf->hw, &txr->dma);
+ IXL_TX_UNLOCK(txr);
+ IXL_TX_LOCK_DESTROY(txr);
+
+ if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
+ continue;
+ IXL_RX_LOCK(rxr);
+ ixl_free_que_rx(que);
+ if (rxr->base)
+ i40e_free_dma_mem(&pf->hw, &rxr->dma);
+ IXL_RX_UNLOCK(rxr);
+ IXL_RX_LOCK_DESTROY(rxr);
+ }
+ free(vsi->queues, M_DEVBUF);
+
+free_filters:
+ /* Free VSI filter list */
+ ixl_free_mac_filters(vsi);
+}
+
+void
+ixl_free_mac_filters(struct ixl_vsi *vsi)
+{
+ struct ixl_mac_filter *f;
+
+ while (!SLIST_EMPTY(&vsi->ftl)) {
+ f = SLIST_FIRST(&vsi->ftl);
+ SLIST_REMOVE_HEAD(&vsi->ftl, next);
+ free(f, M_DEVBUF);
+ }
+}
+
+/*
+ * Fill out fields in queue struct and setup tx/rx memory and structs
+ */
+static int
+ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
+{
+ device_t dev = pf->dev;
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct tx_ring *txr = &que->txr;
+ struct rx_ring *rxr = &que->rxr;
+ int error = 0;
+ int rsize, tsize;
+
+ /* ERJ: A lot of references to external objects... */
+ que->num_desc = pf->ringsz;
+ que->me = index;
+ que->vsi = vsi;
+
+ txr->que = que;
+ txr->tail = I40E_QTX_TAIL(que->me);
+
+ /* Initialize the TX lock */
+ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
+ device_get_nameunit(dev), que->me);
+ mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
+ /* Create the TX descriptor ring */
+ tsize = roundup2((que->num_desc *
+ sizeof(struct i40e_tx_desc)) +
+ sizeof(u32), DBA_ALIGN);
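+	/*
+	 * The extra u32 past the descriptors backs the TX head
+	 * write-back word programmed into the HMC TX context.
+	 */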
+ if (i40e_allocate_dma_mem(hw,
+ &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
+ device_printf(dev,
+ "Unable to allocate TX Descriptor memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ txr->base = (struct i40e_tx_desc *)txr->dma.va;
+ bzero((void *)txr->base, tsize);
+ /* Now allocate transmit soft structs for the ring */
+ if (ixl_allocate_tx_data(que)) {
+ device_printf(dev,
+ "Critical Failure setting up TX structures\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ /* Allocate a buf ring */
+ txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
+ M_NOWAIT, &txr->mtx);
+ if (txr->br == NULL) {
+ device_printf(dev,
+ "Critical Failure setting up TX buf ring\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ rsize = roundup2(que->num_desc *
+ sizeof(union i40e_rx_desc), DBA_ALIGN);
+ rxr->que = que;
+ rxr->tail = I40E_QRX_TAIL(que->me);
+
+ /* Initialize the RX side lock */
+ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
+ device_get_nameunit(dev), que->me);
+ mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
+
+ if (i40e_allocate_dma_mem(hw,
+ &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
+ device_printf(dev,
+ "Unable to allocate RX Descriptor memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ rxr->base = (union i40e_rx_desc *)rxr->dma.va;
+ bzero((void *)rxr->base, rsize);
+	/* Allocate receive soft structs for the ring */
+ if (ixl_allocate_rx_data(que)) {
+ device_printf(dev,
+ "Critical Failure setting up receive structs\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ return (0);
+fail:
+ if (rxr->base)
+ i40e_free_dma_mem(&pf->hw, &rxr->dma);
+ if (mtx_initialized(&rxr->mtx))
+ mtx_destroy(&rxr->mtx);
+ if (txr->br) {
+ buf_ring_free(txr->br, M_DEVBUF);
+ txr->br = NULL;
+ }
+ if (txr->base)
+ i40e_free_dma_mem(&pf->hw, &txr->dma);
+ if (mtx_initialized(&txr->mtx))
+ mtx_destroy(&txr->mtx);
+
+ return (error);
+}
+
+/*********************************************************************
+ *
+ *  Allocate memory for the VSI (virtual station interface) and its
+ *  associated queues, rings and the descriptors associated with each;
+ *  called only once at attach.
+ *
+ **********************************************************************/
+int
+ixl_setup_stations(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ struct ixl_vsi *vsi;
+ struct ixl_queue *que;
+ int error = 0;
+
+ vsi = &pf->vsi;
+ vsi->back = (void *)pf;
+ vsi->hw = &pf->hw;
+ vsi->id = 0;
+ vsi->num_vlans = 0;
+
+ /* Get memory for the station queues */
+ if (!(vsi->queues =
+ (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
+ vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate queue memory\n");
+ error = ENOMEM;
+ return (error);
+ }
+
+ for (int i = 0; i < vsi->num_queues; i++) {
+ que = &vsi->queues[i];
+ error = ixl_setup_queue(que, pf, i);
+ if (error)
+ return (error);
+ }
+
+ return (0);
+}
+
+/*
+** Provide an update to the queue RX
+** interrupt moderation value.
+*/
+void
+ixl_set_queue_rx_itr(struct ixl_queue *que)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = vsi->hw;
+ struct rx_ring *rxr = &que->rxr;
+ u16 rx_itr;
+ u16 rx_latency = 0;
+ int rx_bytes;
+
+ /* Idle, do nothing */
+ if (rxr->bytes == 0)
+ return;
+
+ if (pf->dynamic_rx_itr) {
+ rx_bytes = rxr->bytes/rxr->itr;
+ rx_itr = rxr->itr;
+
+ /* Adjust latency range */
+ switch (rxr->latency) {
+ case IXL_LOW_LATENCY:
+ if (rx_bytes > 10) {
+ rx_latency = IXL_AVE_LATENCY;
+ rx_itr = IXL_ITR_20K;
+ }
+ break;
+ case IXL_AVE_LATENCY:
+ if (rx_bytes > 20) {
+ rx_latency = IXL_BULK_LATENCY;
+ rx_itr = IXL_ITR_8K;
+ } else if (rx_bytes <= 10) {
+ rx_latency = IXL_LOW_LATENCY;
+ rx_itr = IXL_ITR_100K;
+ }
+ break;
+ case IXL_BULK_LATENCY:
+ if (rx_bytes <= 20) {
+ rx_latency = IXL_AVE_LATENCY;
+ rx_itr = IXL_ITR_20K;
+ }
+ break;
+ }
+
+ rxr->latency = rx_latency;
+
+ if (rx_itr != rxr->itr) {
+ /* do an exponential smoothing */
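+			/*
+			 * Weighted harmonic mean of old and new intervals:
+			 * 10*new*old / (9*new + old) == 1 / (0.9/old + 0.1/new),
+			 * i.e. a 90/10 old/new blend of interrupt rates, so
+			 * the ITR drifts toward the target instead of jumping.
+			 */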
+ rx_itr = (10 * rx_itr * rxr->itr) /
+ ((9 * rx_itr) + rxr->itr);
+ rxr->itr = rx_itr & IXL_MAX_ITR;
+ wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
+ que->me), rxr->itr);
+ }
+	} else { /* We may have toggled to non-dynamic */
+ if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
+ vsi->rx_itr_setting = pf->rx_itr;
+ /* Update the hardware if needed */
+ if (rxr->itr != vsi->rx_itr_setting) {
+ rxr->itr = vsi->rx_itr_setting;
+ wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
+ que->me), rxr->itr);
+ }
+ }
+ rxr->bytes = 0;
+ rxr->packets = 0;
+ return;
+}
+
+
+/*
+** Provide an update to the queue TX
+** interrupt moderation value.
+*/
+void
+ixl_set_queue_tx_itr(struct ixl_queue *que)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = vsi->hw;
+ struct tx_ring *txr = &que->txr;
+ u16 tx_itr;
+ u16 tx_latency = 0;
+ int tx_bytes;
+
+
+ /* Idle, do nothing */
+ if (txr->bytes == 0)
+ return;
+
+ if (pf->dynamic_tx_itr) {
+ tx_bytes = txr->bytes/txr->itr;
+ tx_itr = txr->itr;
+
+ switch (txr->latency) {
+ case IXL_LOW_LATENCY:
+ if (tx_bytes > 10) {
+ tx_latency = IXL_AVE_LATENCY;
+ tx_itr = IXL_ITR_20K;
+ }
+ break;
+ case IXL_AVE_LATENCY:
+ if (tx_bytes > 20) {
+ tx_latency = IXL_BULK_LATENCY;
+ tx_itr = IXL_ITR_8K;
+ } else if (tx_bytes <= 10) {
+ tx_latency = IXL_LOW_LATENCY;
+ tx_itr = IXL_ITR_100K;
+ }
+ break;
+ case IXL_BULK_LATENCY:
+ if (tx_bytes <= 20) {
+ tx_latency = IXL_AVE_LATENCY;
+ tx_itr = IXL_ITR_20K;
+ }
+ break;
+ }
+
+ txr->latency = tx_latency;
+
+ if (tx_itr != txr->itr) {
+ /* do an exponential smoothing */
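+			/* Same 90/10 harmonic-mean blend as the RX path. */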
+ tx_itr = (10 * tx_itr * txr->itr) /
+ ((9 * tx_itr) + txr->itr);
+ txr->itr = tx_itr & IXL_MAX_ITR;
+ wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
+ que->me), txr->itr);
+ }
+
+	} else { /* We may have toggled to non-dynamic */
+ if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
+ vsi->tx_itr_setting = pf->tx_itr;
+ /* Update the hardware if needed */
+ if (txr->itr != vsi->tx_itr_setting) {
+ txr->itr = vsi->tx_itr_setting;
+ wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
+ que->me), txr->itr);
+ }
+ }
+ txr->bytes = 0;
+ txr->packets = 0;
+ return;
+}
+
+void
+ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
+ struct sysctl_ctx_list *ctx, const char *sysctl_name)
+{
+ struct sysctl_oid *tree;
+ struct sysctl_oid_list *child;
+ struct sysctl_oid_list *vsi_list;
+
+ tree = device_get_sysctl_tree(pf->dev);
+ child = SYSCTL_CHILDREN(tree);
+ vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
+ CTLFLAG_RD, NULL, "VSI Number");
+ vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
+
+ ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
+}
+
+#ifdef IXL_DEBUG
+/**
+ * ixl_sysctl_qtx_tail_handler
+ * Retrieves I40E_QTX_TAIL value from hardware
+ * for a sysctl.
+ */
+int
+ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_queue *que;
+ int error;
+ u32 val;
+
+ que = ((struct ixl_queue *)oidp->oid_arg1);
+ if (!que) return 0;
+
+ val = rd32(que->vsi->hw, que->txr.tail);
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+		return (error);
+ return (0);
+}
+
+/**
+ * ixl_sysctl_qrx_tail_handler
+ * Retrieves I40E_QRX_TAIL value from hardware
+ * for a sysctl.
+ */
+int
+ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_queue *que;
+ int error;
+ u32 val;
+
+ que = ((struct ixl_queue *)oidp->oid_arg1);
+ if (!que) return 0;
+
+ val = rd32(que->vsi->hw, que->rxr.tail);
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+		return (error);
+ return (0);
+}
+#endif
+
+/*
+ * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
+ * Writes to the ITR registers immediately.
+ */
+static int
+ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ device_t dev = pf->dev;
+ int error = 0;
+ int requested_tx_itr;
+
+ requested_tx_itr = pf->tx_itr;
+ error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+ if (pf->dynamic_tx_itr) {
+ device_printf(dev,
+ "Cannot set TX itr value while dynamic TX itr is enabled\n");
+ return (EINVAL);
+ }
+ if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
+ device_printf(dev,
+ "Invalid TX itr value; value must be between 0 and %d\n",
+ IXL_MAX_ITR);
+ return (EINVAL);
+ }
+
+ pf->tx_itr = requested_tx_itr;
+ ixl_configure_tx_itr(pf);
+
+ return (error);
+}
+
+/*
+ * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
+ * Writes to the ITR registers immediately.
+ */
+static int
+ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ device_t dev = pf->dev;
+ int error = 0;
+ int requested_rx_itr;
+
+ requested_rx_itr = pf->rx_itr;
+ error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+ if (pf->dynamic_rx_itr) {
+ device_printf(dev,
+ "Cannot set RX itr value while dynamic RX itr is enabled\n");
+ return (EINVAL);
+ }
+ if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
+ device_printf(dev,
+ "Invalid RX itr value; value must be between 0 and %d\n",
+ IXL_MAX_ITR);
+ return (EINVAL);
+ }
+
+ pf->rx_itr = requested_rx_itr;
+ ixl_configure_rx_itr(pf);
+
+ return (error);
+}
+
+void
+ixl_add_hw_stats(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *queues = vsi->queues;
+ struct i40e_hw_port_stats *pf_stats = &pf->stats;
+
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+ struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+ struct sysctl_oid_list *vsi_list;
+
+ struct sysctl_oid *queue_node;
+ struct sysctl_oid_list *queue_list;
+
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+ char queue_namebuf[QUEUE_NAME_LEN];
+
+ /* Driver statistics */
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
+ CTLFLAG_RD, &pf->watchdog_events,
+ "Watchdog timeouts");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
+ CTLFLAG_RD, &pf->admin_irq,
+ "Admin Queue IRQ Handled");
+
+ ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
+ vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
+
+ /* Queue statistics */
+ for (int q = 0; q < vsi->num_queues; q++) {
+ snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
+ queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
+ OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
+ queue_list = SYSCTL_CHILDREN(queue_node);
+
+ txr = &(queues[q].txr);
+ rxr = &(queues[q].rxr);
+
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
+ CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
+ "m_defrag() failed");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
+ CTLFLAG_RD, &(queues[q].irqs),
+ "irqs on this queue");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
+ CTLFLAG_RD, &(queues[q].tso),
+ "TSO");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
+ CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
+ "Driver tx dma failure in xmit");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
+ CTLFLAG_RD, &(txr->no_desc),
+ "Queue No Descriptor Available");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
+ CTLFLAG_RD, &(txr->total_packets),
+ "Queue Packets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
+ CTLFLAG_RD, &(txr->tx_bytes),
+ "Queue Bytes Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
+ CTLFLAG_RD, &(rxr->rx_packets),
+ "Queue Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
+ CTLFLAG_RD, &(rxr->rx_bytes),
+ "Queue Bytes Received");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
+ CTLFLAG_RD, &(rxr->desc_errs),
+ "Queue Rx Descriptor Errors");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
+ CTLFLAG_RD, &(rxr->itr), 0,
+ "Queue Rx ITR Interval");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
+ CTLFLAG_RD, &(txr->itr), 0,
+ "Queue Tx ITR Interval");
+#ifdef IXL_DEBUG
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
+ CTLFLAG_RD, &(rxr->not_done),
+ "Queue Rx Descriptors not Done");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
+ CTLFLAG_RD, &(rxr->next_refresh), 0,
+			"Queue Rx Descriptors next refresh index");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
+ CTLFLAG_RD, &(rxr->next_check), 0,
+			"Queue Rx Descriptors next check index");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
+ CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
+ sizeof(struct ixl_queue),
+ ixl_sysctl_qtx_tail_handler, "IU",
+ "Queue Transmit Descriptor Tail");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
+ CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
+ sizeof(struct ixl_queue),
+ ixl_sysctl_qrx_tail_handler, "IU",
+ "Queue Receive Descriptor Tail");
+#endif
+ }
+
+ /* MAC stats */
+ ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
+}
+
+void
+ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *child,
+ struct i40e_eth_stats *eth_stats)
+{
+ struct ixl_sysctl_info ctls[] =
+ {
+ {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
+ {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
+ "Unicast Packets Received"},
+ {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
+ "Multicast Packets Received"},
+ {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
+ "Broadcast Packets Received"},
+ {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
+ {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
+ {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
+ {&eth_stats->tx_multicast, "mcast_pkts_txd",
+ "Multicast Packets Transmitted"},
+ {&eth_stats->tx_broadcast, "bcast_pkts_txd",
+ "Broadcast Packets Transmitted"},
+ // end
+ {0,0,0}
+ };
+
+ struct ixl_sysctl_info *entry = ctls;
+ while (entry->stat != 0)
+ {
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
+ CTLFLAG_RD, entry->stat,
+ entry->description);
+ entry++;
+ }
+}
+
+void
+ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *child,
+ struct i40e_hw_port_stats *stats)
+{
+ struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
+	    CTLFLAG_RD, NULL, "MAC Statistics");
+ struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
+
+ struct i40e_eth_stats *eth_stats = &stats->eth;
+ ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
+
+ struct ixl_sysctl_info ctls[] =
+ {
+ {&stats->crc_errors, "crc_errors", "CRC Errors"},
+ {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
+ {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
+ {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
+ {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
+ /* Packet Reception Stats */
+ {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
+ {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
+ {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
+ {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
+ {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
+ {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
+ {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
+ {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
+ {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
+ {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
+ {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
+ {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
+ /* Packet Transmission Stats */
+ {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
+ {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
+ {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
+ {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
+ {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
+ {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
+ {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
+ /* Flow control */
+ {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
+ {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
+ {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
+ {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
+ /* End */
+ {0,0,0}
+ };
+
+ struct ixl_sysctl_info *entry = ctls;
+ while (entry->stat != 0)
+ {
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
+ CTLFLAG_RD, entry->stat,
+ entry->description);
+ entry++;
+ }
+}
+
+void
+ixl_set_rss_key(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = pf->dev;
+ enum i40e_status_code status;
+#ifdef RSS
+ u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
+#else
+ u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687,
+ 0x183cfd8c, 0xce880440, 0x580cbc3c,
+ 0x35897377, 0x328b25e1, 0x4fa98922,
+ 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
+ 0x0, 0x0, 0x0};
+#endif
+
+#ifdef RSS
+ /* Fetch the configured RSS key */
+ rss_getkey((uint8_t *) &rss_seed);
+#endif
+ /* Fill out hash function seed */
+ if (hw->mac.type == I40E_MAC_X722) {
+ struct i40e_aqc_get_set_rss_key_data key_data;
+ bcopy(rss_seed, key_data.standard_rss_key, 40);
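+		/* standard_rss_key is 40 bytes: the first 10 of 13 seed words */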
+ status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
+ if (status)
+ device_printf(dev, "i40e_aq_set_rss_key status %s, error %s\n",
+ i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ } else {
+ for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
+ i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
+ }
+}
+
+/*
+ * Configure enabled PCTYPES for RSS.
+ */
+void
+ixl_set_rss_pctypes(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u64 set_hena = 0, hena;
+
+#ifdef RSS
+ u32 rss_hash_config;
+
+ rss_hash_config = rss_gethashconfig();
+ if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+#else
+ set_hena = IXL_DEFAULT_RSS_HENA;
+#endif
+ hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
+ hena |= set_hena;
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+
+}
+
+void
+ixl_set_rss_hlut(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct ixl_vsi *vsi = &pf->vsi;
+ int i, que_id;
+ int lut_entry_width;
+ u32 lut = 0;
+ enum i40e_status_code status;
+
+ if (hw->mac.type == I40E_MAC_X722)
+ lut_entry_width = 7;
+ else
+ lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
+
+	/* Populate the LUT, spreading entries across all queues round-robin */
+ u8 hlut_buf[512];
+ for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
+#ifdef RSS
+ /*
+ * Fetch the RSS bucket id for the given indirection entry.
+ * Cap it at the number of configured buckets (which is
+ * num_queues.)
+ */
+ que_id = rss_get_indirection_to_bucket(i);
+ que_id = que_id % vsi->num_queues;
+#else
+ que_id = i % vsi->num_queues;
+#endif
+ lut = (que_id & ((0x1 << lut_entry_width) - 1));
+ hlut_buf[i] = lut;
+ }
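+	/*
+	 * X722 loads the LUT through the admin queue; other MACs write
+	 * PFQF_HLUT directly, packing four 8-bit entries per 32-bit
+	 * register (hence the table size >> 2 below).
+	 */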
+
+ if (hw->mac.type == I40E_MAC_X722) {
+ status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
+ if (status)
+ device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
+ i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ } else {
+ for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
+ wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
+ ixl_flush(hw);
+ }
+}
+
+/*
+** Setup the PF's RSS parameters.
+*/
+void
+ixl_config_rss(struct ixl_pf *pf)
+{
+ ixl_set_rss_key(pf);
+ ixl_set_rss_pctypes(pf);
+ ixl_set_rss_hlut(pf);
+}
+
+/*
+** This routine is run via a vlan config EVENT;
+** it enables us to use the HW Filter table since
+** we can get the vlan id. This just creates the
+** entry in the soft version of the VFTA; init will
+** repopulate the real table.
+*/
+void
+ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+
+ if (ifp->if_softc != arg) /* Not our event */
+ return;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ IXL_PF_LOCK(pf);
+ ++vsi->num_vlans;
+ ixl_add_filter(vsi, hw->mac.addr, vtag);
+ IXL_PF_UNLOCK(pf);
+}
+
+/*
+** This routine is run via a vlan
+** unconfig EVENT; it removes our
+** entry from the soft VFTA.
+*/
+void
+ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+
+ if (ifp->if_softc != arg)
+ return;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ IXL_PF_LOCK(pf);
+ --vsi->num_vlans;
+ ixl_del_filter(vsi, hw->mac.addr, vtag);
+ IXL_PF_UNLOCK(pf);
+}
+
+/*
+** This routine updates vlan filters; called by init,
+** it scans the filter table and then updates the hw
+** after a soft reset.
+*/
+void
+ixl_setup_vlan_filters(struct ixl_vsi *vsi)
+{
+ struct ixl_mac_filter *f;
+ int cnt = 0, flags;
+
+ if (vsi->num_vlans == 0)
+ return;
+ /*
+	** Scan the filter list for vlan entries,
+	** mark them for addition, and then issue
+	** the AQ update.
+ */
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ if (f->flags & IXL_FILTER_VLAN) {
+ f->flags |=
+ (IXL_FILTER_ADD |
+ IXL_FILTER_USED);
+ cnt++;
+ }
+ }
+ if (cnt == 0) {
+ printf("setup vlan: no filters found!\n");
+ return;
+ }
+ flags = IXL_FILTER_VLAN;
+ flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
+ ixl_add_hw_filters(vsi, flags, cnt);
+ return;
+}
+
+/*
+** Initialize filter list and add filters that the hardware
+** needs to know about.
+**
+** Requires VSI's filter list & seid to be set before calling.
+*/
+void
+ixl_init_filters(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+
+ /* Add broadcast address */
+ ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
+
+ /*
+ * Prevent Tx flow control frames from being sent out by
+ * non-firmware transmitters.
+ * This affects every VSI in the PF.
+ */
+ if (pf->enable_tx_fc_filter)
+ i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
+}
+
+/*
+** This routine adds multicast filters
+*/
+void
+ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
+{
+ struct ixl_mac_filter *f;
+
+ /* Does one already exist */
+ f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
+ if (f != NULL)
+ return;
+
+ f = ixl_get_filter(vsi);
+ if (f == NULL) {
+ printf("WARNING: no filter available!!\n");
+ return;
+ }
+ bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
+ f->vlan = IXL_VLAN_ANY;
+ f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
+ | IXL_FILTER_MC);
+
+ return;
+}
+
+void
+ixl_reconfigure_filters(struct ixl_vsi *vsi)
+{
+ ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
+}
+
+/*
+** This routine adds macvlan filters
+*/
+void
+ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
+{
+ struct ixl_mac_filter *f, *tmp;
+ struct ixl_pf *pf;
+ device_t dev;
+
+ DEBUGOUT("ixl_add_filter: begin");
+
+ pf = vsi->back;
+ dev = pf->dev;
+
+ /* Does one already exist */
+ f = ixl_find_filter(vsi, macaddr, vlan);
+ if (f != NULL)
+ return;
+ /*
+	** If this is the first vlan being registered, we
+	** need to remove the ANY filter that indicates we are
+	** not in a vlan, and replace it with a vlan-0 filter.
+ */
+ if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
+ tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
+ if (tmp != NULL) {
+ ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
+ ixl_add_filter(vsi, macaddr, 0);
+ }
+ }
+
+ f = ixl_get_filter(vsi);
+ if (f == NULL) {
+ device_printf(dev, "WARNING: no filter available!!\n");
+ return;
+ }
+ bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
+ f->vlan = vlan;
+ f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
+ if (f->vlan != IXL_VLAN_ANY)
+ f->flags |= IXL_FILTER_VLAN;
+ else
+ vsi->num_macs++;
+
+ ixl_add_hw_filters(vsi, f->flags, 1);
+ return;
+}
+
+void
+ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
+{
+ struct ixl_mac_filter *f;
+
+ f = ixl_find_filter(vsi, macaddr, vlan);
+ if (f == NULL)
+ return;
+
+ f->flags |= IXL_FILTER_DEL;
+ ixl_del_hw_filters(vsi, 1);
+ vsi->num_macs--;
+
+ /* Check if this is the last vlan removal */
+ if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
+ /* Switch back to a non-vlan filter */
+ ixl_del_filter(vsi, macaddr, 0);
+ ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
+ }
+ return;
+}
+
+/*
+** Find the filter with both matching mac addr and vlan id
+*/
+struct ixl_mac_filter *
+ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
+{
+ struct ixl_mac_filter *f;
+ bool match = FALSE;
+
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ if (!cmp_etheraddr(f->macaddr, macaddr))
+ continue;
+ if (f->vlan == vlan) {
+ match = TRUE;
+ break;
+ }
+ }
+
+ if (!match)
+ f = NULL;
+ return (f);
+}
+
+/*
+** This routine takes additions to the vsi filter
+** table and issues an Admin Queue command to create
+** the filters in the hardware.
+*/
+void
+ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
+{
+ struct i40e_aqc_add_macvlan_element_data *a, *b;
+ struct ixl_mac_filter *f;
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ device_t dev;
+ int err, j = 0;
+
+ pf = vsi->back;
+ dev = pf->dev;
+ hw = &pf->hw;
+ IXL_PF_LOCK_ASSERT(pf);
+
+ a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (a == NULL) {
+ device_printf(dev, "add_hw_filters failed to get memory\n");
+ return;
+ }
+
+ /*
+	** Scan the filter list; each entry whose flags exactly
+	** match the caller's is added to the admin queue array
+	** and has its add bit cleared.
+ */
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ if (f->flags == flags) {
+ b = &a[j]; // a pox on fvl long names :)
+ bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
+ if (f->vlan == IXL_VLAN_ANY) {
+ b->vlan_tag = 0;
+ b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ } else {
+ b->vlan_tag = f->vlan;
+ b->flags = 0;
+ }
+ b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+ f->flags &= ~IXL_FILTER_ADD;
+ j++;
+ }
+ if (j == cnt)
+ break;
+ }
+ if (j > 0) {
+ err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
+ if (err)
+ device_printf(dev, "aq_add_macvlan err %d, "
+ "aq_error %d\n", err, hw->aq.asq_last_status);
+ else
+ vsi->hw_filters_add += j;
+ }
+ free(a, M_DEVBUF);
+ return;
+}
+
+/*
+** This routine takes removals from the vsi filter
+** table and issues an Admin Queue command to delete
+** the filters in the hardware.
+*/
+void
+ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
+{
+ struct i40e_aqc_remove_macvlan_element_data *d, *e;
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ device_t dev;
+ struct ixl_mac_filter *f, *f_temp;
+ int err, j = 0;
+
+ DEBUGOUT("ixl_del_hw_filters: begin\n");
+
+ pf = vsi->back;
+ hw = &pf->hw;
+ dev = pf->dev;
+
+ d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (d == NULL) {
+		device_printf(dev, "del_hw_filters failed to get memory\n");
+ return;
+ }
+
+ SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
+ if (f->flags & IXL_FILTER_DEL) {
+ e = &d[j]; // a pox on fvl long names :)
+ bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
+ e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
+ e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ /* delete entry from vsi list */
+ SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
+ free(f, M_DEVBUF);
+ j++;
+ }
+ if (j == cnt)
+ break;
+ }
+ if (j > 0) {
+ err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
+ if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
+ int sc = 0;
+ for (int i = 0; i < j; i++)
+ sc += (!d[i].error_code);
+ vsi->hw_filters_del += sc;
+ device_printf(dev,
+ "Failed to remove %d/%d filters, aq error %d\n",
+ j - sc, j, hw->aq.asq_last_status);
+ } else
+ vsi->hw_filters_del += j;
+ }
+ free(d, M_DEVBUF);
+
+ DEBUGOUT("ixl_del_hw_filters: end\n");
+ return;
+}
+
+int
+ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int error = 0;
+ u32 reg;
+ u16 pf_qidx;
+
+ pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+
+ ixl_dbg(pf, IXL_DBG_EN_DIS,
+ "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
+ pf_qidx, vsi_qidx);
+
+ i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
+
+ reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
+ reg |= I40E_QTX_ENA_QENA_REQ_MASK |
+ I40E_QTX_ENA_QENA_STAT_MASK;
+ wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
+	/* Verify the enable took: HW acks QENA_REQ by setting QENA_STAT */
+ for (int j = 0; j < 10; j++) {
+ reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
+ if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
+ break;
+ i40e_msec_delay(10);
+ }
+ if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
+ device_printf(pf->dev, "TX queue %d still disabled!\n",
+ pf_qidx);
+ error = ETIMEDOUT;
+ }
+
+ return (error);
+}
+
+int
+ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int error = 0;
+ u32 reg;
+ u16 pf_qidx;
+
+ pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+
+ ixl_dbg(pf, IXL_DBG_EN_DIS,
+ "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
+ pf_qidx, vsi_qidx);
+
+ reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
+ reg |= I40E_QRX_ENA_QENA_REQ_MASK |
+ I40E_QRX_ENA_QENA_STAT_MASK;
+ wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
+ /* Verify the enable took */
+ for (int j = 0; j < 10; j++) {
+ reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
+ if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
+ break;
+ i40e_msec_delay(10);
+ }
+ if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
+ device_printf(pf->dev, "RX queue %d still disabled!\n",
+ pf_qidx);
+ error = ETIMEDOUT;
+ }
+
+ return (error);
+}
+
+int
+ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
+{
+ int error = 0;
+
+ error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
+ /* Called function already prints error message */
+ if (error)
+ return (error);
+ error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
+ return (error);
+}
+
+/* For PF VSI only */
+int
+ixl_enable_rings(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = vsi->back;
+ int error = 0;
+
+ for (int i = 0; i < vsi->num_queues; i++) {
+ error = ixl_enable_ring(pf, &pf->qtag, i);
+ if (error)
+ return (error);
+ }
+
+ return (error);
+}
+
+int
+ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int error = 0;
+ u32 reg;
+ u16 pf_qidx;
+
+ pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+
+ i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
+ i40e_usec_delay(500);
+
+ reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
+ reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
+ /* Verify the disable took */
+ for (int j = 0; j < 10; j++) {
+ reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
+ if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
+ i40e_msec_delay(10);
+ }
+ if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
+ device_printf(pf->dev, "TX queue %d still enabled!\n",
+ pf_qidx);
+ error = ETIMEDOUT;
+ }
+
+ return (error);
+}
+
+int
+ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int error = 0;
+ u32 reg;
+ u16 pf_qidx;
+
+ pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+
+ reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
+ reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+ wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
+ /* Verify the disable took */
+ for (int j = 0; j < 10; j++) {
+ reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
+ if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
+ i40e_msec_delay(10);
+ }
+ if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
+ device_printf(pf->dev, "RX queue %d still enabled!\n",
+ pf_qidx);
+ error = ETIMEDOUT;
+ }
+
+ return (error);
+}
+
+int
+ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
+{
+ int error = 0;
+
+ error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
+ /* Called function already prints error message */
+ if (error)
+ return (error);
+ error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
+ return (error);
+}
+
+/* For PF VSI only */
+int
+ixl_disable_rings(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = vsi->back;
+ int error = 0;
+
+ for (int i = 0; i < vsi->num_queues; i++) {
+ error = ixl_disable_ring(pf, &pf->qtag, i);
+ if (error)
+ return (error);
+ }
+
+ return (error);
+}
+
+/**
+ * ixl_handle_mdd_event
+ *
+ * Called from the interrupt handler to identify possibly malicious VFs
+ * (but it also detects events from the PF)
+ **/
+void
+ixl_handle_mdd_event(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ bool mdd_detected = false;
+ bool pf_mdd_detected = false;
+ u32 reg;
+
+ /* find what triggered the MDD event */
+ reg = rd32(hw, I40E_GL_MDET_TX);
+ if (reg & I40E_GL_MDET_TX_VALID_MASK) {
+ u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
+ I40E_GL_MDET_TX_PF_NUM_SHIFT;
+ u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
+ I40E_GL_MDET_TX_EVENT_SHIFT;
+ u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+ I40E_GL_MDET_TX_QUEUE_SHIFT;
+ device_printf(dev,
+ "Malicious Driver Detection event %d"
+ " on TX queue %d, pf number %d\n",
+ event, queue, pf_num);
+ wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
+ mdd_detected = true;
+ }
+ reg = rd32(hw, I40E_GL_MDET_RX);
+ if (reg & I40E_GL_MDET_RX_VALID_MASK) {
+ u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
+ I40E_GL_MDET_RX_FUNCTION_SHIFT;
+ u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
+ I40E_GL_MDET_RX_EVENT_SHIFT;
+ u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+ I40E_GL_MDET_RX_QUEUE_SHIFT;
+ device_printf(dev,
+ "Malicious Driver Detection event %d"
+ " on RX queue %d, pf number %d\n",
+ event, queue, pf_num);
+ wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
+ mdd_detected = true;
+ }
+
+ if (mdd_detected) {
+ reg = rd32(hw, I40E_PF_MDET_TX);
+ if (reg & I40E_PF_MDET_TX_VALID_MASK) {
+ wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
+ device_printf(dev,
+			    "MDD TX event is for this function!\n");
+ pf_mdd_detected = true;
+ }
+ reg = rd32(hw, I40E_PF_MDET_RX);
+ if (reg & I40E_PF_MDET_RX_VALID_MASK) {
+ wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
+ device_printf(dev,
+			    "MDD RX event is for this function!\n");
+ pf_mdd_detected = true;
+ }
+ }
+
+ /* re-enable mdd interrupt cause */
+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+ reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ ixl_flush(hw);
+}
+
+void
+ixl_enable_intr(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_queue *que = vsi->queues;
+
+ if (pf->enable_msix) {
+ for (int i = 0; i < vsi->num_queues; i++, que++)
+ ixl_enable_queue(hw, que->me);
+ } else
+ ixl_enable_legacy(hw);
+}
+
+void
+ixl_disable_rings_intr(struct ixl_vsi *vsi)
+{
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_queue *que = vsi->queues;
+
+ for (int i = 0; i < vsi->num_queues; i++, que++)
+ ixl_disable_queue(hw, que->me);
+}
+
+void
+ixl_disable_intr(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = vsi->hw;
+
+ if (pf->enable_msix)
+ ixl_disable_adminq(hw);
+ else
+ ixl_disable_legacy(hw);
+}
+
+void
+ixl_enable_adminq(struct i40e_hw *hw)
+{
+ u32 reg;
+
+ reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+ ixl_flush(hw);
+}
+
+void
+ixl_disable_adminq(struct i40e_hw *hw)
+{
+ u32 reg;
+
+ reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+ ixl_flush(hw);
+}
+
+void
+ixl_enable_queue(struct i40e_hw *hw, int id)
+{
+ u32 reg;
+
+ reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
+}
+
+void
+ixl_disable_queue(struct i40e_hw *hw, int id)
+{
+ u32 reg;
+
+ reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
+}
+
+void
+ixl_enable_legacy(struct i40e_hw *hw)
+{
+ u32 reg;
+ reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+}
+
+void
+ixl_disable_legacy(struct i40e_hw *hw)
+{
+ u32 reg;
+
+ reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+}
+
+void
+ixl_update_stats_counters(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_vf *vf;
+
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+
+ /* Update hw stats */
+ ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->crc_errors, &nsd->crc_errors);
+ ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->illegal_bytes, &nsd->illegal_bytes);
+ ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
+ I40E_GLPRT_GORCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
+ ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
+ I40E_GLPRT_GOTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
+ ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_discards,
+ &nsd->eth.rx_discards);
+ ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
+ I40E_GLPRT_UPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_unicast,
+ &nsd->eth.rx_unicast);
+ ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
+ I40E_GLPRT_UPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_unicast,
+ &nsd->eth.tx_unicast);
+ ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
+ I40E_GLPRT_MPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_multicast,
+ &nsd->eth.rx_multicast);
+ ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
+ I40E_GLPRT_MPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_multicast,
+ &nsd->eth.tx_multicast);
+ ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
+ I40E_GLPRT_BPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_broadcast,
+ &nsd->eth.rx_broadcast);
+ ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
+ I40E_GLPRT_BPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_broadcast,
+ &nsd->eth.tx_broadcast);
+
+ ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_dropped_link_down,
+ &nsd->tx_dropped_link_down);
+ ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_local_faults,
+ &nsd->mac_local_faults);
+ ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_remote_faults,
+ &nsd->mac_remote_faults);
+ ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_length_errors,
+ &nsd->rx_length_errors);
+
+ /* Flow control (LFC) stats */
+ ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_rx, &nsd->link_xon_rx);
+ ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_tx, &nsd->link_xon_tx);
+ ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_rx, &nsd->link_xoff_rx);
+ ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_tx, &nsd->link_xoff_tx);
+
+ /* Packet size stats rx */
+ ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
+ I40E_GLPRT_PRC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_64, &nsd->rx_size_64);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
+ I40E_GLPRT_PRC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_127, &nsd->rx_size_127);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
+ I40E_GLPRT_PRC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_255, &nsd->rx_size_255);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
+ I40E_GLPRT_PRC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_511, &nsd->rx_size_511);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
+ I40E_GLPRT_PRC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1023, &nsd->rx_size_1023);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
+ I40E_GLPRT_PRC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1522, &nsd->rx_size_1522);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
+ I40E_GLPRT_PRC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_big, &nsd->rx_size_big);
+
+ /* Packet size stats tx */
+ ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
+ I40E_GLPRT_PTC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_64, &nsd->tx_size_64);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
+ I40E_GLPRT_PTC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_127, &nsd->tx_size_127);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
+ I40E_GLPRT_PTC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_255, &nsd->tx_size_255);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
+ I40E_GLPRT_PTC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_511, &nsd->tx_size_511);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
+ I40E_GLPRT_PTC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1023, &nsd->tx_size_1023);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
+ I40E_GLPRT_PTC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1522, &nsd->tx_size_1522);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
+ I40E_GLPRT_PTC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_big, &nsd->tx_size_big);
+
+ ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_undersize, &nsd->rx_undersize);
+ ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_fragments, &nsd->rx_fragments);
+ ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_oversize, &nsd->rx_oversize);
+ ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_jabber, &nsd->rx_jabber);
+ pf->stat_offsets_loaded = true;
+ /* End hw stats */
+
+ /* Update vsi stats */
+ ixl_update_vsi_stats(vsi);
+
+ for (int i = 0; i < pf->num_vfs; i++) {
+ vf = &pf->vfs[i];
+ if (vf->vf_flags & VF_FLAG_ENABLED)
+ ixl_update_eth_stats(&pf->vfs[i].vsi);
+ }
+}
+
+int
+ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = pf->dev;
+ bool is_up = false;
+ int error = 0;
+
+ is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
+
+ /* Teardown */
+ if (is_up)
+ ixl_stop(pf);
+ error = i40e_shutdown_lan_hmc(hw);
+ if (error)
+ device_printf(dev,
+ "Shutdown LAN HMC failed with code %d\n", error);
+ ixl_disable_adminq(hw);
+ ixl_teardown_adminq_msix(pf);
+ error = i40e_shutdown_adminq(hw);
+ if (error)
+ device_printf(dev,
+ "Shutdown Admin queue failed with code %d\n", error);
+
+ /* Setup */
+ error = i40e_init_adminq(hw);
+ if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
+ device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
+ error);
+ }
+ error = ixl_setup_adminq_msix(pf);
+ if (error) {
+ device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
+ error);
+ }
+ ixl_configure_intr0_msix(pf);
+ ixl_enable_adminq(hw);
+ error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp, 0, 0);
+ if (error) {
+ device_printf(dev, "init_lan_hmc failed: %d\n", error);
+ }
+ error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (error) {
+ device_printf(dev, "configure_lan_hmc failed: %d\n", error);
+ }
+ if (is_up)
+ ixl_init(pf);
+
+ return (0);
+}
+
+void
+ixl_handle_empr_reset(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int count = 0;
+ u32 reg;
+
+ /* Typically finishes within 3-4 seconds */
+ while (count++ < 100) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT)
+ & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
+ if (reg)
+ i40e_msec_delay(100);
+ else
+ break;
+ }
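+	/* Bounded wait: at most 100 polls x 100 ms, ~10 seconds. */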
+ ixl_dbg(pf, IXL_DBG_INFO,
+ "EMPR reset wait count: %d\n", count);
+
+ device_printf(dev, "Rebuilding driver state...\n");
+ ixl_rebuild_hw_structs_after_reset(pf);
+ device_printf(dev, "Rebuilding driver state done.\n");
+
+ atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
+}
+
+/*
+** Tasklet handler for MSIX Adminq interrupts
+** - done outside the interrupt context since it might sleep
+*/
+void
+ixl_do_adminq(void *context, int pending)
+{
+ struct ixl_pf *pf = context;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_arq_event_info event;
+ i40e_status ret;
+ device_t dev = pf->dev;
+ u32 loop = 0;
+ u16 opcode, result;
+
+ if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
+ /* Flag cleared at end of this function */
+ ixl_handle_empr_reset(pf);
+ return;
+ }
+
+ /* Admin Queue handling */
+ event.buf_len = IXL_AQ_BUF_SZ;
+ event.msg_buf = malloc(event.buf_len,
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (!event.msg_buf) {
+ device_printf(dev, "%s: Unable to allocate memory for Admin"
+ " Queue event!\n", __func__);
+ return;
+ }
+
+ IXL_PF_LOCK(pf);
+ /* clean and process any events */
+ do {
+ ret = i40e_clean_arq_element(hw, &event, &result);
+ if (ret)
+ break;
+ opcode = LE16_TO_CPU(event.desc.opcode);
+ ixl_dbg(pf, IXL_DBG_AQ,
+ "%s: Admin Queue event: %#06x\n", __func__, opcode);
+ switch (opcode) {
+ case i40e_aqc_opc_get_link_status:
+ ixl_link_event(pf, &event);
+ break;
+ case i40e_aqc_opc_send_msg_to_pf:
+#ifdef PCI_IOV
+ ixl_handle_vf_msg(pf, &event);
+#endif
+ break;
+ case i40e_aqc_opc_event_lan_overflow:
+ default:
+ break;
+ }
+
+ } while (result && (loop++ < IXL_ADM_LIMIT));
+
+ free(event.msg_buf, M_DEVBUF);
+
+ /*
+ * If there are still messages to process, reschedule ourselves.
+ * Otherwise, re-enable our interrupt.
+ */
+ if (result > 0)
+ taskqueue_enqueue(pf->tq, &pf->adminq);
+ else
+ ixl_enable_adminq(hw);
+
+ IXL_PF_UNLOCK(pf);
+}
+
+/**
+ * Update VSI-specific ethernet statistics counters.
+ **/
+void
+ixl_update_eth_stats(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_eth_stats *es;
+ struct i40e_eth_stats *oes;
+ struct i40e_hw_port_stats *nsd;
+ u16 stat_idx = vsi->info.stat_counter_idx;
+
+ es = &vsi->eth_stats;
+ oes = &vsi->eth_stats_offsets;
+ nsd = &pf->stats;
+
+ /* Gather up the stats that the hw collects */
+ ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_errors, &es->tx_errors);
+ ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_discards, &es->rx_discards);
+
+ ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
+ I40E_GLV_GORCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_bytes, &es->rx_bytes);
+ ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
+ I40E_GLV_UPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_unicast, &es->rx_unicast);
+ ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
+ I40E_GLV_MPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_multicast, &es->rx_multicast);
+ ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
+ I40E_GLV_BPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_broadcast, &es->rx_broadcast);
+
+ ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
+ I40E_GLV_GOTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_bytes, &es->tx_bytes);
+ ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
+ I40E_GLV_UPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_unicast, &es->tx_unicast);
+ ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
+ I40E_GLV_MPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_multicast, &es->tx_multicast);
+ ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
+ I40E_GLV_BPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_broadcast, &es->tx_broadcast);
+ vsi->stat_offsets_loaded = true;
+}
+
+void
+ixl_update_vsi_stats(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf;
+ struct ifnet *ifp;
+ struct i40e_eth_stats *es;
+ u64 tx_discards;
+
+ struct i40e_hw_port_stats *nsd;
+
+ pf = vsi->back;
+ ifp = vsi->ifp;
+ es = &vsi->eth_stats;
+ nsd = &pf->stats;
+
+ ixl_update_eth_stats(vsi);
+
+ tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
+ for (int i = 0; i < vsi->num_queues; i++)
+ tx_discards += vsi->queues[i].txr.br->br_drops;
+
+ /* Update ifnet stats */
+ IXL_SET_IPACKETS(vsi, es->rx_unicast +
+ es->rx_multicast +
+ es->rx_broadcast);
+ IXL_SET_OPACKETS(vsi, es->tx_unicast +
+ es->tx_multicast +
+ es->tx_broadcast);
+ IXL_SET_IBYTES(vsi, es->rx_bytes);
+ IXL_SET_OBYTES(vsi, es->tx_bytes);
+ IXL_SET_IMCASTS(vsi, es->rx_multicast);
+ IXL_SET_OMCASTS(vsi, es->tx_multicast);
+
+ IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
+ nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
+ nsd->rx_jabber);
+ IXL_SET_OERRORS(vsi, es->tx_errors);
+ IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
+ IXL_SET_OQDROPS(vsi, tx_discards);
+ IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
+ IXL_SET_COLLISIONS(vsi, 0);
+}
+
+/**
+ * Reset all of the stats for the given pf
+ **/
+void
+ixl_pf_reset_stats(struct ixl_pf *pf)
+{
+ bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
+ bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
+ pf->stat_offsets_loaded = false;
+}
+
+/**
+ * Resets all stats of the given vsi
+ **/
+void
+ixl_vsi_reset_stats(struct ixl_vsi *vsi)
+{
+ bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
+ bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
+ vsi->stat_offsets_loaded = false;
+}
+
+/**
+ * Read and update a 48 bit stat from the hw
+ *
+ * Since the device stats are not reset at PFReset, they likely will not
+ * be zeroed when the driver starts. We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero.
+ **/
+void
+ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u64 new_data;
+
+#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
+ new_data = rd64(hw, loreg);
+#else
+ /*
+	 * Use two rd32's instead of one rd64; 64-bit bus reads are
+	 * only available on amd64 builds of FreeBSD 10 and later.
+ */
+ new_data = rd32(hw, loreg);
+ new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+#endif
+
+ if (!offset_loaded)
+ *offset = new_data;
+ if (new_data >= *offset)
+ *stat = new_data - *offset;
+ else
+ *stat = (new_data + ((u64)1 << 48)) - *offset;
+ *stat &= 0xFFFFFFFFFFFFULL;
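+	/*
+	 * Rollover example: offset 0xFFFFFFFFF000 with a wrapped
+	 * reading of 0x100 yields (0x100 + 2^48) - offset = 0x1100
+	 * counted events, masked back to 48 bits.
+	 */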
+}
+
+/**
+ * Read and update a 32 bit stat from the hw
+ **/
+void
+ixl_stat_update32(struct i40e_hw *hw, u32 reg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u32 new_data;
+
+ new_data = rd32(hw, reg);
+ if (!offset_loaded)
+ *offset = new_data;
+ if (new_data >= *offset)
+ *stat = (u32)(new_data - *offset);
+ else
+ *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+}
+
+void
+ixl_add_device_sysctls(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid_list *ctx_list =
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
+
+ struct sysctl_oid *debug_node;
+ struct sysctl_oid_list *debug_list;
+
+ /* Set up sysctls */
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
+ pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
+ pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_current_speed, "A", "Current Port Speed");
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_unallocated_queues, "I",
+ "Queues not allocated to a PF or VF");
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
+ pf, 0, ixl_sysctl_pf_tx_itr, "I",
+ "Immediately set TX ITR value for all queues");
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
+ pf, 0, ixl_sysctl_pf_rx_itr, "I",
+ "Immediately set RX ITR value for all queues");
+
+ SYSCTL_ADD_INT(ctx, ctx_list,
+ OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
+ &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
+
+ SYSCTL_ADD_INT(ctx, ctx_list,
+ OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
+ &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
+
+ /* Add sysctls meant to print debug information, but don't list them
+ * in "sysctl -a" output. */
+ debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
+ OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
+ debug_list = SYSCTL_CHILDREN(debug_node);
+
+ SYSCTL_ADD_UINT(ctx, debug_list,
+ OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
+ &pf->hw.debug_mask, 0, "Shared code debug message level");
+
+ SYSCTL_ADD_UINT(ctx, debug_list,
+ OID_AUTO, "core_debug_mask", CTLFLAG_RW,
+	    &pf->dbg_mask, 0, "Non-shared code debug message level");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
+#ifdef PCI_IOV
+ SYSCTL_ADD_UINT(ctx, debug_list,
+ OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
+ 0, "PF/VF Virtual Channel debug level");
+#endif
+}
+
+/*
+ * Primarily for finding out how many queues can be assigned to VFs,
+ * at runtime.
+ */
+static int
+ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ int queues;
+
+ IXL_PF_LOCK(pf);
+ queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
+ IXL_PF_UNLOCK(pf);
+
+ return sysctl_handle_int(oidp, NULL, queues, req);
+}
+
+/*
+** Set flow control using sysctl:
+** 0 - off
+** 1 - rx pause
+** 2 - tx pause
+** 3 - full
+*/
+int
+ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int requested_fc, error = 0;
+ enum i40e_status_code aq_error = 0;
+ u8 fc_aq_err = 0;
+
+ /* Get request */
+ requested_fc = pf->fc;
+ error = sysctl_handle_int(oidp, &requested_fc, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+ if (requested_fc < 0 || requested_fc > 3) {
+ device_printf(dev,
+ "Invalid fc mode; valid modes are 0 through 3\n");
+ return (EINVAL);
+ }
+
+ /* Set fc ability for port */
+ hw->fc.requested_mode = requested_fc;
+ aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
+ if (aq_error) {
+ device_printf(dev,
+ "%s: Error setting new fc mode %d; fc_err %#x\n",
+ __func__, aq_error, fc_aq_err);
+ return (EIO);
+ }
+ pf->fc = requested_fc;
+
+ /* Get new link state */
+ i40e_msec_delay(250);
+ hw->phy.get_link_info = TRUE;
+ i40e_get_link_status(hw, &pf->link_up);
+
+ return (0);
+}
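+
+/*
+ * Example usage (a sketch; the sysctl node depends on the unit number):
+ *	sysctl dev.ixl.0.fc=3	(enable full rx/tx pause)
+ *	sysctl dev.ixl.0.fc	(read back the current mode)
+ */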
+
+int
+ixl_current_speed(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ int error = 0, index = 0;
+
+ char *speeds[] = {
+ "Unknown",
+ "100M",
+ "1G",
+ "10G",
+ "40G",
+ "20G"
+ };
+
+ ixl_update_link_status(pf);
+
+ switch (hw->phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ index = 1;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ index = 2;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ index = 3;
+ break;
+ case I40E_LINK_SPEED_40GB:
+ index = 4;
+ break;
+ case I40E_LINK_SPEED_20GB:
+ index = 5;
+ break;
+ case I40E_LINK_SPEED_UNKNOWN:
+ default:
+ index = 0;
+ break;
+ }
+
+ error = sysctl_handle_string(oidp, speeds[index],
+ strlen(speeds[index]), req);
+ return (error);
+}
+
+int
+ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_aq_set_phy_config config;
+ enum i40e_status_code aq_error = 0;
+
+ /* Get current capability information */
+ aq_error = i40e_aq_get_phy_capabilities(hw,
+ FALSE, FALSE, &abilities, NULL);
+ if (aq_error) {
+ device_printf(dev,
+ "%s: Error getting phy capabilities %d,"
+ " aq error: %d\n", __func__, aq_error,
+ hw->aq.asq_last_status);
+ return (EIO);
+ }
+
+ /* Prepare new config */
+ bzero(&config, sizeof(config));
+ config.phy_type = abilities.phy_type;
+ config.abilities = abilities.abilities
+ | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ config.eee_capability = abilities.eee_capability;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ /* Translate into aq cmd link_speed */
+ if (speeds & 0x10)
+ config.link_speed |= I40E_LINK_SPEED_40GB;
+ if (speeds & 0x8)
+ config.link_speed |= I40E_LINK_SPEED_20GB;
+ if (speeds & 0x4)
+ config.link_speed |= I40E_LINK_SPEED_10GB;
+ if (speeds & 0x2)
+ config.link_speed |= I40E_LINK_SPEED_1GB;
+ if (speeds & 0x1)
+ config.link_speed |= I40E_LINK_SPEED_100MB;
+
+ /* Do aq command & restart link */
+ aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
+ if (aq_error) {
+ device_printf(dev,
+ "%s: Error setting new phy config %d,"
+ " aq error: %d\n", __func__, aq_error,
+ hw->aq.asq_last_status);
+ return (EAGAIN);
+ }
+
+ /*
+ ** This seems a bit heavy handed, but we
+ ** need to get a reinit on some devices
+ */
+ IXL_PF_LOCK(pf);
+ ixl_stop_locked(pf);
+ ixl_init_locked(pf);
+ IXL_PF_UNLOCK(pf);
+
+ return (0);
+}
+
+/*
+** Control link advertise speed:
+** Flags:
+** 0x1 - advertise 100 Mb
+** 0x2 - advertise 1G
+** 0x4 - advertise 10G
+** 0x8 - advertise 20G
+** 0x10 - advertise 40G
+**
+** Set to 0 to disable link
+*/
+int
+ixl_set_advertise(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int requested_ls = 0;
+ int error = 0;
+
+ /* Read in new mode */
+ requested_ls = pf->advertised_speed;
+ error = sysctl_handle_int(oidp, &requested_ls, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+ /* Check for sane value */
+ if (requested_ls > 0x10) {
+		device_printf(dev, "Invalid advertised speed; "
+		    "valid flag values are 0x0 through 0x10\n");
+ return (EINVAL);
+ }
+ /* Then check for validity based on adapter type */
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ /* 1G BaseT */
+ if (requested_ls & ~(0x2)) {
+ device_printf(dev,
+ "Only 1G speeds supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ /* 10G BaseT */
+ if (requested_ls & ~(0x7)) {
+ device_printf(dev,
+ "Only 100M/1G/10G speeds supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ case I40E_DEV_ID_20G_KR2:
+ case I40E_DEV_ID_20G_KR2_A:
+ /* 20G */
+ if (requested_ls & ~(0xE)) {
+ device_printf(dev,
+ "Only 1G/10G/20G speeds supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ /* 40G */
+ if (requested_ls & ~(0x10)) {
+ device_printf(dev,
+ "Only 40G speeds supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ default:
+ /* 10G (1G) */
+ if (requested_ls & ~(0x6)) {
+ device_printf(dev,
+ "Only 1/10G speeds supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ }
+
+ /* Exit if no change */
+ if (pf->advertised_speed == requested_ls)
+ return (0);
+
+ error = ixl_set_advertised_speeds(pf, requested_ls);
+ if (error)
+ return (error);
+
+ pf->advertised_speed = requested_ls;
+ ixl_update_link_status(pf);
+ return (0);
+}
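+
+/*
+ * Example usage (a sketch): advertise only 1G and 10G on a 10GBaseT port:
+ *	sysctl dev.ixl.0.advertise_speed=0x6
+ */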
+
+/*
+** Get the width and transaction speed of
+** the bus this adapter is plugged into.
+*/
+void
+ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
+{
+ u16 link;
+ u32 offset;
+
+ /* Some devices don't use PCIE */
+ if (hw->mac.type == I40E_MAC_X722)
+ return;
+
+ /* Read PCI Express Capabilities Link Status Register */
+ pci_find_cap(dev, PCIY_EXPRESS, &offset);
+ link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
+
+ /* Fill out hw struct with PCIE info */
+ i40e_set_pci_config_data(hw, link);
+
+ /* Use info to print out bandwidth messages */
+	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
+ ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
+ (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
+ (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
+ (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
+ (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
+ (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
+ ("Unknown"));
+
+ if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
+ (hw->bus.speed < i40e_bus_speed_8000)) {
+ device_printf(dev, "PCI-Express bandwidth available"
+ " for this device may be insufficient for"
+ " optimal performance.\n");
+ device_printf(dev, "For optimal performance, a x8 "
+ "PCIE Gen3 slot is required.\n");
+ }
+}
+
+static int
+ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ struct sbuf *sbuf;
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ ixl_nvm_version_str(hw, sbuf);
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+	return (0);
+}
+
+void
+ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
+{
+ if ((nvma->command == I40E_NVM_READ) &&
+ ((nvma->config & 0xFF) == 0xF) &&
+ (((nvma->config & 0xF00) >> 8) == 0xF) &&
+ (nvma->offset == 0) &&
+ (nvma->data_size == 1)) {
+ // device_printf(dev, "- Get Driver Status Command\n");
+ }
+ else if (nvma->command == I40E_NVM_READ) {
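+		/* Other NVM reads are also left unlogged */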
+
+ }
+ else {
+ switch (nvma->command) {
+ case 0xB:
+ device_printf(dev, "- command: I40E_NVM_READ\n");
+ break;
+ case 0xC:
+ device_printf(dev, "- command: I40E_NVM_WRITE\n");
+ break;
+ default:
+ device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
+ break;
+ }
+
+ device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF);
+ device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
+ device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
+ device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
+ }
+}
+
+int
+ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_nvm_access *nvma;
+ device_t dev = pf->dev;
+ enum i40e_status_code status = 0;
+ int perrno;
+
+ DEBUGFUNC("ixl_handle_nvmupd_cmd");
+
+ /* Sanity checks */
+ if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
+ ifd->ifd_data == NULL) {
+ device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
+ __func__);
+ device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n",
+ __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
+ device_printf(dev, "%s: data pointer: %p\n", __func__,
+ ifd->ifd_data);
+ return (EINVAL);
+ }
+
+ nvma = (struct i40e_nvm_access *)ifd->ifd_data;
+
+ if (pf->dbg_mask & IXL_DBG_NVMUPD)
+ ixl_print_nvm_cmd(dev, nvma);
+
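+	/*
+	 * If an EMP reset is in progress, wait up to ~10 seconds
+	 * (100 polls of 100 ms each) for it to finish before
+	 * issuing the NVM update command.
+	 */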
+ if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
+ int count = 0;
+ while (count++ < 100) {
+ i40e_msec_delay(100);
+ if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
+ break;
+ }
+ }
+
+ if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
+ IXL_PF_LOCK(pf);
+ status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
+ IXL_PF_UNLOCK(pf);
+ } else {
+ perrno = -EBUSY;
+ }
+
+ if (status)
+ device_printf(dev, "i40e_nvmupd_command status %d, perrno %d\n",
+ status, perrno);
+
+ /*
+	 * -EPERM has the same value as ERESTART, which the kernel would
+	 * interpret as a request to restart this ioctl. Return -EACCES
+	 * in its place.
+ */
+ if (perrno == -EPERM)
+ return (-EACCES);
+ else
+ return (perrno);
+}
+
+/*********************************************************************
+ *
+ * Media Ioctl callback
+ *
+ * This routine is called whenever the user queries the status of
+ * the interface using ifconfig.
+ *
+ **********************************************************************/
+void
+ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ixl_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+
+ INIT_DEBUGOUT("ixl_media_status: begin");
+ IXL_PF_LOCK(pf);
+
+ hw->phy.get_link_info = TRUE;
+ i40e_get_link_status(hw, &pf->link_up);
+ ixl_update_link_status(pf);
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ if (!pf->link_up) {
+ IXL_PF_UNLOCK(pf);
+ return;
+ }
+
+ ifmr->ifm_status |= IFM_ACTIVE;
+
+ /* Hardware always does full-duplex */
+ ifmr->ifm_active |= IFM_FDX;
+
+ switch (hw->phy.link_info.phy_type) {
+ /* 100 M */
+ case I40E_PHY_TYPE_100BASE_TX:
+ ifmr->ifm_active |= IFM_100_TX;
+ break;
+ /* 1 G */
+ case I40E_PHY_TYPE_1000BASE_T:
+ ifmr->ifm_active |= IFM_1000_T;
+ break;
+ case I40E_PHY_TYPE_1000BASE_SX:
+ ifmr->ifm_active |= IFM_1000_SX;
+ break;
+ case I40E_PHY_TYPE_1000BASE_LX:
+ ifmr->ifm_active |= IFM_1000_LX;
+ break;
+ case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
+ ifmr->ifm_active |= IFM_OTHER;
+ break;
+ /* 10 G */
+ case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ ifmr->ifm_active |= IFM_10G_TWINAX;
+ break;
+ case I40E_PHY_TYPE_10GBASE_SR:
+ ifmr->ifm_active |= IFM_10G_SR;
+ break;
+ case I40E_PHY_TYPE_10GBASE_LR:
+ ifmr->ifm_active |= IFM_10G_LR;
+ break;
+ case I40E_PHY_TYPE_10GBASE_T:
+ ifmr->ifm_active |= IFM_10G_T;
+ break;
+ case I40E_PHY_TYPE_XAUI:
+ case I40E_PHY_TYPE_XFI:
+ case I40E_PHY_TYPE_10GBASE_AOC:
+ ifmr->ifm_active |= IFM_OTHER;
+ break;
+ /* 40 G */
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ ifmr->ifm_active |= IFM_40G_CR4;
+ break;
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ ifmr->ifm_active |= IFM_40G_SR4;
+ break;
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ ifmr->ifm_active |= IFM_40G_LR4;
+ break;
+ case I40E_PHY_TYPE_XLAUI:
+ ifmr->ifm_active |= IFM_OTHER;
+ break;
+ case I40E_PHY_TYPE_1000BASE_KX:
+ ifmr->ifm_active |= IFM_1000_KX;
+ break;
+ case I40E_PHY_TYPE_SGMII:
+ ifmr->ifm_active |= IFM_1000_SGMII;
+ break;
+	/* These two PHY types report the same ifmedia type */
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ ifmr->ifm_active |= IFM_10G_CR1;
+ break;
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ ifmr->ifm_active |= IFM_10G_KX4;
+ break;
+ case I40E_PHY_TYPE_10GBASE_KR:
+ ifmr->ifm_active |= IFM_10G_KR;
+ break;
+ case I40E_PHY_TYPE_SFI:
+ ifmr->ifm_active |= IFM_10G_SFI;
+ break;
+ /* Our single 20G media type */
+ case I40E_PHY_TYPE_20GBASE_KR2:
+ ifmr->ifm_active |= IFM_20G_KR2;
+ break;
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ ifmr->ifm_active |= IFM_40G_KR4;
+ break;
+ case I40E_PHY_TYPE_XLPPI:
+ case I40E_PHY_TYPE_40GBASE_AOC:
+ ifmr->ifm_active |= IFM_40G_XLPPI;
+ break;
+ /* Unknown to driver */
+ default:
+ ifmr->ifm_active |= IFM_UNKNOWN;
+ break;
+ }
+ /* Report flow control status as well */
+ if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
+ ifmr->ifm_active |= IFM_ETH_TXPAUSE;
+ if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
+ ifmr->ifm_active |= IFM_ETH_RXPAUSE;
+
+ IXL_PF_UNLOCK(pf);
+}
+
+void
+ixl_init(void *arg)
+{
+ struct ixl_pf *pf = arg;
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = pf->dev;
+ int error = 0;
+
+ /*
+ * If the aq is dead here, it probably means something outside of the driver
+ * did something to the adapter, like a PF reset.
+ * So rebuild the driver's state here if that occurs.
+ */
+ if (!i40e_check_asq_alive(&pf->hw)) {
+ device_printf(dev, "Admin Queue is down; resetting...\n");
+ IXL_PF_LOCK(pf);
+ ixl_teardown_hw_structs(pf);
+ ixl_reset(pf);
+ IXL_PF_UNLOCK(pf);
+ }
+
+ /*
+ * Set up LAN queue interrupts here.
+ * Kernel interrupt setup functions cannot be called while holding a lock,
+ * so this is done outside of init_locked().
+ */
+ if (pf->msix > 1) {
+ /* Teardown existing interrupts, if they exist */
+ ixl_teardown_queue_msix(vsi);
+ ixl_free_queue_tqs(vsi);
+ /* Then set them up again */
+ error = ixl_setup_queue_msix(vsi);
+ if (error)
+ device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
+ error);
+ error = ixl_setup_queue_tqs(vsi);
+ if (error)
+ device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
+ error);
+ } else
+		/* XXX: Legacy interrupt setup may be broken */
+ error = ixl_assign_vsi_legacy(pf);
+ if (error) {
+ device_printf(pf->dev, "assign_vsi_msix/legacy error: %d\n", error);
+ return;
+ }
+
+ IXL_PF_LOCK(pf);
+ ixl_init_locked(pf);
+ IXL_PF_UNLOCK(pf);
+}
+
+/*
+ * NOTE: Fortville does not support forcing media speeds. Instead,
+ * use the set_advertise sysctl to set the speeds Fortville
+ * will advertise or be allowed to operate at.
+ */
+int
+ixl_media_change(struct ifnet * ifp)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ifmedia *ifm = &vsi->media;
+
+ INIT_DEBUGOUT("ixl_media_change: begin");
+
+ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+ return (EINVAL);
+
+ if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n");
+
+ return (ENODEV);
+}
+
+/*********************************************************************
+ * Ioctl entry point
+ *
+ * ixl_ioctl is called when the user wants to configure the
+ * interface.
+ *
+ * return 0 on success, positive on failure
+ **********************************************************************/
+
+int
+ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ixl_pf *pf = vsi->back;
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct ifdrv *ifd = (struct ifdrv *)data;
+#if defined(INET) || defined(INET6)
+ struct ifaddr *ifa = (struct ifaddr *)data;
+ bool avoid_reset = FALSE;
+#endif
+ int error = 0;
+
+ switch (command) {
+
+ case SIOCSIFADDR:
+#ifdef INET
+ if (ifa->ifa_addr->sa_family == AF_INET)
+ avoid_reset = TRUE;
+#endif
+#ifdef INET6
+ if (ifa->ifa_addr->sa_family == AF_INET6)
+ avoid_reset = TRUE;
+#endif
+#if defined(INET) || defined(INET6)
+ /*
+ ** Calling init results in link renegotiation,
+ ** so we avoid doing it when possible.
+ */
+ if (avoid_reset) {
+ ifp->if_flags |= IFF_UP;
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ ixl_init(pf);
+#ifdef INET
+ if (!(ifp->if_flags & IFF_NOARP))
+ arp_ifinit(ifp, ifa);
+#endif
+ } else
+ error = ether_ioctl(ifp, command, data);
+ break;
+#endif
+ case SIOCSIFMTU:
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
+ if (ifr->ifr_mtu > IXL_MAX_FRAME -
+ ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
+ error = EINVAL;
+ } else {
+ IXL_PF_LOCK(pf);
+ ifp->if_mtu = ifr->ifr_mtu;
+ vsi->max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + ETHER_VLAN_ENCAP_LEN;
+ ixl_init_locked(pf);
+ IXL_PF_UNLOCK(pf);
+ }
+ break;
+ case SIOCSIFFLAGS:
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
+ IXL_PF_LOCK(pf);
+ if (ifp->if_flags & IFF_UP) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if ((ifp->if_flags ^ pf->if_flags) &
+ (IFF_PROMISC | IFF_ALLMULTI)) {
+ ixl_set_promisc(vsi);
+ }
+ } else {
+ IXL_PF_UNLOCK(pf);
+ ixl_init(pf);
+ IXL_PF_LOCK(pf);
+ }
+ } else {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXL_PF_UNLOCK(pf);
+ ixl_stop(pf);
+ IXL_PF_LOCK(pf);
+ }
+ }
+ pf->if_flags = ifp->if_flags;
+ IXL_PF_UNLOCK(pf);
+ break;
+ case SIOCSDRVSPEC:
+ case SIOCGDRVSPEC:
+ IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
+ "Info)\n");
+
+ /* NVM update command */
+ if (ifd->ifd_cmd == I40E_NVM_ACCESS)
+ error = ixl_handle_nvmupd_cmd(pf, ifd);
+ else
+ error = EINVAL;
+ break;
+ case SIOCADDMULTI:
+ IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXL_PF_LOCK(pf);
+ ixl_disable_intr(vsi);
+ ixl_add_multi(vsi);
+ ixl_enable_intr(vsi);
+ IXL_PF_UNLOCK(pf);
+ }
+ break;
+ case SIOCDELMULTI:
+ IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXL_PF_LOCK(pf);
+ ixl_disable_intr(vsi);
+ ixl_del_multi(vsi);
+ ixl_enable_intr(vsi);
+ IXL_PF_UNLOCK(pf);
+ }
+ break;
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ case SIOCGIFXMEDIA:
+ IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
+ error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
+ break;
+ case SIOCSIFCAP:
+ {
+ int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
+
+ ixl_cap_txcsum_tso(vsi, ifp, mask);
+
+ if (mask & IFCAP_RXCSUM)
+ ifp->if_capenable ^= IFCAP_RXCSUM;
+ if (mask & IFCAP_RXCSUM_IPV6)
+ ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
+ if (mask & IFCAP_LRO)
+ ifp->if_capenable ^= IFCAP_LRO;
+ if (mask & IFCAP_VLAN_HWTAGGING)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+ if (mask & IFCAP_VLAN_HWFILTER)
+ ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+ if (mask & IFCAP_VLAN_HWTSO)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXL_PF_LOCK(pf);
+ ixl_init_locked(pf);
+ IXL_PF_UNLOCK(pf);
+ }
+ VLAN_CAPABILITIES(ifp);
+
+ break;
+ }
+
+ default:
+ IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
+ error = ether_ioctl(ifp, command, data);
+ break;
+ }
+
+ return (error);
+}
+
+static char *
+ixl_phy_type_string(u32 bit_pos)
+{
+ static char * phy_types_str[32] = {
+ "SGMII",
+ "1000BASE-KX",
+ "10GBASE-KX4",
+ "10GBASE-KR",
+ "40GBASE-KR4",
+ "XAUI",
+ "XFI",
+ "SFI",
+ "XLAUI",
+ "XLPPI",
+ "40GBASE-CR4",
+ "10GBASE-CR1",
+ "Reserved (12)",
+ "Reserved (13)",
+ "Reserved (14)",
+ "Reserved (15)",
+ "Reserved (16)",
+ "100BASE-TX",
+ "1000BASE-T",
+ "10GBASE-T",
+ "10GBASE-SR",
+ "10GBASE-LR",
+ "10GBASE-SFP+Cu",
+ "10GBASE-CR1",
+ "40GBASE-CR4",
+ "40GBASE-SR4",
+ "40GBASE-LR4",
+ "1000BASE-SX",
+ "1000BASE-LX",
+ "1000BASE-T Optical",
+ "20GBASE-KR2",
+ "Reserved (31)"
+ };
+
+ if (bit_pos > 31) return "Invalid";
+ return phy_types_str[bit_pos];
+}
+
+
+static int
+ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct i40e_link_status link_status;
+ enum i40e_status_code status;
+ struct sbuf *buf;
+ int error = 0;
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
+ return (ENOMEM);
+ }
+
+ status = i40e_aq_get_link_info(hw, true, &link_status, NULL);
+ if (status) {
+ device_printf(dev,
+ "%s: i40e_aq_get_link_info() status %s, aq error %s\n",
+ __func__, i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ sbuf_delete(buf);
+ return (EIO);
+ }
+
+ sbuf_printf(buf, "\n"
+ "PHY Type : 0x%02x<%s>\n"
+ "Speed : 0x%02x\n"
+ "Link info: 0x%02x\n"
+ "AN info : 0x%02x\n"
+ "Ext info : 0x%02x\n"
+ "Max Frame: %d\n"
+ "Pacing : 0x%02x\n"
+ "CRC En? : %s\n",
+ link_status.phy_type, ixl_phy_type_string(link_status.phy_type),
+ link_status.link_speed,
+ link_status.link_info, link_status.an_info,
+ link_status.ext_info, link_status.max_frame_size,
+ link_status.pacing,
+ (link_status.crc_enable) ? "Yes" : "No");
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+
+ sbuf_delete(buf);
+ return (error);
+}
+
+static int
+ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ enum i40e_status_code status;
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct sbuf *buf;
+ int error = 0;
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
+ return (ENOMEM);
+ }
+
+ status = i40e_aq_get_phy_capabilities(hw,
+ TRUE, FALSE, &abilities, NULL);
+ if (status) {
+ device_printf(dev,
+ "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
+ __func__, i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ sbuf_delete(buf);
+ return (EIO);
+ }
+
+ sbuf_printf(buf, "\n"
+ "PHY Type : %08x",
+ abilities.phy_type);
+
+ if (abilities.phy_type != 0) {
+ sbuf_printf(buf, "<");
+ for (int i = 0; i < 32; i++)
+ if ((1 << i) & abilities.phy_type)
+ sbuf_printf(buf, "%s,", ixl_phy_type_string(i));
+ sbuf_printf(buf, ">\n");
+ }
+
+ sbuf_printf(buf,
+ "Speed : %02x\n"
+ "Abilities: %02x\n"
+ "EEE cap : %04x\n"
+ "EEER reg : %08x\n"
+ "D3 Lpan : %02x\n"
+ "ID : %02x %02x %02x %02x\n"
+ "ModType : %02x %02x %02x",
+ abilities.link_speed,
+ abilities.abilities, abilities.eee_capability,
+ abilities.eeer_val, abilities.d3_lpan,
+ abilities.phy_id[0], abilities.phy_id[1],
+ abilities.phy_id[2], abilities.phy_id[3],
+ abilities.module_type[0], abilities.module_type[1],
+ abilities.module_type[2]);
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+
+ sbuf_delete(buf);
+ return (error);
+}
+
+static int
+ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_mac_filter *f;
+ char *buf, *buf_i;
+
+ int error = 0;
+ int ftl_len = 0;
+ int ftl_counter = 0;
+ int buf_len = 0;
+ int entry_len = 42;
+
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ ftl_len++;
+ }
+
+ if (ftl_len < 1) {
+ sysctl_handle_string(oidp, "(none)", 6, req);
+ return (0);
+ }
+
+ buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
+	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
+	if (buf == NULL)
+		return (ENOMEM);
+
+ sprintf(buf_i++, "\n");
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ sprintf(buf_i,
+ MAC_FORMAT ", vlan %4d, flags %#06x",
+ MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
+ buf_i += entry_len;
+ /* don't print '\n' for last entry */
+ if (++ftl_counter != ftl_len) {
+ sprintf(buf_i, "\n");
+ buf_i++;
+ }
+ }
+
+ error = sysctl_handle_string(oidp, buf, strlen(buf), req);
+ if (error)
+ printf("sysctl error: %d\n", error);
+ free(buf, M_DEVBUF);
+	return (error);
+}
+
+#define IXL_SW_RES_SIZE 0x14
+int
+ixl_res_alloc_cmp(const void *a, const void *b)
+{
+ const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
+ one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
+ two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
+
+ return ((int)one->resource_type - (int)two->resource_type);
+}
+
+/*
+ * Longest string length: 25
+ */
+char *
+ixl_switch_res_type_string(u8 type)
+{
+ char * ixl_switch_res_type_strings[0x14] = {
+ "VEB",
+ "VSI",
+ "Perfect Match MAC address",
+ "S-tag",
+ "(Reserved)",
+ "Multicast hash entry",
+ "Unicast hash entry",
+ "VLAN",
+ "VSI List entry",
+ "(Reserved)",
+ "VLAN Statistic Pool",
+ "Mirror Rule",
+ "Queue Set",
+ "Inner VLAN Forward filter",
+ "(Reserved)",
+ "Inner MAC",
+ "IP",
+ "GRE/VN1 Key",
+ "VN2 Key",
+ "Tunneling Port"
+ };
+
+ if (type < 0x14)
+ return ixl_switch_res_type_strings[type];
+ else
+ return "(Reserved)";
+}
+
+static int
+ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct sbuf *buf;
+ enum i40e_status_code status;
+ int error = 0;
+
+ u8 num_entries;
+ struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for output.\n");
+ return (ENOMEM);
+ }
+
+ bzero(resp, sizeof(resp));
+ status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
+ resp,
+ IXL_SW_RES_SIZE,
+ NULL);
+ if (status) {
+ device_printf(dev,
+ "%s: get_switch_resource_alloc() error %s, aq error %s\n",
+ __func__, i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ sbuf_delete(buf);
+		return (EIO);
+ }
+
+ /* Sort entries by type for display */
+ qsort(resp, num_entries,
+ sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
+ &ixl_res_alloc_cmp);
+
+ sbuf_cat(buf, "\n");
+ sbuf_printf(buf, "# of entries: %d\n", num_entries);
+ sbuf_printf(buf,
+ " Type | Guaranteed | Total | Used | Un-allocated\n"
+ " | (this) | (all) | (this) | (all) \n");
+ for (int i = 0; i < num_entries; i++) {
+ sbuf_printf(buf,
+ "%25s | %10d %5d %6d %12d",
+ ixl_switch_res_type_string(resp[i].resource_type),
+ resp[i].guaranteed,
+ resp[i].total,
+ resp[i].used,
+ resp[i].total_unalloced);
+ if (i < num_entries - 1)
+ sbuf_cat(buf, "\n");
+ }
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+
+ sbuf_delete(buf);
+ return (error);
+}
+
+/*
+** Caller must init and delete sbuf; this function will clear and
+** finish it for caller.
+**
+** XXX: Cannot use the SEID for this, since there is no longer a
+** fixed mapping between SEID and element type.
+*/
+char *
+ixl_switch_element_string(struct sbuf *s,
+ struct i40e_aqc_switch_config_element_resp *element)
+{
+ sbuf_clear(s);
+
+ switch (element->element_type) {
+ case I40E_AQ_SW_ELEM_TYPE_MAC:
+ sbuf_printf(s, "MAC %3d", element->element_info);
+ break;
+ case I40E_AQ_SW_ELEM_TYPE_PF:
+ sbuf_printf(s, "PF %3d", element->element_info);
+ break;
+ case I40E_AQ_SW_ELEM_TYPE_VF:
+ sbuf_printf(s, "VF %3d", element->element_info);
+ break;
+ case I40E_AQ_SW_ELEM_TYPE_EMP:
+ sbuf_cat(s, "EMP");
+ break;
+ case I40E_AQ_SW_ELEM_TYPE_BMC:
+ sbuf_cat(s, "BMC");
+ break;
+ case I40E_AQ_SW_ELEM_TYPE_PV:
+ sbuf_cat(s, "PV");
+ break;
+ case I40E_AQ_SW_ELEM_TYPE_VEB:
+ sbuf_cat(s, "VEB");
+ break;
+ case I40E_AQ_SW_ELEM_TYPE_PA:
+ sbuf_cat(s, "PA");
+ break;
+ case I40E_AQ_SW_ELEM_TYPE_VSI:
+ sbuf_printf(s, "VSI %3d", element->element_info);
+ break;
+ default:
+ sbuf_cat(s, "?");
+ break;
+ }
+
+ sbuf_finish(s);
+ return sbuf_data(s);
+}
+
+static int
+ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct sbuf *buf;
+ struct sbuf *nmbuf;
+ enum i40e_status_code status;
+ int error = 0;
+ u16 next = 0;
+ u8 aq_buf[I40E_AQ_LARGE_BUF];
+
+ struct i40e_aqc_get_switch_config_resp *sw_config;
+ sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
+ return (ENOMEM);
+ }
+
+ status = i40e_aq_get_switch_config(hw, sw_config,
+ sizeof(aq_buf), &next, NULL);
+ if (status) {
+ device_printf(dev,
+ "%s: aq_get_switch_config() error %s, aq error %s\n",
+ __func__, i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ sbuf_delete(buf);
+		return (EIO);
+ }
+ if (next)
+ device_printf(dev, "%s: TODO: get more config with SEID %d\n",
+ __func__, next);
+
+ nmbuf = sbuf_new_auto();
+ if (!nmbuf) {
+ device_printf(dev, "Could not allocate sbuf for name output.\n");
+ sbuf_delete(buf);
+ return (ENOMEM);
+ }
+
+ sbuf_cat(buf, "\n");
+ /* Assuming <= 255 elements in switch */
+ sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
+ sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
+ /* Exclude:
+ ** Revision -- all elements are revision 1 for now
+ */
+ sbuf_printf(buf,
+ "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
+ " | | | (uplink)\n");
+ for (int i = 0; i < sw_config->header.num_reported; i++) {
+ // "%4d (%8s) | %8s %8s %#8x",
+ sbuf_printf(buf, "%4d", sw_config->element[i].seid);
+ sbuf_cat(buf, " ");
+ sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
+ &sw_config->element[i]));
+ sbuf_cat(buf, " | ");
+ sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
+ sbuf_cat(buf, " ");
+ sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
+ sbuf_cat(buf, " ");
+ sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
+ if (i < sw_config->header.num_reported - 1)
+ sbuf_cat(buf, "\n");
+ }
+ sbuf_delete(nmbuf);
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+
+ sbuf_delete(buf);
+
+ return (error);
+}
+
+static int
+ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct sbuf *buf;
+ int error = 0;
+ enum i40e_status_code status;
+ u32 reg;
+
+ struct i40e_aqc_get_set_rss_key_data key_data;
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for output.\n");
+ return (ENOMEM);
+ }
+
+ sbuf_cat(buf, "\n");
+ if (hw->mac.type == I40E_MAC_X722) {
+ bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
+ status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
+ if (status)
+ device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
+ i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ sbuf_printf(buf, "%40D", (u_char *)key_data.standard_rss_key, "");
+ } else {
+ for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
+ reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
+ sbuf_printf(buf, "%4D", (u_char *)&reg, "");
+ }
+ }
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+ sbuf_delete(buf);
+
+ return (error);
+}
+
+static int
+ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct sbuf *buf;
+ int error = 0;
+ enum i40e_status_code status;
+ u8 hlut[512];
+ u32 reg;
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for output.\n");
+ return (ENOMEM);
+ }
+
+ sbuf_cat(buf, "\n");
+ if (hw->mac.type == I40E_MAC_X722) {
+ bzero(hlut, sizeof(hlut));
+ status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
+ if (status)
+ device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
+ i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ sbuf_printf(buf, "%512D", (u_char *)hlut, "");
+ } else {
+ for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
+ reg = rd32(hw, I40E_PFQF_HLUT(i));
+ sbuf_printf(buf, "%4D", (u_char *)&reg, "");
+ }
+ }
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+ sbuf_delete(buf);
+
+ return (error);
+}
+
diff --git a/sys/dev/ixl/ixl_pf_qmgr.c b/sys/dev/ixl/ixl_pf_qmgr.c
new file mode 100644
index 000000000000..f2842e584dc4
--- /dev/null
+++ b/sys/dev/ixl/ixl_pf_qmgr.c
@@ -0,0 +1,308 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#include "ixl_pf_qmgr.h"
+
+static int ixl_pf_qmgr_find_free_contiguous_block(struct ixl_pf_qmgr *qmgr, int num);
+
+int
+ixl_pf_qmgr_init(struct ixl_pf_qmgr *qmgr, u16 num_queues)
+{
+ if (num_queues < 1)
+ return (EINVAL);
+
+ qmgr->num_queues = num_queues;
+ qmgr->qinfo = malloc(num_queues * sizeof(struct ixl_pf_qmgr_qinfo),
+ M_IXL, M_ZERO | M_WAITOK);
+ if (qmgr->qinfo == NULL)
+ return ENOMEM;
+
+ return (0);
+}
+
+int
+ixl_pf_qmgr_alloc_contiguous(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag)
+{
+ int i;
+ int avail;
+ int block_start;
+ u16 alloc_size;
+
+ if (qtag == NULL || num < 1)
+ return (EINVAL);
+
+ /* We have to allocate in power-of-two chunks, so get next power of two */
+ alloc_size = (u16)next_power_of_two(num);
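+	/* e.g. a request for 5 queues allocates a block of 8 */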
+
+ /* Don't try if there aren't enough queues */
+ avail = ixl_pf_qmgr_get_num_free(qmgr);
+ if (avail < alloc_size)
+ return (ENOSPC);
+
+ block_start = ixl_pf_qmgr_find_free_contiguous_block(qmgr, alloc_size);
+ if (block_start < 0)
+ return (ENOSPC);
+
+ /* Mark queues as allocated */
+ for (i = block_start; i < block_start + alloc_size; i++)
+ qmgr->qinfo[i].allocated = true;
+
+ bzero(qtag, sizeof(*qtag));
+ qtag->qmgr = qmgr;
+ qtag->type = IXL_PF_QALLOC_CONTIGUOUS;
+ qtag->qidx[0] = block_start;
+ qtag->num_allocated = num;
+ qtag->num_active = alloc_size;
+
+ return (0);
+}
+
+/*
+ * NB: indices is u16 because this is the queue index width used in the Add VSI AQ command
+ */
+int
+ixl_pf_qmgr_alloc_scattered(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag)
+{
+ int i;
+ int avail, count = 0;
+ u16 alloc_size;
+
+ if (qtag == NULL || num < 1 || num > 16)
+ return (EINVAL);
+
+ /* We have to allocate in power-of-two chunks, so get next power of two */
+ alloc_size = (u16)next_power_of_two(num);
+
+ avail = ixl_pf_qmgr_get_num_free(qmgr);
+ if (avail < alloc_size)
+ return (ENOSPC);
+
+ bzero(qtag, sizeof(*qtag));
+ qtag->qmgr = qmgr;
+ qtag->type = IXL_PF_QALLOC_SCATTERED;
+ qtag->num_active = num;
+ qtag->num_allocated = alloc_size;
+
+ for (i = 0; i < qmgr->num_queues; i++) {
+ if (!qmgr->qinfo[i].allocated) {
+ qtag->qidx[count] = i;
+ count++;
+ qmgr->qinfo[i].allocated = true;
+ if (count == alloc_size)
+ return (0);
+ }
+ }
+
+ // Shouldn't get here
+ return (EDOOFUS);
+}
+
+int
+ixl_pf_qmgr_release(struct ixl_pf_qmgr *qmgr, struct ixl_pf_qtag *qtag)
+{
+ u16 i, qidx;
+
+ if (qtag == NULL)
+ return (EINVAL);
+
+ if (qtag->type == IXL_PF_QALLOC_SCATTERED) {
+ for (i = 0; i < qtag->num_allocated; i++) {
+ qidx = qtag->qidx[i];
+ bzero(&qmgr->qinfo[qidx], sizeof(qmgr->qinfo[qidx]));
+ }
+ } else {
+ u16 first_index = qtag->qidx[0];
+ for (i = first_index; i < first_index + qtag->num_allocated; i++)
+			bzero(&qmgr->qinfo[i], sizeof(qmgr->qinfo[i]));
+ }
+
+ qtag->qmgr = NULL;
+ return (0);
+}
+
+int
+ixl_pf_qmgr_get_num_queues(struct ixl_pf_qmgr *qmgr)
+{
+ return (qmgr->num_queues);
+}
+
+/*
+ * ERJ: This assumes the info array isn't longer than INT_MAX.
+ * This assumption might cause a y3k bug or something, I'm sure.
+ */
+int
+ixl_pf_qmgr_get_num_free(struct ixl_pf_qmgr *qmgr)
+{
+ int count = 0;
+
+ for (int i = 0; i < qmgr->num_queues; i++) {
+ if (!qmgr->qinfo[i].allocated)
+ count++;
+ }
+
+ return (count);
+}
+
+int
+ixl_pf_qmgr_get_first_free(struct ixl_pf_qmgr *qmgr, u16 start)
+{
+ int i;
+
+ if (start > qmgr->num_queues - 1)
+ return (-EINVAL);
+
+ for (i = start; i < qmgr->num_queues; i++) {
+ if (qmgr->qinfo[i].allocated)
+ continue;
+ else
+ return (i);
+ }
+
+ // No free queues
+ return (-ENOSPC);
+}
+
+void
+ixl_pf_qmgr_destroy(struct ixl_pf_qmgr *qmgr)
+{
+ free(qmgr->qinfo, M_IXL);
+ qmgr->qinfo = NULL;
+}
+
+void
+ixl_pf_qmgr_mark_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx)
+{
+ MPASS(qtag != NULL);
+
+ struct ixl_pf_qmgr *qmgr = qtag->qmgr;
+ u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+ if (tx)
+ qmgr->qinfo[pf_qidx].tx_enabled = true;
+ else
+ qmgr->qinfo[pf_qidx].rx_enabled = true;
+}
+
+void
+ixl_pf_qmgr_mark_queue_disabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx)
+{
+ MPASS(qtag != NULL);
+
+ struct ixl_pf_qmgr *qmgr = qtag->qmgr;
+ u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+ if (tx)
+ qmgr->qinfo[pf_qidx].tx_enabled = false;
+ else
+ qmgr->qinfo[pf_qidx].rx_enabled = false;
+}
+
+void
+ixl_pf_qmgr_mark_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx)
+{
+ MPASS(qtag != NULL);
+
+ struct ixl_pf_qmgr *qmgr = qtag->qmgr;
+ u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+ if (tx)
+ qmgr->qinfo[pf_qidx].tx_configured = true;
+ else
+ qmgr->qinfo[pf_qidx].rx_configured = true;
+}
+
+bool
+ixl_pf_qmgr_is_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx)
+{
+ MPASS(qtag != NULL);
+
+ struct ixl_pf_qmgr *qmgr = qtag->qmgr;
+ u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+ if (tx)
+ return (qmgr->qinfo[pf_qidx].tx_enabled);
+ else
+ return (qmgr->qinfo[pf_qidx].rx_enabled);
+}
+
+bool
+ixl_pf_qmgr_is_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx)
+{
+ MPASS(qtag != NULL);
+
+ struct ixl_pf_qmgr *qmgr = qtag->qmgr;
+ u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+ if (tx)
+ return (qmgr->qinfo[pf_qidx].tx_configured);
+ else
+ return (qmgr->qinfo[pf_qidx].rx_configured);
+}
+
+u16
+ixl_pf_qidx_from_vsi_qidx(struct ixl_pf_qtag *qtag, u16 index)
+{
+ MPASS(index < qtag->num_allocated);
+
+ if (qtag->type == IXL_PF_QALLOC_CONTIGUOUS)
+ return qtag->qidx[0] + index;
+ else
+ return qtag->qidx[index];
+}
+
+/* Static Functions */
+
+static int
+ixl_pf_qmgr_find_free_contiguous_block(struct ixl_pf_qmgr *qmgr, int num)
+{
+ int i;
+ int count = 0;
+ bool block_started = false;
+ int possible_start;
+
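+	/* Linear scan: count a run of free queues; an allocated queue resets the run */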
+ for (i = 0; i < qmgr->num_queues; i++) {
+ if (!qmgr->qinfo[i].allocated) {
+ if (!block_started) {
+ block_started = true;
+ possible_start = i;
+ }
+ count++;
+ if (count == num)
+ return (possible_start);
+ } else { /* this queue is already allocated */
+ block_started = false;
+ count = 0;
+ }
+ }
+
+ /* Can't find a contiguous block of the requested size */
+ return (-1);
+}
+
diff --git a/sys/dev/ixl/ixl_pf_qmgr.h b/sys/dev/ixl/ixl_pf_qmgr.h
new file mode 100644
index 000000000000..d6ad431bd605
--- /dev/null
+++ b/sys/dev/ixl/ixl_pf_qmgr.h
@@ -0,0 +1,109 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#include "ixl_pf.h"
+
+#ifndef _IXL_PF_QMGR_H_
+#define _IXL_PF_QMGR_H_
+
+/*
+ * Primarily manages the queues that need to be allocated to VSIs.
+ *
+ * Cardinality: There should only be one of these in a PF.
+ * Lifetime: Created and initialized in attach(); destroyed in detach().
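+ *
+ * Typical usage (a sketch; error handling omitted, queue counts illustrative):
+ *
+ *	struct ixl_pf_qmgr qmgr;
+ *	struct ixl_pf_qtag qtag;
+ *
+ *	ixl_pf_qmgr_init(&qmgr, hw->func_caps.num_tx_qp);
+ *	ixl_pf_qmgr_alloc_contiguous(&qmgr, vsi->num_queues, &qtag);
+ *	... map VSI queue indices with ixl_pf_qidx_from_vsi_qidx() ...
+ *	ixl_pf_qmgr_release(&qmgr, &qtag);
+ *	ixl_pf_qmgr_destroy(&qmgr);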
+ */
+
+#define IXL_MAX_SCATTERED_QUEUES 16
+#define IXL_MAX_CONTIGUOUS_QUEUES_XL710 64
+#define IXL_MAX_CONTIGUOUS_QUEUES_X722 128
+
+/* Structures */
+
+/* Manager */
+struct ixl_pf_qmgr_qinfo {
+ bool allocated;
+ bool tx_enabled;
+ bool rx_enabled;
+ bool tx_configured;
+ bool rx_configured;
+};
+
+struct ixl_pf_qmgr {
+ u16 num_queues;
+ struct ixl_pf_qmgr_qinfo *qinfo;
+};
+
+/* Tag */
+enum ixl_pf_qmgr_qalloc_type {
+ IXL_PF_QALLOC_CONTIGUOUS,
+ IXL_PF_QALLOC_SCATTERED
+};
+
+struct ixl_pf_qtag {
+ struct ixl_pf_qmgr *qmgr;
+ enum ixl_pf_qmgr_qalloc_type type;
+ u16 qidx[IXL_MAX_SCATTERED_QUEUES];
+ u16 num_allocated;
+ u16 num_active;
+};
+
+/* Public manager functions */
+int ixl_pf_qmgr_init(struct ixl_pf_qmgr *qmgr, u16 num_queues);
+void ixl_pf_qmgr_destroy(struct ixl_pf_qmgr *qmgr);
+
+int ixl_pf_qmgr_get_num_queues(struct ixl_pf_qmgr *qmgr);
+int ixl_pf_qmgr_get_first_free(struct ixl_pf_qmgr *qmgr, u16 start);
+int ixl_pf_qmgr_get_num_free(struct ixl_pf_qmgr *qmgr);
+
+/* Allocate queues for a VF VSI */
+int ixl_pf_qmgr_alloc_scattered(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag);
+/* Allocate queues for the LAN VSIs, or X722 VF VSIs */
+int ixl_pf_qmgr_alloc_contiguous(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag);
+/* Release a queue allocation */
+int ixl_pf_qmgr_release(struct ixl_pf_qmgr *qmgr, struct ixl_pf_qtag *qtag);
+
+/* Help manage queues used in VFs */
+/* Typically hardware refers to RX as 0 and TX as 1, so continue that convention here */
+void ixl_pf_qmgr_mark_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
+void ixl_pf_qmgr_mark_queue_disabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
+void ixl_pf_qmgr_mark_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
+bool ixl_pf_qmgr_is_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
+bool ixl_pf_qmgr_is_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
+
+/* Public tag functions */
+u16 ixl_pf_qidx_from_vsi_qidx(struct ixl_pf_qtag *qtag, u16 index);
+
+#endif /* _IXL_PF_QMGR_H_ */
+
diff --git a/sys/dev/ixl/ixl_txrx.c b/sys/dev/ixl/ixl_txrx.c
index d3aa7bf7b259..5cf54fa9410a 100644
--- a/sys/dev/ixl/ixl_txrx.c
+++ b/sys/dev/ixl/ixl_txrx.c
@@ -35,7 +35,7 @@
/*
** IXL driver TX/RX Routines:
** This was separated to allow usage by
-** both the BASE and the VF drivers.
+** both the PF and VF drivers.
*/
#ifndef IXL_STANDALONE_BUILD
@@ -58,15 +58,37 @@ static int ixl_tx_setup_offload(struct ixl_queue *,
struct mbuf *, u32 *, u32 *);
static bool ixl_tso_setup(struct ixl_queue *, struct mbuf *);
-static __inline void ixl_rx_discard(struct rx_ring *, int);
-static __inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
+static inline void ixl_rx_discard(struct rx_ring *, int);
+static inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
struct mbuf *, u8);
+static inline bool ixl_tso_detect_sparse(struct mbuf *mp);
+static int ixl_tx_setup_offload(struct ixl_queue *que,
+ struct mbuf *mp, u32 *cmd, u32 *off);
+static inline u32 ixl_get_tx_head(struct ixl_queue *que);
+
#ifdef DEV_NETMAP
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */
/*
+ * ixl_get_default_rss_key - copy the driver's default RSS seed into @key
+ */
+void
+ixl_get_default_rss_key(u32 *key)
+{
+ MPASS(key != NULL);
+
+ u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687,
+ 0x183cfd8c, 0xce880440, 0x580cbc3c,
+ 0x35897377, 0x328b25e1, 0x4fa98922,
+ 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
+ 0x0, 0x0, 0x0};
+
+ bcopy(rss_seed, key, IXL_RSS_KEY_SIZE);
+}
+
+/*
** Multiqueue Transmit driver
*/
int
@@ -98,13 +120,6 @@ ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
i = m->m_pkthdr.flowid % vsi->num_queues;
} else
i = curcpu % vsi->num_queues;
- /*
- ** This may not be perfect, but until something
- ** better comes along it will keep from scheduling
- ** on stalled queues.
- */
- if (((1 << i) & vsi->active_queues) == 0)
- i = ffsl(vsi->active_queues);
que = &vsi->queues[i];
txr = &que->txr;
@@ -239,7 +254,7 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
struct ixl_tx_buf *buf;
struct i40e_tx_desc *txd = NULL;
struct mbuf *m_head, *m;
- int i, j, error, nsegs, maxsegs;
+ int i, j, error, nsegs;
int first, last = 0;
u16 vtag = 0;
u32 cmd, off;
@@ -259,12 +274,10 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
buf = &txr->buffers[first];
map = buf->map;
tag = txr->tx_tag;
- maxsegs = IXL_MAX_TX_SEGS;
if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
/* Use larger mapping for TSO */
tag = txr->tso_tag;
- maxsegs = IXL_MAX_TSO_SEGS;
if (ixl_tso_detect_sparse(m_head)) {
m = m_defrag(m_head, M_NOWAIT);
if (m == NULL) {
@@ -299,19 +312,19 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
*m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
if (error == ENOMEM) {
- que->tx_dma_setup++;
+ que->tx_dmamap_failed++;
return (error);
} else if (error != 0) {
- que->tx_dma_setup++;
+ que->tx_dmamap_failed++;
m_freem(*m_headp);
*m_headp = NULL;
return (error);
}
} else if (error == ENOMEM) {
- que->tx_dma_setup++;
+ que->tx_dmamap_failed++;
return (error);
} else if (error != 0) {
- que->tx_dma_setup++;
+ que->tx_dmamap_failed++;
m_freem(*m_headp);
*m_headp = NULL;
return (error);
@@ -804,6 +817,7 @@ ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
type = I40E_TX_DESC_DTYPE_CONTEXT;
cmd = I40E_TX_CTX_DESC_TSO;
+ /* ERJ: this must not be less than 64 */
mss = mp->m_pkthdr.tso_segsz;
type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
@@ -1374,7 +1388,7 @@ ixl_free_que_rx(struct ixl_queue *que)
return;
}
-static __inline void
+static inline void
ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
{
@@ -1405,7 +1419,7 @@ ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
}
-static __inline void
+static inline void
ixl_rx_discard(struct rx_ring *rxr, int i)
{
struct ixl_rx_buf *rbuf;
@@ -1532,7 +1546,7 @@ ixl_rxeof(struct ixl_queue *que, int count)
for (i = rxr->next_check; count != 0;) {
struct mbuf *sendmp, *mh, *mp;
- u32 rsc, status, error;
+ u32 status, error;
u16 hlen, plen, vtag;
u64 qword;
u8 ptype;
@@ -1565,7 +1579,6 @@ ixl_rxeof(struct ixl_queue *que, int count)
count--;
sendmp = NULL;
nbuf = NULL;
- rsc = 0;
cur->wb.qword1.status_error_len = 0;
rbuf = &rxr->buffers[i];
mh = rbuf->m_head;
@@ -1673,10 +1686,6 @@ ixl_rxeof(struct ixl_queue *que, int count)
sendmp = mp;
sendmp->m_flags |= M_PKTHDR;
sendmp->m_pkthdr.len = mp->m_len;
- if (vtag) {
- sendmp->m_pkthdr.ether_vtag = vtag;
- sendmp->m_flags |= M_VLANTAG;
- }
}
/* Pass the head pointer on */
if (eop == 0) {
@@ -1695,6 +1704,11 @@ ixl_rxeof(struct ixl_queue *que, int count)
/* capture data for dynamic ITR adjustment */
rxr->packets++;
rxr->bytes += sendmp->m_pkthdr.len;
+ /* Set VLAN tag (field only valid in eop desc) */
+ if (vtag) {
+ sendmp->m_pkthdr.ether_vtag = vtag;
+ sendmp->m_flags |= M_VLANTAG;
+ }
if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
ixl_rx_checksum(sendmp, status, error, ptype);
#ifdef RSS
diff --git a/sys/dev/ixl/ixlv.h b/sys/dev/ixl/ixlv.h
index a131ca0a4b22..be4daeefbc83 100644
--- a/sys/dev/ixl/ixlv.h
+++ b/sys/dev/ixl/ixlv.h
@@ -44,7 +44,7 @@
#define IXLV_AQ_TIMEOUT (1 * hz)
#define IXLV_CALLOUT_TIMO (hz / 50) /* 20 msec */
-#define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1)
+#define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1 << 0)
#define IXLV_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
#define IXLV_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
#define IXLV_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
@@ -55,6 +55,10 @@
#define IXLV_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
#define IXLV_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9)
#define IXLV_FLAG_AQ_GET_STATS (u32)(1 << 10)
+#define IXLV_FLAG_AQ_CONFIG_RSS_KEY (u32)(1 << 11)
+#define IXLV_FLAG_AQ_SET_RSS_HENA (u32)(1 << 12)
+#define IXLV_FLAG_AQ_GET_RSS_HENA_CAPS (u32)(1 << 13)
+#define IXLV_FLAG_AQ_CONFIG_RSS_LUT (u32)(1 << 14)
/* printf %b arg */
#define IXLV_FLAGS \
@@ -62,9 +66,17 @@
"\4ADD_VLAN_FILTER\5DEL_MAC_FILTER\6DEL_VLAN_FILTER" \
"\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \
"\12CONFIGURE_PROMISC\13GET_STATS"
-
-/* Hack for compatibility with 1.0.x linux pf driver */
-#define I40E_VIRTCHNL_OP_EVENT 17
+#define IXLV_PRINTF_VF_OFFLOAD_FLAGS \
+ "\20\1I40E_VIRTCHNL_VF_OFFLOAD_L2" \
+ "\2I40E_VIRTCHNL_VF_OFFLOAD_IWARP" \
+ "\3I40E_VIRTCHNL_VF_OFFLOAD_FCOE" \
+ "\4I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ" \
+ "\5I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG" \
+ "\6I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR" \
+ "\21I40E_VIRTCHNL_VF_OFFLOAD_VLAN" \
+ "\22I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING" \
+ "\23I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2" \
+ "\24I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF"
/* Driver state */
enum ixlv_state_t {
@@ -80,9 +92,11 @@ enum ixlv_state_t {
IXLV_INIT_MAPPING,
IXLV_INIT_ENABLE,
IXLV_INIT_COMPLETE,
- IXLV_RUNNING,
+ IXLV_RUNNING,
};
+/* Structs */
+
struct ixlv_mac_filter {
SLIST_ENTRY(ixlv_mac_filter) next;
u8 macaddr[ETHER_ADDR_LEN];
@@ -107,6 +121,7 @@ struct ixlv_sc {
struct resource *msix_mem;
enum ixlv_state_t init_state;
+ int init_in_progress;
/*
* Interrupt resources
@@ -154,6 +169,10 @@ struct ixlv_sc {
struct ixl_vc_cmd del_vlan_cmd;
struct ixl_vc_cmd add_multi_cmd;
struct ixl_vc_cmd del_multi_cmd;
+ struct ixl_vc_cmd config_rss_key_cmd;
+ struct ixl_vc_cmd get_rss_hena_caps_cmd;
+ struct ixl_vc_cmd set_rss_hena_cmd;
+ struct ixl_vc_cmd config_rss_lut_cmd;
/* Virtual comm channel */
struct i40e_virtchnl_vf_resource *vf_res;
@@ -209,5 +228,9 @@ void ixlv_del_vlans(struct ixlv_sc *);
void ixlv_update_stats_counters(struct ixlv_sc *,
struct i40e_eth_stats *);
void ixlv_update_link_status(struct ixlv_sc *);
+void ixlv_get_default_rss_key(u32 *, bool);
+void ixlv_config_rss_key(struct ixlv_sc *);
+void ixlv_set_rss_hena(struct ixlv_sc *);
+void ixlv_config_rss_lut(struct ixlv_sc *);
#endif /* _IXLV_H_ */
diff --git a/sys/dev/ixl/ixlvc.c b/sys/dev/ixl/ixlvc.c
index 6a367eab3a6d..155f0ab46ae6 100644
--- a/sys/dev/ixl/ixlvc.c
+++ b/sys/dev/ixl/ixlvc.c
@@ -69,8 +69,10 @@ static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
valid_len = sizeof(struct i40e_virtchnl_version_info);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
+ valid_len = 0;
+ break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
- // TODO: valid length in api v1.0 is 0, v1.1 is 4
+ /* Valid length in api v1.0 is 0, v1.1 is 4 */
valid_len = 4;
break;
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
@@ -218,7 +220,7 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
i40e_status err;
int retries = 0;
- event.buf_len = IXL_AQ_BUFSZ;
+ event.buf_len = IXL_AQ_BUF_SZ;
event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
if (!event.msg_buf) {
err = ENOMEM;
@@ -230,7 +232,7 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
goto out_alloc;
/* Initial delay here is necessary */
- i40e_msec_delay(100);
+ i40e_msec_pause(100);
err = i40e_clean_arq_element(hw, &event, NULL);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
continue;
@@ -288,7 +290,7 @@ ixlv_send_vf_config_msg(struct ixlv_sc *sc)
u32 caps;
caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
if (sc->pf_version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
@@ -331,7 +333,7 @@ ixlv_get_vf_config(struct ixlv_sc *sc)
err = i40e_clean_arq_element(hw, &event, NULL);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
if (++retries <= IXLV_AQ_MAX_ERR)
- i40e_msec_delay(10);
+ i40e_msec_pause(10);
} else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
DDPRINTF(dev, "Received a response from PF,"
@@ -498,7 +500,7 @@ ixlv_map_queues(struct ixlv_sc *sc)
vm->vecmap[i].txq_map = (1 << que->me);
vm->vecmap[i].rxq_map = (1 << que->me);
vm->vecmap[i].rxitr_idx = 0;
- vm->vecmap[i].txitr_idx = 0;
+ vm->vecmap[i].txitr_idx = 1;
}
/* Misc vector last - this is only for AdminQ messages */
@@ -570,13 +572,6 @@ ixlv_add_vlans(struct ixlv_sc *sc)
if (i == cnt)
break;
}
- // ERJ: Should this be taken out?
- if (i == 0) { /* Should not happen... */
- device_printf(dev, "%s: i == 0?\n", __func__);
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
- I40E_SUCCESS);
- return;
- }
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
free(v, M_DEVBUF);
@@ -640,13 +635,6 @@ ixlv_del_vlans(struct ixlv_sc *sc)
if (i == cnt)
break;
}
- // ERJ: Take this out?
- if (i == 0) { /* Should not happen... */
- device_printf(dev, "%s: i == 0?\n", __func__);
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
- I40E_SUCCESS);
- return;
- }
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
free(v, M_DEVBUF);
@@ -842,6 +830,100 @@ ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
vsi->eth_stats = *es;
}
+void
+ixlv_config_rss_key(struct ixlv_sc *sc)
+{
+ struct i40e_virtchnl_rss_key *rss_key_msg;
+ int msg_len, key_length;
+ u8 rss_seed[IXL_RSS_KEY_SIZE];
+#ifdef RSS
+ u32 rss_hash_config;
+#endif
+
+#ifdef RSS
+ /* Fetch the configured RSS key */
+ rss_getkey(&rss_seed);
+#else
+ ixl_get_default_rss_key((u32 *)rss_seed);
+#endif
+
+ /* Send the fetched key */
+ key_length = IXL_RSS_KEY_SIZE;
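+	/* The message struct already declares key[1], hence the "- 1" below */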
+ msg_len = sizeof(struct i40e_virtchnl_rss_key) + (sizeof(u8) * key_length) - 1;
+ rss_key_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (rss_key_msg == NULL) {
+ device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n");
+ return;
+ }
+
+ rss_key_msg->vsi_id = sc->vsi_res->vsi_id;
+ rss_key_msg->key_len = key_length;
+ bcopy(rss_seed, &rss_key_msg->key[0], key_length);
+
+ DDPRINTF(sc->dev, "config_rss: vsi_id %d, key_len %d",
+ rss_key_msg->vsi_id, rss_key_msg->key_len);
+
+ ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ (u8 *)rss_key_msg, msg_len);
+
+ free(rss_key_msg, M_DEVBUF);
+}
+
+void
+ixlv_set_rss_hena(struct ixlv_sc *sc)
+{
+ struct i40e_virtchnl_rss_hena hena;
+
+ hena.hena = IXL_DEFAULT_RSS_HENA;
+
+ ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ (u8 *)&hena, sizeof(hena));
+}
+
+void
+ixlv_config_rss_lut(struct ixlv_sc *sc)
+{
+ struct i40e_virtchnl_rss_lut *rss_lut_msg;
+ int msg_len;
+ u16 lut_length;
+ u32 lut;
+ int i, que_id;
+
+ lut_length = IXL_RSS_VSI_LUT_SIZE;
+ msg_len = sizeof(struct i40e_virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1;
+ rss_lut_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (rss_lut_msg == NULL) {
+ device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n");
+ return;
+ }
+
+ rss_lut_msg->vsi_id = sc->vsi_res->vsi_id;
+	/* Each LUT entry is a single byte, so entry count equals buffer length */
+ rss_lut_msg->lut_entries = lut_length;
+
+ /* Populate the LUT with max no. of queues in round robin fashion */
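+	/* e.g. with 4 queues the table reads 0,1,2,3,0,1,2,3,... */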
+ for (i = 0; i < lut_length; i++) {
+#ifdef RSS
+ /*
+ * Fetch the RSS bucket id for the given indirection entry.
+ * Cap it at the number of configured buckets (which is
+ * num_queues.)
+ */
+ que_id = rss_get_indirection_to_bucket(i);
+ que_id = que_id % sc->vsi.num_queues;
+#else
+ que_id = i % sc->vsi.num_queues;
+#endif
+ lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK;
+ rss_lut_msg->lut[i] = lut;
+ }
+
+ ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ (u8 *)rss_lut_msg, msg_len);
+
+ free(rss_lut_msg, M_DEVBUF);
+}
+
/*
** ixlv_vc_completion
**
@@ -940,7 +1022,7 @@ ixlv_vc_completion(struct ixlv_sc *sc,
ixlv_enable_intr(vsi);
/* And inform the stack we're ready */
vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
- vsi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ /* TODO: Clear a state flag, so we know we're ready to run init again */
}
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
@@ -950,7 +1032,7 @@ ixlv_vc_completion(struct ixlv_sc *sc,
/* Turn off all interrupts */
ixlv_disable_intr(vsi);
/* Tell the stack that the interface is no longer active */
- vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
}
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
@@ -961,6 +1043,18 @@ ixlv_vc_completion(struct ixlv_sc *sc,
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
v_retval);
break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_KEY,
+ v_retval);
+ break;
+ case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_SET_RSS_HENA,
+ v_retval);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_LUT,
+ v_retval);
+ break;
default:
#ifdef IXL_DEBUG
device_printf(dev,
@@ -1008,6 +1102,18 @@ ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request)
case IXLV_FLAG_AQ_ENABLE_QUEUES:
ixlv_enable_queues(sc);
break;
+
+ case IXLV_FLAG_AQ_CONFIG_RSS_KEY:
+ ixlv_config_rss_key(sc);
+ break;
+
+ case IXLV_FLAG_AQ_SET_RSS_HENA:
+ ixlv_set_rss_hena(sc);
+ break;
+
+ case IXLV_FLAG_AQ_CONFIG_RSS_LUT:
+ ixlv_config_rss_lut(sc);
+ break;
}
}