author    Eric Joyner <erj@FreeBSD.org>  2024-08-29 22:41:20 +0000
committer Eric Joyner <erj@FreeBSD.org>  2024-10-28 23:17:18 +0000
commit    f2635e844dd138ac9dfba676f27d41750049af26 (patch)
tree      1b1736f40fd575cc3afef8ec97ec0a4acbb65608
parent    f2daf89954a45c7eed22990dd4cf2cf879763dc0 (diff)
ice: Update to 1.42.1-k
Summary:
- Adds E830 device support
- Adds pre-release E825C support (for the Ethernet device included in an
  upcoming Xeon D platform)
- Adds a sysctl for E810 devices to print out PHY debug statistics (mostly
  for FEC debugging)
- Adds a per-TX-queue tso counter sysctl to count how many times a TSO
  offload was requested for a packet, matching other Intel drivers
- Various bug fixes

Signed-off-by: Eric Joyner <erj@FreeBSD.org>
Tested by: Jeffrey Pieper <jeffrey.e.pieper@intel.com>
MFC after: 2 days
Relnotes: yes
Sponsored by: Intel Corporation
Differential Revision: https://reviews.freebsd.org/D46949
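
For a sense of what the 200G/E830 plumbing adds at the interface level: the Get Link Status response gains a 200 Gb/s bit and a widened speed mask (see ICE_AQ_LINK_SPEED_200GB and ICE_AQ_LINK_SPEED_M in the ice_adminq_cmd.h hunk below). The following is a minimal, illustrative C sketch of decoding that field in isolation; it is not driver code, and the constant values are simply copied from the diff:

/*
 * Illustrative sketch only -- not part of the driver.  The constant values
 * mirror the ICE_AQ_LINK_SPEED_* definitions added or changed in
 * sys/dev/ice/ice_adminq_cmd.h by this commit.
 */
#include <stdint.h>
#include <stdio.h>

#define ICE_AQ_LINK_SPEED_M       0xFFFu        /* widened from 0x7FF */
#define ICE_AQ_LINK_SPEED_40GB    (1u << 8)
#define ICE_AQ_LINK_SPEED_50GB    (1u << 9)
#define ICE_AQ_LINK_SPEED_100GB   (1u << 10)
#define ICE_AQ_LINK_SPEED_200GB   (1u << 11)    /* new in this update */

static const char *
link_speed_str(uint16_t link_speed)
{
	/* Mask off bits outside the (now wider) speed field. */
	link_speed &= ICE_AQ_LINK_SPEED_M;

	if (link_speed & ICE_AQ_LINK_SPEED_200GB)
		return ("200 Gb/s");
	if (link_speed & ICE_AQ_LINK_SPEED_100GB)
		return ("100 Gb/s");
	if (link_speed & ICE_AQ_LINK_SPEED_50GB)
		return ("50 Gb/s");
	if (link_speed & ICE_AQ_LINK_SPEED_40GB)
		return ("40 Gb/s");
	return ("other/unknown");
}

int
main(void)
{
	uint16_t sample = ICE_AQ_LINK_SPEED_200GB;  /* pretend AQ response */

	printf("link speed: %s\n", link_speed_str(sample));
	return (0);
}
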
-rw-r--r--  sys/dev/ice/ice_adminq_cmd.h | 191
-rw-r--r--  sys/dev/ice/ice_bitops.h | 4
-rw-r--r--  sys/dev/ice/ice_common.c | 967
-rw-r--r--  sys/dev/ice/ice_common.h | 189
-rw-r--r--  sys/dev/ice/ice_common_txrx.h | 2
-rw-r--r--  sys/dev/ice/ice_controlq.c | 143
-rw-r--r--  sys/dev/ice/ice_controlq.h | 24
-rw-r--r--  sys/dev/ice/ice_dcb.c | 90
-rw-r--r--  sys/dev/ice/ice_dcb.h | 42
-rw-r--r--  sys/dev/ice/ice_ddp_common.c | 123
-rw-r--r--  sys/dev/ice/ice_ddp_common.h | 19
-rw-r--r--  sys/dev/ice/ice_devids.h | 38
-rw-r--r--  sys/dev/ice/ice_drv_info.h | 49
-rw-r--r--  sys/dev/ice/ice_features.h | 3
-rw-r--r--  sys/dev/ice/ice_flex_pipe.c | 266
-rw-r--r--  sys/dev/ice/ice_flex_pipe.h | 38
-rw-r--r--  sys/dev/ice/ice_flow.c | 100
-rw-r--r--  sys/dev/ice/ice_flow.h | 14
-rw-r--r--  sys/dev/ice/ice_fw_logging.c | 4
-rw-r--r--  sys/dev/ice/ice_fwlog.c | 39
-rw-r--r--  sys/dev/ice/ice_fwlog.h | 12
-rw-r--r--  sys/dev/ice/ice_hw_autogen.h | 2557
-rw-r--r--  sys/dev/ice/ice_iflib.h | 1
-rw-r--r--  sys/dev/ice/ice_lan_tx_rx.h | 8
-rw-r--r--  sys/dev/ice/ice_lib.c | 1113
-rw-r--r--  sys/dev/ice/ice_lib.h | 109
-rw-r--r--  sys/dev/ice/ice_nvm.c | 374
-rw-r--r--  sys/dev/ice/ice_nvm.h | 56
-rw-r--r--  sys/dev/ice/ice_rdma.c | 2
-rw-r--r--  sys/dev/ice/ice_sbq_cmd.h | 120
-rw-r--r--  sys/dev/ice/ice_sched.c | 487
-rw-r--r--  sys/dev/ice/ice_sched.h | 102
-rw-r--r--  sys/dev/ice/ice_strings.c | 4
-rw-r--r--  sys/dev/ice/ice_switch.c | 304
-rw-r--r--  sys/dev/ice/ice_switch.h | 93
-rw-r--r--  sys/dev/ice/ice_type.h | 36
-rw-r--r--  sys/dev/ice/ice_vlan_mode.c | 22
-rw-r--r--  sys/dev/ice/ice_vlan_mode.h | 2
-rw-r--r--  sys/dev/ice/if_ice_iflib.c | 59
-rw-r--r--  sys/dev/ice/virtchnl.h | 28
40 files changed, 5678 insertions, 2156 deletions
diff --git a/sys/dev/ice/ice_adminq_cmd.h b/sys/dev/ice/ice_adminq_cmd.h
index 70b56144faf2..6225abc0f38b 100644
--- a/sys/dev/ice/ice_adminq_cmd.h
+++ b/sys/dev/ice/ice_adminq_cmd.h
@@ -187,7 +187,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_ROCEV2_LAG 0x0092
#define ICE_AQC_BIT_ROCEV2_LAG 0x01
#define ICE_AQC_BIT_SRIOV_LAG 0x02
-
+#define ICE_AQC_CAPS_NEXT_CLUSTER_ID 0x0096
u8 major_ver;
u8 minor_ver;
/* Number of resources described by this capability */
@@ -320,7 +320,12 @@ struct ice_aqc_set_port_params {
(0x3F << ICE_AQC_SET_P_PARAMS_LOGI_PORT_ID_S)
#define ICE_AQC_SET_P_PARAMS_IS_LOGI_PORT BIT(14)
#define ICE_AQC_SET_P_PARAMS_SWID_VALID BIT(15)
- u8 reserved[10];
+ u8 lb_mode;
+#define ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_VALID BIT(2)
+#define ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_NORMAL 0x00
+#define ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_NO 0x01
+#define ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_HIGH 0x02
+ u8 reserved[9];
};
/* These resource type defines are used for all switch resource
@@ -1389,7 +1394,18 @@ struct ice_aqc_get_phy_caps {
#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
-#define ICE_PHY_TYPE_HIGH_MAX_INDEX 4
+#define ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 BIT_ULL(5)
+#define ICE_PHY_TYPE_HIGH_200G_SR4 BIT_ULL(6)
+#define ICE_PHY_TYPE_HIGH_200G_FR4 BIT_ULL(7)
+#define ICE_PHY_TYPE_HIGH_200G_LR4 BIT_ULL(8)
+#define ICE_PHY_TYPE_HIGH_200G_DR4 BIT_ULL(9)
+#define ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 BIT_ULL(10)
+#define ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC BIT_ULL(11)
+#define ICE_PHY_TYPE_HIGH_200G_AUI4 BIT_ULL(12)
+#define ICE_PHY_TYPE_HIGH_200G_AUI8_AOC_ACC BIT_ULL(13)
+#define ICE_PHY_TYPE_HIGH_200G_AUI8 BIT_ULL(14)
+#define ICE_PHY_TYPE_HIGH_400GBASE_FR8 BIT_ULL(15)
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 15
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
@@ -1541,11 +1557,14 @@ struct ice_aqc_get_link_status {
enum ice_get_link_status_data_version {
ICE_GET_LINK_STATUS_DATA_V1 = 1,
+ ICE_GET_LINK_STATUS_DATA_V2 = 2,
};
#define ICE_GET_LINK_STATUS_DATALEN_V1 32
+#define ICE_GET_LINK_STATUS_DATALEN_V2 56
/* Get link status response data structure, also used for Link Status Event */
+#pragma pack(1)
struct ice_aqc_get_link_status_data {
u8 topo_media_conflict;
#define ICE_AQ_LINK_TOPO_CONFLICT BIT(0)
@@ -1618,7 +1637,7 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_PWR_QSFP_CLASS_3 2
#define ICE_AQ_LINK_PWR_QSFP_CLASS_4 3
__le16 link_speed;
-#define ICE_AQ_LINK_SPEED_M 0x7FF
+#define ICE_AQ_LINK_SPEED_M 0xFFF
#define ICE_AQ_LINK_SPEED_10MB BIT(0)
#define ICE_AQ_LINK_SPEED_100MB BIT(1)
#define ICE_AQ_LINK_SPEED_1000MB BIT(2)
@@ -1630,12 +1649,37 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_SPEED_40GB BIT(8)
#define ICE_AQ_LINK_SPEED_50GB BIT(9)
#define ICE_AQ_LINK_SPEED_100GB BIT(10)
+#define ICE_AQ_LINK_SPEED_200GB BIT(11)
#define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15)
- __le32 reserved3; /* Aligns next field to 8-byte boundary */
+ __le16 reserved3; /* Aligns next field to 8-byte boundary */
+ u8 ext_fec_status;
+#define ICE_AQ_LINK_RS_272_FEC_EN BIT(0) /* RS 272 FEC enabled */
+ u8 reserved4;
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
__le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
+ /* Get link status version 2 link partner data */
+ __le64 lp_phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
+ __le64 lp_phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
+ u8 lp_fec_adv;
+#define ICE_AQ_LINK_LP_10G_KR_FEC_CAP BIT(0)
+#define ICE_AQ_LINK_LP_25G_KR_FEC_CAP BIT(1)
+#define ICE_AQ_LINK_LP_RS_528_FEC_CAP BIT(2)
+#define ICE_AQ_LINK_LP_50G_KR_272_FEC_CAP BIT(3)
+#define ICE_AQ_LINK_LP_100G_KR_272_FEC_CAP BIT(4)
+#define ICE_AQ_LINK_LP_200G_KR_272_FEC_CAP BIT(5)
+ u8 lp_fec_req;
+#define ICE_AQ_LINK_LP_10G_KR_FEC_REQ BIT(0)
+#define ICE_AQ_LINK_LP_25G_KR_FEC_REQ BIT(1)
+#define ICE_AQ_LINK_LP_RS_528_FEC_REQ BIT(2)
+#define ICE_AQ_LINK_LP_KR_272_FEC_REQ BIT(3)
+ u8 lp_flowcontrol;
+#define ICE_AQ_LINK_LP_PAUSE_ADV BIT(0)
+#define ICE_AQ_LINK_LP_ASM_DIR_ADV BIT(1)
+ u8 reserved[5];
};
+#pragma pack()
+
/* Set event mask command (direct 0x0613) */
struct ice_aqc_set_event_mask {
u8 lport_num;
@@ -1793,14 +1837,46 @@ struct ice_aqc_dnl_call_command {
u8 ctx; /* Used in command, reserved in response */
u8 reserved;
__le16 activity_id;
+#define ICE_AQC_ACT_ID_DNL 0x1129
__le32 reserved1;
__le32 addr_high;
__le32 addr_low;
};
+struct ice_aqc_dnl_equa_param {
+ __le16 data_in;
+#define ICE_AQC_RX_EQU_SHIFT 8
+#define ICE_AQC_RX_EQU_PRE2 (0x10 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_PRE1 (0x11 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_TX_EQU_PRE1 0x0
+#define ICE_AQC_TX_EQU_PRE3 0x3
+#define ICE_AQC_TX_EQU_ATTEN 0x4
+#define ICE_AQC_TX_EQU_POST1 0x8
+#define ICE_AQC_TX_EQU_PRE2 0xC
+ __le16 op_code_serdes_sel;
+#define ICE_AQC_OP_CODE_SHIFT 4
+#define ICE_AQC_OP_CODE_RX_EQU (0x9 << ICE_AQC_OP_CODE_SHIFT)
+#define ICE_AQC_OP_CODE_TX_EQU (0x10 << ICE_AQC_OP_CODE_SHIFT)
+ __le32 reserved[3];
+};
+
+struct ice_aqc_dnl_equa_resp {
+ /* Equalization value can be -ve */
+ int val;
+ __le32 reserved[3];
+};
+
/* DNL call command/response buffer (indirect 0x0682) */
struct ice_aqc_dnl_call {
- __le32 stores[4];
+ union {
+ struct ice_aqc_dnl_equa_param txrx_equa_reqs;
+ __le32 stores[4];
+ struct ice_aqc_dnl_equa_resp txrx_equa_resp;
+ } sto;
};
/* Used for both commands:
@@ -1902,8 +1978,8 @@ struct ice_aqc_link_topo_addr {
#define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S)
/* Used to decode the handle field */
#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
-#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
-#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM 0
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ BIT(9)
#define ICE_AQC_LINK_TOPO_HANDLE_NODE_S 0
/* In case of a Mezzanine type */
#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M \
@@ -1919,7 +1995,7 @@ struct ice_aqc_link_topo_addr {
struct ice_aqc_get_link_topo {
struct ice_aqc_link_topo_addr addr;
u8 node_part_num;
-#define ICE_ACQ_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
u8 rsvd[9];
};
@@ -2054,6 +2130,7 @@ struct ice_aqc_get_port_options_elem {
#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8
u8 global_scid[2];
u8 phy_scid[2];
u8 pf2port_cid[2];
@@ -2201,6 +2278,29 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_LLDP_STATUS_M_LEN 4 /* In Bits */
#define ICE_AQC_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */
+#define ICE_AQC_NVM_SDP_CFG_PTR_OFFSET 0xD8
+#define ICE_AQC_NVM_SDP_CFG_PTR_RD_LEN 2 /* In Bytes */
+#define ICE_AQC_NVM_SDP_CFG_PTR_M MAKEMASK(0x7FFF, 0)
+#define ICE_AQC_NVM_SDP_CFG_PTR_TYPE_M BIT(15)
+#define ICE_AQC_NVM_SDP_CFG_HEADER_LEN 2 /* In Bytes */
+#define ICE_AQC_NVM_SDP_CFG_SEC_LEN_LEN 2 /* In Bytes */
+#define ICE_AQC_NVM_SDP_CFG_DATA_LEN 14 /* In Bytes */
+#define ICE_AQC_NVM_SDP_CFG_MAX_SECTION_SIZE 7
+#define ICE_AQC_NVM_SDP_CFG_PIN_SIZE 10
+#define ICE_AQC_NVM_SDP_CFG_PIN_OFFSET 6
+#define ICE_AQC_NVM_SDP_CFG_PIN_MASK MAKEMASK(0x3FF, \
+ ICE_AQC_NVM_SDP_CFG_PIN_OFFSET)
+#define ICE_AQC_NVM_SDP_CFG_CHAN_OFFSET 4
+#define ICE_AQC_NVM_SDP_CFG_CHAN_MASK MAKEMASK(0x3, \
+ ICE_AQC_NVM_SDP_CFG_CHAN_OFFSET)
+#define ICE_AQC_NVM_SDP_CFG_DIR_OFFSET 3
+#define ICE_AQC_NVM_SDP_CFG_DIR_MASK MAKEMASK(0x1, \
+ ICE_AQC_NVM_SDP_CFG_DIR_OFFSET)
+#define ICE_AQC_NVM_SDP_CFG_SDP_NUM_OFFSET 0
+#define ICE_AQC_NVM_SDP_CFG_SDP_NUM_MASK MAKEMASK(0x7, \
+ ICE_AQC_NVM_SDP_CFG_SDP_NUM_OFFSET)
+#define ICE_AQC_NVM_SDP_CFG_NA_PIN_MASK MAKEMASK(0x1, 15)
+
#define ICE_AQC_NVM_MINSREV_MOD_ID 0x130
#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B
#define ICE_AQC_NVM_CMPO_MOD_ID 0x153
@@ -2265,6 +2365,29 @@ struct ice_aqc_nvm_checksum {
u8 rsvd2[12];
};
+/* Used for NVM Sanitization command - 0x070C */
+struct ice_aqc_nvm_sanitization {
+ u8 cmd_flags;
+#define ICE_AQ_NVM_SANITIZE_REQ_READ 0
+#define ICE_AQ_NVM_SANITIZE_REQ_OPERATE BIT(0)
+
+#define ICE_AQ_NVM_SANITIZE_READ_SUBJECT_NVM_BITS 0
+#define ICE_AQ_NVM_SANITIZE_READ_SUBJECT_NVM_STATE BIT(1)
+#define ICE_AQ_NVM_SANITIZE_OPERATE_SUBJECT_CLEAR 0
+ u8 values;
+#define ICE_AQ_NVM_SANITIZE_NVM_BITS_HOST_CLEAN_SUPPORT BIT(0)
+#define ICE_AQ_NVM_SANITIZE_NVM_BITS_BMC_CLEAN_SUPPORT BIT(2)
+#define ICE_AQ_NVM_SANITIZE_NVM_STATE_HOST_CLEAN_DONE BIT(0)
+#define ICE_AQ_NVM_SANITIZE_NVM_STATE_HOST_CLEAN_SUCCESS BIT(1)
+#define ICE_AQ_NVM_SANITIZE_NVM_STATE_BMC_CLEAN_DONE BIT(2)
+#define ICE_AQ_NVM_SANITIZE_NVM_STATE_BMC_CLEAN_SUCCESS BIT(3)
+#define ICE_AQ_NVM_SANITIZE_OPERATE_HOST_CLEAN_DONE BIT(0)
+#define ICE_AQ_NVM_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS BIT(1)
+#define ICE_AQ_NVM_SANITIZE_OPERATE_BMC_CLEAN_DONE BIT(2)
+#define ICE_AQ_NVM_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS BIT(3)
+ u8 reserved[14];
+};
+
/*
* Send to PF command (indirect 0x0801) ID is only used by PF
*
@@ -2560,6 +2683,15 @@ struct ice_aqc_get_set_rss_lut {
__le32 addr_low;
};
+/* Sideband Control Interface Commands */
+/* Neighbor Device Request (indirect 0x0C00); also used for the response. */
+struct ice_aqc_neigh_dev_req {
+ __le16 sb_data_len;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* Add Tx LAN Queues (indirect 0x0C30) */
struct ice_aqc_add_txqs {
u8 num_qgrps;
@@ -2812,19 +2944,33 @@ struct ice_aqc_event_lan_overflow {
/* Debug Dump Internal Data (indirect 0xFF08) */
struct ice_aqc_debug_dump_internals {
__le16 cluster_id; /* Expresses next cluster ID in response */
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW 0
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED 2
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES 3
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW_E810 0
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_ACL_E810 1
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED_E810 2
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES_E810 3
/* EMP_DRAM only dumpable in device debug mode */
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_EMP_DRAM 4
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_LINK 5
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_EMP_DRAM_E810 4
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_LINK_E810 5
/* AUX_REGS only dumpable in device debug mode */
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_AUX_REGS 6
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_DCB 7
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P 8
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG 9
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE 21
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_MNG_TRANSACTIONS 22
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_AUX_REGS_E810 6
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_DCB_E810 7
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P_E810 8
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG_E810 9
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE_E810 21
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_MNG_TRANSACTIONS_E810 22
+
+/* Start cluster to discover first available cluster */
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_START_ALL 0
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW_E830 100
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_ACL_E830 101
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED_E830 102
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES_E830 103
+/* EMP_DRAM only dumpable in device debug mode */
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_LINK_E830 105
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_DCB_E830 107
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P_E830 108
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG_E830 109
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE_E830 121
__le16 table_id; /* Used only for non-memory clusters */
__le32 idx; /* In table entries for tables, in bytes for memory */
__le32 addr_high;
@@ -3076,6 +3222,7 @@ struct ice_aq_desc {
struct ice_aqc_nvm nvm;
struct ice_aqc_nvm_cfg nvm_cfg;
struct ice_aqc_nvm_checksum nvm_checksum;
+ struct ice_aqc_nvm_sanitization sanitization;
struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_read_write_alt_direct read_write_alt_direct;
struct ice_aqc_read_write_alt_indirect read_write_alt_indirect;
@@ -3095,6 +3242,7 @@ struct ice_aq_desc {
struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
+ struct ice_aqc_neigh_dev_req neigh_dev;
struct ice_aqc_add_txqs add_txqs;
struct ice_aqc_dis_txqs dis_txqs;
struct ice_aqc_move_txqs move_txqs;
@@ -3330,6 +3478,7 @@ enum ice_adminq_opc {
ice_aqc_opc_nvm_update_empr = 0x0709,
ice_aqc_opc_nvm_pkg_data = 0x070A,
ice_aqc_opc_nvm_pass_component_tbl = 0x070B,
+ ice_aqc_opc_nvm_sanitization = 0x070C,
/* PF/VF mailbox commands */
ice_mbx_opc_send_msg_to_pf = 0x0801,
@@ -3360,6 +3509,8 @@ enum ice_adminq_opc {
ice_aqc_opc_set_rss_lut = 0x0B03,
ice_aqc_opc_get_rss_key = 0x0B04,
ice_aqc_opc_get_rss_lut = 0x0B05,
+ /* Sideband Control Interface commands */
+ ice_aqc_opc_neighbour_device_request = 0x0C00,
/* Tx queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
diff --git a/sys/dev/ice/ice_bitops.h b/sys/dev/ice/ice_bitops.h
index 499ee41228c3..c480900596f4 100644
--- a/sys/dev/ice/ice_bitops.h
+++ b/sys/dev/ice/ice_bitops.h
@@ -445,10 +445,10 @@ ice_bitmap_set(ice_bitmap_t *dst, u16 pos, u16 num_bits)
* Note that this function assumes it is operating on a bitmap declared using
* ice_declare_bitmap.
*/
-static inline int
+static inline u16
ice_bitmap_hweight(ice_bitmap_t *bm, u16 size)
{
- int count = 0;
+ u16 count = 0;
u16 bit = 0;
while (size > (bit = ice_find_next_bit(bm, size, bit))) {
diff --git a/sys/dev/ice/ice_common.c b/sys/dev/ice/ice_common.c
index ef487bcfd0f4..ad4ea4c8e7a1 100644
--- a/sys/dev/ice/ice_common.c
+++ b/sys/dev/ice/ice_common.c
@@ -32,7 +32,6 @@
#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
-
#include "ice_flow.h"
#include "ice_switch.h"
@@ -111,6 +110,17 @@ static const char * const ice_link_mode_str_high[] = {
ice_arr_elem_idx(2, "100G_CAUI2"),
ice_arr_elem_idx(3, "100G_AUI2_AOC_ACC"),
ice_arr_elem_idx(4, "100G_AUI2"),
+ ice_arr_elem_idx(5, "200G_CR4_PAM4"),
+ ice_arr_elem_idx(6, "200G_SR4"),
+ ice_arr_elem_idx(7, "200G_FR4"),
+ ice_arr_elem_idx(8, "200G_LR4"),
+ ice_arr_elem_idx(9, "200G_DR4"),
+ ice_arr_elem_idx(10, "200G_KR4_PAM4"),
+ ice_arr_elem_idx(11, "200G_AUI4_AOC_ACC"),
+ ice_arr_elem_idx(12, "200G_AUI4"),
+ ice_arr_elem_idx(13, "200G_AUI8_AOC_ACC"),
+ ice_arr_elem_idx(14, "200G_AUI8"),
+ ice_arr_elem_idx(15, "400GBASE_FR8"),
};
/**
@@ -151,7 +161,7 @@ ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
* This function sets the MAC type of the adapter based on the
* vendor ID and device ID stored in the HW structure.
*/
-enum ice_status ice_set_mac_type(struct ice_hw *hw)
+int ice_set_mac_type(struct ice_hw *hw)
{
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -188,13 +198,43 @@ enum ice_status ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E823C_SGMII:
hw->mac_type = ICE_MAC_GENERIC;
break;
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ hw->mac_type = ICE_MAC_GENERIC_3K_E825;
+ break;
+ case ICE_DEV_ID_E830_BACKPLANE:
+ case ICE_DEV_ID_E830_QSFP56:
+ case ICE_DEV_ID_E830_SFP:
+ case ICE_DEV_ID_E830C_BACKPLANE:
+ case ICE_DEV_ID_E830_L_BACKPLANE:
+ case ICE_DEV_ID_E830C_QSFP:
+ case ICE_DEV_ID_E830_L_QSFP:
+ case ICE_DEV_ID_E830C_SFP:
+ case ICE_DEV_ID_E830_L_SFP:
+ hw->mac_type = ICE_MAC_E830;
+ break;
default:
hw->mac_type = ICE_MAC_UNKNOWN;
break;
}
ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
- return ICE_SUCCESS;
+ return 0;
+}
+
+/**
+ * ice_is_generic_mac
+ * @hw: pointer to the hardware structure
+ *
+ * returns true if mac_type is ICE_MAC_GENERIC, false if not
+ */
+bool ice_is_generic_mac(struct ice_hw *hw)
+{
+ return (hw->mac_type == ICE_MAC_GENERIC ||
+ hw->mac_type == ICE_MAC_GENERIC_3K ||
+ hw->mac_type == ICE_MAC_GENERIC_3K_E825);
}
/**
@@ -223,7 +263,7 @@ bool ice_is_e810t(struct ice_hw *hw)
case ICE_SUBDEV_ID_E810T2:
case ICE_SUBDEV_ID_E810T3:
case ICE_SUBDEV_ID_E810T4:
- case ICE_SUBDEV_ID_E810T5:
+ case ICE_SUBDEV_ID_E810T6:
case ICE_SUBDEV_ID_E810T7:
return true;
}
@@ -231,8 +271,8 @@ bool ice_is_e810t(struct ice_hw *hw)
case ICE_DEV_ID_E810C_QSFP:
switch (hw->subsystem_device_id) {
case ICE_SUBDEV_ID_E810T2:
+ case ICE_SUBDEV_ID_E810T3:
case ICE_SUBDEV_ID_E810T5:
- case ICE_SUBDEV_ID_E810T6:
return true;
}
break;
@@ -244,6 +284,17 @@ bool ice_is_e810t(struct ice_hw *hw)
}
/**
+ * ice_is_e830
+ * @hw: pointer to the hardware structure
+ *
+ * returns true if the device is E830 based, false if not.
+ */
+bool ice_is_e830(struct ice_hw *hw)
+{
+ return hw->mac_type == ICE_MAC_E830;
+}
+
+/**
* ice_is_e823
* @hw: pointer to the hardware structure
*
@@ -269,13 +320,32 @@ bool ice_is_e823(struct ice_hw *hw)
}
/**
+ * ice_is_e825c
+ * @hw: pointer to the hardware structure
+ *
+ * returns true if the device is E825-C based, false if not.
+ */
+bool ice_is_e825c(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
* Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
* configuration, flow director filters, etc.).
*/
-enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
+int ice_clear_pf_cfg(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -299,14 +369,14 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
* ice_discover_dev_caps is expected to be called before this function is
* called.
*/
-enum ice_status
+int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_manage_mac_read_resp *resp;
struct ice_aqc_manage_mac_read *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u16 flags;
u8 i;
@@ -340,7 +410,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
ETH_ALEN, ICE_NONDMA_TO_NONDMA);
break;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -401,16 +471,21 @@ static void ice_set_media_type(struct ice_port_info *pi)
* type is FIBER
*/
else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
- ICE_MEDIA_OPT_PHY_TYPE_LOW_M, 0) ||
- (phy_type_low & ICE_MEDIA_OPT_PHY_TYPE_LOW_M &&
- phy_type_low & ICE_MEDIA_C2M_PHY_TYPE_LOW_M))
+ ICE_MEDIA_OPT_PHY_TYPE_LOW_M,
+ ICE_MEDIA_OPT_PHY_TYPE_HIGH_M) ||
+ ((phy_type_low & ICE_MEDIA_OPT_PHY_TYPE_LOW_M ||
+ phy_type_high & ICE_MEDIA_OPT_PHY_TYPE_HIGH_M) &&
+ (phy_type_low & ICE_MEDIA_C2M_PHY_TYPE_LOW_M ||
+ phy_type_high & ICE_MEDIA_C2C_PHY_TYPE_HIGH_M)))
*media_type = ICE_MEDIA_FIBER;
/* else if PHY types are only DA, or DA and C2C, then media type DA */
else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
- ICE_MEDIA_DAC_PHY_TYPE_LOW_M, 0) ||
- (phy_type_low & ICE_MEDIA_DAC_PHY_TYPE_LOW_M &&
- (phy_type_low & ICE_MEDIA_C2C_PHY_TYPE_LOW_M ||
- phy_type_high & ICE_MEDIA_C2C_PHY_TYPE_HIGH_M)))
+ ICE_MEDIA_DAC_PHY_TYPE_LOW_M,
+ ICE_MEDIA_DAC_PHY_TYPE_HIGH_M) ||
+ ((phy_type_low & ICE_MEDIA_DAC_PHY_TYPE_LOW_M ||
+ phy_type_high & ICE_MEDIA_DAC_PHY_TYPE_HIGH_M) &&
+ (phy_type_low & ICE_MEDIA_C2C_PHY_TYPE_LOW_M ||
+ phy_type_high & ICE_MEDIA_C2C_PHY_TYPE_HIGH_M)))
*media_type = ICE_MEDIA_DA;
/* else if PHY types are only C2M or only C2C, then media is AUI */
else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
@@ -435,7 +510,7 @@ static void ice_set_media_type(struct ice_port_info *pi)
*
* Returns the various PHY capabilities supported on the Port (0x0600)
*/
-enum ice_status
+int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *pcaps,
struct ice_sq_cd *cd)
@@ -443,9 +518,9 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps *cmd;
u16 pcaps_size = sizeof(*pcaps);
struct ice_aq_desc desc;
- enum ice_status status;
const char *prefix;
struct ice_hw *hw;
+ int status;
cmd = &desc.params.get_phy;
@@ -510,7 +585,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
pcaps->module_type[2]);
- if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
+ if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
@@ -525,81 +600,65 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
}
/**
- * ice_aq_get_netlist_node
- * @hw: pointer to the hw struct
- * @cmd: get_link_topo AQ structure
- * @node_part_number: output node part number if node found
- * @node_handle: output node handle parameter if node found
+ * ice_aq_get_phy_equalization - function to read serdes equalizer value from
+ * firmware using admin queue command.
+ * @hw: pointer to the HW struct
+ * @data_in: represents the serdes equalization parameter requested
+ * @op_code: represents the serdes number and flag to represent tx or rx
+ * @serdes_num: represents the serdes number
+ * @output: pointer to the caller-supplied buffer to return serdes equalizer
+ *
+ * Returns 0 on success,
+ * non-zero status on error
*/
-enum ice_status
-ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
- u8 *node_part_number, u16 *node_handle)
+int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
+ u8 serdes_num, int *output)
{
+ struct ice_aqc_dnl_call_command *cmd;
+ struct ice_aqc_dnl_call buf;
struct ice_aq_desc desc;
+ int err = 0;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
- desc.params.get_link_topo = *cmd;
+ if (!hw || !output)
+ return (ICE_ERR_PARAM);
- if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
- return ICE_ERR_NOT_SUPPORTED;
+ memset(&buf, 0, sizeof(buf));
+ buf.sto.txrx_equa_reqs.data_in = CPU_TO_LE16(data_in);
+ buf.sto.txrx_equa_reqs.op_code_serdes_sel =
+ CPU_TO_LE16(op_code | (serdes_num & 0xF));
- if (node_handle)
- *node_handle =
- LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
- if (node_part_number)
- *node_part_number = desc.params.get_link_topo.node_part_num;
+ cmd = &desc.params.dnl_call;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_RD |
+ ICE_AQ_FLAG_SI);
+ desc.datalen = CPU_TO_LE16(sizeof(struct ice_aqc_dnl_call));
+ cmd->activity_id = CPU_TO_LE16(ICE_AQC_ACT_ID_DNL);
+ cmd->ctx = 0;
- return ICE_SUCCESS;
+ err = ice_aq_send_cmd(hw, &desc, &buf,
+ sizeof(struct ice_aqc_dnl_call), NULL);
+ if (!err)
+ *output = buf.sto.txrx_equa_resp.val;
+
+ return err;
}
-#define MAX_NETLIST_SIZE 10
+#define ice_get_link_status_data_ver(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ ICE_GET_LINK_STATUS_DATA_V2 : ICE_GET_LINK_STATUS_DATA_V1)
+
/**
- * ice_find_netlist_node
- * @hw: pointer to the hw struct
- * @node_type_ctx: type of netlist node to look for
- * @node_part_number: node part number to look for
- * @node_handle: output parameter if node found - optional
+ * ice_get_link_status_datalen
+ * @hw: pointer to the HW struct
*
- * Find and return the node handle for a given node type and part number in the
- * netlist. When found ICE_SUCCESS is returned, ICE_ERR_DOES_NOT_EXIST
- * otherwise. If node_handle provided, it would be set to found node handle.
+ * return Get Link Status datalen
*/
-enum ice_status
-ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
- u16 *node_handle)
+static u16 ice_get_link_status_datalen(struct ice_hw *hw)
{
- struct ice_aqc_get_link_topo cmd;
- u8 rec_node_part_number;
- u16 rec_node_handle;
- u8 idx;
-
- for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
- enum ice_status status;
-
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.addr.topo_params.node_type_ctx =
- (node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
- cmd.addr.topo_params.index = idx;
-
- status = ice_aq_get_netlist_node(hw, &cmd,
- &rec_node_part_number,
- &rec_node_handle);
- if (status)
- return status;
-
- if (rec_node_part_number == node_part_number) {
- if (node_handle)
- *node_handle = rec_node_handle;
- return ICE_SUCCESS;
- }
- }
-
- return ICE_ERR_DOES_NOT_EXIST;
+ return (ice_get_link_status_data_ver(hw) ==
+ ICE_GET_LINK_STATUS_DATA_V1) ? ICE_GET_LINK_STATUS_DATALEN_V1 :
+ ICE_GET_LINK_STATUS_DATALEN_V2;
}
-#define ice_get_link_status_datalen(hw) ICE_GET_LINK_STATUS_DATALEN_V1
-
/**
* ice_aq_get_link_info
* @pi: port information structure
@@ -609,7 +668,7 @@ ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
*
* Get Link Status (0x607). Returns the link status of the adapter.
*/
-enum ice_status
+int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
@@ -619,9 +678,9 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_fc_info *hw_fc_info;
bool tx_pause, rx_pause;
struct ice_aq_desc desc;
- enum ice_status status;
struct ice_hw *hw;
u16 cmd_flags;
+ int status;
if (!pi)
return ICE_ERR_PARAM;
@@ -639,7 +698,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
status = ice_aq_send_cmd(hw, &desc, &link_data,
ice_get_link_status_datalen(hw), cd);
- if (status != ICE_SUCCESS)
+ if (status)
return status;
/* save off old link status information */
@@ -696,7 +755,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
/* flag cleared so calling functions don't call AQ again */
pi->phy.get_link_info = false;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -721,17 +780,28 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
* Also, because we are operating on transmit timer and fc
* threshold of LFC, we don't turn on any bit in tx_tmr_priority
*/
-#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
+#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
- /* Retrieve the transmit timer */
- val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
- tx_timer_val = val &
- PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
- cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
+ if ((hw)->mac_type == ICE_MAC_E830) {
+ /* Retrieve the transmit timer */
+ val = rd32(hw, E830_PRTMAC_CL01_PAUSE_QUANTA);
+ tx_timer_val = val & E830_PRTMAC_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_M;
+ cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
- /* Retrieve the fc threshold */
- val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
- fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
+ /* Retrieve the fc threshold */
+ val = rd32(hw, E830_PRTMAC_CL01_QUANTA_THRESH);
+ fc_thres_val = val & E830_PRTMAC_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_M;
+ } else {
+ /* Retrieve the transmit timer */
+ val = rd32(hw, E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(E800_IDX_OF_LFC));
+ tx_timer_val = val &
+ E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
+ cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
+
+ /* Retrieve the fc threshold */
+ val = rd32(hw, E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(E800_IDX_OF_LFC));
+ fc_thres_val = val & E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
+ }
cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
}
@@ -745,7 +815,7 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
*
* Set MAC configuration (0x0603)
*/
-enum ice_status
+int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
struct ice_sq_cd *cd)
{
@@ -772,10 +842,10 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
* @hw: pointer to the HW struct
*/
-static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
+int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
struct ice_switch_info *sw;
- enum ice_status status;
+ int status;
hw->switch_info = (struct ice_switch_info *)
ice_malloc(hw, sizeof(*hw->switch_info));
@@ -793,7 +863,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
ice_free(hw, hw->switch_info);
return status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -866,7 +936,7 @@ ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
* ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
* @hw: pointer to the HW struct
*/
-static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
+void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
}
@@ -911,7 +981,7 @@ void ice_print_rollback_msg(struct ice_hw *hw)
orom = &hw->flash.orom;
nvm = &hw->flash.nvm;
- SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
+ (void)SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
nvm->major, nvm->minor, nvm->eetrack, orom->major,
orom->build, orom->patch);
ice_warn(hw,
@@ -934,12 +1004,12 @@ void ice_set_umac_shared(struct ice_hw *hw)
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_init_hw(struct ice_hw *hw)
+int ice_init_hw(struct ice_hw *hw)
{
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
u16 mac_buf_len;
void *mac_buf;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -957,6 +1027,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
return status;
ice_get_itr_intrl_gran(hw);
+ hw->fw_vsi_num = ICE_DFLT_VSI_INVAL;
+
status = ice_create_all_ctrlq(hw);
if (status)
goto err_unroll_cqinit;
@@ -987,9 +1059,11 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
ice_print_rollback_msg(hw);
- status = ice_clear_pf_cfg(hw);
- if (status)
- goto err_unroll_cqinit;
+ if (!hw->skip_clear_pf) {
+ status = ice_clear_pf_cfg(hw);
+ if (status)
+ goto err_unroll_cqinit;
+ }
ice_clear_pxe_mode(hw);
@@ -1005,6 +1079,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
goto err_unroll_cqinit;
}
+ hw->port_info->loopback_mode = ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_NORMAL;
+
/* set the back pointer to HW */
hw->port_info->hw = hw;
@@ -1088,7 +1164,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
goto err_unroll_fltr_mgmt_struct;
ice_init_lock(&hw->tnl_lock);
- return ICE_SUCCESS;
+ return 0;
err_unroll_fltr_mgmt_struct:
ice_cleanup_fltr_mgmt_struct(hw);
@@ -1135,7 +1211,7 @@ void ice_deinit_hw(struct ice_hw *hw)
* ice_check_reset - Check to see if a global reset is complete
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_check_reset(struct ice_hw *hw)
+int ice_check_reset(struct ice_hw *hw)
{
u32 cnt, reg = 0, grst_timeout, uld_mask, reset_wait_cnt;
@@ -1187,7 +1263,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
return ICE_ERR_RESET_FAILED;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1197,7 +1273,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
* If a global reset has been triggered, this function checks
* for its completion and then issues the PF reset
*/
-static enum ice_status ice_pf_reset(struct ice_hw *hw)
+static int ice_pf_reset(struct ice_hw *hw)
{
u32 cnt, reg, reset_wait_cnt, cfg_lock_timeout;
@@ -1212,7 +1288,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
if (ice_check_reset(hw))
return ICE_ERR_RESET_FAILED;
- return ICE_SUCCESS;
+ return 0;
}
/* Reset the PF */
@@ -1240,7 +1316,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
return ICE_ERR_RESET_FAILED;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1255,7 +1331,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
* This has to be cleared using ice_clear_pxe_mode again, once the AQ
* interface has been restored in the rebuild flow.
*/
-enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
+int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
u32 val = 0;
@@ -1290,7 +1366,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
*
* Copies rxq context from dense structure to HW register space
*/
-static enum ice_status
+static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
u8 i;
@@ -1310,7 +1386,7 @@ ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
*((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1321,7 +1397,7 @@ ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
*
* Copies rxq context from HW register space to dense structure
*/
-static enum ice_status
+static int
ice_copy_rxq_ctx_from_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
u8 i;
@@ -1341,7 +1417,7 @@ ice_copy_rxq_ctx_from_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
}
- return ICE_SUCCESS;
+ return 0;
}
/* LAN Rx Queue Context */
@@ -1380,7 +1456,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* it to HW register space and enables the hardware to prefetch descriptors
* instead of only fetching them on demand
*/
-enum ice_status
+int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
@@ -1404,12 +1480,12 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
* Read rxq context from HW register space and then converts it from dense
* structure to sparse
*/
-enum ice_status
+int
ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
- enum ice_status status;
+ int status;
if (!rlan_ctx)
return ICE_ERR_BAD_PTR;
@@ -1428,7 +1504,7 @@ ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
*
* Clears rxq context in HW register space
*/
-enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
+int ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
u8 i;
@@ -1439,7 +1515,7 @@ enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
- return ICE_SUCCESS;
+ return 0;
}
/* LAN Tx Queue Context used for set Tx config by ice_aqc_opc_add_txqs,
@@ -1486,7 +1562,7 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
*
* Copies Tx completion queue context from dense structure to HW register space
*/
-static enum ice_status
+static int
ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
u32 tx_cmpltnq_index)
{
@@ -1507,7 +1583,7 @@ ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
*((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
}
- return ICE_SUCCESS;
+ return 0;
}
/* LAN Tx Completion Queue Context */
@@ -1535,7 +1611,7 @@ static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
* Converts completion queue context from sparse to dense structure and then
* writes it to HW register space
*/
-enum ice_status
+int
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
u32 tx_cmpltnq_index)
@@ -1553,7 +1629,7 @@ ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
*
* Clears Tx completion queue context in HW register space
*/
-enum ice_status
+int
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
{
u8 i;
@@ -1565,7 +1641,7 @@ ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1576,7 +1652,7 @@ ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
*
* Copies doorbell queue context from dense structure to HW register space
*/
-static enum ice_status
+static int
ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
u32 tx_drbell_q_index)
{
@@ -1597,7 +1673,7 @@ ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
*((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
}
- return ICE_SUCCESS;
+ return 0;
}
/* LAN Tx Doorbell Queue Context info */
@@ -1626,7 +1702,7 @@ static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
* Converts doorbell queue context from sparse to dense structure and then
* writes it to HW register space
*/
-enum ice_status
+int
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
u32 tx_drbell_q_index)
@@ -1645,7 +1721,7 @@ ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
*
* Clears doorbell queue context in HW register space
*/
-enum ice_status
+int
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
{
u8 i;
@@ -1657,7 +1733,130 @@ ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
- return ICE_SUCCESS;
+ return 0;
+}
+
+/* Sideband Queue command wrappers */
+
+/**
+ * ice_get_sbq - returns the right control queue to use for sideband
+ * @hw: pointer to the hardware structure
+ */
+static struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
+{
+ if (!ice_is_generic_mac(hw))
+ return &hw->adminq;
+ return &hw->sbq;
+}
+
+/**
+ * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ * @cd: pointer to command details structure
+ */
+static int
+ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
+ void *buf, u16 buf_size, struct ice_sq_cd *cd)
+{
+ return ice_sq_send_cmd(hw, ice_get_sbq(hw), (struct ice_aq_desc *)desc,
+ buf, buf_size, cd);
+}
+
+/**
+ * ice_sbq_send_cmd_nolock - send Sideband Queue command to Sideband Queue
+ * but do not lock sq_lock
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ * @cd: pointer to command details structure
+ */
+static int
+ice_sbq_send_cmd_nolock(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
+ void *buf, u16 buf_size, struct ice_sq_cd *cd)
+{
+ return ice_sq_send_cmd_nolock(hw, ice_get_sbq(hw),
+ (struct ice_aq_desc *)desc, buf,
+ buf_size, cd);
+}
+
+/**
+ * ice_sbq_rw_reg_lp - Fill Sideband Queue command, with lock parameter
+ * @hw: pointer to the HW struct
+ * @in: message info to be filled in descriptor
+ * @flag: flag to fill desc structure
+ * @lock: true to lock the sq_lock (the usual case); false if the sq_lock has
+ * already been locked at a higher level
+ */
+int ice_sbq_rw_reg_lp(struct ice_hw *hw, struct ice_sbq_msg_input *in,
+ u16 flag, bool lock)
+{
+ struct ice_sbq_cmd_desc desc = {0};
+ struct ice_sbq_msg_req msg = {0};
+ u16 msg_len;
+ int status;
+
+ msg_len = sizeof(msg);
+
+ msg.dest_dev = in->dest_dev;
+ msg.opcode = in->opcode;
+ msg.flags = ICE_SBQ_MSG_FLAGS;
+ msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
+ msg.msg_addr_low = CPU_TO_LE16(in->msg_addr_low);
+ msg.msg_addr_high = CPU_TO_LE32(in->msg_addr_high);
+
+ if (in->opcode)
+ msg.data = CPU_TO_LE32(in->data);
+ else
+ /* data read comes back in completion, so shorten the struct by
+ * sizeof(msg.data)
+ */
+ msg_len -= sizeof(msg.data);
+
+ desc.flags = CPU_TO_LE16(flag);
+ desc.opcode = CPU_TO_LE16(ice_sbq_opc_neigh_dev_req);
+ desc.param0.cmd_len = CPU_TO_LE16(msg_len);
+ if (lock)
+ status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
+ else
+ status = ice_sbq_send_cmd_nolock(hw, &desc, &msg, msg_len,
+ NULL);
+ if (!status && !in->opcode)
+ in->data = LE32_TO_CPU
+ (((struct ice_sbq_msg_cmpl *)&msg)->data);
+ return status;
+}
+
+/**
+ * ice_sbq_rw_reg - Fill Sideband Queue command
+ * @hw: pointer to the HW struct
+ * @in: message info to be filled in descriptor
+ * @flag: flag to fill desc structure
+ */
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag)
+{
+ return ice_sbq_rw_reg_lp(hw, in, flag, true);
+}
+
+/**
+ * ice_sbq_lock - Lock the sideband queue's sq_lock
+ * @hw: pointer to the HW struct
+ */
+void ice_sbq_lock(struct ice_hw *hw)
+{
+ ice_acquire_lock(&ice_get_sbq(hw)->sq_lock);
+}
+
+/**
+ * ice_sbq_unlock - Unlock the sideband queue's sq_lock
+ * @hw: pointer to the HW struct
+ */
+void ice_sbq_unlock(struct ice_hw *hw)
+{
+ ice_release_lock(&ice_get_sbq(hw)->sq_lock);
}
/* FW Admin Queue command wrappers */
@@ -1702,17 +1901,17 @@ static bool ice_should_retry_sq_send_cmd(u16 opcode)
* Retry sending the FW Admin Queue command, multiple times, to the FW Admin
* Queue if the EBUSY AQ error is returned.
*/
-static enum ice_status
+static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aq_desc desc_cpy;
- enum ice_status status;
bool is_cmd_for_retry;
u8 *buf_cpy = NULL;
u8 idx = 0;
u16 opcode;
+ int status;
opcode = LE16_TO_CPU(desc->opcode);
is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
@@ -1732,7 +1931,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
do {
status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
- if (!is_cmd_for_retry || status == ICE_SUCCESS ||
+ if (!is_cmd_for_retry || !status ||
hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
break;
@@ -1763,7 +1962,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
*
* Helper function to send FW Admin Queue commands to the FW Admin Queue.
*/
-enum ice_status
+int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
@@ -1777,11 +1976,11 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
*
* Get the firmware version (0x0001) from the admin queue commands
*/
-enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
+int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
struct ice_aqc_get_ver *resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
resp = &desc.params.get_ver;
@@ -1812,7 +2011,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
*
* Send the driver version (0x0002) to the firmware
*/
-enum ice_status
+int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd)
{
@@ -1849,7 +2048,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well (0x0003).
*/
-enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
+int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
struct ice_aqc_q_shutdown *cmd;
struct ice_aq_desc desc;
@@ -1876,8 +2075,8 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
* Requests common resource using the admin queue commands (0x0008).
* When attempting to acquire the Global Config Lock, the driver can
* learn of three states:
- * 1) ICE_SUCCESS - acquired lock, and can perform download package
- * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
+ * 1) 0 - acquired lock, and can perform download package
+ * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
* 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
* successfully downloaded the package; the driver does
* not have to download the package and can continue
@@ -1890,14 +2089,14 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
* will likely get an error propagated back to it indicating the Download
* Package, Update Package or the Release Resource AQ commands timed out.
*/
-static enum ice_status
+static int
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
struct ice_sq_cd *cd)
{
struct ice_aqc_req_res *cmd_resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -1927,7 +2126,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
*timeout = LE32_TO_CPU(cmd_resp->timeout);
- return ICE_SUCCESS;
+ return 0;
} else if (LE16_TO_CPU(cmd_resp->status) ==
ICE_AQ_RES_GLBL_IN_PROG) {
*timeout = LE32_TO_CPU(cmd_resp->timeout);
@@ -1961,7 +2160,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
*
* release common resource using the admin queue commands (0x0009)
*/
-static enum ice_status
+static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
struct ice_sq_cd *cd)
{
@@ -1989,14 +2188,14 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
*
* This function will attempt to acquire the ownership of a resource.
*/
-enum ice_status
+int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS 10
u32 delay = ICE_RES_POLLING_DELAY_MS;
u32 time_left = timeout;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -2050,8 +2249,8 @@ ice_acquire_res_exit:
*/
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
- enum ice_status status;
u32 total_delay = 0;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -2079,7 +2278,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
*
* Helper function to allocate/free resources using the admin queue commands
*/
-enum ice_status
+int
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
@@ -2114,12 +2313,12 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
* @btm: allocate from bottom
* @res: pointer to array that will receive the resources
*/
-enum ice_status
+int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = ice_struct_size(buf, elem, num);
buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
@@ -2153,11 +2352,11 @@ ice_alloc_res_exit:
* @num: number of resources
* @res: pointer to array that contains the resources to free
*/
-enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = ice_struct_size(buf, elem, num);
buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
@@ -2216,10 +2415,10 @@ ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
u8 i;
if (dbg)
- ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %u\n", prefix,
caps->led_pin_num);
else
- ice_info(hw, "%s: led_pin_num = %d\n", prefix,
+ ice_info(hw, "%s: led_pin_num = %u\n", prefix,
caps->led_pin_num);
for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
@@ -2227,10 +2426,10 @@ ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
continue;
if (dbg)
- ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "%s: led[%u] = %u\n",
prefix, i, caps->led[i]);
else
- ice_info(hw, "%s: led[%d] = %d\n", prefix, i,
+ ice_info(hw, "%s: led[%u] = %u\n", prefix, i,
caps->led[i]);
}
}
@@ -2249,10 +2448,10 @@ ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
u8 i;
if (dbg)
- ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %u\n", prefix,
caps->sdp_pin_num);
else
- ice_info(hw, "%s: sdp_pin_num = %d\n", prefix,
+ ice_info(hw, "%s: sdp_pin_num = %u\n", prefix,
caps->sdp_pin_num);
for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
@@ -2260,10 +2459,10 @@ ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
continue;
if (dbg)
- ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%u] = %u\n",
prefix, i, caps->sdp[i]);
else
- ice_info(hw, "%s: sdp[%d] = %d\n", prefix,
+ ice_info(hw, "%s: sdp[%u] = %u\n", prefix,
i, caps->sdp[i]);
}
}
@@ -2294,86 +2493,86 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
switch (cap) {
case ICE_AQC_CAPS_SWITCHING_MODE:
caps->switching_mode = number;
- ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %u\n", prefix,
caps->switching_mode);
break;
case ICE_AQC_CAPS_MANAGEABILITY_MODE:
caps->mgmt_mode = number;
caps->mgmt_protocols_mctp = logical_id;
- ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %u\n", prefix,
caps->mgmt_mode);
- ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %u\n", prefix,
caps->mgmt_protocols_mctp);
break;
case ICE_AQC_CAPS_OS2BMC:
caps->os2bmc = number;
- ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %d\n", prefix, caps->os2bmc);
+ ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %u\n", prefix, caps->os2bmc);
break;
case ICE_AQC_CAPS_VALID_FUNCTIONS:
caps->valid_functions = number;
- ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = 0x%x\n", prefix,
caps->valid_functions);
break;
case ICE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
- ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %u\n", prefix,
caps->sr_iov_1_1);
break;
case ICE_AQC_CAPS_VMDQ:
caps->vmdq = (number == 1);
- ice_debug(hw, ICE_DBG_INIT, "%s: vmdq = %d\n", prefix, caps->vmdq);
+ ice_debug(hw, ICE_DBG_INIT, "%s: vmdq = %u\n", prefix, caps->vmdq);
break;
case ICE_AQC_CAPS_802_1QBG:
caps->evb_802_1_qbg = (number == 1);
- ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number);
+ ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %u\n", prefix, number);
break;
case ICE_AQC_CAPS_802_1BR:
caps->evb_802_1_qbh = (number == 1);
- ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %d\n", prefix, number);
+ ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %u\n", prefix, number);
break;
case ICE_AQC_CAPS_DCB:
caps->dcb = (number == 1);
caps->active_tc_bitmap = logical_id;
caps->maxtc = phys_id;
- ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
- ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %u\n", prefix, caps->dcb);
+ ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = 0x%x\n", prefix,
caps->active_tc_bitmap);
- ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
+ ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %u\n", prefix, caps->maxtc);
break;
case ICE_AQC_CAPS_ISCSI:
caps->iscsi = (number == 1);
- ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %d\n", prefix, caps->iscsi);
+ ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %u\n", prefix, caps->iscsi);
break;
case ICE_AQC_CAPS_RSS:
caps->rss_table_size = number;
caps->rss_table_entry_width = logical_id;
- ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %u\n", prefix,
caps->rss_table_size);
- ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %u\n", prefix,
caps->rss_table_entry_width);
break;
case ICE_AQC_CAPS_RXQS:
caps->num_rxq = number;
caps->rxq_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %u\n", prefix,
caps->num_rxq);
- ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %u\n", prefix,
caps->rxq_first_id);
break;
case ICE_AQC_CAPS_TXQS:
caps->num_txq = number;
caps->txq_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %u\n", prefix,
caps->num_txq);
- ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %u\n", prefix,
caps->txq_first_id);
break;
case ICE_AQC_CAPS_MSIX:
caps->num_msix_vectors = number;
caps->msix_vector_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %u\n", prefix,
caps->num_msix_vectors);
- ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %u\n", prefix,
caps->msix_vector_first_id);
break;
case ICE_AQC_CAPS_NVM_MGMT:
@@ -2400,30 +2599,30 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
break;
case ICE_AQC_CAPS_CEM:
caps->mgmt_cem = (number == 1);
- ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %u\n", prefix,
caps->mgmt_cem);
break;
case ICE_AQC_CAPS_IWARP:
caps->iwarp = (number == 1);
- ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %d\n", prefix, caps->iwarp);
+ ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %u\n", prefix, caps->iwarp);
break;
case ICE_AQC_CAPS_ROCEV2_LAG:
caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
- ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
prefix, caps->roce_lag);
break;
case ICE_AQC_CAPS_LED:
if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
caps->led[phys_id] = true;
caps->led_pin_num++;
- ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = 1\n", prefix, phys_id);
+ ice_debug(hw, ICE_DBG_INIT, "%s: led[%u] = 1\n", prefix, phys_id);
}
break;
case ICE_AQC_CAPS_SDP:
if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
caps->sdp[phys_id] = true;
caps->sdp_pin_num++;
- ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = 1\n", prefix, phys_id);
+ ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%u] = 1\n", prefix, phys_id);
}
break;
case ICE_AQC_CAPS_WR_CSR_PROT:
@@ -2439,16 +2638,16 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->acpi_prog_mthd = !!(phys_id &
ICE_ACPI_PROG_MTHD_M);
caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
- ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %u\n", prefix,
caps->num_wol_proxy_fltr);
- ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %u\n", prefix,
caps->wol_proxy_vsi_seid);
- ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %u\n",
prefix, caps->apm_wol_support);
break;
case ICE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
- ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %u\n",
prefix, caps->max_mtu);
break;
case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
@@ -2482,15 +2681,15 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->ext_topo_dev_img_ver_schema[index] =
(phys_id & ICE_EXT_TOPO_DEV_IMG_VER_SCHEMA) != 0;
ice_debug(hw, ICE_DBG_INIT,
- "%s: ext_topo_dev_img_ver_high[%d] = %d\n",
+ "%s: ext_topo_dev_img_ver_high[%d] = %u\n",
prefix, index,
caps->ext_topo_dev_img_ver_high[index]);
ice_debug(hw, ICE_DBG_INIT,
- "%s: ext_topo_dev_img_ver_low[%d] = %d\n",
+ "%s: ext_topo_dev_img_ver_low[%d] = %u\n",
prefix, index,
caps->ext_topo_dev_img_ver_low[index]);
ice_debug(hw, ICE_DBG_INIT,
- "%s: ext_topo_dev_img_part_num[%d] = %d\n",
+ "%s: ext_topo_dev_img_part_num[%d] = %u\n",
prefix, index,
caps->ext_topo_dev_img_part_num[index]);
ice_debug(hw, ICE_DBG_INIT,
@@ -2520,6 +2719,11 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: orom_recovery_update = %d\n",
prefix, caps->orom_recovery_update);
break;
+ case ICE_AQC_CAPS_NEXT_CLUSTER_ID:
+ caps->next_cluster_id_support = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT, "%s: next_cluster_id_support = %d\n",
+ prefix, caps->next_cluster_id_support);
+ break;
default:
/* Not one of the recognized common capabilities */
found = false;
@@ -2546,7 +2750,7 @@ ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
if (hw->dev_caps.num_funcs > 4) {
/* Max 4 TCs per port */
caps->maxtc = 4;
- ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
+ ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %u (based on #ports)\n",
caps->maxtc);
if (caps->iwarp) {
ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
@@ -2578,9 +2782,9 @@ ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
func_p->num_allocd_vfs = number;
func_p->vf_base_id = logical_id;
- ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %u\n",
func_p->num_allocd_vfs);
- ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %u\n",
func_p->vf_base_id);
}
@@ -2597,9 +2801,9 @@ ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
struct ice_aqc_list_caps_elem *cap)
{
func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
- ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %u\n",
LE32_TO_CPU(cap->number));
- ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %u\n",
func_p->guar_num_vsi);
}
@@ -2672,7 +2876,7 @@ ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
u32 number = LE32_TO_CPU(cap->number);
dev_p->num_funcs = ice_hweight32(number);
- ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %u\n",
dev_p->num_funcs);
}
@@ -2692,7 +2896,7 @@ ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
u32 number = LE32_TO_CPU(cap->number);
dev_p->num_vfs_exposed = number;
- ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %u\n",
dev_p->num_vfs_exposed);
}
@@ -2711,7 +2915,7 @@ ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
u32 number = LE32_TO_CPU(cap->number);
dev_p->num_vsi_allocd_to_host = number;
- ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %u\n",
dev_p->num_vsi_allocd_to_host);
}
@@ -2730,15 +2934,15 @@ ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
dev_p->nac_topo.mode = LE32_TO_CPU(cap->number);
dev_p->nac_topo.id = LE32_TO_CPU(cap->phys_id) & ICE_NAC_TOPO_ID_M;
- ice_info(hw, "PF is configured in %s mode with IP instance ID %d\n",
- (dev_p->nac_topo.mode == 0) ? "primary" : "secondary",
- dev_p->nac_topo.id);
+ ice_info(hw, "PF is configured in %s mode with IP instance ID %u\n",
+ (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ?
+ "primary" : "secondary", dev_p->nac_topo.id);
ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
!!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
!!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
- ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %u\n",
dev_p->nac_topo.id);
}
@@ -2813,7 +3017,7 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
default:
/* Don't list common capabilities as unknown */
if (!found)
- ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%u]: 0x%x\n",
i, cap);
break;
}
@@ -2826,6 +3030,81 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_aq_get_netlist_node
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ */
+int
+ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
+
+ if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
+ return ICE_ERR_NOT_SUPPORTED;
+
+ if (node_handle)
+ *node_handle =
+ LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
+
+ return 0;
+}
+
+#define MAX_NETLIST_SIZE 10
+/**
+ * ice_find_netlist_node
+ * @hw: pointer to the hw struct
+ * @node_type_ctx: type of netlist node to look for
+ * @node_part_number: node part number to look for
+ * @node_handle: output parameter if node found - optional
+ *
+ * Scan the netlist for a node handle of the given node type and part number.
+ *
+ * If node_handle is non-NULL it will be modified on function exit. It is only
+ * valid if the function returns zero, and should be ignored on any non-zero
+ * return value.
+ *
+ * Returns: 0 if the node is found, ICE_ERR_DOES_NOT_EXIST if no handle was
+ * found, and an error code on failure to access the AQ.
+ */
+int
+ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
+ u16 *node_handle)
+{
+ u8 idx;
+
+ for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
+ struct ice_aqc_get_link_topo cmd;
+ u8 rec_node_part_number;
+ int status;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.addr.topo_params.node_type_ctx =
+ (node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
+ cmd.addr.topo_params.index = idx;
+
+ status = ice_aq_get_netlist_node(hw, &cmd,
+ &rec_node_part_number,
+ node_handle);
+ if (status)
+ return status;
+
+ if (rec_node_part_number == node_part_number)
+ return 0;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
* ice_aq_list_caps - query function/device capabilities
* @hw: pointer to the HW struct
* @buf: a buffer to hold the capabilities
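The two netlist helpers added in this hunk are intended to be used together: ice_aq_get_netlist_node issues one get_link_topo request for a single index, while ice_find_netlist_node walks up to MAX_NETLIST_SIZE indexes looking for a matching part number. A minimal usage sketch, with EXAMPLE_NODE_TYPE and EXAMPLE_PART_NUM standing in for real ICE_AQC_LINK_TOPO_* constants:

/* Sketch only: locate a node of an assumed type/part number and save its
 * handle; *handle is meaningful only when 0 is returned.
 */
static int example_find_node(struct ice_hw *hw, u16 *handle)
{
        int status;

        status = ice_find_netlist_node(hw, EXAMPLE_NODE_TYPE,
                                       EXAMPLE_PART_NUM, handle);
        if (status == ICE_ERR_DOES_NOT_EXIST)
                ice_debug(hw, ICE_DBG_INIT, "node not present in netlist\n");

        return status;
}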
@@ -2844,13 +3123,13 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
* buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
* firmware could return) to avoid this.
*/
-static enum ice_status
+static int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aqc_list_caps *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.get_cap;
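This hunk begins the wholesale conversion of return types from enum ice_status to plain int that runs through the rest of ice_common.c and ice_common.h below: success becomes a literal 0 and the ICE_ERR_* codes are returned unchanged. A caller-side sketch of the resulting pattern, using a hypothetical wrapper around the real ice_get_caps():

/* Hypothetical caller: with int returns, "if (status)" replaces explicit
 * comparisons against ICE_SUCCESS.
 */
static int example_query_caps(struct ice_hw *hw)
{
        int status;

        status = ice_get_caps(hw);
        if (status)
                return status;  /* an ICE_ERR_* value propagates as an int */

        return 0;               /* 0 now means success */
}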
@@ -2875,12 +3154,12 @@ ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
* Read the device capabilities and extract them into the dev_caps structure
* for later use.
*/
-static enum ice_status
+static int
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
- enum ice_status status;
u32 cap_count = 0;
void *cbuf;
+ int status;
cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
if (!cbuf)
@@ -2909,12 +3188,12 @@ ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
* Read the function capabilities and extract them into the func_caps structure
* for later use.
*/
-static enum ice_status
+static int
ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
{
- enum ice_status status;
u32 cap_count = 0;
void *cbuf;
+ int status;
cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
if (!cbuf)
@@ -3002,9 +3281,9 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
* ice_get_caps - get info about the HW
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_get_caps(struct ice_hw *hw)
+int ice_get_caps(struct ice_hw *hw)
{
- enum ice_status status;
+ int status;
status = ice_discover_dev_caps(hw, &hw->dev_caps);
if (status)
@@ -3022,7 +3301,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
*
* This function is used to write MAC address to the NVM (0x0108).
*/
-enum ice_status
+int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd)
{
@@ -3044,7 +3323,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
*
* Tell the firmware that the driver is taking over from PXE (0x0110).
*/
-static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
+static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -3078,7 +3357,7 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
*
* Set Physical port parameters (0x0203)
*/
-enum ice_status
+int
ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
bool save_bad_pac, bool pad_short_pac, bool double_vlan,
struct ice_sq_cd *cd)
@@ -3091,6 +3370,8 @@ ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
cmd = &desc.params.set_port_params;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
+ cmd->lb_mode = pi->loopback_mode |
+ ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_VALID;
cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
if (save_bad_pac)
cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
@@ -3238,6 +3519,18 @@ ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
case ICE_PHY_TYPE_HIGH_100G_AUI2:
speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
break;
+ case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
+ case ICE_PHY_TYPE_HIGH_200G_SR4:
+ case ICE_PHY_TYPE_HIGH_200G_FR4:
+ case ICE_PHY_TYPE_HIGH_200G_LR4:
+ case ICE_PHY_TYPE_HIGH_200G_DR4:
+ case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
+ case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_200G_AUI4:
+ case ICE_PHY_TYPE_HIGH_200G_AUI8_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_200G_AUI8:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
+ break;
default:
speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
break;
@@ -3311,12 +3604,12 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
* mode as the PF may not have the privilege to set some of the PHY Config
* parameters. This status will be indicated by the command response (0x0601).
*/
-enum ice_status
+int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!cfg)
return ICE_ERR_PARAM;
@@ -3349,7 +3642,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
- status = ICE_SUCCESS;
+ status = 0;
if (!status)
pi->phy.curr_user_phy_cfg = *cfg;
@@ -3361,10 +3654,10 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
* ice_update_link_info - update status of the HW network link
* @pi: port info structure of the interested logical port
*/
-enum ice_status ice_update_link_info(struct ice_port_info *pi)
+int ice_update_link_info(struct ice_port_info *pi)
{
struct ice_link_status *li;
- enum ice_status status;
+ int status;
if (!pi)
return ICE_ERR_PARAM;
@@ -3388,7 +3681,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
pcaps, NULL);
- if (status == ICE_SUCCESS)
+ if (!status)
ice_memcpy(li->module_type, &pcaps->module_type,
sizeof(li->module_type),
ICE_NONDMA_TO_NONDMA);
@@ -3488,7 +3781,7 @@ enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
* @cfg: PHY configuration data to set FC mode
* @req_mode: FC mode to configure
*/
-static enum ice_status
+static int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fc_mode req_mode)
{
@@ -3501,7 +3794,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
case ICE_FC_AUTO:
{
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
+ int status;
pcaps = (struct ice_aqc_get_phy_caps_data *)
ice_malloc(pi->hw, sizeof(*pcaps));
@@ -3548,7 +3841,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
cache_data.data.curr_user_fc_req = req_mode;
ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3559,13 +3852,13 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
*
* Set the requested flow control mode.
*/
-enum ice_status
+int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi || !aq_failures)
return ICE_ERR_BAD_PTR;
@@ -3620,7 +3913,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
for (retry_count = 0; retry_count < retry_max; retry_count++) {
status = ice_update_link_info(pi);
- if (status == ICE_SUCCESS)
+ if (!status)
break;
ice_msec_delay(100, true);
@@ -3706,13 +3999,13 @@ ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
* @cfg: PHY configuration data to set FEC mode
* @fec: FEC mode to configure
*/
-enum ice_status
+int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec)
{
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw;
+ int status = 0;
if (!pi || !cfg)
return ICE_ERR_BAD_PTR;
@@ -3802,10 +4095,10 @@ out:
* The variable link_up is invalid if status is non zero. As a
* result of this call, link status reporting becomes enabled
*/
-enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
+int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
struct ice_phy_info *phy_info;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
if (!pi || !link_up)
return ICE_ERR_PARAM;
@@ -3833,11 +4126,11 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
*
* Sets up the link and restarts the Auto-Negotiation over the link.
*/
-enum ice_status
+int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd)
{
- enum ice_status status = ICE_ERR_AQ_ERROR;
+ int status = ICE_ERR_AQ_ERROR;
struct ice_aqc_restart_an *cmd;
struct ice_aq_desc desc;
@@ -3861,7 +4154,7 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
else
pi->phy.curr_user_phy_cfg.caps &= ~ICE_AQC_PHY_EN_LINK;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3873,7 +4166,7 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
*
* Set event mask (0x0613)
*/
-enum ice_status
+int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd)
{
@@ -3898,7 +4191,7 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
*
* Enable/disable loopback on a given port
*/
-enum ice_status
+int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_lb *cmd;
@@ -3921,7 +4214,7 @@ ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
*
* Set LED value for the given port (0x06e9)
*/
-enum ice_status
+int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd)
{
@@ -3956,14 +4249,14 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
*
* Read/Write SFF EEPROM (0x06EE)
*/
-enum ice_status
+int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd)
{
struct ice_aqc_sff_eeprom *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!data || (mem_addr & 0xff00))
return ICE_ERR_PARAM;
@@ -3996,7 +4289,7 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
* Program Topology Device NVM (0x06F2)
*
*/
-enum ice_status
+int
ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
struct ice_aqc_link_topo_params *topo_params,
struct ice_sq_cd *cd)
@@ -4025,7 +4318,7 @@ ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
* Read Topology Device NVM (0x06F3)
*
*/
-enum ice_status
+int
ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
struct ice_aqc_link_topo_params *topo_params,
u32 start_address, u8 *data, u8 data_size,
@@ -4033,7 +4326,7 @@ ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
{
struct ice_aqc_read_topo_dev_nvm *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!data || data_size == 0 ||
data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
@@ -4054,7 +4347,7 @@ ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA);
- return ICE_SUCCESS;
+ return 0;
}
static u16 ice_lut_type_to_size(u16 lut_type)
@@ -4114,13 +4407,13 @@ int ice_lut_size_to_type(int lut_size)
*
* Internal function to get (0x0B05) or set (0x0B03) RSS look up table
*/
-static enum ice_status
+static int
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
u16 flags, vsi_id, lut_type, lut_size, glob_lut_idx = 0, vsi_handle;
struct ice_aqc_get_set_rss_lut *cmd_resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u8 *lut;
if (!params)
@@ -4176,7 +4469,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
*
* get the RSS lookup table, PF or VSI type
*/
-enum ice_status
+int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
return __ice_aq_get_set_rss_lut(hw, get_params, false);
@@ -4189,7 +4482,7 @@ ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_
*
* set the RSS lookup table, PF or VSI type
*/
-enum ice_status
+int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
return __ice_aq_get_set_rss_lut(hw, set_params, true);
@@ -4204,8 +4497,7 @@ ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_
*
* get (0x0B04) or set (0x0B02) the RSS key per VSI
*/
-static enum
-ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+static int __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
struct ice_aqc_get_set_rss_keys *key,
bool set)
{
@@ -4238,7 +4530,7 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
*
* get the RSS key per VSI
*/
-enum ice_status
+int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *key)
{
@@ -4257,7 +4549,7 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
*
* set the RSS key per VSI
*/
-enum ice_status
+int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys)
{
@@ -4289,7 +4581,7 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
* Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
* flow.
*/
-enum ice_status
+int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
struct ice_sq_cd *cd)
@@ -4339,7 +4631,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
*
* Disable LAN Tx queue (0x0C31)
*/
-static enum ice_status
+static int
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
@@ -4348,7 +4640,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u16 i, sz = 0;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -4442,7 +4734,7 @@ do_aq:
*
* Move / Reconfigure Tx LAN queues (0x0C32)
*/
-enum ice_status
+int
ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
bool is_tc_change, bool subseq_call, bool flush_pipe,
u8 timeout, u32 *blocked_cgds,
@@ -4451,7 +4743,7 @@ ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
{
struct ice_aqc_move_txqs *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.move_txqs;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
@@ -4506,7 +4798,7 @@ ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
*
* Add Tx RDMA Qsets (0x0C33)
*/
-enum ice_status
+int
ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
struct ice_aqc_add_rdma_qset_data *qset_list,
u16 buf_size, struct ice_sq_cd *cd)
@@ -4578,13 +4870,13 @@ ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
- ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_NONDMA_TO_NONDMA);
dest_byte &= ~mask; /* get the bits not changing */
dest_byte |= src_byte; /* add in the new bits */
/* put it all back */
- ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
+ ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_NONDMA);
}
/**
@@ -4621,13 +4913,13 @@ ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
- ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_NONDMA_TO_NONDMA);
dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
/* put it all back */
- ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
+ ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_NONDMA);
}
/**
@@ -4672,13 +4964,13 @@ ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
- ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_NONDMA_TO_NONDMA);
dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
/* put it all back */
- ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
+ ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_NONDMA);
}
/**
@@ -4723,13 +5015,13 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
- ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_NONDMA_TO_NONDMA);
dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
/* put it all back */
- ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
+ ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_NONDMA);
}
/**
@@ -4739,7 +5031,7 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @dest_ctx: pointer to memory for the packed structure
* @ce_info: a description of the structure to be transformed
*/
-enum ice_status
+int
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info)
{
@@ -4773,7 +5065,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
}
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -4792,7 +5084,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
*
* Get internal FW/HW data (0xFF08) for debug purposes.
*/
-enum ice_status
+int
ice_aq_get_internal_data(struct ice_hw *hw, u16 cluster_id, u16 table_id,
u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
u16 *ret_next_cluster, u16 *ret_next_table,
@@ -4800,7 +5092,7 @@ ice_aq_get_internal_data(struct ice_hw *hw, u16 cluster_id, u16 table_id,
{
struct ice_aqc_debug_dump_internals *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.debug_dump;
@@ -4852,7 +5144,7 @@ ice_read_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* get the current bits from the src bit string */
src = src_ctx + (ce_info->lsb / 8);
- ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_NONDMA_TO_NONDMA);
dest_byte &= mask;
@@ -4862,7 +5154,7 @@ ice_read_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
target = dest_ctx + ce_info->offset;
/* put it back in the struct */
- ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
+ ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_NONDMA);
}
/**
@@ -4889,7 +5181,7 @@ ice_read_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* get the current bits from the src bit string */
src = src_ctx + (ce_info->lsb / 8);
- ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&src_word, src, sizeof(src_word), ICE_NONDMA_TO_NONDMA);
/* the data in the memory is stored as little endian so mask it
* correctly
@@ -4905,7 +5197,7 @@ ice_read_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
target = dest_ctx + ce_info->offset;
/* put it back in the struct */
- ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
+ ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_NONDMA);
}
/**
@@ -4940,7 +5232,7 @@ ice_read_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* get the current bits from the src bit string */
src = src_ctx + (ce_info->lsb / 8);
- ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_NONDMA_TO_NONDMA);
/* the data in the memory is stored as little endian so mask it
* correctly
@@ -4956,7 +5248,7 @@ ice_read_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
target = dest_ctx + ce_info->offset;
/* put it back in the struct */
- ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
+ ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_NONDMA);
}
/**
@@ -4991,7 +5283,7 @@ ice_read_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* get the current bits from the src bit string */
src = src_ctx + (ce_info->lsb / 8);
- ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_NONDMA_TO_NONDMA);
/* the data in the memory is stored as little endian so mask it
* correctly
@@ -5007,7 +5299,7 @@ ice_read_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
target = dest_ctx + ce_info->offset;
/* put it back in the struct */
- ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
+ ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_NONDMA);
}
/**
@@ -5016,7 +5308,7 @@ ice_read_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @dest_ctx: pointer to a generic non-packed context structure
* @ce_info: a description of the structure to be read from
*/
-enum ice_status
+int
ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
int f;
@@ -5041,7 +5333,7 @@ ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -5081,7 +5373,7 @@ ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
*
* This function adds one LAN queue
*/
-enum ice_status
+int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -5089,8 +5381,8 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
struct ice_aqc_txsched_elem_data node = { 0 };
struct ice_sched_node *parent;
struct ice_q_ctx *q_ctx;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
@@ -5149,7 +5441,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
/* add the LAN queue */
status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
- if (status != ICE_SUCCESS) {
+ if (status) {
ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
LE16_TO_CPU(buf->txqs[0].txq_id),
hw->adminq.sq_last_status);
@@ -5186,15 +5478,15 @@ ena_txq_exit:
*
* This function removes queues and their corresponding nodes in SW DB
*/
-enum ice_status
+int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
u16 *q_handles, u16 *q_ids, u32 *q_teids,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
- enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_aqc_dis_txq_item *qg_list;
struct ice_q_ctx *q_ctx;
+ int status = ICE_ERR_DOES_NOT_EXIST;
struct ice_hw *hw;
u16 i, buf_size;
@@ -5244,7 +5536,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
vmvf_num, cd);
- if (status != ICE_SUCCESS)
+ if (status)
break;
ice_free_sched_node(pi, node);
q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
@@ -5264,11 +5556,11 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
*
* This function adds/updates the VSI queues per TC.
*/
-static enum ice_status
+static int
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *maxqs, u8 owner)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 i;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
@@ -5303,7 +5595,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
*
* This function adds/updates the VSI LAN queues per TC.
*/
-enum ice_status
+int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *max_lanqs)
{
@@ -5320,7 +5612,7 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
*
* This function adds/updates the VSI RDMA queues per TC.
*/
-enum ice_status
+int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *max_rdmaqs)
{
@@ -5339,16 +5631,16 @@ ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
*
* This function adds RDMA qset
*/
-enum ice_status
+int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
struct ice_aqc_txsched_elem_data node = { 0 };
struct ice_aqc_add_rdma_qset_data *buf;
struct ice_sched_node *parent;
- enum ice_status status;
struct ice_hw *hw;
u16 i, buf_size;
+ int status;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
@@ -5389,7 +5681,7 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
}
status = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
goto rdma_error_exit;
}
@@ -5415,13 +5707,13 @@ rdma_error_exit:
* @qset_teid: TEID of qset node
* @q_id: list of queue IDs being disabled
*/
-enum ice_status
+int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
struct ice_aqc_dis_txq_item *qg_list;
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw;
+ int status = 0;
u16 qg_size;
int i;
@@ -5473,14 +5765,14 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
*
* Get sensor reading (0x0632)
*/
-enum ice_status
+int
ice_aq_get_sensor_reading(struct ice_hw *hw, u8 sensor, u8 format,
struct ice_aqc_get_sensor_reading_resp *data,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sensor_reading *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!data)
return ICE_ERR_PARAM;
@@ -5519,10 +5811,10 @@ static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
*
* Initializes required config data for VSI, FD, ACL, and RSS before replay.
*/
-enum ice_status
+int
ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
{
- enum ice_status status;
+ int status;
u8 i;
/* Delete old entries from replay filter list head if there is any */
@@ -5551,11 +5843,11 @@ ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
* Restore all VSI configuration after reset. It is required to call this
* function with main VSI first.
*/
-enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_port_info *pi = hw->port_info;
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
@@ -5728,13 +6020,13 @@ ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
* Write one or two dwords to alternate structure. Fields are indicated
* by 'reg_addr0' and 'reg_addr1' register numbers.
*/
-enum ice_status
+int
ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
u32 reg_addr1, u32 reg_val1)
{
struct ice_aqc_read_write_alt_direct *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.read_write_alt_direct;
@@ -5761,13 +6053,13 @@ ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
* by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
* is not passed then only register at 'reg_addr0' is read.
*/
-enum ice_status
+int
ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1)
{
struct ice_aqc_read_write_alt_direct *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.read_write_alt_direct;
@@ -5780,7 +6072,7 @@ ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
- if (status == ICE_SUCCESS) {
+ if (!status) {
*reg_val0 = LE32_TO_CPU(cmd->dword0_value);
if (reg_val1)
@@ -5798,12 +6090,12 @@ ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
*
* Indicates to the FW that alternate structures have been changed.
*/
-enum ice_status
+int
ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
{
struct ice_aqc_done_alt_write *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.done_alt_write;
@@ -5828,10 +6120,10 @@ ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
* Clear the alternate structures of the port from which the function
* is called.
*/
-enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
+int ice_aq_alternate_clear(struct ice_hw *hw)
{
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);
@@ -5848,19 +6140,19 @@ enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
*
* This function queries HW element information
*/
-enum ice_status
+int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf)
{
u16 buf_size, num_elem_ret = 0;
- enum ice_status status;
+ int status;
buf_size = sizeof(*buf);
ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
buf->node_teid = CPU_TO_LE32(node_teid);
status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
NULL);
- if (status != ICE_SUCCESS || num_elem_ret != 1)
+ if (status || num_elem_ret != 1)
ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
return status;
}
@@ -5877,7 +6169,7 @@ enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
u32 fw_mode;
/* check the current FW mode */
- fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
+ fw_mode = rd32(hw, GL_MNG_FWSM) & E800_GL_MNG_FWSM_FW_MODES_M;
if (fw_mode & ICE_FW_MODE_DBG_M)
return ICE_FW_MODE_DBG;
else if (fw_mode & ICE_FW_MODE_REC_M)
@@ -5895,13 +6187,13 @@ enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
*
* Get the current status of LLDP persistent
*/
-enum ice_status
+int
ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
{
struct ice_port_info *pi = hw->port_info;
- enum ice_status ret;
__le32 raw_data;
u32 data, mask;
+ int ret;
if (!lldp_status)
return ICE_ERR_BAD_PTR;
@@ -5935,14 +6227,14 @@ ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
*
* Get the default status of LLDP persistent
*/
-enum ice_status
+int
ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
{
struct ice_port_info *pi = hw->port_info;
u32 data, mask, loc_data, loc_data_tmp;
- enum ice_status ret;
__le16 loc_raw_data;
__le32 raw_data;
+ int ret;
if (!lldp_status)
return ICE_ERR_BAD_PTR;
@@ -6014,15 +6306,15 @@ exit:
*
* Read I2C (0x06E2)
*/
-enum ice_status
+int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
struct ice_sq_cd *cd)
{
struct ice_aq_desc desc = { 0 };
struct ice_aqc_i2c *cmd;
- enum ice_status status;
u8 data_size;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
cmd = &desc.params.read_write_i2c;
@@ -6064,7 +6356,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
*
* Write I2C (0x06E3)
*/
-enum ice_status
+int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd)
@@ -6105,7 +6397,7 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
*
* Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
*/
-enum ice_status
+int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
struct ice_sq_cd *cd)
{
@@ -6132,13 +6424,13 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
* Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
* the topology
*/
-enum ice_status
+int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd)
{
struct ice_aqc_gpio *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
cmd = &desc.params.read_write_gpio;
@@ -6150,7 +6442,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
return status;
*value = !!cmd->gpio_val;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -6223,13 +6515,13 @@ bool ice_fw_supports_link_override(struct ice_hw *hw)
*
* Gets the link default override for a port
*/
-enum ice_status
+int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi)
{
u16 i, tlv, tlv_len, tlv_start, buf, offset;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
+ int status;
status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
@@ -6336,7 +6628,7 @@ bool ice_is_fw_health_report_supported(struct ice_hw *hw)
* Configure the health status event types that the firmware will send to this
* PF. The supported event types are: PF-specific, all PFs, and global
*/
-enum ice_status
+int
ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
struct ice_sq_cd *cd)
{
@@ -6369,7 +6661,7 @@ ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
*
* Calls Get Port Options AQC (0x06ea) and verifies result.
*/
-enum ice_status
+int
ice_aq_get_port_options(struct ice_hw *hw,
struct ice_aqc_get_port_options_elem *options,
u8 *option_count, u8 lport, bool lport_valid,
@@ -6378,7 +6670,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
{
struct ice_aqc_get_port_options *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u8 i;
/* options buffer shall be able to hold max returned options */
@@ -6393,7 +6685,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
status = ice_aq_send_cmd(hw, &desc, options,
*option_count * sizeof(*options), NULL);
- if (status != ICE_SUCCESS)
+ if (status)
return status;
/* verify direct FW response & set output parameters */
@@ -6428,7 +6720,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
options[i].pmd, options[i].max_lane_speed);
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -6441,7 +6733,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
*
* Calls Set Port Options AQC (0x06eb).
*/
-enum ice_status
+int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option)
{
@@ -6472,7 +6764,7 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
*
* Set the LLDP MIB. (0x0A08)
*/
-enum ice_status
+int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
@@ -6515,7 +6807,7 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
* @vsi_num: absolute HW index for VSI
* @add: boolean for if adding or removing a filter
*/
-enum ice_status
+int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
struct ice_aqc_lldp_filter_ctrl *cmd;
@@ -6539,7 +6831,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
* ice_lldp_execute_pending_mib - execute LLDP pending MIB request
* @hw: pointer to HW struct
*/
-enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw)
+int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -6580,6 +6872,7 @@ static const u32 ice_aq_to_link_speed[] = {
ICE_LINK_SPEED_40000MBPS,
ICE_LINK_SPEED_50000MBPS,
ICE_LINK_SPEED_100000MBPS, /* BIT(10) */
+ ICE_LINK_SPEED_200000MBPS,
};
/**
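The ice_aq_to_link_speed table above is indexed by bit position of the admin queue link-speed field, so the new 200G entry occupies the slot after the 100G one at BIT(10). A hedged sketch of translating a speed bitmap through ice_get_link_speed(); example_max_speed_mbps is not a driver function, and it assumes out-of-range indexes resolve to the "unknown" value:

/* Walk set bits from LSB to MSB so the last assignment is the highest
 * advertised speed.
 */
static u32 example_max_speed_mbps(u16 aq_link_speed)
{
        u32 speed = 0;
        u16 i;

        for (i = 0; i < 16; i++)
                if (aq_link_speed & BIT(i))
                        speed = ice_get_link_speed(i);

        return speed;
}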
@@ -6604,6 +6897,8 @@ u32 ice_get_link_speed(u16 index)
*/
bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw)
{
+ if (ice_is_e830(hw))
+ return true;
return ice_is_fw_min_ver(hw, ICE_FW_VER_BRANCH_E810,
ICE_FW_FEC_DIS_AUTO_MAJ,
ICE_FW_FEC_DIS_AUTO_MIN,
diff --git a/sys/dev/ice/ice_common.h b/sys/dev/ice/ice_common.h
index 3abfba874b9c..7bd9de0c94e6 100644
--- a/sys/dev/ice/ice_common.h
+++ b/sys/dev/ice/ice_common.h
@@ -41,6 +41,9 @@
#define ICE_SQ_SEND_DELAY_TIME_MS 10
#define ICE_SQ_SEND_MAX_EXECUTE 3
+#define LOOPBACK_MODE_NO 0
+#define LOOPBACK_MODE_HIGH 2
+
enum ice_fw_modes {
ICE_FW_MODE_NORMAL,
ICE_FW_MODE_DBG,
@@ -51,49 +54,55 @@ enum ice_fw_modes {
void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq);
bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq);
+int ice_init_fltr_mgmt_struct(struct ice_hw *hw);
+void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw);
void ice_set_umac_shared(struct ice_hw *hw);
-enum ice_status ice_init_hw(struct ice_hw *hw);
+int ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
-enum ice_status ice_check_reset(struct ice_hw *hw);
-enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
-enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
-enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
+int ice_check_reset(struct ice_hw *hw);
+int ice_reset(struct ice_hw *hw, enum ice_reset_req req);
+int ice_create_all_ctrlq(struct ice_hw *hw);
+int ice_init_all_ctrlq(struct ice_hw *hw);
void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading);
void ice_destroy_all_ctrlq(struct ice_hw *hw);
-enum ice_status
+int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending);
-enum ice_status
+int
ice_get_link_status(struct ice_port_info *pi, bool *link_up);
-enum ice_status ice_update_link_info(struct ice_port_info *pi);
-enum ice_status
+int ice_update_link_info(struct ice_port_info *pi);
+int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
-enum ice_status
+int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
-enum ice_status
+int
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
-enum ice_status
+int
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
-enum ice_status
+int
+ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
-enum ice_status ice_get_caps(struct ice_hw *hw);
+int ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
-enum ice_status
+int
ice_aq_get_internal_data(struct ice_hw *hw, u16 cluster_id, u16 table_id,
u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
u16 *ret_next_cluster, u16 *ret_next_table,
u32 *ret_next_index, struct ice_sq_cd *cd);
-enum ice_status ice_set_mac_type(struct ice_hw *hw);
+int ice_set_mac_type(struct ice_hw *hw);
/* Define a macro that will align a pointer to point to the next memory address
* that falls on the given power of 2 (i.e., 2, 4, 8, 16, 32, 64...). For
@@ -108,108 +117,110 @@ enum ice_status ice_set_mac_type(struct ice_hw *hw);
#define ice_arr_elem_idx(idx, val) [(idx)] = (val)
-enum ice_status
+int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
-enum ice_status
+int
ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
-enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index);
-enum ice_status
+int ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index);
+int
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index);
-enum ice_status
+int
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
u32 tx_cmpltnq_index);
-enum ice_status
+int
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index);
-enum ice_status
+int
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
u32 tx_drbell_q_index);
int ice_lut_size_to_type(int lut_size);
-enum ice_status
+int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
-enum ice_status
+int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params);
-enum ice_status
+int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
-enum ice_status
+int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
-enum ice_status
+int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 count,
struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
bool is_tc_change, bool subseq_call, bool flush_pipe,
u8 timeout, u32 *blocked_cgds,
struct ice_aqc_move_txqs_data *buf, u16 buf_size,
u8 *txqs_moved, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
struct ice_aqc_add_rdma_qset_data *qset_list,
u16 buf_size, struct ice_sq_cd *cd);
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
-enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
+int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-enum ice_status
+int
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info);
-enum ice_status
+int
ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
-enum ice_status
+int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
-enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
+int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
bool save_bad_pac, bool pad_short_pac, bool double_vlan,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
u8 *node_part_number, u16 *node_handle);
-enum ice_status
+int
ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
u16 *node_handle);
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap);
-enum ice_status
+int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
-enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
-enum ice_status
+int ice_clear_pf_cfg(struct ice_hw *hw);
+int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
bool ice_fw_supports_link_override(struct ice_hw *hw);
bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw);
-enum ice_status
+int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
+int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
+ u8 serdes_num, int *output);
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
-enum ice_status
+int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
bool ena_auto_link_update);
bool
@@ -219,84 +230,89 @@ void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg);
-enum ice_status
+int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec);
-enum ice_status
+int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
u32 ice_get_link_speed(u16 index);
-enum ice_status
+int
ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
struct ice_aqc_link_topo_params *topo_params,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
struct ice_aqc_link_topo_params *topo_params,
u32 start_address, u8 *buf, u8 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_port_options(struct ice_hw *hw,
struct ice_aqc_get_port_options_elem *options,
u8 *option_count, u8 lport, bool lport_valid,
u8 *active_option_idx, bool *active_option_valid,
u8 *pending_option_idx, bool *pending_option_valid);
-enum ice_status
+int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option);
-enum ice_status
+int
__ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data);
-enum ice_status
+int
__ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data);
-enum ice_status
+int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *max_rdmaqs);
-enum ice_status
+int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 *rdma_qset, u16 num_qsets, u32 *qset_teid);
-enum ice_status
+int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id);
-enum ice_status
+int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
u16 *q_handle, u16 *q_ids, u32 *q_teids,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *max_lanqs);
-enum ice_status
+int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw);
-enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
+int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
-enum ice_status
+int ice_sbq_rw_reg_lp(struct ice_hw *hw, struct ice_sbq_msg_input *in,
+ u16 flag, bool lock);
+void ice_sbq_lock(struct ice_hw *hw);
+void ice_sbq_unlock(struct ice_hw *hw);
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag);
+int
ice_aq_get_sensor_reading(struct ice_hw *hw, u8 sensor, u8 format,
struct ice_aqc_get_sensor_reading_resp *data,
struct ice_sq_cd *cd);
@@ -311,50 +327,53 @@ ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
struct ice_eth_stats *cur_stats);
enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw);
void ice_print_rollback_msg(struct ice_hw *hw);
+bool ice_is_generic_mac(struct ice_hw *hw);
bool ice_is_e810(struct ice_hw *hw);
bool ice_is_e810t(struct ice_hw *hw);
+bool ice_is_e830(struct ice_hw *hw);
+bool ice_is_e825c(struct ice_hw *hw);
bool ice_is_e823(struct ice_hw *hw);
-enum ice_status
+int
ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
u32 reg_addr1, u32 reg_val1);
-enum ice_status
+int
ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1);
-enum ice_status
+int
ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode,
bool *reset_needed);
-enum ice_status ice_aq_alternate_clear(struct ice_hw *hw);
-enum ice_status
+int ice_aq_alternate_clear(struct ice_hw *hw);
+int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
-enum ice_status
+int
ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
-enum ice_status
+int
ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
-enum ice_status
+int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd);
bool ice_is_100m_speed_supported(struct ice_hw *hw);
-enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *netlist);
-enum ice_status
+int ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *netlist);
+int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
-enum ice_status
+int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
-enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw);
-enum ice_status
+int ice_lldp_execute_pending_mib(struct ice_hw *hw);
+int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
struct ice_sq_cd *cd);
bool ice_is_fw_health_report_supported(struct ice_hw *hw);
diff --git a/sys/dev/ice/ice_common_txrx.h b/sys/dev/ice/ice_common_txrx.h
index 865c1d27da44..5bab344ecd83 100644
--- a/sys/dev/ice/ice_common_txrx.h
+++ b/sys/dev/ice/ice_common_txrx.h
@@ -169,7 +169,7 @@ ice_tso_setup(struct ice_tx_queue *txq, if_pkt_info_t pi)
txd->qw1 = htole64(type_cmd_tso_mss);
txd->tunneling_params = htole32(0);
- txq->tso++;
+ txq->stats.tso++;
return ((idx + 1) & (txq->desc_count-1));
}
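With the TSO counter moved into the per-queue stats structure above, totals are easy to derive. A hedged sketch follows; everything except the stats.tso field is an illustrative assumption.

/* Hedged sketch: sum the per-TX-queue TSO offload request counters. */
static u64
ice_total_tso_requests(struct ice_tx_queue *txqs, u16 num_txq)
{
	u64 total = 0;
	u16 i;

	for (i = 0; i < num_txq; i++)
		total += txqs[i].stats.tso;

	return (total);
}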
diff --git a/sys/dev/ice/ice_controlq.c b/sys/dev/ice/ice_controlq.c
index 8aa2a7f765a2..e96c7e230310 100644
--- a/sys/dev/ice/ice_controlq.c
+++ b/sys/dev/ice/ice_controlq.c
@@ -82,6 +82,21 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)
}
/**
+ * ice_sb_init_regs - Initialize Sideband registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_sq and alloc_rq functions have already been called
+ */
+static void ice_sb_init_regs(struct ice_hw *hw)
+{
+ struct ice_ctl_q_info *cq = &hw->sbq;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ ICE_CQ_INIT_REGS(cq, PF_SB);
+}
+
+/**
* ice_check_sq_alive
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
@@ -104,7 +119,7 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
@@ -113,7 +128,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
if (!cq->sq.desc_buf.va)
return ICE_ERR_NO_MEMORY;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -121,7 +136,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
@@ -129,7 +144,7 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
if (!cq->rq.desc_buf.va)
return ICE_ERR_NO_MEMORY;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -150,7 +165,7 @@ static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
int i;
@@ -195,7 +210,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
desc->params.generic.param0 = 0;
desc->params.generic.param1 = 0;
}
- return ICE_SUCCESS;
+ return 0;
unwind_alloc_rq_bufs:
/* don't try to free the one that failed... */
@@ -214,7 +229,7 @@ unwind_alloc_rq_bufs:
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
int i;
@@ -235,7 +250,7 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
if (!bi->va)
goto unwind_alloc_sq_bufs;
}
- return ICE_SUCCESS;
+ return 0;
unwind_alloc_sq_bufs:
/* don't try to free the one that failed... */
@@ -249,7 +264,7 @@ unwind_alloc_sq_bufs:
return ICE_ERR_NO_MEMORY;
}
-static enum ice_status
+static int
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
/* Clear Head and Tail */
@@ -265,7 +280,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
return ICE_ERR_AQ_ERROR;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -275,7 +290,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
*
* Configure base address and length registers for the transmit queue
*/
-static enum ice_status
+static int
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
@@ -288,10 +303,10 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
*
* Configure base address and length registers for the receive (event queue)
*/
-static enum ice_status
+static int
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status status;
+ int status;
status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
if (status)
@@ -300,7 +315,7 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
- return ICE_SUCCESS;
+ return 0;
}
#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
@@ -332,9 +347,9 @@ do { \
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
*/
-static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code;
+ int ret_code;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -394,9 +409,9 @@ init_ctrlq_exit:
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
*/
-static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code;
+ int ret_code;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -449,10 +464,10 @@ init_ctrlq_exit:
*
* The main shutdown routine for the Control Transmit Queue
*/
-static enum ice_status
+static int
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code = ICE_SUCCESS;
+ int ret_code = 0;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -491,24 +506,27 @@ shutdown_sq_out:
*/
static bool ice_aq_ver_check(struct ice_hw *hw)
{
- if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
+ u8 exp_fw_api_ver_major = EXP_FW_API_VER_MAJOR_BY_MAC(hw);
+ u8 exp_fw_api_ver_minor = EXP_FW_API_VER_MINOR_BY_MAC(hw);
+
+ if (hw->api_maj_ver > exp_fw_api_ver_major) {
/* Major API version is newer than expected, don't load */
ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
return false;
- } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
- if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
+ } else if (hw->api_maj_ver == exp_fw_api_ver_major) {
+ if (hw->api_min_ver > (exp_fw_api_ver_minor + 2))
ice_info(hw, "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
hw->api_maj_ver, hw->api_min_ver,
- EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
- else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
+ else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor)
ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
hw->api_maj_ver, hw->api_min_ver,
- EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
} else {
/* Major API version is older than expected, log a warning */
ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
hw->api_maj_ver, hw->api_min_ver,
- EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
}
return true;
}
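For concreteness, a worked walk-through of the tolerance window implemented above, using the E830 expected API version 1.07 defined in ice_controlq.h later in this patch; the firmware versions listed are illustrative only.

/*
 * Illustrative behaviour of ice_aq_ver_check() with an expected API of 1.07:
 *
 *   FW reports 2.00 -> major newer than expected, driver refuses to load
 *   FW reports 1.10 -> 10 > 7 + 2, "newer than expected" info message, loads
 *   FW reports 1.08 -> within the +/-2 minor window, accepted silently
 *   FW reports 1.04 -> 4 + 2 < 7, "older than expected" info message, loads
 *   FW reports 0.09 -> older major version, info message, still loads
 */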
@@ -520,10 +538,10 @@ static bool ice_aq_ver_check(struct ice_hw *hw)
*
* The main shutdown routine for the Control Receive Queue
*/
-static enum ice_status
+static int
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code = ICE_SUCCESS;
+ int ret_code = 0;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -570,10 +588,10 @@ void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)

* ice_init_check_adminq - Check version for Admin Queue to know if it's alive
* @hw: pointer to the hardware structure
*/
-static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
+static int ice_init_check_adminq(struct ice_hw *hw)
{
struct ice_ctl_q_info *cq = &hw->adminq;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -586,7 +604,7 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
goto init_ctrlq_free_rq;
}
- return ICE_SUCCESS;
+ return 0;
init_ctrlq_free_rq:
ice_shutdown_rq(hw, cq);
@@ -608,10 +626,10 @@ init_ctrlq_free_rq:
*
* NOTE: this function does not initialize the controlq locks
*/
-static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
struct ice_ctl_q_info *cq;
- enum ice_status ret_code;
+ int ret_code;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -620,6 +638,10 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
ice_adminq_init_regs(hw);
cq = &hw->adminq;
break;
+ case ICE_CTL_Q_SB:
+ ice_sb_init_regs(hw);
+ cq = &hw->sbq;
+ break;
case ICE_CTL_Q_MAILBOX:
ice_mailbox_init_regs(hw);
cq = &hw->mailboxq;
@@ -649,7 +671,7 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
goto init_ctrlq_free_sq;
/* success! */
- return ICE_SUCCESS;
+ return 0;
init_ctrlq_free_sq:
ice_shutdown_sq(hw, cq);
@@ -657,6 +679,18 @@ init_ctrlq_free_sq:
}
/**
+ * ice_is_sbq_supported - is the sideband queue supported
+ * @hw: pointer to the hardware structure
+ *
+ * Returns true if the sideband control queue interface is
+ * supported for the device, false otherwise
+ */
+static bool ice_is_sbq_supported(struct ice_hw *hw)
+{
+ return ice_is_generic_mac(hw);
+}
+
+/**
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
@@ -678,6 +712,9 @@ ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
if (ice_check_sq_alive(hw, cq))
ice_aq_q_shutdown(hw, unloading);
break;
+ case ICE_CTL_Q_SB:
+ cq = &hw->sbq;
+ break;
case ICE_CTL_Q_MAILBOX:
cq = &hw->mailboxq;
break;
@@ -703,6 +740,9 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Shutdown FW admin queue */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
+ /* Shutdown PHY Sideband */
+ if (ice_is_sbq_supported(hw))
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading);
/* Shutdown PF-VF Mailbox */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}
@@ -720,10 +760,10 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
*
* NOTE: this function does not initialize the controlq locks.
*/
-enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
+int ice_init_all_ctrlq(struct ice_hw *hw)
{
- enum ice_status status;
u32 retry = 0;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -744,6 +784,15 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
if (status)
return status;
+ /* sideband control queue (SBQ) interface is not supported on some
+ * devices. Initialize if supported, else fall back to the admin queue
+ * interface
+ */
+ if (ice_is_sbq_supported(hw)) {
+ status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
+ if (status)
+ return status;
+ }
/* Init Mailbox queue */
return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
@@ -776,9 +825,11 @@ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
* driver needs to re-initialize control queues at run time it should call
* ice_init_all_ctrlq instead.
*/
-enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
+int ice_create_all_ctrlq(struct ice_hw *hw)
{
ice_init_ctrlq_locks(&hw->adminq);
+ if (ice_is_sbq_supported(hw))
+ ice_init_ctrlq_locks(&hw->sbq);
ice_init_ctrlq_locks(&hw->mailboxq);
return ice_init_all_ctrlq(hw);
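A hedged sketch of the call pattern implied by the comment above: ice_create_all_ctrlq() once at attach (locks, including the SBQ lock when supported, then queues), ice_init_all_ctrlq() for run-time re-initialization, and ice_destroy_all_ctrlq() at detach. Error handling is elided.

/* Hedged lifecycle sketch only; not driver code. */
static int
ice_ctrlq_lifecycle_example(struct ice_hw *hw)
{
	int status;

	/* attach: create locks, then initialize the control queues */
	status = ice_create_all_ctrlq(hw);
	if (status)
		return (status);

	/* later, e.g. after a reset: re-initialize queues, locks persist */
	status = ice_init_all_ctrlq(hw);
	if (status)
		return (status);

	/* detach: shut down queues and destroy the locks */
	ice_destroy_all_ctrlq(hw);
	return (0);
}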
@@ -811,6 +862,8 @@ void ice_destroy_all_ctrlq(struct ice_hw *hw)
ice_shutdown_all_ctrlq(hw, true);
ice_destroy_ctrlq_locks(&hw->adminq);
+ if (ice_is_sbq_supported(hw))
+ ice_destroy_ctrlq_locks(&hw->sbq);
ice_destroy_ctrlq_locks(&hw->mailboxq);
}
@@ -858,6 +911,8 @@ static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
return "AQ";
case ICE_CTL_Q_MAILBOX:
return "MBXQ";
+ case ICE_CTL_Q_SB:
+ return "SBQ";
default:
return "Unrecognized CQ";
}
@@ -944,7 +999,7 @@ bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* command into a descriptor, bumps the send queue tail, waits for the command
* to complete, captures status and data for the command, etc.
*/
-static enum ice_status
+int
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -952,8 +1007,8 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_dma_mem *dma_buf = NULL;
struct ice_aq_desc *desc_on_ring;
bool cmd_completed = false;
- enum ice_status status = ICE_SUCCESS;
u32 total_delay = 0;
+ int status = 0;
u16 retval = 0;
u32 val = 0;
@@ -1121,12 +1176,12 @@ sq_send_command_error:
* on the queue, bumps the tail, waits for processing of the command, captures
* command status and results, etc.
*/
-enum ice_status
+int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
/* if reset is in progress return a soft error */
if (hw->reset_ongoing)
@@ -1165,15 +1220,15 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
* contains contents of the message, and 'pending' contains the number of
* events left to process.
*/
-enum ice_status
+int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending)
{
u16 ntc = cq->rq.next_to_clean;
enum ice_aq_err rq_last_status;
- enum ice_status ret_code = ICE_SUCCESS;
struct ice_aq_desc *desc;
struct ice_dma_mem *bi;
+ int ret_code = 0;
u16 desc_idx;
u16 datalen;
u16 flags;
diff --git a/sys/dev/ice/ice_controlq.h b/sys/dev/ice/ice_controlq.h
index d48d53a37161..0604ebed250e 100644
--- a/sys/dev/ice/ice_controlq.h
+++ b/sys/dev/ice/ice_controlq.h
@@ -37,6 +37,7 @@
/* Maximum buffer lengths for all control queue types */
#define ICE_AQ_MAX_BUF_LEN 4096
#define ICE_MBXQ_MAX_BUF_LEN 4096
+#define ICE_SBQ_MAX_BUF_LEN 512
#define ICE_CTL_Q_DESC(R, i) \
(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
@@ -48,15 +49,32 @@
/* Defines that help manage the driver vs FW API checks.
* Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
*/
-#define EXP_FW_API_VER_BRANCH 0x00
-#define EXP_FW_API_VER_MAJOR 0x01
-#define EXP_FW_API_VER_MINOR 0x05
+#define EXP_FW_API_VER_BRANCH_E830 0x00
+#define EXP_FW_API_VER_MAJOR_E830 0x01
+#define EXP_FW_API_VER_MINOR_E830 0x07
+
+#define EXP_FW_API_VER_BRANCH_E810 0x00
+#define EXP_FW_API_VER_MAJOR_E810 0x01
+#define EXP_FW_API_VER_MINOR_E810 0x05
+
+#define EXP_FW_API_VER_BRANCH_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ EXP_FW_API_VER_BRANCH_E830 : \
+ EXP_FW_API_VER_BRANCH_E810)
+
+#define EXP_FW_API_VER_MAJOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ EXP_FW_API_VER_MAJOR_E830 : \
+ EXP_FW_API_VER_MAJOR_E810)
+
+#define EXP_FW_API_VER_MINOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ EXP_FW_API_VER_MINOR_E830 : \
+ EXP_FW_API_VER_MINOR_E810)
/* Different control queue types: These are mainly for SW consumption. */
enum ice_ctl_q {
ICE_CTL_Q_UNKNOWN = 0,
ICE_CTL_Q_ADMIN,
ICE_CTL_Q_MAILBOX,
+ ICE_CTL_Q_SB,
};
/* Control Queue timeout settings - max delay 1s */
diff --git a/sys/dev/ice/ice_dcb.c b/sys/dev/ice/ice_dcb.c
index a06117f90aad..98da42783fe0 100644
--- a/sys/dev/ice/ice_dcb.c
+++ b/sys/dev/ice/ice_dcb.c
@@ -46,14 +46,14 @@
*
* Requests the complete LLDP MIB (entire packet). (0x0A00)
*/
-enum ice_status
+int
ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
u16 buf_size, u16 *local_len, u16 *remote_len,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_get_mib *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.lldp_get_mib;
@@ -88,7 +88,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
* Enable or Disable posting of an event on ARQ when LLDP MIB
* associated with the interface changes (0x0A01)
*/
-enum ice_status
+int
ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
struct ice_sq_cd *cd)
{
@@ -130,14 +130,14 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
* Delete the specified TLV from LLDP Local MIB for the given bridge type.
* The firmware places the entire LLDP MIB in the response buffer. (0x0A04)
*/
-enum ice_status
+int
ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv,
void *buf, u16 buf_size, u16 tlv_len, u16 *mib_len,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_add_delete_tlv *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (tlv_len == 0)
return ICE_ERR_PARAM;
@@ -179,14 +179,14 @@ ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv,
* Firmware will place the complete LLDP MIB in response buffer with the
* updated TLV. (0x0A03)
*/
-enum ice_status
+int
ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf,
u16 buf_size, u16 old_len, u16 new_len, u16 offset,
u16 *mib_len, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_update_tlv *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.lldp_update_tlv;
@@ -221,7 +221,7 @@ ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf,
*
* Stop or Shutdown the embedded LLDP Agent (0x0A05)
*/
-enum ice_status
+int
ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd)
{
@@ -249,7 +249,7 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
*
* Start the embedded LLDP Agent on all ports. (0x0A06)
*/
-enum ice_status
+int
ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_start *cmd;
@@ -730,11 +730,11 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
*
* Parse DCB configuration from the LLDPDU
*/
-enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
+int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
{
struct ice_lldp_org_tlv *tlv;
- enum ice_status ret = ICE_SUCCESS;
u16 offset = 0;
+ int ret = 0;
u16 typelen;
u16 type;
u16 len;
@@ -780,12 +780,12 @@ enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
*
* Query DCB configuration from the firmware
*/
-enum ice_status
+int
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg)
{
- enum ice_status ret;
u8 *lldpmib;
+ int ret;
/* Allocate the LLDPDU */
lldpmib = (u8 *)ice_malloc(hw, ICE_LLDPDU_SIZE);
@@ -795,7 +795,7 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib,
ICE_LLDPDU_SIZE, NULL, NULL, NULL);
- if (ret == ICE_SUCCESS)
+ if (!ret)
/* Parse LLDP MIB to get DCB configuration */
ret = ice_lldp_to_dcb_cfg(lldpmib, dcbcfg);
@@ -815,13 +815,13 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
* This sends out request/release to ignore PFC condition for a TC.
* It will return the TCs for which PFC is currently ignored. (0x0301)
*/
-enum ice_status
+int
ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret,
struct ice_sq_cd *cd)
{
struct ice_aqc_pfc_ignore *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.pfc_ignore;
@@ -851,17 +851,17 @@ ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret,
* @cd: pointer to command details structure or NULL
*
* Start/Stop the embedded dcbx Agent. In case that this wrapper function
- * returns ICE_SUCCESS, caller will need to check if FW returns back the same
+ * returns 0, caller will need to check if FW returns back the same
* value as stated in dcbx_agent_status, and react accordingly. (0x0A09)
*/
-enum ice_status
+int
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop_start_specific_agent *cmd;
enum ice_adminq_opc opcode;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.lldp_agent_ctrl;
@@ -876,7 +876,7 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
*dcbx_agent_status = false;
- if (status == ICE_SUCCESS &&
+ if (!status &&
cmd->command == ICE_AQC_START_STOP_AGENT_START_DCBX)
*dcbx_agent_status = true;
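A hedged caller sketch for the contract described above: a zero return only means the admin queue command completed, so the caller must still consult dcbx_agent_status. ICE_ERR_CFG is used purely as an illustrative fallback error code, and the function name is not from the driver.

static int
ice_start_dcbx_agent_example(struct ice_hw *hw)
{
	bool dcbx_agent_status = false;
	int status;

	status = ice_aq_start_stop_dcbx(hw, true, &dcbx_agent_status, NULL);
	if (!status && !dcbx_agent_status) {
		/* command accepted, but FW reports the agent is not running */
		status = ICE_ERR_CFG;
	}

	return (status);
}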
@@ -891,7 +891,7 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
*
* Get CEE DCBX mode operational configuration from firmware (0x0A07)
*/
-enum ice_status
+int
ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
struct ice_aqc_get_cee_dcb_cfg_resp *buff,
struct ice_sq_cd *cd)
@@ -912,12 +912,12 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
* This will return an indication if DSCP-based PFC or VLAN-based PFC
* is enabled. (0x0302)
*/
-enum ice_status
+int
ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd)
{
struct ice_aqc_set_query_pfc_mode *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.set_query_pfc_mode;
@@ -940,12 +940,12 @@ ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd)
* This AQ call configures the PFC mode to DSCP-based PFC mode or VLAN
* -based PFC (0x0303)
*/
-enum ice_status
+int
ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
{
struct ice_aqc_set_query_pfc_mode *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC)
return ICE_ERR_PARAM;
@@ -968,7 +968,7 @@ ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
if (cmd->pfc_mode != pfc_mode)
return ICE_ERR_NOT_SUPPORTED;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -980,7 +980,7 @@ ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
* This AQ command will tell FW if it will apply or not apply the default DCB
* configuration when link up (0x0306).
*/
-enum ice_status
+int
ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable,
struct ice_sq_cd *cd)
{
@@ -1126,11 +1126,11 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
*
* Get IEEE or CEE mode DCB configuration from the Firmware
*/
-STATIC enum ice_status
+STATIC int
ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
{
struct ice_dcbx_cfg *dcbx_cfg = NULL;
- enum ice_status ret;
+ int ret;
if (!pi)
return ICE_ERR_PARAM;
@@ -1154,7 +1154,7 @@ ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
/* Don't treat ENOENT as an error for Remote MIBs */
if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
- ret = ICE_SUCCESS;
+ ret = 0;
out:
return ret;
@@ -1166,17 +1166,17 @@ out:
*
* Get DCB configuration from the Firmware
*/
-enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
+int ice_get_dcb_cfg(struct ice_port_info *pi)
{
struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg;
struct ice_dcbx_cfg *dcbx_cfg;
- enum ice_status ret;
+ int ret;
if (!pi)
return ICE_ERR_PARAM;
ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
- if (ret == ICE_SUCCESS) {
+ if (!ret) {
/* CEE mode */
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
ice_cee_to_dcb_cfg(&cee_cfg, pi);
@@ -1234,10 +1234,10 @@ void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
*
* Update DCB configuration from the Firmware
*/
-enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
+int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
{
struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
- enum ice_status ret = ICE_SUCCESS;
+ int ret = 0;
if (!hw->func_caps.common_cap.dcb)
return ICE_ERR_NOT_SUPPORTED;
@@ -1276,10 +1276,10 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
*
* Configure (disable/enable) MIB
*/
-enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
+int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
{
struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
- enum ice_status ret;
+ int ret;
if (!hw->func_caps.common_cap.dcb)
return ICE_ERR_NOT_SUPPORTED;
@@ -1728,13 +1728,13 @@ void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
*
* Set DCB configuration to the Firmware
*/
-enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
+int ice_set_dcb_cfg(struct ice_port_info *pi)
{
u8 mib_type, *lldpmib = NULL;
struct ice_dcbx_cfg *dcbcfg;
- enum ice_status ret;
struct ice_hw *hw;
u16 miblen;
+ int ret;
if (!pi)
return ICE_ERR_PARAM;
@@ -1770,14 +1770,14 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
*
* query current port ETS configuration
*/
-enum ice_status
+int
ice_aq_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_query_port_ets *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!pi || !pi->root)
return ICE_ERR_PARAM;
@@ -1796,14 +1796,14 @@ ice_aq_query_port_ets(struct ice_port_info *pi,
*
* update the SW DB with the new TC changes
*/
-enum ice_status
+int
ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf)
{
struct ice_sched_node *node, *tc_node;
struct ice_aqc_txsched_elem_data elem;
- enum ice_status status = ICE_SUCCESS;
u32 teid1, teid2;
+ int status = 0;
u8 i, j;
if (!pi)
@@ -1864,12 +1864,12 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
* query current port ETS configuration and update the
* SW DB with the TC changes
*/
-enum ice_status
+int
ice_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
- enum ice_status status;
+ int status;
ice_acquire_lock(&pi->sched_lock);
status = ice_aq_query_port_ets(pi, buf, buf_size, cd);
diff --git a/sys/dev/ice/ice_dcb.h b/sys/dev/ice/ice_dcb.h
index ec2200afe200..373b0313cb6b 100644
--- a/sys/dev/ice/ice_dcb.h
+++ b/sys/dev/ice/ice_dcb.h
@@ -215,64 +215,64 @@ struct ice_dcbx_variables {
u32 deftsaassignment;
};
-enum ice_status
+int
ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
u16 buf_size, u16 *local_len, u16 *remote_len,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv,
void *buf, u16 buf_size, u16 tlv_len, u16 *mib_len,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf,
u16 buf_size, u16 old_len, u16 new_len, u16 offset,
u16 *mib_len, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
struct ice_aqc_get_cee_dcb_cfg_resp *buff,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd);
-enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg);
+int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg);
u8 ice_get_dcbx_status(struct ice_hw *hw);
-enum ice_status
+int
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
-enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
-enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
+int ice_get_dcb_cfg(struct ice_port_info *pi);
+int ice_set_dcb_cfg(struct ice_port_info *pi);
void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
struct ice_rq_event_info *event);
-enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
+int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg);
-enum ice_status
+int
ice_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cmd_details);
-enum ice_status
+int
ice_aq_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf);
-enum ice_status
+int
ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd);
-enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
-enum ice_status
+int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
+int
ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
struct ice_sq_cd *cd);
#endif /* _ICE_DCB_H_ */
diff --git a/sys/dev/ice/ice_ddp_common.c b/sys/dev/ice/ice_ddp_common.c
index a1573f5ea998..87ecdad5e7bf 100644
--- a/sys/dev/ice/ice_ddp_common.c
+++ b/sys/dev/ice/ice_ddp_common.c
@@ -46,14 +46,14 @@
*
* Download Package (0x0C40)
*/
-static enum ice_status
+static int
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, bool last_buf, u32 *error_offset,
u32 *error_info, struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (error_offset)
*error_offset = 0;
@@ -91,7 +91,7 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
*
* Upload Section (0x0C41)
*/
-enum ice_status
+int
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd)
{
@@ -115,14 +115,14 @@ ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
*
* Update Package (0x0C42)
*/
-static enum ice_status
+static int
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
bool last_buf, u32 *error_offset, u32 *error_info,
struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (error_offset)
*error_offset = 0;
@@ -255,10 +255,10 @@ ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
* @bufs: pointer to an array of buffers
* @count: the number of buffers in the array
*/
-enum ice_status
+int
ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u32 i;
for (i = 0; i < count; i++) {
@@ -287,10 +287,10 @@ ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
*
* Obtains change lock and updates package.
*/
-enum ice_status
+int
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
- enum ice_status status;
+ int status;
status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
if (status)
@@ -394,8 +394,8 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
return ICE_DDP_PKG_SUCCESS;
for (i = 0; i < count; i++) {
- enum ice_status status;
bool last = false;
+ int status;
bh = (struct ice_buf_hdr *)(bufs + start + i);
@@ -430,7 +430,7 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
*
* Get Package Info List (0x0C43)
*/
-static enum ice_status
+static int
ice_aq_get_pkg_info_list(struct ice_hw *hw,
struct ice_aqc_get_pkg_info_resp *pkg_info,
u16 buf_size, struct ice_sq_cd *cd)
@@ -443,21 +443,6 @@ ice_aq_get_pkg_info_list(struct ice_hw *hw,
}
/**
- * ice_has_signing_seg - determine if package has a signing segment
- * @hw: pointer to the hardware structure
- * @pkg_hdr: pointer to the driver's package hdr
- */
-static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
-{
- struct ice_generic_seg_hdr *seg_hdr;
-
- seg_hdr = (struct ice_generic_seg_hdr *)
- ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr);
-
- return seg_hdr ? true : false;
-}
-
-/**
* ice_get_pkg_segment_id - get correct package segment id, based on device
* @mac_type: MAC type of the device
*/
@@ -466,6 +451,9 @@ static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
u32 seg_id;
switch (mac_type) {
+ case ICE_MAC_E830:
+ seg_id = SEGMENT_TYPE_ICE_E830;
+ break;
case ICE_MAC_GENERIC:
case ICE_MAC_GENERIC_3K:
case ICE_MAC_GENERIC_3K_E825:
@@ -486,6 +474,9 @@ static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
u32 sign_type;
switch (mac_type) {
+ case ICE_MAC_E830:
+ sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB;
+ break;
case ICE_MAC_GENERIC_3K:
sign_type = SEGMENT_SIGN_TYPE_RSA3K;
break;
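A hedged sketch of how the two helpers above are presumably consumed. ice_get_signing_req() is not shown in this hunk, so the body below is an assumption inferred from the hw->pkg_seg_id and hw->pkg_sign_type fields used later in this file.

static void
ice_get_signing_req(struct ice_hw *hw)
{
	/* select the package segment ID and signing type for this MAC */
	hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type);
	hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type);
}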
@@ -595,6 +586,14 @@ ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
if (state)
goto exit;
+ if (count == 0) {
+ /* this is a "Reference Signature Segment" and download should
+ * be only for the buffers in the signature segment (and not
+ * the hardware configuration segment)
+ */
+ goto exit;
+ }
+
state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
count);
@@ -633,7 +632,7 @@ static enum ice_ddp_state
ice_post_dwnld_pkg_actions(struct ice_hw *hw)
{
enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
- enum ice_status status;
+ int status;
status = ice_set_vlan_mode(hw);
if (status) {
@@ -655,7 +654,7 @@ ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
enum ice_aq_err aq_err = hw->adminq.sq_last_status;
enum ice_ddp_state state = ICE_DDP_PKG_ERR;
- enum ice_status status;
+ int status;
u32 i;
ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id);
@@ -701,8 +700,8 @@ static enum ice_ddp_state
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
- enum ice_status status;
struct ice_buf_hdr *bh;
+ int status;
if (!bufs || !count)
return ICE_DDP_PKG_ERR;
@@ -779,7 +778,7 @@ ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
{
enum ice_ddp_state state;
- if (hw->pkg_has_signing_seg)
+ if (ice_match_signing_seg(pkg_hdr, hw->pkg_seg_id, hw->pkg_sign_type))
state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
else
state = ice_download_pkg_without_sig_seg(hw, ice_seg);
@@ -804,7 +803,6 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
if (!pkg_hdr)
return ICE_DDP_PKG_ERR;
- hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr);
ice_get_signing_req(hw);
ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
@@ -988,7 +986,7 @@ ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
* The ice_seg parameter must not be NULL since the first call to
* ice_enum_labels requires a pointer to an actual ice_seg structure.
*/
-enum ice_status
+int
ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
u16 *value)
{
@@ -1005,7 +1003,7 @@ ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
label_name = ice_enum_labels(ice_seg, type, &state, &val);
if (label_name && !strcmp(label_name, name)) {
*value = val;
- return ICE_SUCCESS;
+ return 0;
}
ice_seg = NULL;
@@ -1100,7 +1098,6 @@ static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
(pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
-
return ICE_DDP_PKG_SUCCESS;
}
@@ -1245,7 +1242,7 @@ static int ice_get_prof_index_max(struct ice_hw *hw)
hw->switch_info->max_used_prof_index = max_prof_index;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1269,11 +1266,8 @@ ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded)
} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
- } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
- hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
- return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
} else {
- return ICE_DDP_PKG_ERR;
+ return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
}
}
@@ -1340,12 +1334,6 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
if (state)
return state;
- /* For packages with signing segments, must be a matching segment */
- if (hw->pkg_has_signing_seg)
- if (!ice_match_signing_seg(pkg, hw->pkg_seg_id,
- hw->pkg_sign_type))
- return ICE_DDP_PKG_ERR;
-
/* before downloading the package, check package version for
* compatibility with driver
*/
@@ -1579,7 +1567,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
* NOTE: The caller of the function is responsible for freeing the memory
* allocated for every list entry.
*/
-enum ice_status
+int
ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
{
@@ -1638,7 +1626,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
ice_warn(hw, "Required profiles not found in currently loaded DDP package");
return ICE_ERR_CFG;
}
- return ICE_SUCCESS;
+ return 0;
err:
LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
@@ -1717,7 +1705,7 @@ void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
* result in some wasted space in the buffer.
* Note: all package contents must be in Little Endian form.
*/
-enum ice_status
+int
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
{
struct ice_buf_hdr *buf;
@@ -1742,7 +1730,7 @@ ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
FLEX_ARRAY_SIZE(buf, section_entry, count);
buf->data_end = CPU_TO_LE16(data_end);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1849,7 +1837,7 @@ ice_pkg_buf_alloc_single_section_err:
* result in some wasted space in the buffer.
* Note: all package contents must be in Little Endian form.
*/
-enum ice_status
+int
ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
{
struct ice_buf_hdr *buf;
@@ -1874,7 +1862,7 @@ ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
FLEX_ARRAY_SIZE(buf, section_entry, count);
buf->data_end = CPU_TO_LE16(data_end);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2193,7 +2181,7 @@ ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
* if it is found. The ice_seg parameter must not be NULL since the first call
* to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
*/
-static enum ice_status
+static int
ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
struct ice_boost_tcam_entry **entry)
{
@@ -2212,7 +2200,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
ice_boost_tcam_handler);
if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
*entry = tcam;
- return ICE_SUCCESS;
+ return 0;
}
ice_seg = NULL;
@@ -2275,18 +2263,18 @@ void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
* or writing of the package. When attempting to obtain write access, the
* caller must check for the following two return values:
*
- * ICE_SUCCESS - Means the caller has acquired the global config lock
+ * 0 - Means the caller has acquired the global config lock
* and can perform writing of the package.
* ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
* package or has found that no update was necessary; in
* this case, the caller can just skip performing any
* update of the package.
*/
-enum ice_status
+int
ice_acquire_global_cfg_lock(struct ice_hw *hw,
enum ice_aq_res_access_type access)
{
- enum ice_status status;
+ int status;
status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
ICE_GLOBAL_CFG_LOCK_TIMEOUT);
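A hedged caller sketch illustrating the two return values called out above; the actual package download is elided and the function name is illustrative.

static int
ice_download_pkg_example(struct ice_hw *hw)
{
	int status;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status == ICE_ERR_AQ_NO_WORK)
		return (0);	/* another PF already loaded the package */
	if (status)
		return (status);

	/* ... download or update the DDP package here ... */

	ice_release_global_cfg_lock(hw);
	return (0);
}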
@@ -2315,7 +2303,7 @@ void ice_release_global_cfg_lock(struct ice_hw *hw)
*
* This function will request ownership of the change lock.
*/
-enum ice_status
+int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
@@ -2344,13 +2332,13 @@ void ice_release_change_lock(struct ice_hw *hw)
*
* The function will get or set tx topology
*/
-static enum ice_status
+static int
ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
struct ice_sq_cd *cd, u8 *flags, bool set)
{
struct ice_aqc_get_set_tx_topo *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.get_set_tx_topo;
if (set) {
@@ -2360,11 +2348,16 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
if (buf)
cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
+
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
+
+ if (!ice_is_e830(hw))
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
}
- desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (status)
return status;
@@ -2372,7 +2365,7 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
if (!set && flags)
*flags = desc.params.get_set_tx_topo.set_flags;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2384,7 +2377,7 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
* The function will apply the new Tx topology from the package buffer
* if available.
*/
-enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
+int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
{
u8 *current_topo, *new_topo = NULL;
struct ice_run_time_cfg_seg *seg;
@@ -2392,8 +2385,8 @@ enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
struct ice_pkg_hdr *pkg_hdr;
enum ice_ddp_state state;
u16 i, size = 0, offset;
- enum ice_status status;
u32 reg = 0;
+ int status;
u8 flags;
if (!buf || !len)
@@ -2514,7 +2507,7 @@ update_topo:
/* Reset is in progress, re-init the hw again */
ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. layer topology might be applied already\n");
ice_check_reset(hw);
- return ICE_SUCCESS;
+ return 0;
}
/* set new topology */
@@ -2531,5 +2524,5 @@ update_topo:
/* CORER will clear the global lock, so no explicit call
* required for release
*/
- return ICE_SUCCESS;
+ return 0;
}
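A hedged caller sketch for ice_cfg_tx_topo(): a nonzero return is treated as non-fatal here, leaving whatever Tx topology the firmware already has in place. Buffer and function names are illustrative.

static void
ice_apply_tx_topo_example(struct ice_hw *hw, u8 *ddp_buf, u32 ddp_len)
{
	int status;

	status = ice_cfg_tx_topo(hw, ddp_buf, ddp_len);
	if (status)
		/* non-fatal: keep the topology the firmware already uses */
		ice_debug(hw, ICE_DBG_INIT,
			  "Tx topology not applied, status %d\n", status);
}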
diff --git a/sys/dev/ice/ice_ddp_common.h b/sys/dev/ice/ice_ddp_common.h
index 9305dc83520d..b7dae1f526f0 100644
--- a/sys/dev/ice/ice_ddp_common.h
+++ b/sys/dev/ice/ice_ddp_common.h
@@ -134,6 +134,7 @@ struct ice_generic_seg_hdr {
#define SEGMENT_TYPE_METADATA 0x00000001
#define SEGMENT_TYPE_ICE_E810 0x00000010
#define SEGMENT_TYPE_SIGNING 0x00001001
+#define SEGMENT_TYPE_ICE_E830 0x00000017
#define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020
__le32 seg_type;
struct ice_pkg_ver seg_format_ver;
@@ -413,26 +414,26 @@ struct ice_pkg_enum {
struct ice_hw;
-enum ice_status
+int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
void *
ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
-enum ice_status
+int
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
-enum ice_status
+int
ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
-enum ice_status
+int
ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
-enum ice_status
+int
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
-enum ice_status
+int
ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
void ice_release_global_cfg_lock(struct ice_hw *hw);
struct ice_generic_seg_hdr *
@@ -444,7 +445,7 @@ enum ice_ddp_state
ice_get_pkg_info(struct ice_hw *hw);
void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg);
struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg);
-enum ice_status
+int
ice_acquire_global_cfg_lock(struct ice_hw *hw,
enum ice_aq_res_access_type access);
@@ -473,6 +474,6 @@ ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
-enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len);
+int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len);
-#endif /* _ICE_DDP_COMMON_H_ */
+#endif /* _ICE_DDP_H_ */
diff --git a/sys/dev/ice/ice_devids.h b/sys/dev/ice/ice_devids.h
index b5cbbfda6a3b..396f59b9d6d9 100644
--- a/sys/dev/ice/ice_devids.h
+++ b/sys/dev/ice/ice_devids.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2023, Intel Corporation
+/* Copyright (c) 2024, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,24 @@
#define ICE_DEV_ID_E823L_1GBE 0x124F
/* Intel(R) Ethernet Connection E823-L for QSFP */
#define ICE_DEV_ID_E823L_QSFP 0x151D
+/* Intel(R) Ethernet Controller E830-CC for backplane */
+#define ICE_DEV_ID_E830_BACKPLANE 0x12D1
+/* Intel(R) Ethernet Controller E830-CC for QSFP */
+#define ICE_DEV_ID_E830_QSFP56 0x12D2
+/* Intel(R) Ethernet Controller E830-CC for SFP */
+#define ICE_DEV_ID_E830_SFP 0x12D3
+/* Intel(R) Ethernet Controller E830-C for backplane */
+#define ICE_DEV_ID_E830C_BACKPLANE 0x12D5
+/* Intel(R) Ethernet Controller E830-XXV for backplane */
+#define ICE_DEV_ID_E830_XXV_BACKPLANE 0x12DC
+/* Intel(R) Ethernet Controller E830-C for QSFP */
+#define ICE_DEV_ID_E830C_QSFP 0x12D8
+/* Intel(R) Ethernet Controller E830-XXV for QSFP */
+#define ICE_DEV_ID_E830_XXV_QSFP 0x12DD
+/* Intel(R) Ethernet Controller E830-C for SFP */
+#define ICE_DEV_ID_E830C_SFP 0x12DA
+/* Intel(R) Ethernet Controller E830-XXV for SFP */
+#define ICE_DEV_ID_E830_XXV_SFP 0x12DE
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
@@ -52,11 +70,11 @@
#define ICE_DEV_ID_E810C_SFP 0x1593
#define ICE_SUBDEV_ID_E810T 0x000E
#define ICE_SUBDEV_ID_E810T2 0x000F
-#define ICE_SUBDEV_ID_E810T3 0x02E9
-#define ICE_SUBDEV_ID_E810T4 0x02EA
-#define ICE_SUBDEV_ID_E810T5 0x0010
-#define ICE_SUBDEV_ID_E810T6 0x0012
-#define ICE_SUBDEV_ID_E810T7 0x0011
+#define ICE_SUBDEV_ID_E810T3 0x0010
+#define ICE_SUBDEV_ID_E810T4 0x0011
+#define ICE_SUBDEV_ID_E810T5 0x0012
+#define ICE_SUBDEV_ID_E810T6 0x02E9
+#define ICE_SUBDEV_ID_E810T7 0x02EA
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
@@ -91,4 +109,12 @@
#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
/* Intel(R) Ethernet Connection E822-L 1GbE */
#define ICE_DEV_ID_E822L_SGMII 0x189A
+/* Intel(R) Ethernet Connection E825-C for backplane */
+#define ICE_DEV_ID_E825C_BACKPLANE 0x579C
+/* Intel(R) Ethernet Connection E825-C for QSFP */
+#define ICE_DEV_ID_E825C_QSFP 0x579D
+/* Intel(R) Ethernet Connection E825-C for SFP */
+#define ICE_DEV_ID_E825C_SFP 0x579E
+/* Intel(R) Ethernet Connection E825-C 1GbE */
+#define ICE_DEV_ID_E825C_SGMII 0x579F
#endif /* _ICE_DEVIDS_H_ */
diff --git a/sys/dev/ice/ice_drv_info.h b/sys/dev/ice/ice_drv_info.h
index 8e1200e08a64..6f4d5f05edd0 100644
--- a/sys/dev/ice/ice_drv_info.h
+++ b/sys/dev/ice/ice_drv_info.h
@@ -62,16 +62,16 @@
* @var ice_rc_version
* @brief driver release candidate version number
*/
-const char ice_driver_version[] = "1.39.13-k";
+const char ice_driver_version[] = "1.42.1-k";
const uint8_t ice_major_version = 1;
-const uint8_t ice_minor_version = 39;
-const uint8_t ice_patch_version = 13;
+const uint8_t ice_minor_version = 42;
+const uint8_t ice_patch_version = 1;
const uint8_t ice_rc_version = 0;
#define PVIDV(vendor, devid, name) \
- PVID(vendor, devid, name " - 1.39.13-k")
+ PVID(vendor, devid, name " - 1.42.1-k")
#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
- PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.39.13-k")
+ PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.42.1-k")
/**
* @var ice_vendor_info_array
@@ -190,6 +190,45 @@ static const pci_vendor_info_t ice_vendor_info_array[] = {
"Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 3.0"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
"Intel(R) Ethernet Controller E810-XXV for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_BACKPLANE,
+ "Intel(R) Ethernet Connection E830-CC for backplane"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_QSFP56,
+ ICE_INTEL_VENDOR_ID, 0x0002, 0,
+ "Intel(R) Ethernet Network Adapter E830-C-Q2 for OCP 3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_QSFP56,
+ ICE_INTEL_VENDOR_ID, 0x0004, 0,
+ "Intel(R) Ethernet Network Adapter E830-CC-Q1 for OCP 3.0"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_QSFP56,
+ "Intel(R) Ethernet Connection E830-CC for QSFP56"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0001, 0,
+ "Intel(R) Ethernet Network Adapter E830-XXV-2 for OCP 3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0003, 0,
+ "Intel(R) Ethernet Network Adapter E830-XXV-2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0004, 0,
+ "Intel(R) Ethernet Network Adapter E830-XXV-4 for OCP 3.0"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_SFP,
+ "Intel(R) Ethernet Connection E830-CC for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830C_BACKPLANE,
+ "Intel(R) Ethernet Connection E830-C for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830C_QSFP,
+ "Intel(R) Ethernet Connection E830-C for QSFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830C_SFP,
+ "Intel(R) Ethernet Connection E830-C for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_XXV_BACKPLANE,
+ "Intel(R) Ethernet Connection E830-XXV for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_XXV_QSFP,
+ "Intel(R) Ethernet Connection E830-XXV for QSFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_XXV_SFP,
+ "Intel(R) Ethernet Connection E830-XXV for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_BACKPLANE,
+ "Intel(R) Ethernet Connection E825-C for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_QSFP,
+ "Intel(R) Ethernet Connection E825-C for QSFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_SFP,
+ "Intel(R) Ethernet Connection E825-C for SFP"),
PVID_END
};
diff --git a/sys/dev/ice/ice_features.h b/sys/dev/ice/ice_features.h
index 03b8c63af291..821abe4806ca 100644
--- a/sys/dev/ice/ice_features.h
+++ b/sys/dev/ice/ice_features.h
@@ -71,6 +71,8 @@ enum feat_list {
ICE_FEATURE_TX_BALANCE,
ICE_FEATURE_DUAL_NAC,
ICE_FEATURE_TEMP_SENSOR,
+ ICE_FEATURE_NEXT_CLUSTER_ID,
+ ICE_FEATURE_PHY_STATISTICS,
/* Must be last entry */
ICE_FEATURE_COUNT
};
@@ -89,6 +91,7 @@ enum feat_list {
static inline void
ice_disable_unsupported_features(ice_bitmap_t __unused *bitmap)
{
+ ice_clear_bit(ICE_FEATURE_SRIOV, bitmap);
#ifndef DEV_NETMAP
ice_clear_bit(ICE_FEATURE_NETMAP, bitmap);
#endif
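The two feature bits added above are presumably gated at run time like the existing ones. A hedged sketch, assuming the driver's existing ice_is_bit_set() bitmap helper; the wrapper name itself is illustrative.

/* Hedged sketch: gate the E810 PHY statistics sysctls on the feature bit. */
static bool
ice_phy_stats_enabled(const ice_bitmap_t *feat_en)
{
	return (ice_is_bit_set(feat_en, ICE_FEATURE_PHY_STATISTICS));
}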
diff --git a/sys/dev/ice/ice_flex_pipe.c b/sys/dev/ice/ice_flex_pipe.c
index f103e2aa6e71..683e23483b0a 100644
--- a/sys/dev/ice/ice_flex_pipe.c
+++ b/sys/dev/ice/ice_flex_pipe.c
@@ -193,7 +193,7 @@ void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
* ------------------------------
* Result: key: b01 10 11 11 00 00
*/
-static enum ice_status
+static int
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
u8 *key_inv)
{
@@ -237,7 +237,7 @@ ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
in_key_inv >>= 1;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -295,7 +295,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
* dc == NULL --> dc mask is all 0's (no don't care bits)
* nm == NULL --> nm mask is all 0's (no never match bits)
*/
-static enum ice_status
+static int
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
u16 len)
{
@@ -324,7 +324,7 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
key + off + i, key + half_size + off + i))
return ICE_ERR_CFG;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -461,19 +461,19 @@ ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
* creating a package buffer with the tunnel info and issuing an update package
* command.
*/
-enum ice_status
+int
ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
- enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
+ int status = ICE_ERR_MAX_LIMIT;
u16 index;
ice_acquire_lock(&hw->tnl_lock);
if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
hw->tnl.tbl[index].ref++;
- status = ICE_SUCCESS;
+ status = 0;
goto ice_create_tunnel_end;
}
@@ -548,11 +548,11 @@ ice_create_tunnel_end:
* targeting the specific updates requested and then performing an update
* package.
*/
-enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
+int ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
- enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
+ int status = ICE_ERR_MAX_LIMIT;
u16 count = 0;
u16 index;
u16 size;
@@ -563,7 +563,7 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
if (hw->tnl.tbl[index].ref > 1) {
hw->tnl.tbl[index].ref--;
- status = ICE_SUCCESS;
+ status = 0;
goto ice_destroy_tunnel_end;
}
@@ -649,9 +649,9 @@ ice_destroy_tunnel_end:
*
* Replays all tunnels
*/
-enum ice_status ice_replay_tunnels(struct ice_hw *hw)
+int ice_replay_tunnels(struct ice_hw *hw)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u16 i;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -699,7 +699,7 @@ enum ice_status ice_replay_tunnels(struct ice_hw *hw)
* @prot: variable to receive the protocol ID
* @off: variable to receive the protocol offset
*/
-enum ice_status
+int
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off)
{
@@ -716,7 +716,7 @@ ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
*prot = fv_ext[fv_idx].prot_id;
*off = fv_ext[fv_idx].off;
- return ICE_SUCCESS;
+ return 0;
}
/* PTG Management */
@@ -729,11 +729,11 @@ ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
* This function will update the XLT1 hardware table to reflect the new
* packet type group configuration.
*/
-enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
+int ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
{
struct ice_xlt1_section *sect;
struct ice_buf_build *bld;
- enum ice_status status;
+ int status;
u16 index;
bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1),
@@ -766,14 +766,14 @@ enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
* PTG ID that contains it through the PTG parameter, with the value of
* ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
*/
-static enum ice_status
+static int
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
{
if (ptype >= ICE_XLT1_CNT || !ptg)
return ICE_ERR_PARAM;
*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -825,7 +825,7 @@ void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg)
* This function will remove the ptype from the specific PTG, and move it to
* the default PTG (ICE_DEFAULT_PTG).
*/
-static enum ice_status
+static int
ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
struct ice_ptg_ptype **ch;
@@ -857,7 +857,7 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -872,11 +872,11 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
* a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
* default PTG.
*/
-static enum ice_status
+static int
ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
- enum ice_status status;
u8 original_ptg;
+ int status;
if (ptype > ICE_XLT1_CNT - 1)
return ICE_ERR_PARAM;
@@ -890,7 +890,7 @@ ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
/* Is ptype already in the correct PTG? */
if (original_ptg == ptg)
- return ICE_SUCCESS;
+ return 0;
/* Remove from original PTG and move back to the default PTG */
if (original_ptg != ICE_DEFAULT_PTG)
@@ -898,7 +898,7 @@ ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
/* Moving to default PTG? Then we're done with this request */
if (ptg == ICE_DEFAULT_PTG)
- return ICE_SUCCESS;
+ return 0;
/* Add ptype to PTG at beginning of list */
hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
@@ -909,7 +909,7 @@ ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
hw->blk[blk].xlt1.t[ptype] = ptg;
- return ICE_SUCCESS;
+ return 0;
}
/* Block / table size info */
@@ -1016,13 +1016,13 @@ ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
* This function will update the XLT2 hardware table with the input VSI
* group configuration.
*/
-static enum ice_status
+static int
ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi,
u16 vsig)
{
struct ice_xlt2_section *sect;
struct ice_buf_build *bld;
- enum ice_status status;
+ int status;
bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2),
ice_struct_size(sect, value, 1),
@@ -1049,14 +1049,14 @@ ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi,
* This function will update the XLT2 hardware table with the input VSI
* group configuration of used vsis.
*/
-enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
+int ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
{
u16 vsi;
for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) {
/* update only vsis that have been changed */
if (hw->blk[blk].xlt2.vsis[vsi].changed) {
- enum ice_status status;
+ int status;
u16 vsig;
vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
@@ -1068,7 +1068,7 @@ enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
}
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1081,7 +1081,7 @@ enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
 * This function will look up the VSI entry in the XLT2 list and return
 * the VSI group it is associated with.
*/
-enum ice_status
+int
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
{
if (!vsig || vsi >= ICE_MAX_VSI)
@@ -1093,7 +1093,7 @@ ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
*/
*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1150,7 +1150,7 @@ static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
* for, the list must match exactly, including the order in which the
* characteristics are listed.
*/
-static enum ice_status
+static int
ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
struct LIST_HEAD_TYPE *chs, u16 *vsig)
{
@@ -1161,7 +1161,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
if (xlt2->vsig_tbl[i].in_use &&
ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
- return ICE_SUCCESS;
+ return 0;
}
return ICE_ERR_DOES_NOT_EXIST;
@@ -1176,7 +1176,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
* The function will remove all VSIs associated with the input VSIG and move
* them to the DEFAULT_VSIG and mark the VSIG available.
*/
-static enum ice_status
+static int
ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
struct ice_vsig_prof *dtmp, *del;
@@ -1224,7 +1224,7 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
*/
INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1237,7 +1237,7 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
* The function will remove the input VSI from its VSI group and move it
* to the DEFAULT_VSIG.
*/
-static enum ice_status
+static int
ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
@@ -1253,7 +1253,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
/* entry already in default VSIG, don't have to remove */
if (idx == ICE_DEFAULT_VSIG)
- return ICE_SUCCESS;
+ return 0;
vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
if (!(*vsi_head))
@@ -1280,7 +1280,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
vsi_cur->changed = 1;
vsi_cur->next_vsi = NULL;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1295,12 +1295,12 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
* move the entry to the DEFAULT_VSIG, update the original VSIG and
* then move entry to the new VSIG.
*/
-static enum ice_status
+static int
ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_vsig_vsi *tmp;
- enum ice_status status;
u16 orig_vsig, idx;
+ int status;
idx = vsig & ICE_VSIG_IDX_M;
@@ -1320,7 +1320,7 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
/* no update required if vsigs match */
if (orig_vsig == vsig)
- return ICE_SUCCESS;
+ return 0;
if (orig_vsig != ICE_DEFAULT_VSIG) {
/* remove entry from orig_vsig and add to default VSIG */
@@ -1330,7 +1330,7 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
}
if (idx == ICE_DEFAULT_VSIG)
- return ICE_SUCCESS;
+ return 0;
/* Create VSI entry and add VSIG and prop_mask values */
hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
@@ -1343,7 +1343,7 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
hw->blk[blk].xlt2.t[vsi] = vsig;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1353,7 +1353,7 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
* @fv: field vector to search for
* @prof_id: receives the profile ID
*/
-static enum ice_status
+static int
ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
struct ice_fv_word *fv, u8 *prof_id)
{
@@ -1368,7 +1368,7 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
continue;
*prof_id = i;
- return ICE_SUCCESS;
+ return 0;
}
return ICE_ERR_DOES_NOT_EXIST;
@@ -1424,7 +1424,7 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
* This function allocates a new entry in a Profile ID TCAM for a specific
* block.
*/
-static enum ice_status
+static int
ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
u16 *tcam_idx)
{
@@ -1444,7 +1444,7 @@ ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
*
* This function frees an entry in a Profile ID TCAM for a specific block.
*/
-static enum ice_status
+static int
ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
{
u16 res_type;
@@ -1464,12 +1464,12 @@ ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
* This function allocates a new profile ID, which also corresponds to a Field
* Vector (Extraction Sequence) entry.
*/
-static enum ice_status
+static int
ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
{
- enum ice_status status;
u16 res_type;
u16 get_prof;
+ int status;
if (!ice_prof_id_rsrc_type(blk, &res_type))
return ICE_ERR_PARAM;
@@ -1489,7 +1489,7 @@ ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
*
* This function frees a profile ID, which also corresponds to a Field Vector.
*/
-static enum ice_status
+static int
ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
u16 tmp_prof_id = (u16)prof_id;
@@ -1507,7 +1507,7 @@ ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
* @blk: the block from which to free the profile ID
* @prof_id: the profile ID for which to increment the reference count
*/
-static enum ice_status
+static int
ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
if (prof_id > hw->blk[blk].es.count)
@@ -1515,7 +1515,7 @@ ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
hw->blk[blk].es.ref_count[prof_id]++;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1548,7 +1548,7 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
* @blk: the block from which to free the profile ID
* @prof_id: the profile ID for which to decrement the reference count
*/
-static enum ice_status
+static int
ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
if (prof_id > hw->blk[blk].es.count)
@@ -1561,7 +1561,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
}
}
- return ICE_SUCCESS;
+ return 0;
}
/* Block / table section IDs */
@@ -1811,7 +1811,7 @@ void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
* ice_init_hw_tbls - init hardware table memory
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
+int ice_init_hw_tbls(struct ice_hw *hw)
{
u8 i;
@@ -1916,7 +1916,7 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
goto err;
}
- return ICE_SUCCESS;
+ return 0;
err:
ice_free_hw_tbls(hw);
@@ -2143,7 +2143,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
* @nm_msk: never match mask
* @key: output of profile ID key
*/
-static enum ice_status
+static int
ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
@@ -2199,7 +2199,7 @@ ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
* @dc_msk: don't care mask
* @nm_msk: never match mask
*/
-static enum ice_status
+static int
ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
@@ -2207,7 +2207,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
{
struct ice_prof_tcam_entry;
- enum ice_status status;
+ int status;
status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
@@ -2226,7 +2226,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
* @vsig: VSIG to query
* @refs: pointer to variable to receive the reference count
*/
-static enum ice_status
+static int
ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
{
u16 idx = vsig & ICE_VSIG_IDX_M;
@@ -2243,7 +2243,7 @@ ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
ptr = ptr->next_vsi;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2276,7 +2276,7 @@ ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
{
@@ -2307,7 +2307,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
ICE_NONDMA_TO_NONDMA);
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2317,7 +2317,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
{
@@ -2348,7 +2348,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
ICE_NONDMA_TO_NONDMA);
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2357,7 +2357,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
struct LIST_HEAD_TYPE *chgs)
{
@@ -2383,7 +2383,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
p->value[0] = tmp->ptg;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2392,7 +2392,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
struct LIST_HEAD_TYPE *chgs)
{
@@ -2425,7 +2425,7 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
}
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2434,18 +2434,18 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
* @blk: hardware block
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
struct LIST_HEAD_TYPE *chgs)
{
struct ice_buf_build *b;
struct ice_chs_chg *tmp;
- enum ice_status status;
u16 pkg_sects;
u16 xlt1 = 0;
u16 xlt2 = 0;
u16 tcam = 0;
u16 es = 0;
+ int status;
u16 sects;
/* count number of sections we need */
@@ -2472,7 +2472,7 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
sects = xlt1 + xlt2 + tcam + es;
if (!sects)
- return ICE_SUCCESS;
+ return 0;
/* Build update package buffer */
b = ice_pkg_buf_alloc(hw);
@@ -2541,13 +2541,13 @@ error_tmp:
* it will not be written until the first call to ice_add_flow that specifies
* the ID value used here.
*/
-enum ice_status
+int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
ice_bitmap_t *ptypes, struct ice_fv_word *es)
{
ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
- enum ice_status status;
+ int status;
u8 prof_id;
u16 ptype;
@@ -2602,7 +2602,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
}
LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
- status = ICE_SUCCESS;
+ status = 0;
err_ice_add_prof:
ice_release_lock(&hw->blk[blk].es.prof_map_lock);
@@ -2640,17 +2640,17 @@ ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
* @id: profile tracking ID
* @cntxt: context
*/
-enum ice_status
+int
ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt)
{
- enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_prof_map *entry;
+ int status = ICE_ERR_DOES_NOT_EXIST;
ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
entry = ice_search_prof_id(hw, blk, id);
if (entry) {
entry->context = cntxt;
- status = ICE_SUCCESS;
+ status = 0;
}
ice_release_lock(&hw->blk[blk].es.prof_map_lock);
return status;
@@ -2663,17 +2663,17 @@ ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt)
* @id: profile tracking ID
* @cntxt: pointer to variable to receive the context
*/
-enum ice_status
+int
ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt)
{
- enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_prof_map *entry;
+ int status = ICE_ERR_DOES_NOT_EXIST;
ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
entry = ice_search_prof_id(hw, blk, id);
if (entry) {
*cntxt = entry->context;
- status = ICE_SUCCESS;
+ status = 0;
}
ice_release_lock(&hw->blk[blk].es.prof_map_lock);
return status;
@@ -2704,14 +2704,14 @@ ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
* @blk: hardware block
* @idx: the index to release
*/
-static enum ice_status
+static int
ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
{
/* Masks to invoke a never match entry */
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
- enum ice_status status;
+ int status;
/* write the TCAM entry */
status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
@@ -2731,11 +2731,11 @@ ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
* @blk: hardware block
* @prof: pointer to profile structure to remove
*/
-static enum ice_status
+static int
ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
struct ice_vsig_prof *prof)
{
- enum ice_status status;
+ int status;
u16 i;
for (i = 0; i < prof->tcam_count; i++)
@@ -2747,7 +2747,7 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
return ICE_ERR_HW_TABLE;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2757,7 +2757,7 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
* @vsig: the VSIG to remove
* @chg: the change list
*/
-static enum ice_status
+static int
ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct LIST_HEAD_TYPE *chg)
{
@@ -2769,7 +2769,7 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
LIST_FOR_EACH_ENTRY_SAFE(d, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
ice_vsig_prof, list) {
- enum ice_status status;
+ int status;
status = ice_rem_prof_id(hw, blk, d);
if (status)
@@ -2814,7 +2814,7 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @hdl: profile handle indicating which profile to remove
* @chg: list to receive a record of changes
*/
-static enum ice_status
+static int
ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
struct LIST_HEAD_TYPE *chg)
{
@@ -2825,7 +2825,7 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
ice_vsig_prof, list)
if (p->profile_cookie == hdl) {
- enum ice_status status;
+ int status;
if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
/* this is the last profile, remove the VSIG */
@@ -2848,12 +2848,12 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
* @blk: hardware block
* @id: profile tracking ID
*/
-static enum ice_status
+static int
ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_chs_chg *del, *tmp;
- enum ice_status status;
struct LIST_HEAD_TYPE chg;
+ int status;
u16 i;
INIT_LIST_HEAD(&chg);
@@ -2889,10 +2889,10 @@ err_ice_rem_flow_all:
* previously created through ice_add_prof. If any existing entries
* are associated with this profile, they will be removed as well.
*/
-enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
+int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *pmap;
- enum ice_status status;
+ int status;
ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
@@ -2925,13 +2925,13 @@ err_ice_rem_prof:
* @hdl: profile handle
* @chg: change list
*/
-static enum ice_status
+static int
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
struct LIST_HEAD_TYPE *chg)
{
- enum ice_status status = ICE_SUCCESS;
struct ice_prof_map *map;
struct ice_chs_chg *p;
+ int status = 0;
u16 i;
ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
@@ -2979,7 +2979,7 @@ err_ice_get_prof:
*
* This routine makes a copy of the list of profiles in the specified VSIG.
*/
-static enum ice_status
+static int
ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct LIST_HEAD_TYPE *lst)
{
@@ -2999,7 +2999,7 @@ ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
LIST_ADD_TAIL(&p->list, lst);
}
- return ICE_SUCCESS;
+ return 0;
err_ice_get_profs_vsig:
LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
@@ -3017,13 +3017,13 @@ err_ice_get_profs_vsig:
* @lst: the list to be added to
* @hdl: profile handle of entry to add
*/
-static enum ice_status
+static int
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
struct LIST_HEAD_TYPE *lst, u64 hdl)
{
- enum ice_status status = ICE_SUCCESS;
struct ice_prof_map *map;
struct ice_vsig_prof *p;
+ int status = 0;
u16 i;
ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
@@ -3064,13 +3064,13 @@ err_ice_add_prof_to_lst:
* @vsig: the VSIG to move the VSI to
* @chg: the change list
*/
-static enum ice_status
+static int
ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
struct LIST_HEAD_TYPE *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
u16 orig_vsig;
+ int status;
p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
if (!p)
@@ -3092,7 +3092,7 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
LIST_ADD(&p->list_entry, chg);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3124,13 +3124,13 @@ ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg)
*
* This function appends an enable or disable TCAM entry in the change log
*/
-static enum ice_status
+static int
ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
u16 vsig, struct ice_tcam_inf *tcam,
struct LIST_HEAD_TYPE *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
+ int status;
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
@@ -3178,7 +3178,7 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
/* log change */
LIST_ADD(&p->list_entry, chg);
- return ICE_SUCCESS;
+ return 0;
err_ice_prof_tcam_ena_dis:
ice_free(hw, p);
@@ -3192,13 +3192,13 @@ err_ice_prof_tcam_ena_dis:
* @vsig: the VSIG for which to adjust profile priorities
* @chg: the change list
*/
-static enum ice_status
+static int
ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct LIST_HEAD_TYPE *chg)
{
ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
- enum ice_status status = ICE_SUCCESS;
struct ice_vsig_prof *t;
+ int status = 0;
u16 idx;
ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
@@ -3265,7 +3265,7 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @rev: true to add entries to the end of the list
* @chg: the change list
*/
-static enum ice_status
+static int
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
bool rev, struct LIST_HEAD_TYPE *chg)
{
@@ -3273,11 +3273,11 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
- enum ice_status status = ICE_SUCCESS;
struct ice_prof_map *map;
struct ice_vsig_prof *t;
struct ice_chs_chg *p;
u16 vsig_idx, i;
+ int status = 0;
/* Error, if this VSIG already has this profile */
if (ice_has_prof_vsig(hw, blk, vsig, hdl))
@@ -3371,13 +3371,13 @@ err_ice_add_prof_id_vsig:
* @hdl: the profile handle of the profile that will be added to the VSIG
* @chg: the change list
*/
-static enum ice_status
+static int
ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
struct LIST_HEAD_TYPE *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
u16 new_vsig;
+ int status;
p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
if (!p)
@@ -3404,7 +3404,7 @@ ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
LIST_ADD(&p->list_entry, chg);
- return ICE_SUCCESS;
+ return 0;
err_ice_create_prof_id_vsig:
/* let caller clean up the change list */
@@ -3421,13 +3421,13 @@ err_ice_create_prof_id_vsig:
* @new_vsig: return of new VSIG
* @chg: the change list
*/
-static enum ice_status
+static int
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
struct LIST_HEAD_TYPE *lst, u16 *new_vsig,
struct LIST_HEAD_TYPE *chg)
{
struct ice_vsig_prof *t;
- enum ice_status status;
+ int status;
u16 vsig;
vsig = ice_vsig_alloc(hw, blk);
@@ -3448,7 +3448,7 @@ ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
*new_vsig = vsig;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3462,8 +3462,8 @@ static bool
ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
{
struct ice_vsig_prof *t;
- enum ice_status status;
struct LIST_HEAD_TYPE lst;
+ int status;
INIT_LIST_HEAD(&lst);
@@ -3479,7 +3479,7 @@ ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
LIST_DEL(&t->list);
ice_free(hw, t);
- return status == ICE_SUCCESS;
+ return !status;
}
/**
@@ -3496,12 +3496,12 @@ ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
* save time in generating a new VSIG and TCAMs till a match is
* found and subsequent rollback when a matching VSIG is found.
*/
-enum ice_status
+int
ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_chs_chg *tmp, *del;
struct LIST_HEAD_TYPE chg;
- enum ice_status status;
+ int status;
/* if target VSIG is default the move is invalid */
if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
@@ -3534,14 +3534,14 @@ ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
* profile indicated by the ID parameter for the VSIs specified in the VSI
* array. Once successfully called, the flow will be enabled.
*/
-enum ice_status
+int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
struct ice_vsig_prof *tmp1, *del1;
struct ice_chs_chg *tmp, *del;
struct LIST_HEAD_TYPE union_lst;
- enum ice_status status;
struct LIST_HEAD_TYPE chg;
+ int status;
u16 vsig;
INIT_LIST_HEAD(&union_lst);
@@ -3681,21 +3681,21 @@ err_ice_add_prof_id_flow:
* profile indicated by the ID parameter for the VSIs specified in the VSI
* array. Once successfully called, the flow will be enabled.
*/
-enum ice_status
+int
ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
u64 id)
{
u16 i;
for (i = 0; i < count; i++) {
- enum ice_status status;
+ int status;
status = ice_add_prof_id_flow(hw, blk, vsi[i], id);
if (status)
return status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3704,7 +3704,7 @@ ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
* @lst: list to remove the profile from
* @hdl: the profile handle indicating the profile to remove
*/
-static enum ice_status
+static int
ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
{
struct ice_vsig_prof *ent, *tmp;
@@ -3713,7 +3713,7 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
if (ent->profile_cookie == hdl) {
LIST_DEL(&ent->list);
ice_free(hw, ent);
- return ICE_SUCCESS;
+ return 0;
}
return ICE_ERR_DOES_NOT_EXIST;
@@ -3730,13 +3730,13 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
* profile indicated by the ID parameter for the VSIs specified in the VSI
* array. Once successfully called, the flow will be disabled.
*/
-enum ice_status
+int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
struct ice_vsig_prof *tmp1, *del1;
struct ice_chs_chg *tmp, *del;
struct LIST_HEAD_TYPE chg, copy;
- enum ice_status status;
+ int status;
u16 vsig;
INIT_LIST_HEAD(&copy);
@@ -3864,19 +3864,19 @@ err_ice_rem_prof_id_flow:
 * using ice_add_flow. The ID value will indicate which profile will be
* removed. Once successfully called, the flow will be disabled.
*/
-enum ice_status
+int
ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
u64 id)
{
u16 i;
for (i = 0; i < count; i++) {
- enum ice_status status;
+ int status;
status = ice_rem_prof_id_flow(hw, blk, vsi[i], id);
if (status)
return status;
}
- return ICE_SUCCESS;
+ return 0;
}
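
The ice_flex_pipe.c changes above are almost entirely mechanical: functions that returned enum ice_status now return int, literal 0 replaces ICE_SUCCESS, and the ICE_ERR_* failure values are unchanged. A minimal caller-side sketch, using only the prototypes visible in this diff (the example_* helper itself is hypothetical):

#include "ice_flex_pipe.h"

/* Hypothetical helper: add a profile, use it, then tear it down again,
 * checking the new int-style status codes the same way callers elsewhere
 * in this patch do. */
static int
example_add_then_remove_prof(struct ice_hw *hw, u64 id,
			     ice_bitmap_t *ptypes, struct ice_fv_word *es)
{
	int status;	/* previously: enum ice_status status; */

	status = ice_add_prof(hw, ICE_BLK_RSS, id, ptypes, es);
	if (status)	/* any non-zero value is still an ICE_ERR_* code */
		return status;

	/* ... program flows against the profile here ... */

	return ice_rem_prof(hw, ICE_BLK_RSS, id);
}
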
diff --git a/sys/dev/ice/ice_flex_pipe.h b/sys/dev/ice/ice_flex_pipe.h
index 559905e7fb59..3351b2601e5e 100644
--- a/sys/dev/ice/ice_flex_pipe.h
+++ b/sys/dev/ice/ice_flex_pipe.h
@@ -34,10 +34,10 @@
#include "ice_type.h"
-enum ice_status
+int
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off);
-enum ice_status
+int
ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
u16 *value);
void
@@ -45,54 +45,54 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
ice_bitmap_t *bm);
void
ice_init_prof_result_bm(struct ice_hw *hw);
-enum ice_status
+int
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd);
bool
ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
u16 *port);
-enum ice_status
+int
ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
-enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
+int ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
bool
ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type);
-enum ice_status ice_replay_tunnels(struct ice_hw *hw);
+int ice_replay_tunnels(struct ice_hw *hw);
/* XLT1/PType group functions */
-enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk);
+int ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk);
void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg);
/* XLT2/VSI group functions */
-enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk);
-enum ice_status
+int ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk);
+int
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig);
-enum ice_status
+int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
ice_bitmap_t *ptypes, struct ice_fv_word *es);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
-enum ice_status
+int
ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig);
-enum ice_status
+int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
-enum ice_status
+int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
-enum ice_status
+int
ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt);
-enum ice_status
+int
ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt);
-enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
+int ice_init_hw_tbls(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
-enum ice_status
+int
ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
u64 id);
-enum ice_status
+int
ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
u64 id);
-enum ice_status
+int
ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
void ice_fill_blk_tbls(struct ice_hw *hw);
diff --git a/sys/dev/ice/ice_flow.c b/sys/dev/ice/ice_flow.c
index c04f86445767..a475833aef60 100644
--- a/sys/dev/ice/ice_flow.c
+++ b/sys/dev/ice/ice_flow.c
@@ -417,8 +417,7 @@ struct ice_flow_prof_params {
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
*/
-static enum ice_status
-ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
+static int ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
{
u8 i;
@@ -434,7 +433,7 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -444,7 +443,7 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
* This function identifies the packet types associated with the protocol
* headers being present in packet segments of the specified flow profile.
*/
-static enum ice_status
+static int
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
struct ice_flow_prof *prof;
@@ -544,10 +543,10 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
}
}
- return ICE_SUCCESS;
+ return 0;
}
-/**
+/*
* ice_flow_xtract_fld - Create an extraction sequence entry for the given field
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
@@ -558,7 +557,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
* field. It then allocates one or more extraction sequence entries for the
 * given field, and fills the entries with protocol ID and offset information.
*/
-static enum ice_status
+static int
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
u8 seg, enum ice_flow_field fld)
{
@@ -704,7 +703,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
off += ICE_FLOW_FV_EXTRACT_SZ;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -715,11 +714,11 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
* This function iterates through all matched fields in the given segments, and
* creates an extraction sequence for the fields.
*/
-static enum ice_status
+static int
ice_flow_create_xtrct_seq(struct ice_hw *hw,
struct ice_flow_prof_params *params)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 i;
for (i = 0; i < params->prof->segs_cnt; i++) {
@@ -744,10 +743,10 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
*/
-static enum ice_status
+static int
ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
{
- enum ice_status status;
+ int status;
status = ice_flow_proc_seg_hdrs(params);
if (status)
@@ -759,7 +758,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
switch (params->blk) {
case ICE_BLK_RSS:
- status = ICE_SUCCESS;
+ status = 0;
break;
default:
return ICE_ERR_NOT_IMPL;
@@ -869,18 +868,18 @@ ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
* @prof_id: the profile ID handle
* @hw_prof_id: pointer to variable to receive the HW profile ID
*/
-enum ice_status
+int
ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u8 *hw_prof_id)
{
- enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_prof_map *map;
+ int status = ICE_ERR_DOES_NOT_EXIST;
ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
map = ice_search_prof_id(hw, blk, prof_id);
if (map) {
*hw_prof_id = map->prof_id;
- status = ICE_SUCCESS;
+ status = 0;
}
ice_release_lock(&hw->blk[blk].es.prof_map_lock);
return status;
@@ -900,7 +899,7 @@ ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
*
* Assumption: the caller has acquired the lock to the profile list
*/
-static enum ice_status
+static int
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
enum ice_flow_dir dir, u64 prof_id,
struct ice_flow_seg_info *segs, u8 segs_cnt,
@@ -908,7 +907,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof **prof)
{
struct ice_flow_prof_params *params;
- enum ice_status status;
+ int status;
u8 i;
if (!prof || (acts_cnt && !acts))
@@ -976,11 +975,11 @@ free_params:
*
* Assumption: the caller has acquired the lock to the profile list
*/
-static enum ice_status
+static int
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof)
{
- enum ice_status status;
+ int status;
/* Remove all hardware profiles associated with this flow profile */
status = ice_rem_prof(hw, blk, prof->id);
@@ -1003,11 +1002,11 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
* be added has the same characteristics as the VSIG and will
* thereby have access to all resources added to that VSIG.
*/
-enum ice_status
+int
ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
u16 vsig)
{
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
return ICE_ERR_PARAM;
@@ -1030,11 +1029,11 @@ ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+static int
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
status = ice_add_prof_id_flow(hw, blk,
@@ -1061,11 +1060,11 @@ ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+static int
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
if (ice_is_bit_set(prof->vsis, vsi_handle)) {
status = ice_rem_prof_id_flow(hw, blk,
@@ -1094,13 +1093,13 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
* @acts_cnt: number of default actions
* @prof: stores the returned flow profile added
*/
-static enum ice_status
+static int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_action *acts, u8 acts_cnt,
struct ice_flow_prof **prof)
{
- enum ice_status status;
+ int status;
if (segs_cnt > ICE_FLOW_SEG_MAX)
return ICE_ERR_MAX_LIMIT;
@@ -1133,11 +1132,11 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
* @blk: the block for which the flow profile is to be removed
* @prof_id: unique ID of the flow profile to be removed
*/
-static enum ice_status
+static int
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
struct ice_flow_prof *prof;
- enum ice_status status;
+ int status;
ice_acquire_lock(&hw->fl_profs_locks[blk]);
@@ -1275,7 +1274,7 @@ ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
* header value to set flow field segment for further use in flow
* profile entry or removal.
*/
-static enum ice_status
+static int
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
const struct ice_rss_hash_cfg *cfg)
{
@@ -1323,7 +1322,7 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
if (val && !ice_is_pow2(val))
return ICE_ERR_CFG;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1360,18 +1359,18 @@ void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
* the VSI from that profile. If the flow profile has no VSIs it will
* be removed.
*/
-enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *p, *t;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u16 vsig;
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
if (LIST_EMPTY(&hw->fl_profs[blk]))
- return ICE_SUCCESS;
+ return 0;
ice_acquire_lock(&hw->rss_locks);
LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
@@ -1477,7 +1476,7 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
enum ice_rss_cfg_hdr_type hdr_type;
@@ -1497,7 +1496,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
r->hash.hdr_type == hdr_type) {
ice_set_bit(vsi_handle, r->vsis);
- return ICE_SUCCESS;
+ return 0;
}
rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
@@ -1512,7 +1511,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
- return ICE_SUCCESS;
+ return 0;
}
#define ICE_FLOW_PROF_HASH_S 0
@@ -1545,15 +1544,15 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *prof = NULL;
struct ice_flow_seg_info *segs;
- enum ice_status status;
u8 segs_cnt;
+ int status;
if (cfg->symm)
return ICE_ERR_PARAM;
@@ -1657,12 +1656,12 @@ exit:
* the input fields to hash on, the flow type and use the VSI number to add
* a flow entry to the profile.
*/
-enum ice_status
+int
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg)
{
struct ice_rss_hash_cfg local_cfg;
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle) || !cfg ||
cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
@@ -1695,15 +1694,15 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_seg_info *segs;
struct ice_flow_prof *prof;
- enum ice_status status;
u8 segs_cnt;
+ int status;
segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
ICE_FLOW_SEG_SINGLE :
@@ -1755,12 +1754,12 @@ out:
* removed. Calls are made to underlying flow apis which will in
* turn build or update buffers for RSS XLT1 section.
*/
-enum ice_status
+int
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg)
{
struct ice_rss_hash_cfg local_cfg;
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle) || !cfg ||
cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
@@ -1827,11 +1826,10 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
* message, convert it to ICE-compatible values, and configure RSS flow
* profiles.
*/
-enum ice_status
-ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
+int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
{
- enum ice_status status = ICE_SUCCESS;
struct ice_rss_hash_cfg hcfg;
+ int status = 0;
u64 hash_flds;
if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
@@ -1920,10 +1918,10 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
*/
-enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
- enum ice_status status = ICE_SUCCESS;
struct ice_rss_cfg *r;
+ int status = 0;
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
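
The RSS entry points in ice_flow.c follow the same pattern. A short sketch of a per-VSI remove-then-replay sequence built only from functions declared in this diff; the wrapper and the call order are illustrative, not taken from the driver:

#include "ice_flow.h"

/* Hypothetical wrapper: both calls now return 0 on success or an
 * ICE_ERR_* value, so the usual "if (status)" check still applies. */
static int
example_refresh_vsi_rss(struct ice_hw *hw, u16 vsi_handle)
{
	int status;

	/* Remove this VSI from any RSS flow profiles it is bound to. */
	status = ice_rem_vsi_rss_cfg(hw, vsi_handle);
	if (status)
		return status;

	/* Replay the RSS configuration cached for this VSI. */
	return ice_replay_rss_cfg(hw, vsi_handle);
}
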
diff --git a/sys/dev/ice/ice_flow.h b/sys/dev/ice/ice_flow.h
index 31c369c144e0..45952245d533 100644
--- a/sys/dev/ice/ice_flow.h
+++ b/sys/dev/ice/ice_flow.h
@@ -330,24 +330,24 @@ struct ice_flow_action {
u64
ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
struct ice_flow_seg_info *segs, u8 segs_cnt);
-enum ice_status
+int
ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
u16 vsig);
-enum ice_status
+int
ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u8 *hw_prof);
void
ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
u16 val_loc, u16 prefix_loc, u8 prefix_sz);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
+int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+int
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
-enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
+int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+int
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg);
-enum ice_status
+int
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
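
ice_flow.h likewise only swaps the return types. For instance, ice_flow_get_hw_prof() still writes its result through the out parameter and reports a missing mapping as ICE_ERR_DOES_NOT_EXIST; a caller-side sketch (the wrapper name is made up):

#include "ice_flow.h"

/* Hypothetical wrapper: translate a profile tracking ID into the HW
 * profile ID, using the int return to signal success or failure. */
static int
example_lookup_hw_prof(struct ice_hw *hw, u64 prof_id, u8 *hw_prof_id)
{
	int status;

	status = ice_flow_get_hw_prof(hw, ICE_BLK_RSS, prof_id, hw_prof_id);
	if (status)
		return status;	/* ICE_ERR_DOES_NOT_EXIST if unmapped */

	/* *hw_prof_id now holds the hardware profile index. */
	return 0;
}
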
diff --git a/sys/dev/ice/ice_fw_logging.c b/sys/dev/ice/ice_fw_logging.c
index a8caf65aca6d..8e52e34b2752 100644
--- a/sys/dev/ice/ice_fw_logging.c
+++ b/sys/dev/ice/ice_fw_logging.c
@@ -79,7 +79,7 @@ static int ice_sysctl_fwlog_module_log_severity(SYSCTL_HANDLER_ARGS);
static int
ice_reconfig_fw_log(struct ice_softc *sc, struct ice_fwlog_cfg *cfg)
{
- enum ice_status status;
+ int status;
ice_fwlog_init(&sc->hw, cfg);
@@ -223,7 +223,7 @@ ice_sysctl_fwlog_register(SYSCTL_HANDLER_ARGS)
{
struct ice_softc *sc = (struct ice_softc *)arg1;
struct ice_fwlog_cfg *cfg = &sc->hw.fwlog_cfg;
- enum ice_status status;
+ int status;
int error;
u8 enabled;
diff --git a/sys/dev/ice/ice_fwlog.c b/sys/dev/ice/ice_fwlog.c
index c3c6d9101627..07ca94ee003d 100644
--- a/sys/dev/ice/ice_fwlog.c
+++ b/sys/dev/ice/ice_fwlog.c
@@ -29,6 +29,7 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include "ice_osdep.h"
#include "ice_common.h"
#include "ice_fwlog.h"
@@ -120,7 +121,7 @@ static bool valid_cfg(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
* ice_init_hw(). Firmware logging will be configured based on these settings
* and also the PF will be registered on init.
*/
-enum ice_status
+int
ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
if (!valid_cfg(hw, cfg))
@@ -128,7 +129,7 @@ ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
cache_cfg(hw, cfg);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -139,14 +140,14 @@ ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
* @options: options from ice_fwlog_cfg->options structure
* @log_resolution: logging resolution
*/
-static enum ice_status
+static int
ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
u16 num_entries, u16 options, u16 log_resolution)
{
struct ice_aqc_fw_log_cfg_resp *fw_modules;
struct ice_aqc_fw_log *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u16 i;
fw_modules = (struct ice_aqc_fw_log_cfg_resp *)
@@ -208,10 +209,10 @@ bool ice_fwlog_supported(struct ice_hw *hw)
* ice_fwlog_register. Note, that ice_fwlog_register does not need to be called
* for init.
*/
-enum ice_status
+int
ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
- enum ice_status status;
+ int status;
if (!ice_fwlog_supported(hw))
return ICE_ERR_NOT_SUPPORTED;
@@ -268,13 +269,13 @@ update_cached_entries(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
* Only the entries passed in will be affected. All other firmware logging
* settings will be unaffected.
*/
-enum ice_status
+int
ice_fwlog_update_modules(struct ice_hw *hw,
struct ice_fwlog_module_entry *entries,
u16 num_entries)
{
struct ice_fwlog_cfg *cfg;
- enum ice_status status;
+ int status;
if (!ice_fwlog_supported(hw))
return ICE_ERR_NOT_SUPPORTED;
@@ -305,7 +306,7 @@ status_out:
* @hw: pointer to the HW structure
* @reg: true to register and false to unregister
*/
-static enum ice_status ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
+static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
{
struct ice_aq_desc desc;
@@ -324,9 +325,9 @@ static enum ice_status ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
* After this call the PF will start to receive firmware logging based on the
* configuration set in ice_fwlog_set.
*/
-enum ice_status ice_fwlog_register(struct ice_hw *hw)
+int ice_fwlog_register(struct ice_hw *hw)
{
- enum ice_status status;
+ int status;
if (!ice_fwlog_supported(hw))
return ICE_ERR_NOT_SUPPORTED;
@@ -344,9 +345,9 @@ enum ice_status ice_fwlog_register(struct ice_hw *hw)
* ice_fwlog_unregister - Unregister the PF from firmware logging
* @hw: pointer to the HW structure
*/
-enum ice_status ice_fwlog_unregister(struct ice_hw *hw)
+int ice_fwlog_unregister(struct ice_hw *hw)
{
- enum ice_status status;
+ int status;
if (!ice_fwlog_supported(hw))
return ICE_ERR_NOT_SUPPORTED;
@@ -365,14 +366,14 @@ enum ice_status ice_fwlog_unregister(struct ice_hw *hw)
* @hw: pointer to the HW structure
* @cfg: firmware logging configuration to populate
*/
-static enum ice_status
+static int
ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
struct ice_aqc_fw_log_cfg_resp *fw_modules;
struct ice_aqc_fw_log *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
u16 i, module_id_cnt;
+ int status;
void *buf;
ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
@@ -438,7 +439,7 @@ status_out:
void ice_fwlog_set_support_ena(struct ice_hw *hw)
{
struct ice_fwlog_cfg *cfg;
- enum ice_status status;
+ int status;
hw->fwlog_support_ena = false;
@@ -465,10 +466,10 @@ void ice_fwlog_set_support_ena(struct ice_hw *hw)
* @hw: pointer to the HW structure
* @cfg: config to populate based on current firmware logging settings
*/
-enum ice_status
+int
ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
- enum ice_status status;
+ int status;
if (!ice_fwlog_supported(hw))
return ICE_ERR_NOT_SUPPORTED;
@@ -482,7 +483,7 @@ ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
cache_cfg(hw, cfg);
- return ICE_SUCCESS;
+ return 0;
}
/**
diff --git a/sys/dev/ice/ice_fwlog.h b/sys/dev/ice/ice_fwlog.h
index 41e41de1e670..4cf5b678049d 100644
--- a/sys/dev/ice/ice_fwlog.h
+++ b/sys/dev/ice/ice_fwlog.h
@@ -76,15 +76,15 @@ struct ice_fwlog_cfg {
void ice_fwlog_set_support_ena(struct ice_hw *hw);
bool ice_fwlog_supported(struct ice_hw *hw);
-enum ice_status ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
-enum ice_status ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
-enum ice_status ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
-enum ice_status
+int ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
+int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
+int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
+int
ice_fwlog_update_modules(struct ice_hw *hw,
struct ice_fwlog_module_entry *entries,
u16 num_entries);
-enum ice_status ice_fwlog_register(struct ice_hw *hw);
-enum ice_status ice_fwlog_unregister(struct ice_hw *hw);
+int ice_fwlog_register(struct ice_hw *hw);
+int ice_fwlog_unregister(struct ice_hw *hw);
void
ice_fwlog_event_dump(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
#endif /* _ICE_FWLOG_H_ */
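
With the prototypes above, a firmware-logging bring-up sequence looks the same as before apart from the status type. A minimal sketch assuming only what ice_fwlog.h declares (the example_* helper is hypothetical):

#include "ice_fwlog.h"

/* Hypothetical helper: configure firmware logging and register the PF,
 * propagating the new int-style status codes. */
static int
example_enable_fwlog(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
	int status;

	if (!ice_fwlog_supported(hw))
		return ICE_ERR_NOT_SUPPORTED;

	/* Push module entries, options and log resolution to firmware. */
	status = ice_fwlog_set(hw, cfg);
	if (status)
		return status;

	/* Start receiving firmware log events for this PF. */
	return ice_fwlog_register(hw);
}
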
diff --git a/sys/dev/ice/ice_hw_autogen.h b/sys/dev/ice/ice_hw_autogen.h
index 8e59ebc76835..47256263d66c 100644
--- a/sys/dev/ice/ice_hw_autogen.h
+++ b/sys/dev/ice/ice_hw_autogen.h
@@ -34,6 +34,12 @@
#ifndef _ICE_HW_AUTOGEN_H_
#define _ICE_HW_AUTOGEN_H_
+#define PRTMAC_CTL_TX_PAUSE_ENABLE_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_TX_PAUSE_ENABLE : E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE)
+#define PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_S_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_S : E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_S)
+#define PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_M : E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_M)
+#define PRTMAC_CTL_RX_PAUSE_ENABLE_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_RX_PAUSE_ENABLE : E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE)
+#define PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_S_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_S : E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_S)
+#define PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_M : E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_M)
#define GL_HIDA(_i) (0x00082000 + ((_i) * 4))
#define GL_HIBA(_i) (0x00081000 + ((_i) * 4))
#define GL_HICR 0x00082040
@@ -43,6 +49,7 @@
#define GLNVM_FLA 0x000B6108
#define GL_HIDA_MAX_INDEX 15
#define GL_HIBA_MAX_INDEX 1023
+#define GL_MNG_FWSM_FW_LOADING_M BIT(30)
#define GL_RDPU_CNTRL 0x00052054 /* Reset Source: CORER */
#define GL_RDPU_CNTRL_RX_PAD_EN_S 0
#define GL_RDPU_CNTRL_RX_PAD_EN_M BIT(0)
@@ -57,9 +64,15 @@
#define GL_RDPU_CNTRL_PE_ACK_REQ_PM_TH_S 10
#define GL_RDPU_CNTRL_PE_ACK_REQ_PM_TH_M MAKEMASK(0x3F, 10)
#define GL_RDPU_CNTRL_REQ_WB_PM_TH_S 16
-#define GL_RDPU_CNTRL_REQ_WB_PM_TH_M MAKEMASK(0x1F, 16)
-#define GL_RDPU_CNTRL_ECO_S 21
-#define GL_RDPU_CNTRL_ECO_M MAKEMASK(0x7FF, 21)
+#define GL_RDPU_CNTRL_REQ_WB_PM_TH_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_RDPU_CNTRL_REQ_WB_PM_TH_M : E800_GL_RDPU_CNTRL_REQ_WB_PM_TH_M)
+#define E800_GL_RDPU_CNTRL_REQ_WB_PM_TH_M MAKEMASK(0x1F, 16)
+#define E830_GL_RDPU_CNTRL_REQ_WB_PM_TH_M MAKEMASK(0x3F, 16)
+#define GL_RDPU_CNTRL_ECO_S_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_RDPU_CNTRL_ECO_S : E800_GL_RDPU_CNTRL_ECO_S)
+#define E800_GL_RDPU_CNTRL_ECO_S 21
+#define E830_GL_RDPU_CNTRL_ECO_S 23
+#define GL_RDPU_CNTRL_ECO_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_RDPU_CNTRL_ECO_M : E800_GL_RDPU_CNTRL_ECO_M)
+#define E800_GL_RDPU_CNTRL_ECO_M MAKEMASK(0x7FF, 21)
+#define E830_GL_RDPU_CNTRL_ECO_M MAKEMASK(0x1FF, 23)
#define MSIX_PBA(_i) (0x00008000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: FLR */
#define MSIX_PBA_MAX_INDEX 2
#define MSIX_PBA_PENBIT_S 0
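
The ice_hw_autogen.h hunks above and below split registers whose layout differs between E800 and E830 into separate E800_*/E830_* definitions plus a *_BY_MAC(hw) selector keyed off hw->mac_type. A sketch of the intended call-site pattern; rd32() is assumed to be the driver's usual register read accessor from ice_osdep.h, and the helper itself is hypothetical:

#include "ice_hw_autogen.h"

/* Hypothetical read helper: the same code path serves E800 and E830 by
 * resolving the register offset, mask and shift at run time. */
static u32
example_read_rx_pause_enable(struct ice_hw *hw)
{
	u32 val = rd32(hw, PRTMAC_CTL_RX_PAUSE_ENABLE_BY_MAC(hw));

	val &= PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_M_BY_MAC(hw);
	val >>= PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_S_BY_MAC(hw);

	return val;
}
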
@@ -456,9 +469,11 @@
#define PF0INT_OICR_CPM_PAGE_QUEUE_S 1
#define PF0INT_OICR_CPM_PAGE_QUEUE_M BIT(1)
#define PF0INT_OICR_CPM_PAGE_RSV1_S 2
-#define PF0INT_OICR_CPM_PAGE_RSV1_M MAKEMASK(0xFF, 2)
-#define PF0INT_OICR_CPM_PAGE_HH_COMP_S 10
-#define PF0INT_OICR_CPM_PAGE_HH_COMP_M BIT(10)
+#define PF0INT_OICR_CPM_PAGE_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PF0INT_OICR_CPM_PAGE_RSV1_M : E800_PF0INT_OICR_CPM_PAGE_RSV1_M)
+#define E800_PF0INT_OICR_CPM_PAGE_RSV1_M MAKEMASK(0xFF, 2)
+#define E830_PF0INT_OICR_CPM_PAGE_RSV1_M MAKEMASK(0x3F, 2)
+#define E800_PF0INT_OICR_CPM_PAGE_HH_COMP_S 10
+#define E800_PF0INT_OICR_CPM_PAGE_HH_COMP_M BIT(10)
#define PF0INT_OICR_CPM_PAGE_TSYN_TX_S 11
#define PF0INT_OICR_CPM_PAGE_TSYN_TX_M BIT(11)
#define PF0INT_OICR_CPM_PAGE_TSYN_EVNT_S 12
@@ -520,9 +535,11 @@
#define PF0INT_OICR_HLP_PAGE_QUEUE_S 1
#define PF0INT_OICR_HLP_PAGE_QUEUE_M BIT(1)
#define PF0INT_OICR_HLP_PAGE_RSV1_S 2
-#define PF0INT_OICR_HLP_PAGE_RSV1_M MAKEMASK(0xFF, 2)
-#define PF0INT_OICR_HLP_PAGE_HH_COMP_S 10
-#define PF0INT_OICR_HLP_PAGE_HH_COMP_M BIT(10)
+#define PF0INT_OICR_HLP_PAGE_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PF0INT_OICR_HLP_PAGE_RSV1_M : E800_PF0INT_OICR_HLP_PAGE_RSV1_M)
+#define E800_PF0INT_OICR_HLP_PAGE_RSV1_M MAKEMASK(0xFF, 2)
+#define E830_PF0INT_OICR_HLP_PAGE_RSV1_M MAKEMASK(0x3F, 2)
+#define E800_PF0INT_OICR_HLP_PAGE_HH_COMP_S 10
+#define E800_PF0INT_OICR_HLP_PAGE_HH_COMP_M BIT(10)
#define PF0INT_OICR_HLP_PAGE_TSYN_TX_S 11
#define PF0INT_OICR_HLP_PAGE_TSYN_TX_M BIT(11)
#define PF0INT_OICR_HLP_PAGE_TSYN_EVNT_S 12
@@ -569,9 +586,11 @@
#define PF0INT_OICR_PSM_PAGE_QUEUE_S 1
#define PF0INT_OICR_PSM_PAGE_QUEUE_M BIT(1)
#define PF0INT_OICR_PSM_PAGE_RSV1_S 2
-#define PF0INT_OICR_PSM_PAGE_RSV1_M MAKEMASK(0xFF, 2)
-#define PF0INT_OICR_PSM_PAGE_HH_COMP_S 10
-#define PF0INT_OICR_PSM_PAGE_HH_COMP_M BIT(10)
+#define PF0INT_OICR_PSM_PAGE_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PF0INT_OICR_PSM_PAGE_RSV1_M : E800_PF0INT_OICR_PSM_PAGE_RSV1_M)
+#define E800_PF0INT_OICR_PSM_PAGE_RSV1_M MAKEMASK(0xFF, 2)
+#define E830_PF0INT_OICR_PSM_PAGE_RSV1_M MAKEMASK(0x3F, 2)
+#define E800_PF0INT_OICR_PSM_PAGE_HH_COMP_S 10
+#define E800_PF0INT_OICR_PSM_PAGE_HH_COMP_M BIT(10)
#define PF0INT_OICR_PSM_PAGE_TSYN_TX_S 11
#define PF0INT_OICR_PSM_PAGE_TSYN_TX_M BIT(11)
#define PF0INT_OICR_PSM_PAGE_TSYN_EVNT_S 12
@@ -620,10 +639,10 @@
#define QTX_COMM_DBELL_PAGE_MAX_INDEX 16383
#define QTX_COMM_DBELL_PAGE_QTX_COMM_DBELL_S 0
#define QTX_COMM_DBELL_PAGE_QTX_COMM_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
-#define QTX_COMM_DBLQ_DBELL_PAGE(_DBLQ) (0x02F00000 + ((_DBLQ) * 4096)) /* _i=0...255 */ /* Reset Source: CORER */
-#define QTX_COMM_DBLQ_DBELL_PAGE_MAX_INDEX 255
-#define QTX_COMM_DBLQ_DBELL_PAGE_TAIL_S 0
-#define QTX_COMM_DBLQ_DBELL_PAGE_TAIL_M MAKEMASK(0x1FFF, 0)
+#define E800_QTX_COMM_DBLQ_DBELL_PAGE(_DBLQ) (0x02F00000 + ((_DBLQ) * 4096)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E800_QTX_COMM_DBLQ_DBELL_PAGE_MAX_INDEX 255
+#define E800_QTX_COMM_DBLQ_DBELL_PAGE_TAIL_S 0
+#define E800_QTX_COMM_DBLQ_DBELL_PAGE_TAIL_M MAKEMASK(0x1FFF, 0)
#define VSI_MBX_ARQBAH(_VSI) (0x02000018 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_MBX_ARQBAH_MAX_INDEX 767
#define VSI_MBX_ARQBAH_ARQBAH_S 0
@@ -2026,18 +2045,18 @@
#define GLTPB_WB_RL_PERIOD_M MAKEMASK(0xFFFF, 0)
#define GLTPB_WB_RL_EN_S 16
#define GLTPB_WB_RL_EN_M BIT(16)
-#define PRTDCB_FCCFG 0x001E4640 /* Reset Source: GLOBR */
-#define PRTDCB_FCCFG_TFCE_S 3
-#define PRTDCB_FCCFG_TFCE_M MAKEMASK(0x3, 3)
-#define PRTDCB_FCRTV 0x001E4600 /* Reset Source: GLOBR */
-#define PRTDCB_FCRTV_FC_REFRESH_TH_S 0
-#define PRTDCB_FCRTV_FC_REFRESH_TH_M MAKEMASK(0xFFFF, 0)
-#define PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: GLOBR */
-#define PRTDCB_FCTTVN_MAX_INDEX 3
-#define PRTDCB_FCTTVN_TTV_2N_S 0
-#define PRTDCB_FCTTVN_TTV_2N_M MAKEMASK(0xFFFF, 0)
-#define PRTDCB_FCTTVN_TTV_2N_P1_S 16
-#define PRTDCB_FCTTVN_TTV_2N_P1_M MAKEMASK(0xFFFF, 16)
+#define E800_PRTDCB_FCCFG 0x001E4640 /* Reset Source: GLOBR */
+#define E800_PRTDCB_FCCFG_TFCE_S 3
+#define E800_PRTDCB_FCCFG_TFCE_M MAKEMASK(0x3, 3)
+#define E800_PRTDCB_FCRTV 0x001E4600 /* Reset Source: GLOBR */
+#define E800_PRTDCB_FCRTV_FC_REFRESH_TH_S 0
+#define E800_PRTDCB_FCRTV_FC_REFRESH_TH_M MAKEMASK(0xFFFF, 0)
+#define E800_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: GLOBR */
+#define E800_PRTDCB_FCTTVN_MAX_INDEX 3
+#define E800_PRTDCB_FCTTVN_TTV_2N_S 0
+#define E800_PRTDCB_FCTTVN_TTV_2N_M MAKEMASK(0xFFFF, 0)
+#define E800_PRTDCB_FCTTVN_TTV_2N_P1_S 16
+#define E800_PRTDCB_FCTTVN_TTV_2N_P1_M MAKEMASK(0xFFFF, 16)
#define PRTDCB_GENC 0x00083000 /* Reset Source: CORER */
#define PRTDCB_GENC_NUMTC_S 2
#define PRTDCB_GENC_NUMTC_M MAKEMASK(0xF, 2)
@@ -2403,214 +2422,222 @@
#define TPB_WB_RL_TC_STAT_MAX_INDEX 31
#define TPB_WB_RL_TC_STAT_BUCKET_S 0
#define TPB_WB_RL_TC_STAT_BUCKET_M MAKEMASK(0x1FFFF, 0)
-#define GL_ACLEXT_CDMD_L1SEL(_i) (0x00210054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_CDMD_L1SEL_MAX_INDEX 2
-#define GL_ACLEXT_CDMD_L1SEL_RX_SEL_S 0
-#define GL_ACLEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0)
-#define GL_ACLEXT_CDMD_L1SEL_TX_SEL_S 8
-#define GL_ACLEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8)
-#define GL_ACLEXT_CDMD_L1SEL_AUX0_SEL_S 16
-#define GL_ACLEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16)
-#define GL_ACLEXT_CDMD_L1SEL_AUX1_SEL_S 24
-#define GL_ACLEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24)
-#define GL_ACLEXT_CDMD_L1SEL_BIDIR_ENA_S 30
-#define GL_ACLEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30)
-#define GL_ACLEXT_CTLTBL_L2ADDR(_i) (0x00210084 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_CTLTBL_L2ADDR_MAX_INDEX 2
-#define GL_ACLEXT_CTLTBL_L2ADDR_LINE_OFF_S 0
-#define GL_ACLEXT_CTLTBL_L2ADDR_LINE_OFF_M MAKEMASK(0x7, 0)
-#define GL_ACLEXT_CTLTBL_L2ADDR_LINE_IDX_S 8
-#define GL_ACLEXT_CTLTBL_L2ADDR_LINE_IDX_M MAKEMASK(0x7, 8)
-#define GL_ACLEXT_CTLTBL_L2ADDR_AUTO_INC_S 31
-#define GL_ACLEXT_CTLTBL_L2ADDR_AUTO_INC_M BIT(31)
-#define GL_ACLEXT_CTLTBL_L2DATA(_i) (0x00210090 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_CTLTBL_L2DATA_MAX_INDEX 2
-#define GL_ACLEXT_CTLTBL_L2DATA_DATA_S 0
-#define GL_ACLEXT_CTLTBL_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_ACLEXT_DFLT_L2PRFL(_i) (0x00210138 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_DFLT_L2PRFL_MAX_INDEX 2
-#define GL_ACLEXT_DFLT_L2PRFL_DFLT_PRFL_S 0
-#define GL_ACLEXT_DFLT_L2PRFL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0)
+#define E800_GL_ACLEXT_CDMD_L1SEL(_i) (0x00210054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_CDMD_L1SEL_MAX_INDEX 2
+#define E800_GL_ACLEXT_CDMD_L1SEL_RX_SEL_S 0
+#define E800_GL_ACLEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0)
+#define E800_GL_ACLEXT_CDMD_L1SEL_TX_SEL_S 8
+#define E800_GL_ACLEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8)
+#define E800_GL_ACLEXT_CDMD_L1SEL_AUX0_SEL_S 16
+#define E800_GL_ACLEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16)
+#define E800_GL_ACLEXT_CDMD_L1SEL_AUX1_SEL_S 24
+#define E800_GL_ACLEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24)
+#define E800_GL_ACLEXT_CDMD_L1SEL_BIDIR_ENA_S 30
+#define E800_GL_ACLEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30)
+#define E800_GL_ACLEXT_CTLTBL_L2ADDR(_i) (0x00210084 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_CTLTBL_L2ADDR_MAX_INDEX 2
+#define E800_GL_ACLEXT_CTLTBL_L2ADDR_LINE_OFF_S 0
+#define E800_GL_ACLEXT_CTLTBL_L2ADDR_LINE_OFF_M MAKEMASK(0x7, 0)
+#define E800_GL_ACLEXT_CTLTBL_L2ADDR_LINE_IDX_S 8
+#define E800_GL_ACLEXT_CTLTBL_L2ADDR_LINE_IDX_M MAKEMASK(0x7, 8)
+#define E800_GL_ACLEXT_CTLTBL_L2ADDR_AUTO_INC_S 31
+#define E800_GL_ACLEXT_CTLTBL_L2ADDR_AUTO_INC_M BIT(31)
+#define E800_GL_ACLEXT_CTLTBL_L2DATA(_i) (0x00210090 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_CTLTBL_L2DATA_MAX_INDEX 2
+#define E800_GL_ACLEXT_CTLTBL_L2DATA_DATA_S 0
+#define E800_GL_ACLEXT_CTLTBL_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GL_ACLEXT_DFLT_L2PRFL(_i) (0x00210138 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_DFLT_L2PRFL_MAX_INDEX 2
+#define E800_GL_ACLEXT_DFLT_L2PRFL_DFLT_PRFL_S 0
+#define E800_GL_ACLEXT_DFLT_L2PRFL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0)
#define GL_ACLEXT_DFLT_L2PRFL_ACL(_i) (0x00393800 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_ACLEXT_DFLT_L2PRFL_ACL_MAX_INDEX 2
#define GL_ACLEXT_DFLT_L2PRFL_ACL_DFLT_PRFL_S 0
#define GL_ACLEXT_DFLT_L2PRFL_ACL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0)
-#define GL_ACLEXT_FLGS_L1SEL0_1(_i) (0x0021006C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_FLGS_L1SEL0_1_MAX_INDEX 2
-#define GL_ACLEXT_FLGS_L1SEL0_1_FLS0_S 0
-#define GL_ACLEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0)
-#define GL_ACLEXT_FLGS_L1SEL0_1_FLS1_S 16
-#define GL_ACLEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16)
-#define GL_ACLEXT_FLGS_L1SEL2_3(_i) (0x00210078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_FLGS_L1SEL2_3_MAX_INDEX 2
-#define GL_ACLEXT_FLGS_L1SEL2_3_FLS2_S 0
-#define GL_ACLEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0)
-#define GL_ACLEXT_FLGS_L1SEL2_3_FLS3_S 16
-#define GL_ACLEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16)
-#define GL_ACLEXT_FLGS_L1TBL(_i) (0x00210060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_FLGS_L1TBL_MAX_INDEX 2
-#define GL_ACLEXT_FLGS_L1TBL_LSB_S 0
-#define GL_ACLEXT_FLGS_L1TBL_LSB_M MAKEMASK(0xFFFF, 0)
-#define GL_ACLEXT_FLGS_L1TBL_MSB_S 16
-#define GL_ACLEXT_FLGS_L1TBL_MSB_M MAKEMASK(0xFFFF, 16)
-#define GL_ACLEXT_FORCE_L1CDID(_i) (0x00210018 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_FORCE_L1CDID_MAX_INDEX 2
-#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_S 0
-#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_M MAKEMASK(0xF, 0)
-#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31
-#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31)
-#define GL_ACLEXT_FORCE_PID(_i) (0x00210000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_FORCE_PID_MAX_INDEX 2
-#define GL_ACLEXT_FORCE_PID_STATIC_PID_S 0
-#define GL_ACLEXT_FORCE_PID_STATIC_PID_M MAKEMASK(0xFFFF, 0)
-#define GL_ACLEXT_FORCE_PID_STATIC_PID_EN_S 31
-#define GL_ACLEXT_FORCE_PID_STATIC_PID_EN_M BIT(31)
-#define GL_ACLEXT_K2N_L2ADDR(_i) (0x00210144 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_K2N_L2ADDR_MAX_INDEX 2
-#define GL_ACLEXT_K2N_L2ADDR_LINE_IDX_S 0
-#define GL_ACLEXT_K2N_L2ADDR_LINE_IDX_M MAKEMASK(0x7F, 0)
-#define GL_ACLEXT_K2N_L2ADDR_AUTO_INC_S 31
-#define GL_ACLEXT_K2N_L2ADDR_AUTO_INC_M BIT(31)
-#define GL_ACLEXT_K2N_L2DATA(_i) (0x00210150 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_K2N_L2DATA_MAX_INDEX 2
-#define GL_ACLEXT_K2N_L2DATA_DATA0_S 0
-#define GL_ACLEXT_K2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0)
-#define GL_ACLEXT_K2N_L2DATA_DATA1_S 8
-#define GL_ACLEXT_K2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8)
-#define GL_ACLEXT_K2N_L2DATA_DATA2_S 16
-#define GL_ACLEXT_K2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16)
-#define GL_ACLEXT_K2N_L2DATA_DATA3_S 24
-#define GL_ACLEXT_K2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24)
-#define GL_ACLEXT_L2_PMASK0(_i) (0x002100FC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_L2_PMASK0_MAX_INDEX 2
-#define GL_ACLEXT_L2_PMASK0_BITMASK_S 0
-#define GL_ACLEXT_L2_PMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_ACLEXT_L2_PMASK1(_i) (0x00210108 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_L2_PMASK1_MAX_INDEX 2
-#define GL_ACLEXT_L2_PMASK1_BITMASK_S 0
-#define GL_ACLEXT_L2_PMASK1_BITMASK_M MAKEMASK(0xFFFF, 0)
-#define GL_ACLEXT_L2_TMASK0(_i) (0x00210498 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_L2_TMASK0_MAX_INDEX 2
-#define GL_ACLEXT_L2_TMASK0_BITMASK_S 0
-#define GL_ACLEXT_L2_TMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_ACLEXT_L2_TMASK1(_i) (0x002104A4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_L2_TMASK1_MAX_INDEX 2
-#define GL_ACLEXT_L2_TMASK1_BITMASK_S 0
-#define GL_ACLEXT_L2_TMASK1_BITMASK_M MAKEMASK(0xFF, 0)
-#define GL_ACLEXT_L2BMP0_3(_i) (0x002100A8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_L2BMP0_3_MAX_INDEX 2
-#define GL_ACLEXT_L2BMP0_3_BMP0_S 0
-#define GL_ACLEXT_L2BMP0_3_BMP0_M MAKEMASK(0xFF, 0)
-#define GL_ACLEXT_L2BMP0_3_BMP1_S 8
-#define GL_ACLEXT_L2BMP0_3_BMP1_M MAKEMASK(0xFF, 8)
-#define GL_ACLEXT_L2BMP0_3_BMP2_S 16
-#define GL_ACLEXT_L2BMP0_3_BMP2_M MAKEMASK(0xFF, 16)
-#define GL_ACLEXT_L2BMP0_3_BMP3_S 24
-#define GL_ACLEXT_L2BMP0_3_BMP3_M MAKEMASK(0xFF, 24)
-#define GL_ACLEXT_L2BMP4_7(_i) (0x002100B4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_L2BMP4_7_MAX_INDEX 2
-#define GL_ACLEXT_L2BMP4_7_BMP4_S 0
-#define GL_ACLEXT_L2BMP4_7_BMP4_M MAKEMASK(0xFF, 0)
-#define GL_ACLEXT_L2BMP4_7_BMP5_S 8
-#define GL_ACLEXT_L2BMP4_7_BMP5_M MAKEMASK(0xFF, 8)
-#define GL_ACLEXT_L2BMP4_7_BMP6_S 16
-#define GL_ACLEXT_L2BMP4_7_BMP6_M MAKEMASK(0xFF, 16)
-#define GL_ACLEXT_L2BMP4_7_BMP7_S 24
-#define GL_ACLEXT_L2BMP4_7_BMP7_M MAKEMASK(0xFF, 24)
-#define GL_ACLEXT_L2PRTMOD(_i) (0x0021009C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_L2PRTMOD_MAX_INDEX 2
-#define GL_ACLEXT_L2PRTMOD_XLT1_S 0
-#define GL_ACLEXT_L2PRTMOD_XLT1_M MAKEMASK(0x3, 0)
-#define GL_ACLEXT_L2PRTMOD_XLT2_S 8
-#define GL_ACLEXT_L2PRTMOD_XLT2_M MAKEMASK(0x3, 8)
-#define GL_ACLEXT_N2N_L2ADDR(_i) (0x0021015C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_N2N_L2ADDR_MAX_INDEX 2
-#define GL_ACLEXT_N2N_L2ADDR_LINE_IDX_S 0
-#define GL_ACLEXT_N2N_L2ADDR_LINE_IDX_M MAKEMASK(0x3F, 0)
-#define GL_ACLEXT_N2N_L2ADDR_AUTO_INC_S 31
-#define GL_ACLEXT_N2N_L2ADDR_AUTO_INC_M BIT(31)
-#define GL_ACLEXT_N2N_L2DATA(_i) (0x00210168 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_N2N_L2DATA_MAX_INDEX 2
-#define GL_ACLEXT_N2N_L2DATA_DATA0_S 0
-#define GL_ACLEXT_N2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0)
-#define GL_ACLEXT_N2N_L2DATA_DATA1_S 8
-#define GL_ACLEXT_N2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8)
-#define GL_ACLEXT_N2N_L2DATA_DATA2_S 16
-#define GL_ACLEXT_N2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16)
-#define GL_ACLEXT_N2N_L2DATA_DATA3_S 24
-#define GL_ACLEXT_N2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24)
-#define GL_ACLEXT_P2P_L1ADDR(_i) (0x00210024 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_P2P_L1ADDR_MAX_INDEX 2
-#define GL_ACLEXT_P2P_L1ADDR_LINE_IDX_S 0
-#define GL_ACLEXT_P2P_L1ADDR_LINE_IDX_M BIT(0)
-#define GL_ACLEXT_P2P_L1ADDR_AUTO_INC_S 31
-#define GL_ACLEXT_P2P_L1ADDR_AUTO_INC_M BIT(31)
-#define GL_ACLEXT_P2P_L1DATA(_i) (0x00210030 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_P2P_L1DATA_MAX_INDEX 2
-#define GL_ACLEXT_P2P_L1DATA_DATA_S 0
-#define GL_ACLEXT_P2P_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_ACLEXT_PID_L2GKTYPE(_i) (0x002100F0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_PID_L2GKTYPE_MAX_INDEX 2
-#define GL_ACLEXT_PID_L2GKTYPE_PID_GKTYPE_S 0
-#define GL_ACLEXT_PID_L2GKTYPE_PID_GKTYPE_M MAKEMASK(0x3, 0)
-#define GL_ACLEXT_PLVL_SEL(_i) (0x0021000C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_PLVL_SEL_MAX_INDEX 2
-#define GL_ACLEXT_PLVL_SEL_PLVL_SEL_S 0
-#define GL_ACLEXT_PLVL_SEL_PLVL_SEL_M BIT(0)
-#define GL_ACLEXT_TCAM_L2ADDR(_i) (0x00210114 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_TCAM_L2ADDR_MAX_INDEX 2
-#define GL_ACLEXT_TCAM_L2ADDR_LINE_IDX_S 0
-#define GL_ACLEXT_TCAM_L2ADDR_LINE_IDX_M MAKEMASK(0x3FF, 0)
-#define GL_ACLEXT_TCAM_L2ADDR_AUTO_INC_S 31
-#define GL_ACLEXT_TCAM_L2ADDR_AUTO_INC_M BIT(31)
-#define GL_ACLEXT_TCAM_L2DATALSB(_i) (0x00210120 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_TCAM_L2DATALSB_MAX_INDEX 2
-#define GL_ACLEXT_TCAM_L2DATALSB_DATALSB_S 0
-#define GL_ACLEXT_TCAM_L2DATALSB_DATALSB_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_ACLEXT_TCAM_L2DATAMSB(_i) (0x0021012C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_TCAM_L2DATAMSB_MAX_INDEX 2
-#define GL_ACLEXT_TCAM_L2DATAMSB_DATAMSB_S 0
-#define GL_ACLEXT_TCAM_L2DATAMSB_DATAMSB_M MAKEMASK(0xFF, 0)
-#define GL_ACLEXT_XLT0_L1ADDR(_i) (0x0021003C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_XLT0_L1ADDR_MAX_INDEX 2
-#define GL_ACLEXT_XLT0_L1ADDR_LINE_IDX_S 0
-#define GL_ACLEXT_XLT0_L1ADDR_LINE_IDX_M MAKEMASK(0xFF, 0)
-#define GL_ACLEXT_XLT0_L1ADDR_AUTO_INC_S 31
-#define GL_ACLEXT_XLT0_L1ADDR_AUTO_INC_M BIT(31)
-#define GL_ACLEXT_XLT0_L1DATA(_i) (0x00210048 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_XLT0_L1DATA_MAX_INDEX 2
-#define GL_ACLEXT_XLT0_L1DATA_DATA_S 0
-#define GL_ACLEXT_XLT0_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_ACLEXT_XLT1_L2ADDR(_i) (0x002100C0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_XLT1_L2ADDR_MAX_INDEX 2
-#define GL_ACLEXT_XLT1_L2ADDR_LINE_IDX_S 0
-#define GL_ACLEXT_XLT1_L2ADDR_LINE_IDX_M MAKEMASK(0x7FF, 0)
-#define GL_ACLEXT_XLT1_L2ADDR_AUTO_INC_S 31
-#define GL_ACLEXT_XLT1_L2ADDR_AUTO_INC_M BIT(31)
-#define GL_ACLEXT_XLT1_L2DATA(_i) (0x002100CC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_XLT1_L2DATA_MAX_INDEX 2
-#define GL_ACLEXT_XLT1_L2DATA_DATA_S 0
-#define GL_ACLEXT_XLT1_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_ACLEXT_XLT2_L2ADDR(_i) (0x002100D8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_XLT2_L2ADDR_MAX_INDEX 2
-#define GL_ACLEXT_XLT2_L2ADDR_LINE_IDX_S 0
-#define GL_ACLEXT_XLT2_L2ADDR_LINE_IDX_M MAKEMASK(0x1FF, 0)
-#define GL_ACLEXT_XLT2_L2ADDR_AUTO_INC_S 31
-#define GL_ACLEXT_XLT2_L2ADDR_AUTO_INC_M BIT(31)
-#define GL_ACLEXT_XLT2_L2DATA(_i) (0x002100E4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_XLT2_L2DATA_MAX_INDEX 2
-#define GL_ACLEXT_XLT2_L2DATA_DATA_S 0
-#define GL_ACLEXT_XLT2_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GL_ACLEXT_FLGS_L1SEL0_1(_i) (0x0021006C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_FLGS_L1SEL0_1_MAX_INDEX 2
+#define E800_GL_ACLEXT_FLGS_L1SEL0_1_FLS0_S 0
+#define E800_GL_ACLEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0)
+#define E800_GL_ACLEXT_FLGS_L1SEL0_1_FLS1_S 16
+#define E800_GL_ACLEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16)
+#define E800_GL_ACLEXT_FLGS_L1SEL2_3(_i) (0x00210078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_FLGS_L1SEL2_3_MAX_INDEX 2
+#define E800_GL_ACLEXT_FLGS_L1SEL2_3_FLS2_S 0
+#define E800_GL_ACLEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0)
+#define E800_GL_ACLEXT_FLGS_L1SEL2_3_FLS3_S 16
+#define E800_GL_ACLEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16)
+#define E800_GL_ACLEXT_FLGS_L1TBL(_i) (0x00210060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_FLGS_L1TBL_MAX_INDEX 2
+#define E800_GL_ACLEXT_FLGS_L1TBL_LSB_S 0
+#define E800_GL_ACLEXT_FLGS_L1TBL_LSB_M MAKEMASK(0xFFFF, 0)
+#define E800_GL_ACLEXT_FLGS_L1TBL_MSB_S 16
+#define E800_GL_ACLEXT_FLGS_L1TBL_MSB_M MAKEMASK(0xFFFF, 16)
+#define E800_GL_ACLEXT_FORCE_L1CDID(_i) (0x00210018 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_FORCE_L1CDID_MAX_INDEX 2
+#define E800_GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_S 0
+#define E800_GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_M MAKEMASK(0xF, 0)
+#define E800_GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31
+#define E800_GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31)
+#define E800_GL_ACLEXT_FORCE_PID(_i) (0x00210000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_FORCE_PID_MAX_INDEX 2
+#define E800_GL_ACLEXT_FORCE_PID_STATIC_PID_S 0
+#define E800_GL_ACLEXT_FORCE_PID_STATIC_PID_M MAKEMASK(0xFFFF, 0)
+#define E800_GL_ACLEXT_FORCE_PID_STATIC_PID_EN_S 31
+#define E800_GL_ACLEXT_FORCE_PID_STATIC_PID_EN_M BIT(31)
+#define E800_GL_ACLEXT_K2N_L2ADDR(_i) (0x00210144 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_K2N_L2ADDR_MAX_INDEX 2
+#define E800_GL_ACLEXT_K2N_L2ADDR_LINE_IDX_S 0
+#define E800_GL_ACLEXT_K2N_L2ADDR_LINE_IDX_M MAKEMASK(0x7F, 0)
+#define E800_GL_ACLEXT_K2N_L2ADDR_AUTO_INC_S 31
+#define E800_GL_ACLEXT_K2N_L2ADDR_AUTO_INC_M BIT(31)
+#define E800_GL_ACLEXT_K2N_L2DATA(_i) (0x00210150 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_K2N_L2DATA_MAX_INDEX 2
+#define E800_GL_ACLEXT_K2N_L2DATA_DATA0_S 0
+#define E800_GL_ACLEXT_K2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0)
+#define E800_GL_ACLEXT_K2N_L2DATA_DATA1_S 8
+#define E800_GL_ACLEXT_K2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8)
+#define E800_GL_ACLEXT_K2N_L2DATA_DATA2_S 16
+#define E800_GL_ACLEXT_K2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16)
+#define E800_GL_ACLEXT_K2N_L2DATA_DATA3_S 24
+#define E800_GL_ACLEXT_K2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24)
+#define E800_GL_ACLEXT_L2_PMASK0(_i) (0x002100FC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_L2_PMASK0_MAX_INDEX 2
+#define E800_GL_ACLEXT_L2_PMASK0_BITMASK_S 0
+#define E800_GL_ACLEXT_L2_PMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GL_ACLEXT_L2_PMASK1(_i) (0x00210108 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_L2_PMASK1_MAX_INDEX 2
+#define E800_GL_ACLEXT_L2_PMASK1_BITMASK_S 0
+#define E800_GL_ACLEXT_L2_PMASK1_BITMASK_M MAKEMASK(0xFFFF, 0)
+#define E800_GL_ACLEXT_L2_TMASK0(_i) (0x00210498 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_L2_TMASK0_MAX_INDEX 2
+#define E800_GL_ACLEXT_L2_TMASK0_BITMASK_S 0
+#define E800_GL_ACLEXT_L2_TMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GL_ACLEXT_L2_TMASK1(_i) (0x002104A4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_L2_TMASK1_MAX_INDEX 2
+#define E800_GL_ACLEXT_L2_TMASK1_BITMASK_S 0
+#define E800_GL_ACLEXT_L2_TMASK1_BITMASK_M MAKEMASK(0xFF, 0)
+#define E800_GL_ACLEXT_L2BMP0_3(_i) (0x002100A8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_L2BMP0_3_MAX_INDEX 2
+#define E800_GL_ACLEXT_L2BMP0_3_BMP0_S 0
+#define E800_GL_ACLEXT_L2BMP0_3_BMP0_M MAKEMASK(0xFF, 0)
+#define E800_GL_ACLEXT_L2BMP0_3_BMP1_S 8
+#define E800_GL_ACLEXT_L2BMP0_3_BMP1_M MAKEMASK(0xFF, 8)
+#define E800_GL_ACLEXT_L2BMP0_3_BMP2_S 16
+#define E800_GL_ACLEXT_L2BMP0_3_BMP2_M MAKEMASK(0xFF, 16)
+#define E800_GL_ACLEXT_L2BMP0_3_BMP3_S 24
+#define E800_GL_ACLEXT_L2BMP0_3_BMP3_M MAKEMASK(0xFF, 24)
+#define E800_GL_ACLEXT_L2BMP4_7(_i) (0x002100B4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_L2BMP4_7_MAX_INDEX 2
+#define E800_GL_ACLEXT_L2BMP4_7_BMP4_S 0
+#define E800_GL_ACLEXT_L2BMP4_7_BMP4_M MAKEMASK(0xFF, 0)
+#define E800_GL_ACLEXT_L2BMP4_7_BMP5_S 8
+#define E800_GL_ACLEXT_L2BMP4_7_BMP5_M MAKEMASK(0xFF, 8)
+#define E800_GL_ACLEXT_L2BMP4_7_BMP6_S 16
+#define E800_GL_ACLEXT_L2BMP4_7_BMP6_M MAKEMASK(0xFF, 16)
+#define E800_GL_ACLEXT_L2BMP4_7_BMP7_S 24
+#define E800_GL_ACLEXT_L2BMP4_7_BMP7_M MAKEMASK(0xFF, 24)
+#define E800_GL_ACLEXT_L2PRTMOD(_i) (0x0021009C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_L2PRTMOD_MAX_INDEX 2
+#define E800_GL_ACLEXT_L2PRTMOD_XLT1_S 0
+#define E800_GL_ACLEXT_L2PRTMOD_XLT1_M MAKEMASK(0x3, 0)
+#define E800_GL_ACLEXT_L2PRTMOD_XLT2_S 8
+#define E800_GL_ACLEXT_L2PRTMOD_XLT2_M MAKEMASK(0x3, 8)
+#define E800_GL_ACLEXT_N2N_L2ADDR(_i) (0x0021015C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_N2N_L2ADDR_MAX_INDEX 2
+#define E800_GL_ACLEXT_N2N_L2ADDR_LINE_IDX_S 0
+#define E800_GL_ACLEXT_N2N_L2ADDR_LINE_IDX_M MAKEMASK(0x3F, 0)
+#define E800_GL_ACLEXT_N2N_L2ADDR_AUTO_INC_S 31
+#define E800_GL_ACLEXT_N2N_L2ADDR_AUTO_INC_M BIT(31)
+#define E800_GL_ACLEXT_N2N_L2DATA(_i) (0x00210168 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_N2N_L2DATA_MAX_INDEX 2
+#define E800_GL_ACLEXT_N2N_L2DATA_DATA0_S 0
+#define E800_GL_ACLEXT_N2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0)
+#define E800_GL_ACLEXT_N2N_L2DATA_DATA1_S 8
+#define E800_GL_ACLEXT_N2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8)
+#define E800_GL_ACLEXT_N2N_L2DATA_DATA2_S 16
+#define E800_GL_ACLEXT_N2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16)
+#define E800_GL_ACLEXT_N2N_L2DATA_DATA3_S 24
+#define E800_GL_ACLEXT_N2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24)
+#define E800_GL_ACLEXT_P2P_L1ADDR(_i) (0x00210024 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_P2P_L1ADDR_MAX_INDEX 2
+#define E800_GL_ACLEXT_P2P_L1ADDR_LINE_IDX_S 0
+#define E800_GL_ACLEXT_P2P_L1ADDR_LINE_IDX_M BIT(0)
+#define E800_GL_ACLEXT_P2P_L1ADDR_AUTO_INC_S 31
+#define E800_GL_ACLEXT_P2P_L1ADDR_AUTO_INC_M BIT(31)
+#define E800_GL_ACLEXT_P2P_L1DATA(_i) (0x00210030 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_P2P_L1DATA_MAX_INDEX 2
+#define E800_GL_ACLEXT_P2P_L1DATA_DATA_S 0
+#define E800_GL_ACLEXT_P2P_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GL_ACLEXT_PID_L2GKTYPE(_i) (0x002100F0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_PID_L2GKTYPE_MAX_INDEX 2
+#define E800_GL_ACLEXT_PID_L2GKTYPE_PID_GKTYPE_S 0
+#define E800_GL_ACLEXT_PID_L2GKTYPE_PID_GKTYPE_M MAKEMASK(0x3, 0)
+#define E800_GL_ACLEXT_PLVL_SEL(_i) (0x0021000C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_PLVL_SEL_MAX_INDEX 2
+#define E800_GL_ACLEXT_PLVL_SEL_PLVL_SEL_S 0
+#define E800_GL_ACLEXT_PLVL_SEL_PLVL_SEL_M BIT(0)
+#define E800_GL_ACLEXT_TCAM_L2ADDR(_i) (0x00210114 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_TCAM_L2ADDR_MAX_INDEX 2
+#define E800_GL_ACLEXT_TCAM_L2ADDR_LINE_IDX_S 0
+#define E800_GL_ACLEXT_TCAM_L2ADDR_LINE_IDX_M MAKEMASK(0x3FF, 0)
+#define E800_GL_ACLEXT_TCAM_L2ADDR_AUTO_INC_S 31
+#define E800_GL_ACLEXT_TCAM_L2ADDR_AUTO_INC_M BIT(31)
+#define E800_GL_ACLEXT_TCAM_L2DATALSB(_i) (0x00210120 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_TCAM_L2DATALSB_MAX_INDEX 2
+#define E800_GL_ACLEXT_TCAM_L2DATALSB_DATALSB_S 0
+#define E800_GL_ACLEXT_TCAM_L2DATALSB_DATALSB_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GL_ACLEXT_TCAM_L2DATAMSB(_i) (0x0021012C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_TCAM_L2DATAMSB_MAX_INDEX 2
+#define E800_GL_ACLEXT_TCAM_L2DATAMSB_DATAMSB_S 0
+#define E800_GL_ACLEXT_TCAM_L2DATAMSB_DATAMSB_M MAKEMASK(0xFF, 0)
+#define E800_GL_ACLEXT_XLT0_L1ADDR(_i) (0x0021003C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_XLT0_L1ADDR_MAX_INDEX 2
+#define E800_GL_ACLEXT_XLT0_L1ADDR_LINE_IDX_S 0
+#define E800_GL_ACLEXT_XLT0_L1ADDR_LINE_IDX_M MAKEMASK(0xFF, 0)
+#define E800_GL_ACLEXT_XLT0_L1ADDR_AUTO_INC_S 31
+#define E800_GL_ACLEXT_XLT0_L1ADDR_AUTO_INC_M BIT(31)
+#define E800_GL_ACLEXT_XLT0_L1DATA(_i) (0x00210048 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_XLT0_L1DATA_MAX_INDEX 2
+#define E800_GL_ACLEXT_XLT0_L1DATA_DATA_S 0
+#define E800_GL_ACLEXT_XLT0_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GL_ACLEXT_XLT1_L2ADDR(_i) (0x002100C0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_XLT1_L2ADDR_MAX_INDEX 2
+#define E800_GL_ACLEXT_XLT1_L2ADDR_LINE_IDX_S 0
+#define E800_GL_ACLEXT_XLT1_L2ADDR_LINE_IDX_M MAKEMASK(0x7FF, 0)
+#define E800_GL_ACLEXT_XLT1_L2ADDR_AUTO_INC_S 31
+#define E800_GL_ACLEXT_XLT1_L2ADDR_AUTO_INC_M BIT(31)
+#define E800_GL_ACLEXT_XLT1_L2DATA(_i) (0x002100CC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_XLT1_L2DATA_MAX_INDEX 2
+#define E800_GL_ACLEXT_XLT1_L2DATA_DATA_S 0
+#define E800_GL_ACLEXT_XLT1_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GL_ACLEXT_XLT2_L2ADDR(_i) (0x002100D8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_XLT2_L2ADDR_MAX_INDEX 2
+#define E800_GL_ACLEXT_XLT2_L2ADDR_LINE_IDX_S 0
+#define E800_GL_ACLEXT_XLT2_L2ADDR_LINE_IDX_M MAKEMASK(0x1FF, 0)
+#define E800_GL_ACLEXT_XLT2_L2ADDR_AUTO_INC_S 31
+#define E800_GL_ACLEXT_XLT2_L2ADDR_AUTO_INC_M BIT(31)
+#define E800_GL_ACLEXT_XLT2_L2DATA(_i) (0x002100E4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_XLT2_L2DATA_MAX_INDEX 2
+#define E800_GL_ACLEXT_XLT2_L2DATA_DATA_S 0
+#define E800_GL_ACLEXT_XLT2_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
#define GL_PREEXT_CDMD_L1SEL(_i) (0x0020F054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PREEXT_CDMD_L1SEL_MAX_INDEX 2
#define GL_PREEXT_CDMD_L1SEL_RX_SEL_S 0
-#define GL_PREEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0)
+#define GL_PREEXT_CDMD_L1SEL_RX_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_CDMD_L1SEL_RX_SEL_M : E800_GL_PREEXT_CDMD_L1SEL_RX_SEL_M)
+#define E800_GL_PREEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0)
+#define E830_GL_PREEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x3F, 0)
#define GL_PREEXT_CDMD_L1SEL_TX_SEL_S 8
-#define GL_PREEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8)
+#define GL_PREEXT_CDMD_L1SEL_TX_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_CDMD_L1SEL_TX_SEL_M : E800_GL_PREEXT_CDMD_L1SEL_TX_SEL_M)
+#define E800_GL_PREEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8)
+#define E830_GL_PREEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x3F, 8)
#define GL_PREEXT_CDMD_L1SEL_AUX0_SEL_S 16
-#define GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16)
+#define GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M : E800_GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M)
+#define E800_GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16)
+#define E830_GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x3F, 16)
#define GL_PREEXT_CDMD_L1SEL_AUX1_SEL_S 24
-#define GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24)
+#define GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M : E800_GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M)
+#define E800_GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24)
+#define E830_GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x3F, 24)
#define GL_PREEXT_CDMD_L1SEL_BIDIR_ENA_S 30
#define GL_PREEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30)
#define GL_PREEXT_CTLTBL_L2ADDR(_i) (0x0020F084 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
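In contrast to the dual-definition masks, the GL_ACLEXT_* block (and the PRTDCB_FC* flow-control registers earlier in the diff) only gains an E800_ prefix, with no E830 counterpart, which suggests these registers do not exist on E830 parts and callers are expected to check hw->mac_type before touching them. A self-contained sketch of that guard follows; wr32() is a local stub standing in for the driver's MMIO write helper, the reduced types are stand-ins, and the threshold value is arbitrary.

#include <stdint.h>
#include <stdio.h>

#define MAKEMASK(m, s) ((m) << (s))

enum ice_mac_type { ICE_MAC_E810, ICE_MAC_E830 };	/* reduced stand-in */
struct ice_hw { enum ice_mac_type mac_type; };

#define E800_PRTDCB_FCRTV                  0x001E4600
#define E800_PRTDCB_FCRTV_FC_REFRESH_TH_M  MAKEMASK(0xFFFFu, 0)

/* Local stub standing in for the driver's MMIO write helper. */
static void wr32(struct ice_hw *hw, uint32_t reg, uint32_t val)
{
	(void)hw;
	printf("write 0x%08x <- 0x%08x\n", (unsigned)reg, (unsigned)val);
}

/* Program the pause refresh threshold only on parts that have the register. */
static void set_fc_refresh_threshold(struct ice_hw *hw, uint16_t thresh)
{
	if (hw->mac_type == ICE_MAC_E830)
		return;		/* no E830 equivalent of E800_PRTDCB_FCRTV here */

	wr32(hw, E800_PRTDCB_FCRTV, thresh & E800_PRTDCB_FCRTV_FC_REFRESH_TH_M);
}

int main(void)
{
	struct ice_hw hw = { .mac_type = ICE_MAC_E810 };

	set_fc_refresh_threshold(&hw, 0x1000);
	return 0;
}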
@@ -2632,15 +2659,23 @@
#define GL_PREEXT_FLGS_L1SEL0_1(_i) (0x0020F06C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PREEXT_FLGS_L1SEL0_1_MAX_INDEX 2
#define GL_PREEXT_FLGS_L1SEL0_1_FLS0_S 0
-#define GL_PREEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0)
+#define GL_PREEXT_FLGS_L1SEL0_1_FLS0_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_FLGS_L1SEL0_1_FLS0_M : E800_GL_PREEXT_FLGS_L1SEL0_1_FLS0_M)
+#define E800_GL_PREEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0)
+#define E830_GL_PREEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x3FF, 0)
#define GL_PREEXT_FLGS_L1SEL0_1_FLS1_S 16
-#define GL_PREEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16)
+#define GL_PREEXT_FLGS_L1SEL0_1_FLS1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_FLGS_L1SEL0_1_FLS1_M : E800_GL_PREEXT_FLGS_L1SEL0_1_FLS1_M)
+#define E800_GL_PREEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16)
+#define E830_GL_PREEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x3FF, 16)
#define GL_PREEXT_FLGS_L1SEL2_3(_i) (0x0020F078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PREEXT_FLGS_L1SEL2_3_MAX_INDEX 2
#define GL_PREEXT_FLGS_L1SEL2_3_FLS2_S 0
-#define GL_PREEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0)
+#define GL_PREEXT_FLGS_L1SEL2_3_FLS2_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_FLGS_L1SEL2_3_FLS2_M : E800_GL_PREEXT_FLGS_L1SEL2_3_FLS2_M)
+#define E800_GL_PREEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0)
+#define E830_GL_PREEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x3FF, 0)
#define GL_PREEXT_FLGS_L1SEL2_3_FLS3_S 16
-#define GL_PREEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16)
+#define GL_PREEXT_FLGS_L1SEL2_3_FLS3_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_FLGS_L1SEL2_3_FLS3_M : E800_GL_PREEXT_FLGS_L1SEL2_3_FLS3_M)
+#define E800_GL_PREEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16)
+#define E830_GL_PREEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x3FF, 16)
#define GL_PREEXT_FLGS_L1TBL(_i) (0x0020F060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PREEXT_FLGS_L1TBL_MAX_INDEX 2
#define GL_PREEXT_FLGS_L1TBL_LSB_S 0
@@ -2798,13 +2833,21 @@
#define GL_PSTEXT_CDMD_L1SEL(_i) (0x0020E054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PSTEXT_CDMD_L1SEL_MAX_INDEX 2
#define GL_PSTEXT_CDMD_L1SEL_RX_SEL_S 0
-#define GL_PSTEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0)
+#define GL_PSTEXT_CDMD_L1SEL_RX_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_CDMD_L1SEL_RX_SEL_M : E800_GL_PSTEXT_CDMD_L1SEL_RX_SEL_M)
+#define E800_GL_PSTEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0)
+#define E830_GL_PSTEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x3F, 0)
#define GL_PSTEXT_CDMD_L1SEL_TX_SEL_S 8
-#define GL_PSTEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8)
+#define GL_PSTEXT_CDMD_L1SEL_TX_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_CDMD_L1SEL_TX_SEL_M : E800_GL_PSTEXT_CDMD_L1SEL_TX_SEL_M)
+#define E800_GL_PSTEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8)
+#define E830_GL_PSTEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x3F, 8)
#define GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_S 16
-#define GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16)
+#define GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M : E800_GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M)
+#define E800_GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16)
+#define E830_GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x3F, 16)
#define GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_S 24
-#define GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24)
+#define GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M : E800_GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M)
+#define E800_GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24)
+#define E830_GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x3F, 24)
#define GL_PSTEXT_CDMD_L1SEL_BIDIR_ENA_S 30
#define GL_PSTEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30)
#define GL_PSTEXT_CTLTBL_L2ADDR(_i) (0x0020E084 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
@@ -2834,15 +2877,23 @@
#define GL_PSTEXT_FLGS_L1SEL0_1(_i) (0x0020E06C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PSTEXT_FLGS_L1SEL0_1_MAX_INDEX 2
#define GL_PSTEXT_FLGS_L1SEL0_1_FLS0_S 0
-#define GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0)
+#define GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M : E800_GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M)
+#define E800_GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0)
+#define E830_GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x3FF, 0)
#define GL_PSTEXT_FLGS_L1SEL0_1_FLS1_S 16
-#define GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16)
+#define GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M : E800_GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M)
+#define E800_GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16)
+#define E830_GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x3FF, 16)
#define GL_PSTEXT_FLGS_L1SEL2_3(_i) (0x0020E078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PSTEXT_FLGS_L1SEL2_3_MAX_INDEX 2
#define GL_PSTEXT_FLGS_L1SEL2_3_FLS2_S 0
-#define GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0)
+#define GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M : E800_GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M)
+#define E800_GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0)
+#define E830_GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x3FF, 0)
#define GL_PSTEXT_FLGS_L1SEL2_3_FLS3_S 16
-#define GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16)
+#define GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M : E800_GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M)
+#define E800_GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16)
+#define E830_GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x3FF, 16)
#define GL_PSTEXT_FLGS_L1TBL(_i) (0x0020E060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PSTEXT_FLGS_L1TBL_MAX_INDEX 2
#define GL_PSTEXT_FLGS_L1TBL_LSB_S 0
@@ -4424,11 +4475,11 @@
#define GLTPB_100G_MAC_FC_THRESH_PORT0_FC_THRESH_M MAKEMASK(0xFFFF, 0)
#define GLTPB_100G_MAC_FC_THRESH_PORT1_FC_THRESH_S 16
#define GLTPB_100G_MAC_FC_THRESH_PORT1_FC_THRESH_M MAKEMASK(0xFFFF, 16)
-#define GLTPB_100G_RPB_FC_THRESH 0x0009963C /* Reset Source: CORER */
-#define GLTPB_100G_RPB_FC_THRESH_PORT0_FC_THRESH_S 0
-#define GLTPB_100G_RPB_FC_THRESH_PORT0_FC_THRESH_M MAKEMASK(0xFFFF, 0)
-#define GLTPB_100G_RPB_FC_THRESH_PORT1_FC_THRESH_S 16
-#define GLTPB_100G_RPB_FC_THRESH_PORT1_FC_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E800_GLTPB_100G_RPB_FC_THRESH 0x0009963C /* Reset Source: CORER */
+#define E800_GLTPB_100G_RPB_FC_THRESH_PORT0_FC_THRESH_S 0
+#define E800_GLTPB_100G_RPB_FC_THRESH_PORT0_FC_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E800_GLTPB_100G_RPB_FC_THRESH_PORT1_FC_THRESH_S 16
+#define E800_GLTPB_100G_RPB_FC_THRESH_PORT1_FC_THRESH_M MAKEMASK(0xFFFF, 16)
#define GLTPB_PACING_10G 0x000994E4 /* Reset Source: CORER */
#define GLTPB_PACING_10G_N_S 0
#define GLTPB_PACING_10G_N_M MAKEMASK(0xFF, 0)
@@ -4484,8 +4535,8 @@
#define GL_UFUSE_SOC_SOC_TYPE_M BIT(10)
#define GL_UFUSE_SOC_BTS_MODE_S 11
#define GL_UFUSE_SOC_BTS_MODE_M BIT(11)
-#define GL_UFUSE_SOC_SPARE_FUSES_S 12
-#define GL_UFUSE_SOC_SPARE_FUSES_M MAKEMASK(0xF, 12)
+#define E800_GL_UFUSE_SOC_SPARE_FUSES_S 12
+#define E800_GL_UFUSE_SOC_SPARE_FUSES_M MAKEMASK(0xF, 12)
#define EMPINT_GPIO_ENA 0x000880C0 /* Reset Source: POR */
#define EMPINT_GPIO_ENA_GPIO0_ENA_S 0
#define EMPINT_GPIO_ENA_GPIO0_ENA_M BIT(0)
@@ -4572,7 +4623,9 @@
#define GLINT_TSYN_PFMSTR_PF_MASTER_M MAKEMASK(0x7, 0)
#define GLINT_TSYN_PHY 0x0016CC50 /* Reset Source: CORER */
#define GLINT_TSYN_PHY_PHY_INDX_S 0
-#define GLINT_TSYN_PHY_PHY_INDX_M MAKEMASK(0x1F, 0)
+#define GLINT_TSYN_PHY_PHY_INDX_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLINT_TSYN_PHY_PHY_INDX_M : E800_GLINT_TSYN_PHY_PHY_INDX_M)
+#define E800_GLINT_TSYN_PHY_PHY_INDX_M MAKEMASK(0x1F, 0)
+#define E830_GLINT_TSYN_PHY_PHY_INDX_M MAKEMASK(0xFF, 0)
#define GLINT_VECT2FUNC(_INT) (0x00162000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
#define GLINT_VECT2FUNC_MAX_INDEX 2047
#define GLINT_VECT2FUNC_VF_NUM_S 0
@@ -4632,9 +4685,11 @@
#define PF0INT_OICR_CPM_QUEUE_S 1
#define PF0INT_OICR_CPM_QUEUE_M BIT(1)
#define PF0INT_OICR_CPM_RSV1_S 2
-#define PF0INT_OICR_CPM_RSV1_M MAKEMASK(0xFF, 2)
-#define PF0INT_OICR_CPM_HH_COMP_S 10
-#define PF0INT_OICR_CPM_HH_COMP_M BIT(10)
+#define PF0INT_OICR_CPM_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PF0INT_OICR_CPM_RSV1_M : E800_PF0INT_OICR_CPM_RSV1_M)
+#define E800_PF0INT_OICR_CPM_RSV1_M MAKEMASK(0xFF, 2)
+#define E830_PF0INT_OICR_CPM_RSV1_M MAKEMASK(0x3F, 2)
+#define E800_PF0INT_OICR_CPM_HH_COMP_S 10
+#define E800_PF0INT_OICR_CPM_HH_COMP_M BIT(10)
#define PF0INT_OICR_CPM_TSYN_TX_S 11
#define PF0INT_OICR_CPM_TSYN_TX_M BIT(11)
#define PF0INT_OICR_CPM_TSYN_EVNT_S 12
@@ -4723,9 +4778,11 @@
#define PF0INT_OICR_HLP_QUEUE_S 1
#define PF0INT_OICR_HLP_QUEUE_M BIT(1)
#define PF0INT_OICR_HLP_RSV1_S 2
-#define PF0INT_OICR_HLP_RSV1_M MAKEMASK(0xFF, 2)
-#define PF0INT_OICR_HLP_HH_COMP_S 10
-#define PF0INT_OICR_HLP_HH_COMP_M BIT(10)
+#define PF0INT_OICR_HLP_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PF0INT_OICR_HLP_RSV1_M : E800_PF0INT_OICR_HLP_RSV1_M)
+#define E800_PF0INT_OICR_HLP_RSV1_M MAKEMASK(0xFF, 2)
+#define E830_PF0INT_OICR_HLP_RSV1_M MAKEMASK(0x3F, 2)
+#define E800_PF0INT_OICR_HLP_HH_COMP_S 10
+#define E800_PF0INT_OICR_HLP_HH_COMP_M BIT(10)
#define PF0INT_OICR_HLP_TSYN_TX_S 11
#define PF0INT_OICR_HLP_TSYN_TX_M BIT(11)
#define PF0INT_OICR_HLP_TSYN_EVNT_S 12
@@ -4772,9 +4829,11 @@
#define PF0INT_OICR_PSM_QUEUE_S 1
#define PF0INT_OICR_PSM_QUEUE_M BIT(1)
#define PF0INT_OICR_PSM_RSV1_S 2
-#define PF0INT_OICR_PSM_RSV1_M MAKEMASK(0xFF, 2)
-#define PF0INT_OICR_PSM_HH_COMP_S 10
-#define PF0INT_OICR_PSM_HH_COMP_M BIT(10)
+#define PF0INT_OICR_PSM_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PF0INT_OICR_PSM_RSV1_M : E800_PF0INT_OICR_PSM_RSV1_M)
+#define E800_PF0INT_OICR_PSM_RSV1_M MAKEMASK(0xFF, 2)
+#define E830_PF0INT_OICR_PSM_RSV1_M MAKEMASK(0x3F, 2)
+#define E800_PF0INT_OICR_PSM_HH_COMP_S 10
+#define E800_PF0INT_OICR_PSM_HH_COMP_M BIT(10)
#define PF0INT_OICR_PSM_TSYN_TX_S 11
#define PF0INT_OICR_PSM_TSYN_TX_M BIT(11)
#define PF0INT_OICR_PSM_TSYN_EVNT_S 12
@@ -4895,9 +4954,11 @@
#define PFINT_OICR_QUEUE_S 1
#define PFINT_OICR_QUEUE_M BIT(1)
#define PFINT_OICR_RSV1_S 2
-#define PFINT_OICR_RSV1_M MAKEMASK(0xFF, 2)
-#define PFINT_OICR_HH_COMP_S 10
-#define PFINT_OICR_HH_COMP_M BIT(10)
+#define PFINT_OICR_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFINT_OICR_RSV1_M : E800_PFINT_OICR_RSV1_M)
+#define E800_PFINT_OICR_RSV1_M MAKEMASK(0xFF, 2)
+#define E830_PFINT_OICR_RSV1_M MAKEMASK(0x3F, 2)
+#define E800_PFINT_OICR_HH_COMP_S 10
+#define E800_PFINT_OICR_HH_COMP_M BIT(10)
#define PFINT_OICR_TSYN_TX_S 11
#define PFINT_OICR_TSYN_TX_M BIT(11)
#define PFINT_OICR_TSYN_EVNT_S 12
@@ -4963,7 +5024,9 @@
#define PFINT_SB_CTL_INTEVENT_M BIT(31)
#define PFINT_TSYN_MSK 0x0016C980 /* Reset Source: CORER */
#define PFINT_TSYN_MSK_PHY_INDX_S 0
-#define PFINT_TSYN_MSK_PHY_INDX_M MAKEMASK(0x1F, 0)
+#define PFINT_TSYN_MSK_PHY_INDX_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFINT_TSYN_MSK_PHY_INDX_M : E800_PFINT_TSYN_MSK_PHY_INDX_M)
+#define E800_PFINT_TSYN_MSK_PHY_INDX_M MAKEMASK(0x1F, 0)
+#define E830_PFINT_TSYN_MSK_PHY_INDX_M MAKEMASK(0xFF, 0)
#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
#define QINT_RQCTL_MAX_INDEX 2047
#define QINT_RQCTL_MSIX_INDX_S 0
@@ -5230,76 +5293,96 @@
#define VSILAN_QTABLE_QINDEX_0_M MAKEMASK(0x7FF, 0)
#define VSILAN_QTABLE_QINDEX_1_S 16
#define VSILAN_QTABLE_QINDEX_1_M MAKEMASK(0x7FF, 16)
-#define PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E31C0 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_S 0
-#define PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_M BIT(0)
-#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E34C0 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_S 0
-#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_M BIT(0)
-#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E35C0 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_S 0
-#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_M BIT(0)
-#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E36C0 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_S 0
-#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_M BIT(0)
-#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3220 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_S 0
-#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_M MAKEMASK(0xFFFFFFFF, 0)
-#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3240 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_S 0
-#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E3180 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_S 0
-#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0)
-#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3280 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_S 0
-#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_M MAKEMASK(0xFFFFFFFF, 0)
-#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E32A0 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_S 0
-#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_HSEC_CTL_RX_QUANTA_S 0x001E3C40 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_S 0
-#define PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E31A0 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_S 0
-#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0)
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E36E0 + ((_i) * 32)) /* _i=0...8 */ /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_S 0
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32)) /* _i=0...8 */ /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
-#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_S 0
-#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E3960 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_S 0
-#define PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_M MAKEMASK(0xFFFFFFFF, 0)
-#define PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E3980 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_S 0
-#define PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_LINK_DOWN_COUNTER 0x001E47C0 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E31C0 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_M BIT(0)
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E34C0 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_M BIT(0)
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E35C0 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_M BIT(0)
+#define E800_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E36C0 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_M BIT(0)
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3220 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3240 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_M MAKEMASK(0xFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E3180 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0)
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3280 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E32A0 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_M MAKEMASK(0xFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_RX_QUANTA_S 0x001E3C40 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_M MAKEMASK(0xFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E31A0 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_S 0
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0)
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E36E0 + ((_i) * 32)) /* _i=0...8 */ /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_S 0
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32)) /* _i=0...8 */ /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_S 0
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M MAKEMASK(0xFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E3960 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_S 0
+#define E800_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E3980 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_S 0
+#define E800_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_M MAKEMASK(0xFFFF, 0)
+#define PRTMAC_LINK_DOWN_COUNTER_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_LINK_DOWN_COUNTER : E800_PRTMAC_LINK_DOWN_COUNTER)
+#define E800_PRTMAC_LINK_DOWN_COUNTER 0x001E47C0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_LINK_DOWN_COUNTER 0x001E2460 /* Reset Source: GLOBR */
#define PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_S 0
#define PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_MD_OVRRIDE_ENABLE(_i) (0x001E3C60 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */
-#define PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX 7
+#define PRTMAC_MD_OVRRIDE_ENABLE_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_MD_OVRRIDE_ENABLE(_i) : E800_PRTMAC_MD_OVRRIDE_ENABLE(_i))
+#define E800_PRTMAC_MD_OVRRIDE_ENABLE(_i) (0x001E3C60 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */
+#define E830_PRTMAC_MD_OVRRIDE_ENABLE(_i) (0x001E2500 + ((_i) * 32)) /* _i=0...1 */ /* Reset Source: GLOBR */
+#define PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX : E800_PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX)
+#define E800_PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX 7
+#define E830_PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX 1
#define PRTMAC_MD_OVRRIDE_ENABLE_PRTMAC_MD_OVRRIDE_ENABLE_S 0
#define PRTMAC_MD_OVRRIDE_ENABLE_PRTMAC_MD_OVRRIDE_ENABLE_M MAKEMASK(0xFFFFFFFF, 0)
-#define PRTMAC_MD_OVRRIDE_VAL(_i) (0x001E3D60 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */
-#define PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX 7
+#define PRTMAC_MD_OVRRIDE_VAL_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_MD_OVRRIDE_VAL(_i) : E800_PRTMAC_MD_OVRRIDE_VAL(_i))
+#define E800_PRTMAC_MD_OVRRIDE_VAL(_i) (0x001E3D60 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */
+#define E830_PRTMAC_MD_OVRRIDE_VAL(_i) (0x001E2600 + ((_i) * 32)) /* _i=0...1 */ /* Reset Source: GLOBR */
+#define PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX : E800_PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX)
+#define E800_PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX 7
+#define E830_PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX 1
#define PRTMAC_MD_OVRRIDE_VAL_PRTMAC_MD_OVRRIDE_ENABLE_S 0
#define PRTMAC_MD_OVRRIDE_VAL_PRTMAC_MD_OVRRIDE_ENABLE_M MAKEMASK(0xFFFFFFFF, 0)
#define PRTMAC_RX_CNT_MRKR 0x001E48E0 /* Reset Source: GLOBR */
#define PRTMAC_RX_CNT_MRKR_RX_CNT_MRKR_S 0
#define PRTMAC_RX_CNT_MRKR_RX_CNT_MRKR_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_RX_PKT_DRP_CNT 0x001E3C20 /* Reset Source: GLOBR */
+#define PRTMAC_RX_PKT_DRP_CNT_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_RX_PKT_DRP_CNT : E800_PRTMAC_RX_PKT_DRP_CNT)
+#define E800_PRTMAC_RX_PKT_DRP_CNT 0x001E3C20 /* Reset Source: GLOBR */
+#define E830_PRTMAC_RX_PKT_DRP_CNT 0x001E2420 /* Reset Source: GLOBR */
#define PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_S 0
-#define PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S 16
-#define PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 16)
+#define PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M : E800_PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M)
+#define E800_PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M MAKEMASK(0xFFF, 0)
+#define PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S : E800_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S)
+#define E800_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S 16
+#define E830_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S 28
+#define PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M : E800_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M)
+#define E800_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M MAKEMASK(0xF, 28)
#define PRTMAC_TX_CNT_MRKR 0x001E48C0 /* Reset Source: GLOBR */
#define PRTMAC_TX_CNT_MRKR_TX_CNT_MRKR_S 0
#define PRTMAC_TX_CNT_MRKR_TX_CNT_MRKR_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_TX_LNK_UP_CNT 0x001E4840 /* Reset Source: GLOBR */
+#define PRTMAC_TX_LNK_UP_CNT_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_TX_LNK_UP_CNT : E800_PRTMAC_TX_LNK_UP_CNT)
+#define E800_PRTMAC_TX_LNK_UP_CNT 0x001E4840 /* Reset Source: GLOBR */
+#define E830_PRTMAC_TX_LNK_UP_CNT 0x001E2480 /* Reset Source: GLOBR */
#define PRTMAC_TX_LNK_UP_CNT_TX_LINK_UP_CNT_S 0
#define PRTMAC_TX_LNK_UP_CNT_TX_LINK_UP_CNT_M MAKEMASK(0xFFFF, 0)
#define GL_MDCK_CFG1_TX_PQM 0x002D2DF4 /* Reset Source: CORER */
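For several PRTMAC counters E830 moves the register itself (for example PRTMAC_RX_PKT_DRP_CNT from 0x001E3C20 to 0x001E2420) and also relocates the marker-drop field from bits 16..31 to bits 28..31, so the address, shift, and mask all go through _BY_MAC selectors. A self-contained sketch of decoding that counter is below; rd32() is a local stub returning a canned value in place of the driver's MMIO read helper, and the reduced types are stand-ins.

#include <stdint.h>
#include <stdio.h>

#define MAKEMASK(m, s) ((m) << (s))

enum ice_mac_type { ICE_MAC_E810, ICE_MAC_E830 };	/* reduced stand-in */
struct ice_hw { enum ice_mac_type mac_type; };

#define E800_PRTMAC_RX_PKT_DRP_CNT	0x001E3C20
#define E830_PRTMAC_RX_PKT_DRP_CNT	0x001E2420
#define PRTMAC_RX_PKT_DRP_CNT_BY_MAC(hw) \
	((hw)->mac_type == ICE_MAC_E830 ? \
	 E830_PRTMAC_RX_PKT_DRP_CNT : E800_PRTMAC_RX_PKT_DRP_CNT)
#define E800_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S	16
#define E830_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S	28
#define PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S_BY_MAC(hw) \
	((hw)->mac_type == ICE_MAC_E830 ? \
	 E830_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S : \
	 E800_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S)
#define E800_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M	MAKEMASK(0xFFFFu, 16)
#define E830_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M	MAKEMASK(0xFu, 28)
#define PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M_BY_MAC(hw) \
	((hw)->mac_type == ICE_MAC_E830 ? \
	 E830_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M : \
	 E800_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M)

/* Local stub returning a canned value in place of the driver's MMIO read. */
static uint32_t rd32(struct ice_hw *hw, uint32_t reg)
{
	(void)hw; (void)reg;
	return 0x30000007u;
}

static uint32_t rx_marker_drops(struct ice_hw *hw)
{
	uint32_t raw = rd32(hw, PRTMAC_RX_PKT_DRP_CNT_BY_MAC(hw));

	return (raw & PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M_BY_MAC(hw)) >>
	    PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S_BY_MAC(hw);
}

int main(void)
{
	struct ice_hw hw = { .mac_type = ICE_MAC_E830 };

	/* 0x30000007 decodes to 3 marker drops on E830 (bits 28..31); the
	 * same raw value would read as 0x3000 through the E800 layout.
	 */
	printf("marker drops: %u\n", (unsigned)rx_marker_drops(&hw));
	return 0;
}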
@@ -5360,8 +5443,8 @@
#define GL_MDCK_EN_TX_PQM_ILLEGAL_VF_QNUM_M BIT(24)
#define GL_MDCK_EN_TX_PQM_QTAIL_GT_RING_LENGTH_S 25
#define GL_MDCK_EN_TX_PQM_QTAIL_GT_RING_LENGTH_M BIT(25)
-#define GL_MDCK_EN_TX_PQM_RSVD_S 26
-#define GL_MDCK_EN_TX_PQM_RSVD_M MAKEMASK(0x3F, 26)
+#define E800_GL_MDCK_EN_TX_PQM_RSVD_S 26
+#define E800_GL_MDCK_EN_TX_PQM_RSVD_M MAKEMASK(0x3F, 26)
#define GL_MDCK_RX 0x0029422C /* Reset Source: CORER */
#define GL_MDCK_RX_DESC_ADDR_S 0
#define GL_MDCK_RX_DESC_ADDR_M BIT(0)
@@ -5470,17 +5553,24 @@
#define GL_FWRESETCNT 0x00083100 /* Reset Source: POR */
#define GL_FWRESETCNT_FWRESETCNT_S 0
#define GL_FWRESETCNT_FWRESETCNT_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_MNG_FW_RAM_STAT 0x0008309C /* Reset Source: POR */
+#define GL_MNG_FW_RAM_STAT_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_FW_RAM_STAT : E800_GL_MNG_FW_RAM_STAT)
+#define E800_GL_MNG_FW_RAM_STAT 0x0008309C /* Reset Source: POR */
+#define E830_GL_MNG_FW_RAM_STAT 0x000830F4 /* Reset Source: POR */
#define GL_MNG_FW_RAM_STAT_FW_RAM_RST_STAT_S 0
#define GL_MNG_FW_RAM_STAT_FW_RAM_RST_STAT_M BIT(0)
#define GL_MNG_FW_RAM_STAT_MNG_MEM_ECC_ERR_S 1
#define GL_MNG_FW_RAM_STAT_MNG_MEM_ECC_ERR_M BIT(1)
#define GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */
-#define GL_MNG_FWSM_FW_LOADING_M BIT(30)
#define GL_MNG_FWSM_FW_MODES_S 0
-#define GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x7, 0)
-#define GL_MNG_FWSM_RSV0_S 3
-#define GL_MNG_FWSM_RSV0_M MAKEMASK(0x7F, 3)
+#define GL_MNG_FWSM_FW_MODES_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_FWSM_FW_MODES_M : E800_GL_MNG_FWSM_FW_MODES_M)
+#define E800_GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x7, 0)
+#define E830_GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x3, 0)
+#define GL_MNG_FWSM_RSV0_S_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_FWSM_RSV0_S : E800_GL_MNG_FWSM_RSV0_S)
+#define E800_GL_MNG_FWSM_RSV0_S 3
+#define E830_GL_MNG_FWSM_RSV0_S 2
+#define GL_MNG_FWSM_RSV0_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_FWSM_RSV0_M : E800_GL_MNG_FWSM_RSV0_M)
+#define E800_GL_MNG_FWSM_RSV0_M MAKEMASK(0x7F, 3)
+#define E830_GL_MNG_FWSM_RSV0_M MAKEMASK(0xFF, 2)
#define GL_MNG_FWSM_EEP_RELOAD_IND_S 10
#define GL_MNG_FWSM_EEP_RELOAD_IND_M BIT(10)
#define GL_MNG_FWSM_RSV1_S 11
@@ -5504,12 +5594,20 @@
#define GL_MNG_HWARB_CTRL 0x000B6130 /* Reset Source: POR */
#define GL_MNG_HWARB_CTRL_NCSI_ARB_EN_S 0
#define GL_MNG_HWARB_CTRL_NCSI_ARB_EN_M BIT(0)
-#define GL_MNG_SHA_EXTEND(_i) (0x00083120 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: EMPR */
-#define GL_MNG_SHA_EXTEND_MAX_INDEX 7
+#define GL_MNG_SHA_EXTEND_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_SHA_EXTEND(_i) : E800_GL_MNG_SHA_EXTEND(_i))
+#define E800_GL_MNG_SHA_EXTEND(_i) (0x00083120 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: EMPR */
+#define E830_GL_MNG_SHA_EXTEND(_i) (0x00083340 + ((_i) * 4)) /* _i=0...11 */ /* Reset Source: EMPR */
+#define GL_MNG_SHA_EXTEND_MAX_INDEX_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_SHA_EXTEND_MAX_INDEX : E800_GL_MNG_SHA_EXTEND_MAX_INDEX)
+#define E800_GL_MNG_SHA_EXTEND_MAX_INDEX 7
+#define E830_GL_MNG_SHA_EXTEND_MAX_INDEX 11
#define GL_MNG_SHA_EXTEND_GL_MNG_SHA_EXTEND_S 0
#define GL_MNG_SHA_EXTEND_GL_MNG_SHA_EXTEND_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_MNG_SHA_EXTEND_ROM(_i) (0x00083160 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: EMPR */
-#define GL_MNG_SHA_EXTEND_ROM_MAX_INDEX 7
+#define GL_MNG_SHA_EXTEND_ROM_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_SHA_EXTEND_ROM(_i) : E800_GL_MNG_SHA_EXTEND_ROM(_i))
+#define E800_GL_MNG_SHA_EXTEND_ROM(_i) (0x00083160 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: EMPR */
+#define E830_GL_MNG_SHA_EXTEND_ROM(_i) (0x000832C0 + ((_i) * 4)) /* _i=0...11 */ /* Reset Source: EMPR */
+#define GL_MNG_SHA_EXTEND_ROM_MAX_INDEX_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_SHA_EXTEND_ROM_MAX_INDEX : E800_GL_MNG_SHA_EXTEND_ROM_MAX_INDEX)
+#define E800_GL_MNG_SHA_EXTEND_ROM_MAX_INDEX 7
+#define E830_GL_MNG_SHA_EXTEND_ROM_MAX_INDEX 11
#define GL_MNG_SHA_EXTEND_ROM_GL_MNG_SHA_EXTEND_ROM_S 0
#define GL_MNG_SHA_EXTEND_ROM_GL_MNG_SHA_EXTEND_ROM_M MAKEMASK(0xFFFFFFFF, 0)
#define GL_MNG_SHA_EXTEND_STATUS 0x00083148 /* Reset Source: EMPR */
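The SHA-extend measurement registers show the indexed form of the same pattern: E830 widens the array from 8 to 12 words and moves its base, so both the per-index address and the loop bound come from _BY_MAC selectors. A self-contained sketch of walking the array follows; rd32() is again a local stub (it just echoes the address so the loop prints something), and the reduced types are stand-ins.

#include <stdint.h>
#include <stdio.h>

enum ice_mac_type { ICE_MAC_E810, ICE_MAC_E830 };	/* reduced stand-in */
struct ice_hw { enum ice_mac_type mac_type; };

#define E800_GL_MNG_SHA_EXTEND(_i)	(0x00083120 + ((_i) * 4)) /* _i=0...7 */
#define E830_GL_MNG_SHA_EXTEND(_i)	(0x00083340 + ((_i) * 4)) /* _i=0...11 */
#define GL_MNG_SHA_EXTEND_BY_MAC(hw, _i) \
	((hw)->mac_type == ICE_MAC_E830 ? \
	 E830_GL_MNG_SHA_EXTEND(_i) : E800_GL_MNG_SHA_EXTEND(_i))
#define E800_GL_MNG_SHA_EXTEND_MAX_INDEX	7
#define E830_GL_MNG_SHA_EXTEND_MAX_INDEX	11
#define GL_MNG_SHA_EXTEND_MAX_INDEX_BY_MAC(hw) \
	((hw)->mac_type == ICE_MAC_E830 ? \
	 E830_GL_MNG_SHA_EXTEND_MAX_INDEX : E800_GL_MNG_SHA_EXTEND_MAX_INDEX)

/* Local stub standing in for the driver's MMIO read helper. */
static uint32_t rd32(struct ice_hw *hw, uint32_t reg)
{
	(void)hw;
	return reg;
}

int main(void)
{
	struct ice_hw hw = { .mac_type = ICE_MAC_E830 };
	int i;

	/* 8 words on E800-family parts, 12 on E830. */
	for (i = 0; i <= GL_MNG_SHA_EXTEND_MAX_INDEX_BY_MAC(&hw); i++)
		printf("SHA_EXTEND[%d] = 0x%08x\n", i,
		    (unsigned)rd32(&hw, GL_MNG_SHA_EXTEND_BY_MAC(&hw, i)));
	return 0;
}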
@@ -5908,8 +6006,8 @@
#define GLPCI_CAPSUP 0x0009DE8C /* Reset Source: PCIR */
#define GLPCI_CAPSUP_PCIE_VER_S 0
#define GLPCI_CAPSUP_PCIE_VER_M BIT(0)
-#define GLPCI_CAPSUP_RESERVED_2_S 1
-#define GLPCI_CAPSUP_RESERVED_2_M BIT(1)
+#define E800_GLPCI_CAPSUP_RESERVED_2_S 1
+#define E800_GLPCI_CAPSUP_RESERVED_2_M BIT(1)
#define GLPCI_CAPSUP_LTR_EN_S 2
#define GLPCI_CAPSUP_LTR_EN_M BIT(2)
#define GLPCI_CAPSUP_TPH_EN_S 3
@@ -6359,9 +6457,9 @@
#define PFPE_MRTEIDXMASK 0x0050A300 /* Reset Source: PFR */
#define PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_S 0
#define PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0)
-#define PFPE_RCVUNEXPECTEDERROR 0x0050A380 /* Reset Source: PFR */
-#define PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_S 0
-#define PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0)
+#define E800_PFPE_RCVUNEXPECTEDERROR 0x0050A380 /* Reset Source: PFR */
+#define E800_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_S 0
+#define E800_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0)
#define PFPE_TCPNOWTIMER 0x0050A280 /* Reset Source: PFR */
#define PFPE_TCPNOWTIMER_TCP_NOW_S 0
#define PFPE_TCPNOWTIMER_TCP_NOW_M MAKEMASK(0xFFFFFFFF, 0)
@@ -6430,10 +6528,10 @@
#define VFPE_IPCONFIG0_USEENTIREIDRANGE_M BIT(16)
#define VFPE_IPCONFIG0_UDP_SRC_PORT_MASK_EN_S 17
#define VFPE_IPCONFIG0_UDP_SRC_PORT_MASK_EN_M BIT(17)
-#define VFPE_RCVUNEXPECTEDERROR(_VF) (0x00509C00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
-#define VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 255
-#define VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_S 0
-#define VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0)
+#define E800_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00509C00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define E800_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 255
+#define E800_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_S 0
+#define E800_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0)
#define VFPE_TCPNOWTIMER(_VF) (0x00509400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
#define VFPE_TCPNOWTIMER_MAX_INDEX 255
#define VFPE_TCPNOWTIMER_TCP_NOW_S 0
@@ -7137,15 +7235,21 @@
#define GLRPB_DHW(_i) (0x000AC000 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
#define GLRPB_DHW_MAX_INDEX 15
#define GLRPB_DHW_DHW_TCN_S 0
-#define GLRPB_DHW_DHW_TCN_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_DHW_DHW_TCN_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_DHW_DHW_TCN_M : E800_GLRPB_DHW_DHW_TCN_M)
+#define E800_GLRPB_DHW_DHW_TCN_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRPB_DHW_DHW_TCN_M MAKEMASK(0x3FFFFF, 0)
#define GLRPB_DLW(_i) (0x000AC044 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
#define GLRPB_DLW_MAX_INDEX 15
#define GLRPB_DLW_DLW_TCN_S 0
-#define GLRPB_DLW_DLW_TCN_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_DLW_DLW_TCN_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_DLW_DLW_TCN_M : E800_GLRPB_DLW_DLW_TCN_M)
+#define E800_GLRPB_DLW_DLW_TCN_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRPB_DLW_DLW_TCN_M MAKEMASK(0x3FFFFF, 0)
#define GLRPB_DPS(_i) (0x000AC084 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
#define GLRPB_DPS_MAX_INDEX 15
#define GLRPB_DPS_DPS_TCN_S 0
-#define GLRPB_DPS_DPS_TCN_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_DPS_DPS_TCN_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_DPS_DPS_TCN_M : E800_GLRPB_DPS_DPS_TCN_M)
+#define E800_GLRPB_DPS_DPS_TCN_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRPB_DPS_DPS_TCN_M MAKEMASK(0x3FFFFF, 0)
#define GLRPB_DSI_EN 0x000AC324 /* Reset Source: CORER */
#define GLRPB_DSI_EN_DSI_EN_S 0
#define GLRPB_DSI_EN_DSI_EN_M BIT(0)
@@ -7154,15 +7258,21 @@
#define GLRPB_SHW(_i) (0x000AC120 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLRPB_SHW_MAX_INDEX 7
#define GLRPB_SHW_SHW_S 0
-#define GLRPB_SHW_SHW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_SHW_SHW_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_SHW_SHW_M : E800_GLRPB_SHW_SHW_M)
+#define E800_GLRPB_SHW_SHW_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRPB_SHW_SHW_M MAKEMASK(0x3FFFFF, 0)
#define GLRPB_SLW(_i) (0x000AC140 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLRPB_SLW_MAX_INDEX 7
#define GLRPB_SLW_SLW_S 0
-#define GLRPB_SLW_SLW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_SLW_SLW_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_SLW_SLW_M : E800_GLRPB_SLW_SLW_M)
+#define E800_GLRPB_SLW_SLW_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRPB_SLW_SLW_M MAKEMASK(0x3FFFFF, 0)
#define GLRPB_SPS(_i) (0x000AC0C4 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLRPB_SPS_MAX_INDEX 7
#define GLRPB_SPS_SPS_TCN_S 0
-#define GLRPB_SPS_SPS_TCN_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_SPS_SPS_TCN_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_SPS_SPS_TCN_M : E800_GLRPB_SPS_SPS_TCN_M)
+#define E800_GLRPB_SPS_SPS_TCN_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRPB_SPS_SPS_TCN_M MAKEMASK(0x3FFFFF, 0)
#define GLRPB_TC_CFG(_i) (0x000AC2A4 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLRPB_TC_CFG_MAX_INDEX 31
#define GLRPB_TC_CFG_D_POOL_S 0
@@ -7172,11 +7282,15 @@
#define GLRPB_TCHW(_i) (0x000AC330 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLRPB_TCHW_MAX_INDEX 31
#define GLRPB_TCHW_TCHW_S 0
-#define GLRPB_TCHW_TCHW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_TCHW_TCHW_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_TCHW_TCHW_M : E800_GLRPB_TCHW_TCHW_M)
+#define E800_GLRPB_TCHW_TCHW_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRPB_TCHW_TCHW_M MAKEMASK(0x3FFFFF, 0)
#define GLRPB_TCLW(_i) (0x000AC3B0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLRPB_TCLW_MAX_INDEX 31
#define GLRPB_TCLW_TCLW_S 0
-#define GLRPB_TCLW_TCLW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_TCLW_TCLW_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_TCLW_TCLW_M : E800_GLRPB_TCLW_TCLW_M)
+#define E800_GLRPB_TCLW_TCLW_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRPB_TCLW_TCLW_M MAKEMASK(0x3FFFFF, 0)
#define GLQF_APBVT(_i) (0x00450000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
#define GLQF_APBVT_MAX_INDEX 2047
#define GLQF_APBVT_APBVT_S 0
@@ -7189,9 +7303,13 @@
#define GLQF_FD_CLSN1_HITLBCNT_M MAKEMASK(0xFFFFFFFF, 0)
#define GLQF_FD_CNT 0x00460018 /* Reset Source: CORER */
#define GLQF_FD_CNT_FD_GCNT_S 0
-#define GLQF_FD_CNT_FD_GCNT_M MAKEMASK(0x7FFF, 0)
+#define GLQF_FD_CNT_FD_GCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLQF_FD_CNT_FD_GCNT_M : E800_GLQF_FD_CNT_FD_GCNT_M)
+#define E800_GLQF_FD_CNT_FD_GCNT_M MAKEMASK(0x7FFF, 0)
+#define E830_GLQF_FD_CNT_FD_GCNT_M MAKEMASK(0xFFFF, 0)
#define GLQF_FD_CNT_FD_BCNT_S 16
-#define GLQF_FD_CNT_FD_BCNT_M MAKEMASK(0x7FFF, 16)
+#define GLQF_FD_CNT_FD_BCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLQF_FD_CNT_FD_BCNT_M : E800_GLQF_FD_CNT_FD_BCNT_M)
+#define E800_GLQF_FD_CNT_FD_BCNT_M MAKEMASK(0x7FFF, 16)
+#define E830_GLQF_FD_CNT_FD_BCNT_M MAKEMASK(0xFFFF, 16)
#define GLQF_FD_CTL 0x00460000 /* Reset Source: CORER */
#define GLQF_FD_CTL_FDLONG_S 0
#define GLQF_FD_CTL_FDLONG_M MAKEMASK(0xF, 0)
@@ -7201,12 +7319,18 @@
#define GLQF_FD_CTL_FLT_ADDR_REPORT_M BIT(5)
#define GLQF_FD_SIZE 0x00460010 /* Reset Source: CORER */
#define GLQF_FD_SIZE_FD_GSIZE_S 0
-#define GLQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x7FFF, 0)
+#define GLQF_FD_SIZE_FD_GSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLQF_FD_SIZE_FD_GSIZE_M : E800_GLQF_FD_SIZE_FD_GSIZE_M)
+#define E800_GLQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x7FFF, 0)
+#define E830_GLQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0xFFFF, 0)
#define GLQF_FD_SIZE_FD_BSIZE_S 16
-#define GLQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x7FFF, 16)
+#define GLQF_FD_SIZE_FD_BSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLQF_FD_SIZE_FD_BSIZE_M : E800_GLQF_FD_SIZE_FD_BSIZE_M)
+#define E800_GLQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x7FFF, 16)
+#define E830_GLQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0xFFFF, 16)
#define GLQF_FDCNT_0 0x00460020 /* Reset Source: CORER */
#define GLQF_FDCNT_0_BUCKETCNT_S 0
-#define GLQF_FDCNT_0_BUCKETCNT_M MAKEMASK(0x7FFF, 0)
+#define GLQF_FDCNT_0_BUCKETCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLQF_FDCNT_0_BUCKETCNT_M : E800_GLQF_FDCNT_0_BUCKETCNT_M)
+#define E800_GLQF_FDCNT_0_BUCKETCNT_M MAKEMASK(0x7FFF, 0)
+#define E830_GLQF_FDCNT_0_BUCKETCNT_M MAKEMASK(0xFFFF, 0)
#define GLQF_FDCNT_0_CNT_NOT_VLD_S 31
#define GLQF_FDCNT_0_CNT_NOT_VLD_M BIT(31)
#define GLQF_FDEVICTENA(_i) (0x00452000 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */
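
Similarly, the widened E830 flow-director counter fields are intended to be masked through the *_M_BY_MAC() selectors defined above. A brief sketch of field extraction (variable names are illustrative; rd32() is assumed to be the driver's register read helper):

	u32 reg = rd32(hw, GLQF_FD_CNT);
	/* Mask width differs between E800 (0x7FFF) and E830 (0xFFFF). */
	u32 guaranteed = (reg & GLQF_FD_CNT_FD_GCNT_M_BY_MAC(hw)) >>
	    GLQF_FD_CNT_FD_GCNT_S;
	u32 best_effort = (reg & GLQF_FD_CNT_FD_BCNT_M_BY_MAC(hw)) >>
	    GLQF_FD_CNT_FD_BCNT_S;
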
@@ -7430,22 +7554,34 @@
#define GLQF_PROF2TC_REGION_7_M MAKEMASK(0x7, 29)
#define PFQF_FD_CNT 0x00460180 /* Reset Source: CORER */
#define PFQF_FD_CNT_FD_GCNT_S 0
-#define PFQF_FD_CNT_FD_GCNT_M MAKEMASK(0x7FFF, 0)
+#define PFQF_FD_CNT_FD_GCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFQF_FD_CNT_FD_GCNT_M : E800_PFQF_FD_CNT_FD_GCNT_M)
+#define E800_PFQF_FD_CNT_FD_GCNT_M MAKEMASK(0x7FFF, 0)
+#define E830_PFQF_FD_CNT_FD_GCNT_M MAKEMASK(0xFFFF, 0)
#define PFQF_FD_CNT_FD_BCNT_S 16
-#define PFQF_FD_CNT_FD_BCNT_M MAKEMASK(0x7FFF, 16)
+#define PFQF_FD_CNT_FD_BCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFQF_FD_CNT_FD_BCNT_M : E800_PFQF_FD_CNT_FD_BCNT_M)
+#define E800_PFQF_FD_CNT_FD_BCNT_M MAKEMASK(0x7FFF, 16)
+#define E830_PFQF_FD_CNT_FD_BCNT_M MAKEMASK(0xFFFF, 16)
#define PFQF_FD_ENA 0x0043A000 /* Reset Source: CORER */
#define PFQF_FD_ENA_FD_ENA_S 0
#define PFQF_FD_ENA_FD_ENA_M BIT(0)
#define PFQF_FD_SIZE 0x00460100 /* Reset Source: CORER */
#define PFQF_FD_SIZE_FD_GSIZE_S 0
-#define PFQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x7FFF, 0)
+#define PFQF_FD_SIZE_FD_GSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFQF_FD_SIZE_FD_GSIZE_M : E800_PFQF_FD_SIZE_FD_GSIZE_M)
+#define E800_PFQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x7FFF, 0)
+#define E830_PFQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0xFFFF, 0)
#define PFQF_FD_SIZE_FD_BSIZE_S 16
-#define PFQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x7FFF, 16)
+#define PFQF_FD_SIZE_FD_BSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFQF_FD_SIZE_FD_BSIZE_M : E800_PFQF_FD_SIZE_FD_BSIZE_M)
+#define E800_PFQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x7FFF, 16)
+#define E830_PFQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0xFFFF, 16)
#define PFQF_FD_SUBTRACT 0x00460200 /* Reset Source: CORER */
#define PFQF_FD_SUBTRACT_FD_GCNT_S 0
-#define PFQF_FD_SUBTRACT_FD_GCNT_M MAKEMASK(0x7FFF, 0)
+#define PFQF_FD_SUBTRACT_FD_GCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFQF_FD_SUBTRACT_FD_GCNT_M : E800_PFQF_FD_SUBTRACT_FD_GCNT_M)
+#define E800_PFQF_FD_SUBTRACT_FD_GCNT_M MAKEMASK(0x7FFF, 0)
+#define E830_PFQF_FD_SUBTRACT_FD_GCNT_M MAKEMASK(0xFFFF, 0)
#define PFQF_FD_SUBTRACT_FD_BCNT_S 16
-#define PFQF_FD_SUBTRACT_FD_BCNT_M MAKEMASK(0x7FFF, 16)
+#define PFQF_FD_SUBTRACT_FD_BCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFQF_FD_SUBTRACT_FD_BCNT_M : E800_PFQF_FD_SUBTRACT_FD_BCNT_M)
+#define E800_PFQF_FD_SUBTRACT_FD_BCNT_M MAKEMASK(0x7FFF, 16)
+#define E830_PFQF_FD_SUBTRACT_FD_BCNT_M MAKEMASK(0xFFFF, 16)
#define PFQF_HLUT(_i) (0x00430000 + ((_i) * 64)) /* _i=0...511 */ /* Reset Source: CORER */
#define PFQF_HLUT_MAX_INDEX 511
#define PFQF_HLUT_LUT0_S 0
@@ -7673,20 +7809,20 @@
#define GLPRT_AORCL_AORCL_M MAKEMASK(0xFFFFFFFF, 0)
#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLPRT_BPRCH_MAX_INDEX 7
-#define GLPRT_BPRCH_UPRCH_S 0
-#define GLPRT_BPRCH_UPRCH_M MAKEMASK(0xFF, 0)
+#define E800_GLPRT_BPRCH_UPRCH_S 0
+#define E800_GLPRT_BPRCH_UPRCH_M MAKEMASK(0xFF, 0)
#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLPRT_BPRCL_MAX_INDEX 7
-#define GLPRT_BPRCL_UPRCH_S 0
-#define GLPRT_BPRCL_UPRCH_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GLPRT_BPRCL_UPRCH_S 0
+#define E800_GLPRT_BPRCL_UPRCH_M MAKEMASK(0xFFFFFFFF, 0)
#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLPRT_BPTCH_MAX_INDEX 7
-#define GLPRT_BPTCH_UPRCH_S 0
-#define GLPRT_BPTCH_UPRCH_M MAKEMASK(0xFF, 0)
+#define E800_GLPRT_BPTCH_UPRCH_S 0
+#define E800_GLPRT_BPTCH_UPRCH_M MAKEMASK(0xFF, 0)
#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLPRT_BPTCL_MAX_INDEX 7
-#define GLPRT_BPTCL_UPRCH_S 0
-#define GLPRT_BPTCL_UPRCH_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GLPRT_BPTCL_UPRCH_S 0
+#define E800_GLPRT_BPTCL_UPRCH_M MAKEMASK(0xFFFFFFFF, 0)
#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLPRT_CRCERRS_MAX_INDEX 7
#define GLPRT_CRCERRS_CRCERRS_S 0
@@ -8001,8 +8137,8 @@
#define GLPRT_UPTCH_UPTCH_M MAKEMASK(0xFF, 0)
#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLPRT_UPTCL_MAX_INDEX 7
-#define GLPRT_UPTCL_VUPTCH_S 0
-#define GLPRT_UPTCL_VUPTCH_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GLPRT_UPTCL_VUPTCH_S 0
+#define E800_GLPRT_UPTCL_VUPTCH_M MAKEMASK(0xFFFFFFFF, 0)
#define GLSTAT_ACL_CNT_0_H(_i) (0x00388004 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLSTAT_ACL_CNT_0_H_MAX_INDEX 511
#define GLSTAT_ACL_CNT_0_H_CNT_MSB_S 0
@@ -8897,9 +9033,13 @@
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
#define VSIQF_FD_CNT_MAX_INDEX 767
#define VSIQF_FD_CNT_FD_GCNT_S 0
-#define VSIQF_FD_CNT_FD_GCNT_M MAKEMASK(0x3FFF, 0)
+#define VSIQF_FD_CNT_FD_GCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VSIQF_FD_CNT_FD_GCNT_M : E800_VSIQF_FD_CNT_FD_GCNT_M)
+#define E800_VSIQF_FD_CNT_FD_GCNT_M MAKEMASK(0x3FFF, 0)
+#define E830_VSIQF_FD_CNT_FD_GCNT_M MAKEMASK(0xFFFF, 0)
#define VSIQF_FD_CNT_FD_BCNT_S 16
-#define VSIQF_FD_CNT_FD_BCNT_M MAKEMASK(0x3FFF, 16)
+#define VSIQF_FD_CNT_FD_BCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VSIQF_FD_CNT_FD_BCNT_M : E800_VSIQF_FD_CNT_FD_BCNT_M)
+#define E800_VSIQF_FD_CNT_FD_BCNT_M MAKEMASK(0x3FFF, 16)
+#define E830_VSIQF_FD_CNT_FD_BCNT_M MAKEMASK(0xFFFF, 16)
#define VSIQF_FD_CTL1(_VSI) (0x00411000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSIQF_FD_CTL1_MAX_INDEX 767
#define VSIQF_FD_CTL1_FLT_ENA_S 0
@@ -8923,9 +9063,13 @@
#define VSIQF_FD_SIZE(_VSI) (0x00462000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSIQF_FD_SIZE_MAX_INDEX 767
#define VSIQF_FD_SIZE_FD_GSIZE_S 0
-#define VSIQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x3FFF, 0)
+#define VSIQF_FD_SIZE_FD_GSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VSIQF_FD_SIZE_FD_GSIZE_M : E800_VSIQF_FD_SIZE_FD_GSIZE_M)
+#define E800_VSIQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x3FFF, 0)
+#define E830_VSIQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0xFFFF, 0)
#define VSIQF_FD_SIZE_FD_BSIZE_S 16
-#define VSIQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x3FFF, 16)
+#define VSIQF_FD_SIZE_FD_BSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VSIQF_FD_SIZE_FD_BSIZE_M : E800_VSIQF_FD_SIZE_FD_BSIZE_M)
+#define E800_VSIQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x3FFF, 16)
+#define E830_VSIQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0xFFFF, 16)
#define VSIQF_HASH_CTL(_VSI) (0x0040D000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSIQF_HASH_CTL_MAX_INDEX 767
#define VSIQF_HASH_CTL_HASH_LUT_SEL_S 0
@@ -9049,7 +9193,9 @@
#define PFPM_WUS_FLX7_M BIT(23)
#define PFPM_WUS_FW_RST_WK_S 31
#define PFPM_WUS_FW_RST_WK_M BIT(31)
-#define PRTPM_SAH(_i) (0x001E3BA0 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
+#define PRTPM_SAH_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTPM_SAH(_i) : E800_PRTPM_SAH(_i))
+#define E800_PRTPM_SAH(_i) (0x001E3BA0 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
+#define E830_PRTPM_SAH(_i) (0x001E2380 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
#define PRTPM_SAH_MAX_INDEX 3
#define PRTPM_SAH_PFPM_SAH_S 0
#define PRTPM_SAH_PFPM_SAH_M MAKEMASK(0xFFFF, 0)
@@ -9059,7 +9205,9 @@
#define PRTPM_SAH_MC_MAG_EN_M BIT(30)
#define PRTPM_SAH_AV_S 31
#define PRTPM_SAH_AV_M BIT(31)
-#define PRTPM_SAL(_i) (0x001E3B20 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
+#define PRTPM_SAL_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTPM_SAL(_i) : E800_PRTPM_SAL(_i))
+#define E800_PRTPM_SAL(_i) (0x001E3B20 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
+#define E830_PRTPM_SAL(_i) (0x001E2300 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
#define PRTPM_SAL_MAX_INDEX 3
#define PRTPM_SAL_PFPM_SAL_S 0
#define PRTPM_SAL_PFPM_SAL_M MAKEMASK(0xFFFFFFFF, 0)
@@ -9072,7 +9220,9 @@
#define GLPE_CQM_FUNC_INVALIDATE_VM_VF_TYPE_M MAKEMASK(0x3, 13)
#define GLPE_CQM_FUNC_INVALIDATE_ENABLE_S 31
#define GLPE_CQM_FUNC_INVALIDATE_ENABLE_M BIT(31)
-#define VFPE_MRTEIDXMASK 0x00009000 /* Reset Source: PFR */
+#define VFPE_MRTEIDXMASK_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VFPE_MRTEIDXMASK : E800_VFPE_MRTEIDXMASK)
+#define E800_VFPE_MRTEIDXMASK 0x00009000 /* Reset Source: PFR */
+#define E830_VFPE_MRTEIDXMASK(_VF) (0x00509800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
#define VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_S 0
#define VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0)
#define GLTSYN_HH_DLAY 0x0008881C /* Reset Source: CORER */
@@ -9175,8 +9325,12 @@
#define VFINT_ITR0_MAX_INDEX 2
#define VFINT_ITR0_INTERVAL_S 0
#define VFINT_ITR0_INTERVAL_M MAKEMASK(0xFFF, 0)
-#define VFINT_ITRN(_i, _j) (0x00002800 + ((_i) * 4 + (_j) * 12)) /* _i=0...2, _j=0...63 */ /* Reset Source: CORER */
-#define VFINT_ITRN_MAX_INDEX 2
+#define VFINT_ITRN_BY_MAC(hw, _i, _j) ((hw)->mac_type == ICE_MAC_E830 ? E830_VFINT_ITRN(_i, _j) : E800_VFINT_ITRN(_i, _j))
+#define E800_VFINT_ITRN(_i, _j) (0x00002800 + ((_i) * 4 + (_j) * 12)) /* _i=0...2, _j=0...63 */ /* Reset Source: CORER */
+#define E830_VFINT_ITRN(_i, _j) (0x00002800 + ((_i) * 4 + (_j) * 64)) /* _i=0...15, _j=0...2 */ /* Reset Source: CORER */
+#define VFINT_ITRN_MAX_INDEX_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VFINT_ITRN_MAX_INDEX : E800_VFINT_ITRN_MAX_INDEX)
+#define E800_VFINT_ITRN_MAX_INDEX 2
+#define E830_VFINT_ITRN_MAX_INDEX 15
#define VFINT_ITRN_INTERVAL_S 0
#define VFINT_ITRN_INTERVAL_M MAKEMASK(0xFFF, 0)
#define QRX_TAIL1(_QRX) (0x00002000 + ((_QRX) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
@@ -9471,13 +9625,13 @@
#define VFPE_IPCONFIG01_USEENTIREIDRANGE_M BIT(16)
#define VFPE_IPCONFIG01_UDP_SRC_PORT_MASK_EN_S 17
#define VFPE_IPCONFIG01_UDP_SRC_PORT_MASK_EN_M BIT(17)
-#define VFPE_MRTEIDXMASK1(_VF) (0x00509800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
-#define VFPE_MRTEIDXMASK1_MAX_INDEX 255
-#define VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_S 0
-#define VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0)
-#define VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset Source: VFR */
-#define VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_S 0
-#define VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0)
+#define E800_VFPE_MRTEIDXMASK1(_VF) (0x00509800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define E800_VFPE_MRTEIDXMASK1_MAX_INDEX 255
+#define E800_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_S 0
+#define E800_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0)
+#define E800_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset Source: VFR */
+#define E800_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_S 0
+#define E800_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0)
#define VFPE_TCPNOWTIMER1 0x0000A800 /* Reset Source: VFR */
#define VFPE_TCPNOWTIMER1_TCP_NOW_S 0
#define VFPE_TCPNOWTIMER1_TCP_NOW_M MAKEMASK(0xFFFFFFFF, 0)
@@ -9486,5 +9640,1646 @@
#define VFPE_WQEALLOC1_PEQPID_M MAKEMASK(0x3FFFF, 0)
#define VFPE_WQEALLOC1_WQE_DESC_INDEX_S 20
#define VFPE_WQEALLOC1_WQE_DESC_INDEX_M MAKEMASK(0xFFF, 20)
+#define E830_GL_QRX_CONTEXT_CTL 0x00296640 /* Reset Source: CORER */
+#define E830_GL_QRX_CONTEXT_CTL_QUEUE_ID_S 0
+#define E830_GL_QRX_CONTEXT_CTL_QUEUE_ID_M MAKEMASK(0xFFF, 0)
+#define E830_GL_QRX_CONTEXT_CTL_CMD_S 16
+#define E830_GL_QRX_CONTEXT_CTL_CMD_M MAKEMASK(0x7, 16)
+#define E830_GL_QRX_CONTEXT_CTL_CMD_EXEC_S 19
+#define E830_GL_QRX_CONTEXT_CTL_CMD_EXEC_M BIT(19)
+#define E830_GL_QRX_CONTEXT_DATA(_i) (0x00296620 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_QRX_CONTEXT_DATA_MAX_INDEX 7
+#define E830_GL_QRX_CONTEXT_DATA_DATA_S 0
+#define E830_GL_QRX_CONTEXT_DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_QRX_CONTEXT_STAT 0x00296644 /* Reset Source: CORER */
+#define E830_GL_QRX_CONTEXT_STAT_CMD_IN_PROG_S 0
+#define E830_GL_QRX_CONTEXT_STAT_CMD_IN_PROG_M BIT(0)
+#define E830_GL_RCB_INTERNAL(_i) (0x00122600 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define E830_GL_RCB_INTERNAL_MAX_INDEX 63
+#define E830_GL_RCB_INTERNAL_INTERNAL_S 0
+#define E830_GL_RCB_INTERNAL_INTERNAL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_RLAN_INTERNAL(_i) (0x00296700 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define E830_GL_RLAN_INTERNAL_MAX_INDEX 63
+#define E830_GL_RLAN_INTERNAL_INTERNAL_S 0
+#define E830_GL_RLAN_INTERNAL_INTERNAL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS 0x002D30F0 /* Reset Source: CORER */
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_DBLQ_S 0
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_DBLQ_M MAKEMASK(0xFF, 0)
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_FDBL_S 8
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_FDBL_M MAKEMASK(0xFF, 8)
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_TXT_S 16
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_TXT_M MAKEMASK(0xFF, 16)
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS 0x002D30F4 /* Reset Source: CORER */
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_DBLQ_S 0
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_DBLQ_M MAKEMASK(0x3F, 0)
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_FDBL_S 6
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_FDBL_M MAKEMASK(0x3F, 6)
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_TXT_S 12
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_TXT_M MAKEMASK(0x3F, 12)
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_MAX_CREDITS 0x002D30F8 /* Reset Source: CORER */
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_MAX_CREDITS_DBLQ_FDBL_S 0
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_MAX_CREDITS_DBLQ_FDBL_M MAKEMASK(0xFF, 0)
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_MAX_CREDITS_TXT_S 8
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_MAX_CREDITS_TXT_M MAKEMASK(0xFF, 8)
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_WEIGHTS 0x002D30FC /* Reset Source: CORER */
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_WEIGHTS_DBLQ_FDBL_S 0
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_WEIGHTS_DBLQ_FDBL_M MAKEMASK(0x3F, 0)
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_WEIGHTS_TXT_S 6
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_WEIGHTS_TXT_M MAKEMASK(0x3F, 6)
+#define E830_GLQTX_TXTIME_DBELL_LSB(_DBQM) (0x002E0000 + ((_DBQM) * 8)) /* _i=0...16383 */ /* Reset Source: CORER */
+#define E830_GLQTX_TXTIME_DBELL_LSB_MAX_INDEX 16383
+#define E830_GLQTX_TXTIME_DBELL_LSB_QTX_TXTIME_DBELL_S 0
+#define E830_GLQTX_TXTIME_DBELL_LSB_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLQTX_TXTIME_DBELL_MSB(_DBQM) (0x002E0004 + ((_DBQM) * 8)) /* _i=0...16383 */ /* Reset Source: CORER */
+#define E830_GLQTX_TXTIME_DBELL_MSB_MAX_INDEX 16383
+#define E830_GLQTX_TXTIME_DBELL_MSB_QTX_TXTIME_DBELL_S 0
+#define E830_GLQTX_TXTIME_DBELL_MSB_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTCLAN_CQ_CNTX2_SRC_VSI_S 18
+#define E830_GLTCLAN_CQ_CNTX2_SRC_VSI_M MAKEMASK(0x3FF, 18)
+#define E830_GLTXTIME_DBL_COMP_WRR_MAX_CREDITS 0x002D320C /* Reset Source: CORER */
+#define E830_GLTXTIME_DBL_COMP_WRR_MAX_CREDITS_DBL_S 0
+#define E830_GLTXTIME_DBL_COMP_WRR_MAX_CREDITS_DBL_M MAKEMASK(0xFF, 0)
+#define E830_GLTXTIME_DBL_COMP_WRR_MAX_CREDITS_COMP_S 8
+#define E830_GLTXTIME_DBL_COMP_WRR_MAX_CREDITS_COMP_M MAKEMASK(0xFF, 8)
+#define E830_GLTXTIME_DBL_COMP_WRR_WEIGHTS 0x002D3210 /* Reset Source: CORER */
+#define E830_GLTXTIME_DBL_COMP_WRR_WEIGHTS_DBL_S 0
+#define E830_GLTXTIME_DBL_COMP_WRR_WEIGHTS_DBL_M MAKEMASK(0x3F, 0)
+#define E830_GLTXTIME_DBL_COMP_WRR_WEIGHTS_COMP_S 6
+#define E830_GLTXTIME_DBL_COMP_WRR_WEIGHTS_COMP_M MAKEMASK(0x3F, 6)
+#define E830_GLTXTIME_FETCH_PROFILE(_i, _j) (0x002D3500 + ((_i) * 4 + (_j) * 64)) /* _i=0...15, _j=0...15 */ /* Reset Source: CORER */
+#define E830_GLTXTIME_FETCH_PROFILE_MAX_INDEX 15
+#define E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_S 0
+#define E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_M MAKEMASK(0x1FF, 0)
+#define E830_GLTXTIME_FETCH_PROFILE_FETCH_FIFO_TRESH_S 9
+#define E830_GLTXTIME_FETCH_PROFILE_FETCH_FIFO_TRESH_M MAKEMASK(0x7F, 9)
+#define E830_GLTXTIME_OUTST_REQ_CNTL 0x002D3214 /* Reset Source: CORER */
+#define E830_GLTXTIME_OUTST_REQ_CNTL_THRESHOLD_S 0
+#define E830_GLTXTIME_OUTST_REQ_CNTL_THRESHOLD_M MAKEMASK(0x3FF, 0)
+#define E830_GLTXTIME_OUTST_REQ_CNTL_SNAPSHOT_S 10
+#define E830_GLTXTIME_OUTST_REQ_CNTL_SNAPSHOT_M MAKEMASK(0x3FF, 10)
+#define E830_GLTXTIME_QTX_CNTX_CTL 0x002D3204 /* Reset Source: CORER */
+#define E830_GLTXTIME_QTX_CNTX_CTL_QUEUE_ID_S 0
+#define E830_GLTXTIME_QTX_CNTX_CTL_QUEUE_ID_M MAKEMASK(0x7FF, 0)
+#define E830_GLTXTIME_QTX_CNTX_CTL_CMD_S 16
+#define E830_GLTXTIME_QTX_CNTX_CTL_CMD_M MAKEMASK(0x7, 16)
+#define E830_GLTXTIME_QTX_CNTX_CTL_CMD_EXEC_S 19
+#define E830_GLTXTIME_QTX_CNTX_CTL_CMD_EXEC_M BIT(19)
+#define E830_GLTXTIME_QTX_CNTX_DATA(_i) (0x002D3104 + ((_i) * 4)) /* _i=0...6 */ /* Reset Source: CORER */
+#define E830_GLTXTIME_QTX_CNTX_DATA_MAX_INDEX 6
+#define E830_GLTXTIME_QTX_CNTX_DATA_DATA_S 0
+#define E830_GLTXTIME_QTX_CNTX_DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTXTIME_QTX_CNTX_STAT 0x002D3208 /* Reset Source: CORER */
+#define E830_GLTXTIME_QTX_CNTX_STAT_CMD_IN_PROG_S 0
+#define E830_GLTXTIME_QTX_CNTX_STAT_CMD_IN_PROG_M BIT(0)
+#define E830_GLTXTIME_TS_CFG 0x002D3100 /* Reset Source: CORER */
+#define E830_GLTXTIME_TS_CFG_TXTIME_ENABLE_S 0
+#define E830_GLTXTIME_TS_CFG_TXTIME_ENABLE_M BIT(0)
+#define E830_GLTXTIME_TS_CFG_STORAGE_MODE_S 2
+#define E830_GLTXTIME_TS_CFG_STORAGE_MODE_M MAKEMASK(0x7, 2)
+#define E830_GLTXTIME_TS_CFG_PIPE_LATENCY_STATIC_S 5
+#define E830_GLTXTIME_TS_CFG_PIPE_LATENCY_STATIC_M MAKEMASK(0x1FFF, 5)
+#define E830_MBX_PF_DEC_ERR 0x00234100 /* Reset Source: CORER */
+#define E830_MBX_PF_DEC_ERR_DEC_ERR_S 0
+#define E830_MBX_PF_DEC_ERR_DEC_ERR_M BIT(0)
+#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000 /* Reset Source: CORER */
+#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH_TRESH_S 0
+#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH_TRESH_M MAKEMASK(0x3FF, 0)
+#define E830_MBX_VF_DEC_TRIG(_VF) (0x00233800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_MBX_VF_DEC_TRIG_MAX_INDEX 255
+#define E830_MBX_VF_DEC_TRIG_DEC_S 0
+#define E830_MBX_VF_DEC_TRIG_DEC_M MAKEMASK(0x3FF, 0)
+#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF) (0x00233000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT_MAX_INDEX 255
+#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT_MSGS_S 0
+#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT_MSGS_M MAKEMASK(0x3FF, 0)
+#define E830_GLRCB_AG_ARBITER_CONFIG 0x00122500 /* Reset Source: CORER */
+#define E830_GLRCB_AG_ARBITER_CONFIG_CREDIT_MAX_S 0
+#define E830_GLRCB_AG_ARBITER_CONFIG_CREDIT_MAX_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRCB_AG_DCB_ARBITER_CONFIG 0x00122518 /* Reset Source: CORER */
+#define E830_GLRCB_AG_DCB_ARBITER_CONFIG_CREDIT_MAX_S 0
+#define E830_GLRCB_AG_DCB_ARBITER_CONFIG_CREDIT_MAX_M MAKEMASK(0x7F, 0)
+#define E830_GLRCB_AG_DCB_ARBITER_CONFIG_STRICT_WRR_S 7
+#define E830_GLRCB_AG_DCB_ARBITER_CONFIG_STRICT_WRR_M BIT(7)
+#define E830_GLRCB_AG_DCB_NODE_CONFIG(_i) (0x00122510 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define E830_GLRCB_AG_DCB_NODE_CONFIG_MAX_INDEX 1
+#define E830_GLRCB_AG_DCB_NODE_CONFIG_BWSHARE_S 0
+#define E830_GLRCB_AG_DCB_NODE_CONFIG_BWSHARE_M MAKEMASK(0xF, 0)
+#define E830_GLRCB_AG_DCB_NODE_STATE(_i) (0x00122508 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define E830_GLRCB_AG_DCB_NODE_STATE_MAX_INDEX 1
+#define E830_GLRCB_AG_DCB_NODE_STATE_CREDITS_S 0
+#define E830_GLRCB_AG_DCB_NODE_STATE_CREDITS_M MAKEMASK(0xFF, 0)
+#define E830_GLRCB_AG_NODE_CONFIG(_i) (0x001224E0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GLRCB_AG_NODE_CONFIG_MAX_INDEX 7
+#define E830_GLRCB_AG_NODE_CONFIG_BWSHARE_S 0
+#define E830_GLRCB_AG_NODE_CONFIG_BWSHARE_M MAKEMASK(0x7F, 0)
+#define E830_GLRCB_AG_NODE_STATE(_i) (0x001224C0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GLRCB_AG_NODE_STATE_MAX_INDEX 7
+#define E830_GLRCB_AG_NODE_STATE_CREDITS_S 0
+#define E830_GLRCB_AG_NODE_STATE_CREDITS_M MAKEMASK(0xFFFFF, 0)
+#define E830_PRT_AG_PORT_FC_MAP 0x00122520 /* Reset Source: CORER */
+#define E830_PRT_AG_PORT_FC_MAP_AG_BITMAP_S 0
+#define E830_PRT_AG_PORT_FC_MAP_AG_BITMAP_M MAKEMASK(0xFF, 0)
+#define E830_GL_FW_LOGS_CTL 0x000827F8 /* Reset Source: POR */
+#define E830_GL_FW_LOGS_CTL_PAGE_SELECT_S 0
+#define E830_GL_FW_LOGS_CTL_PAGE_SELECT_M MAKEMASK(0x3FF, 0)
+#define E830_GL_FW_LOGS_STS 0x000827FC /* Reset Source: POR */
+#define E830_GL_FW_LOGS_STS_MAX_PAGE_S 0
+#define E830_GL_FW_LOGS_STS_MAX_PAGE_M MAKEMASK(0x3FF, 0)
+#define E830_GL_FW_LOGS_STS_FW_LOGS_ENA_S 31
+#define E830_GL_FW_LOGS_STS_FW_LOGS_ENA_M BIT(31)
+#define E830_GL_RTCTL 0x000827F0 /* Reset Source: POR */
+#define E830_GL_RTCTL_RTCTL_S 0
+#define E830_GL_RTCTL_RTCTL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_RTCTM 0x000827F4 /* Reset Source: POR */
+#define E830_GL_RTCTM_RTCTM_S 0
+#define E830_GL_RTCTM_RTCTM_M MAKEMASK(0xFFFF, 0)
+#define E830_GLGEN_RTRIG_EMPR_WO_GLOBR_S 3
+#define E830_GLGEN_RTRIG_EMPR_WO_GLOBR_M BIT(3)
+#define E830_GLPE_TSCD_NUM_PQS 0x0051E2FC /* Reset Source: CORER */
+#define E830_GLPE_TSCD_NUM_PQS_NUM_PQS_S 0
+#define E830_GLPE_TSCD_NUM_PQS_NUM_PQS_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTPB_100G_RPB_FC_THRESH2 0x0009972C /* Reset Source: CORER */
+#define E830_GLTPB_100G_RPB_FC_THRESH2_PORT4_FC_THRESH_S 0
+#define E830_GLTPB_100G_RPB_FC_THRESH2_PORT4_FC_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_GLTPB_100G_RPB_FC_THRESH2_PORT5_FC_THRESH_S 16
+#define E830_GLTPB_100G_RPB_FC_THRESH2_PORT5_FC_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_GLTPB_100G_RPB_FC_THRESH3 0x00099730 /* Reset Source: CORER */
+#define E830_GLTPB_100G_RPB_FC_THRESH3_PORT6_FC_THRESH_S 0
+#define E830_GLTPB_100G_RPB_FC_THRESH3_PORT6_FC_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_GLTPB_100G_RPB_FC_THRESH3_PORT7_FC_THRESH_S 16
+#define E830_GLTPB_100G_RPB_FC_THRESH3_PORT7_FC_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PORT_TIMER_SEL(_i) (0x00088BE0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_PORT_TIMER_SEL_MAX_INDEX 7
+#define E830_PORT_TIMER_SEL_TIMER_SEL_S 0
+#define E830_PORT_TIMER_SEL_TIMER_SEL_M BIT(0)
+#define E830_GL_RDPU_CNTRL_CHECKSUM_COMPLETE_INV_S 22
+#define E830_GL_RDPU_CNTRL_CHECKSUM_COMPLETE_INV_M BIT(22)
+#define E830_PRTMAC_SHORT_PAC_DROP_BYTE_CNT 0x001E2280 /* Reset Source: GLOBR */
+#define E830_PRTMAC_SHORT_PAC_DROP_BYTE_CNT_PRTMAC_SHORT_PAC_DROP_BYTE_CNT_S 0
+#define E830_PRTMAC_SHORT_PAC_DROP_BYTE_CNT_PRTMAC_SHORT_PAC_DROP_BYTE_CNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTTSYN_TXTIME_H(_i) (0x001E5800 + ((_i) * 32)) /* _i=0...63 */ /* Reset Source: GLOBR */
+#define E830_PRTTSYN_TXTIME_H_MAX_INDEX 63
+#define E830_PRTTSYN_TXTIME_H_TX_TIMESTAMP_HIGH_S 0
+#define E830_PRTTSYN_TXTIME_H_TX_TIMESTAMP_HIGH_M MAKEMASK(0xFF, 0)
+#define E830_PRTTSYN_TXTIME_L(_i) (0x001E5000 + ((_i) * 32)) /* _i=0...63 */ /* Reset Source: GLOBR */
+#define E830_PRTTSYN_TXTIME_L_MAX_INDEX 63
+#define E830_PRTTSYN_TXTIME_L_TX_VALID_S 0
+#define E830_PRTTSYN_TXTIME_L_TX_VALID_M BIT(0)
+#define E830_PRTTSYN_TXTIME_L_TX_TIMESTAMP_LOW_S 1
+#define E830_PRTTSYN_TXTIME_L_TX_TIMESTAMP_LOW_M MAKEMASK(0x7FFFFFFF, 1)
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN 0x000FD200 /* Reset Source: CORER */
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN_PF_TSYN_PKT_FROM_Q_NOT_ALLOWED_S 0
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN_PF_TSYN_PKT_FROM_Q_NOT_ALLOWED_M BIT(0)
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN_PF_TSYN_PKT_RANGE_VIOLATION_S 1
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN_PF_TSYN_PKT_RANGE_VIOLATION_M BIT(1)
+#define E830_GL_MDET_RX_FIFO 0x00296840 /* Reset Source: CORER */
+#define E830_GL_MDET_RX_FIFO_FUNC_NUM_S 0
+#define E830_GL_MDET_RX_FIFO_FUNC_NUM_M MAKEMASK(0x3FF, 0)
+#define E830_GL_MDET_RX_FIFO_PF_NUM_S 10
+#define E830_GL_MDET_RX_FIFO_PF_NUM_M MAKEMASK(0x7, 10)
+#define E830_GL_MDET_RX_FIFO_FUNC_TYPE_S 13
+#define E830_GL_MDET_RX_FIFO_FUNC_TYPE_M MAKEMASK(0x3, 13)
+#define E830_GL_MDET_RX_FIFO_MAL_TYPE_S 15
+#define E830_GL_MDET_RX_FIFO_MAL_TYPE_M MAKEMASK(0x1F, 15)
+#define E830_GL_MDET_RX_FIFO_FIFO_FULL_S 20
+#define E830_GL_MDET_RX_FIFO_FIFO_FULL_M BIT(20)
+#define E830_GL_MDET_RX_FIFO_VALID_S 21
+#define E830_GL_MDET_RX_FIFO_VALID_M BIT(21)
+#define E830_GL_MDET_RX_FIFO_EVENT_CNT_S 24
+#define E830_GL_MDET_RX_FIFO_EVENT_CNT_M MAKEMASK(0xFF, 24)
+#define E830_GL_MDET_RX_PF_CNT(_i) (0x00296800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_RX_PF_CNT_MAX_INDEX 7
+#define E830_GL_MDET_RX_PF_CNT_CNT_S 0
+#define E830_GL_MDET_RX_PF_CNT_CNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MDET_RX_VF(_i) (0x00296820 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_RX_VF_MAX_INDEX 7
+#define E830_GL_MDET_RX_VF_VF_MAL_EVENT_S 0
+#define E830_GL_MDET_RX_VF_VF_MAL_EVENT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MDET_TX_PQM_FIFO 0x002D4B00 /* Reset Source: CORER */
+#define E830_GL_MDET_TX_PQM_FIFO_FUNC_NUM_S 0
+#define E830_GL_MDET_TX_PQM_FIFO_FUNC_NUM_M MAKEMASK(0x3FF, 0)
+#define E830_GL_MDET_TX_PQM_FIFO_PF_NUM_S 10
+#define E830_GL_MDET_TX_PQM_FIFO_PF_NUM_M MAKEMASK(0x7, 10)
+#define E830_GL_MDET_TX_PQM_FIFO_FUNC_TYPE_S 13
+#define E830_GL_MDET_TX_PQM_FIFO_FUNC_TYPE_M MAKEMASK(0x3, 13)
+#define E830_GL_MDET_TX_PQM_FIFO_MAL_TYPE_S 15
+#define E830_GL_MDET_TX_PQM_FIFO_MAL_TYPE_M MAKEMASK(0x1F, 15)
+#define E830_GL_MDET_TX_PQM_FIFO_FIFO_FULL_S 20
+#define E830_GL_MDET_TX_PQM_FIFO_FIFO_FULL_M BIT(20)
+#define E830_GL_MDET_TX_PQM_FIFO_VALID_S 21
+#define E830_GL_MDET_TX_PQM_FIFO_VALID_M BIT(21)
+#define E830_GL_MDET_TX_PQM_FIFO_EVENT_CNT_S 24
+#define E830_GL_MDET_TX_PQM_FIFO_EVENT_CNT_M MAKEMASK(0xFF, 24)
+#define E830_GL_MDET_TX_PQM_PF_CNT(_i) (0x002D4AC0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_TX_PQM_PF_CNT_MAX_INDEX 7
+#define E830_GL_MDET_TX_PQM_PF_CNT_CNT_S 0
+#define E830_GL_MDET_TX_PQM_PF_CNT_CNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MDET_TX_PQM_VF(_i) (0x002D4AE0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_TX_PQM_VF_MAX_INDEX 7
+#define E830_GL_MDET_TX_PQM_VF_VF_MAL_EVENT_S 0
+#define E830_GL_MDET_TX_PQM_VF_VF_MAL_EVENT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MDET_TX_TCLAN_FIFO 0x000FCFD0 /* Reset Source: CORER */
+#define E830_GL_MDET_TX_TCLAN_FIFO_FUNC_NUM_S 0
+#define E830_GL_MDET_TX_TCLAN_FIFO_FUNC_NUM_M MAKEMASK(0x3FF, 0)
+#define E830_GL_MDET_TX_TCLAN_FIFO_PF_NUM_S 10
+#define E830_GL_MDET_TX_TCLAN_FIFO_PF_NUM_M MAKEMASK(0x7, 10)
+#define E830_GL_MDET_TX_TCLAN_FIFO_FUNC_TYPE_S 13
+#define E830_GL_MDET_TX_TCLAN_FIFO_FUNC_TYPE_M MAKEMASK(0x3, 13)
+#define E830_GL_MDET_TX_TCLAN_FIFO_MAL_TYPE_S 15
+#define E830_GL_MDET_TX_TCLAN_FIFO_MAL_TYPE_M MAKEMASK(0x1F, 15)
+#define E830_GL_MDET_TX_TCLAN_FIFO_FIFO_FULL_S 20
+#define E830_GL_MDET_TX_TCLAN_FIFO_FIFO_FULL_M BIT(20)
+#define E830_GL_MDET_TX_TCLAN_FIFO_VALID_S 21
+#define E830_GL_MDET_TX_TCLAN_FIFO_VALID_M BIT(21)
+#define E830_GL_MDET_TX_TCLAN_FIFO_EVENT_CNT_S 24
+#define E830_GL_MDET_TX_TCLAN_FIFO_EVENT_CNT_M MAKEMASK(0xFF, 24)
+#define E830_GL_MDET_TX_TCLAN_PF_CNT(_i) (0x000FCF90 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_TX_TCLAN_PF_CNT_MAX_INDEX 7
+#define E830_GL_MDET_TX_TCLAN_PF_CNT_CNT_S 0
+#define E830_GL_MDET_TX_TCLAN_PF_CNT_CNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MDET_TX_TCLAN_VF(_i) (0x000FCFB0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_TX_TCLAN_VF_MAX_INDEX 7
+#define E830_GL_MDET_TX_TCLAN_VF_VF_MAL_EVENT_S 0
+#define E830_GL_MDET_TX_TCLAN_VF_VF_MAL_EVENT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MDET_TX_TDPU_FIFO 0x00049D80 /* Reset Source: CORER */
+#define E830_GL_MDET_TX_TDPU_FIFO_FUNC_NUM_S 0
+#define E830_GL_MDET_TX_TDPU_FIFO_FUNC_NUM_M MAKEMASK(0x3FF, 0)
+#define E830_GL_MDET_TX_TDPU_FIFO_PF_NUM_S 10
+#define E830_GL_MDET_TX_TDPU_FIFO_PF_NUM_M MAKEMASK(0x7, 10)
+#define E830_GL_MDET_TX_TDPU_FIFO_FUNC_TYPE_S 13
+#define E830_GL_MDET_TX_TDPU_FIFO_FUNC_TYPE_M MAKEMASK(0x3, 13)
+#define E830_GL_MDET_TX_TDPU_FIFO_MAL_TYPE_S 15
+#define E830_GL_MDET_TX_TDPU_FIFO_MAL_TYPE_M MAKEMASK(0x1F, 15)
+#define E830_GL_MDET_TX_TDPU_FIFO_FIFO_FULL_S 20
+#define E830_GL_MDET_TX_TDPU_FIFO_FIFO_FULL_M BIT(20)
+#define E830_GL_MDET_TX_TDPU_FIFO_VALID_S 21
+#define E830_GL_MDET_TX_TDPU_FIFO_VALID_M BIT(21)
+#define E830_GL_MDET_TX_TDPU_FIFO_EVENT_CNT_S 24
+#define E830_GL_MDET_TX_TDPU_FIFO_EVENT_CNT_M MAKEMASK(0xFF, 24)
+#define E830_GL_MDET_TX_TDPU_PF_CNT(_i) (0x00049D40 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_TX_TDPU_PF_CNT_MAX_INDEX 7
+#define E830_GL_MDET_TX_TDPU_PF_CNT_CNT_S 0
+#define E830_GL_MDET_TX_TDPU_PF_CNT_CNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MDET_TX_TDPU_VF(_i) (0x00049D60 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_TX_TDPU_VF_MAX_INDEX 7
+#define E830_GL_MDET_TX_TDPU_VF_VF_MAL_EVENT_S 0
+#define E830_GL_MDET_TX_TDPU_VF_VF_MAL_EVENT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MNG_ECDSA_PUBKEY_HIGH(_i) (0x00083400 + ((_i) * 4)) /* _i=0...11 */ /* Reset Source: EMPR */
+#define E830_GL_MNG_ECDSA_PUBKEY_HIGH_MAX_INDEX 11
+#define E830_GL_MNG_ECDSA_PUBKEY_HIGH_GL_MNG_ECDSA_PUBKEY_S 0
+#define E830_GL_MNG_ECDSA_PUBKEY_HIGH_GL_MNG_ECDSA_PUBKEY_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MNG_ECDSA_PUBKEY_LOW(_i) (0x00083300 + ((_i) * 4)) /* _i=0...11 */ /* Reset Source: EMPR */
+#define E830_GL_MNG_ECDSA_PUBKEY_LOW_MAX_INDEX 11
+#define E830_GL_MNG_ECDSA_PUBKEY_LOW_GL_MNG_ECDSA_PUBKEY_S 0
+#define E830_GL_MNG_ECDSA_PUBKEY_LOW_GL_MNG_ECDSA_PUBKEY_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_PPRS_RX_SIZE_CTRL_0(_i) (0x00084900 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define E830_GL_PPRS_RX_SIZE_CTRL_0_MAX_INDEX 1
+#define E830_GL_PPRS_RX_SIZE_CTRL_0_MAX_HEADER_SIZE_S 16
+#define E830_GL_PPRS_RX_SIZE_CTRL_0_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16)
+#define E830_GL_PPRS_RX_SIZE_CTRL_1(_i) (0x00085900 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define E830_GL_PPRS_RX_SIZE_CTRL_1_MAX_INDEX 1
+#define E830_GL_PPRS_RX_SIZE_CTRL_1_MAX_HEADER_SIZE_S 16
+#define E830_GL_PPRS_RX_SIZE_CTRL_1_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16)
+#define E830_GL_PPRS_RX_SIZE_CTRL_2(_i) (0x00086900 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define E830_GL_PPRS_RX_SIZE_CTRL_2_MAX_INDEX 1
+#define E830_GL_PPRS_RX_SIZE_CTRL_2_MAX_HEADER_SIZE_S 16
+#define E830_GL_PPRS_RX_SIZE_CTRL_2_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16)
+#define E830_GL_PPRS_RX_SIZE_CTRL_3(_i) (0x00087900 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define E830_GL_PPRS_RX_SIZE_CTRL_3_MAX_INDEX 1
+#define E830_GL_PPRS_RX_SIZE_CTRL_3_MAX_HEADER_SIZE_S 16
+#define E830_GL_PPRS_RX_SIZE_CTRL_3_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16)
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP 0x00200740 /* Reset Source: CORER */
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_0_S 0
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_0_M MAKEMASK(0xFF, 0)
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_1_S 8
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_1_M MAKEMASK(0xFF, 8)
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_0_S 16
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_0_M MAKEMASK(0xFF, 16)
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_1_S 24
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_1_M MAKEMASK(0xFF, 24)
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP 0x00200744 /* Reset Source: CORER */
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_0_S 0
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_0_M MAKEMASK(0xFF, 0)
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_1_S 8
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_1_M MAKEMASK(0xFF, 8)
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_0_S 16
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_0_M MAKEMASK(0xFF, 16)
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_1_S 24
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_1_M MAKEMASK(0xFF, 24)
+#define E830_GL_RPRS_PROT_ID_MAP(_i) (0x00200800 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_GL_RPRS_PROT_ID_MAP_MAX_INDEX 255
+#define E830_GL_RPRS_PROT_ID_MAP_PROT_ID0_S 0
+#define E830_GL_RPRS_PROT_ID_MAP_PROT_ID0_M MAKEMASK(0xFF, 0)
+#define E830_GL_RPRS_PROT_ID_MAP_PROT_ID1_S 8
+#define E830_GL_RPRS_PROT_ID_MAP_PROT_ID1_M MAKEMASK(0xFF, 8)
+#define E830_GL_RPRS_PROT_ID_MAP_PROT_ID2_S 16
+#define E830_GL_RPRS_PROT_ID_MAP_PROT_ID2_M MAKEMASK(0xFF, 16)
+#define E830_GL_RPRS_PROT_ID_MAP_PROT_ID3_S 24
+#define E830_GL_RPRS_PROT_ID_MAP_PROT_ID3_M MAKEMASK(0xFF, 24)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL(_i) (0x00201000 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_MAX_INDEX 63
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_0_S 0
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_0_M MAKEMASK(0x3, 0)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_1_S 2
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_1_M MAKEMASK(0x3, 2)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_2_S 4
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_2_M MAKEMASK(0x3, 4)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_3_S 6
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_3_M MAKEMASK(0x3, 6)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_4_S 8
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_4_M MAKEMASK(0x3, 8)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_5_S 10
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_5_M MAKEMASK(0x3, 10)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_6_S 12
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_6_M MAKEMASK(0x3, 12)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_7_S 14
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_7_M MAKEMASK(0x3, 14)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_8_S 16
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_8_M MAKEMASK(0x3, 16)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_9_S 18
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_9_M MAKEMASK(0x3, 18)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_10_S 20
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_10_M MAKEMASK(0x3, 20)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_11_S 22
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_11_M MAKEMASK(0x3, 22)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_12_S 24
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_12_M MAKEMASK(0x3, 24)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_13_S 26
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_13_M MAKEMASK(0x3, 26)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_14_S 28
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_14_M MAKEMASK(0x3, 28)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_15_S 30
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_15_M MAKEMASK(0x3, 30)
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL 0x00200748 /* Reset Source: CORER */
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_0_EN_S 0
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_0_EN_M BIT(0)
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_1_EN_S 1
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_1_EN_M BIT(1)
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_0_S 2
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_0_M BIT(2)
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_1_S 3
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_1_M BIT(3)
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_0_S 4
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_0_M BIT(4)
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_1_S 5
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_1_M BIT(5)
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP 0x00203A04 /* Reset Source: CORER */
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_0_S 0
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_0_M MAKEMASK(0xFF, 0)
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_1_S 8
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_1_M MAKEMASK(0xFF, 8)
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_0_S 16
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_0_M MAKEMASK(0xFF, 16)
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_1_S 24
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_1_M MAKEMASK(0xFF, 24)
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP 0x00203A08 /* Reset Source: CORER */
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_0_S 0
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_0_M MAKEMASK(0xFF, 0)
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_1_S 8
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_1_M MAKEMASK(0xFF, 8)
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_0_S 16
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_0_M MAKEMASK(0xFF, 16)
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_1_S 24
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_1_M MAKEMASK(0xFF, 24)
+#define E830_GL_TPRS_PROT_ID_MAP(_i) (0x00202200 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_GL_TPRS_PROT_ID_MAP_MAX_INDEX 255
+#define E830_GL_TPRS_PROT_ID_MAP_PROT_ID0_S 0
+#define E830_GL_TPRS_PROT_ID_MAP_PROT_ID0_M MAKEMASK(0xFF, 0)
+#define E830_GL_TPRS_PROT_ID_MAP_PROT_ID1_S 8
+#define E830_GL_TPRS_PROT_ID_MAP_PROT_ID1_M MAKEMASK(0xFF, 8)
+#define E830_GL_TPRS_PROT_ID_MAP_PROT_ID2_S 16
+#define E830_GL_TPRS_PROT_ID_MAP_PROT_ID2_M MAKEMASK(0xFF, 16)
+#define E830_GL_TPRS_PROT_ID_MAP_PROT_ID3_S 24
+#define E830_GL_TPRS_PROT_ID_MAP_PROT_ID3_M MAKEMASK(0xFF, 24)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL(_i) (0x00202A00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_MAX_INDEX 63
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_0_S 0
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_0_M MAKEMASK(0x3, 0)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_1_S 2
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_1_M MAKEMASK(0x3, 2)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_2_S 4
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_2_M MAKEMASK(0x3, 4)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_3_S 6
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_3_M MAKEMASK(0x3, 6)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_4_S 8
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_4_M MAKEMASK(0x3, 8)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_5_S 10
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_5_M MAKEMASK(0x3, 10)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_6_S 12
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_6_M MAKEMASK(0x3, 12)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_7_S 14
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_7_M MAKEMASK(0x3, 14)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_8_S 16
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_8_M MAKEMASK(0x3, 16)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_9_S 18
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_9_M MAKEMASK(0x3, 18)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_10_S 20
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_10_M MAKEMASK(0x3, 20)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_11_S 22
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_11_M MAKEMASK(0x3, 22)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_12_S 24
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_12_M MAKEMASK(0x3, 24)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_13_S 26
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_13_M MAKEMASK(0x3, 26)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_14_S 28
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_14_M MAKEMASK(0x3, 28)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_15_S 30
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_15_M MAKEMASK(0x3, 30)
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL 0x00203A00 /* Reset Source: CORER */
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_0_EN_S 0
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_0_EN_M BIT(0)
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_1_EN_S 1
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_1_EN_M BIT(1)
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_0_S 2
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_0_M BIT(2)
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_1_S 3
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_1_M BIT(3)
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_0_S 4
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_0_M BIT(4)
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_1_S 5
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_1_M BIT(5)
+#define E830_PRT_TDPU_TX_SIZE_CTRL 0x00049D20 /* Reset Source: CORER */
+#define E830_PRT_TDPU_TX_SIZE_CTRL_MAX_HEADER_SIZE_S 16
+#define E830_PRT_TDPU_TX_SIZE_CTRL_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16)
+#define E830_PRT_TPB_RX_LB_SIZE_CTRL 0x00099740 /* Reset Source: CORER */
+#define E830_PRT_TPB_RX_LB_SIZE_CTRL_MAX_HEADER_SIZE_S 16
+#define E830_PRT_TPB_RX_LB_SIZE_CTRL_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16)
+#define E830_GLQTX_TXTIME_DBELL_LSB_PAGE(_DBQM) (0x04000008 + ((_DBQM) * 4096)) /* _i=0...16383 */ /* Reset Source: CORER */
+#define E830_GLQTX_TXTIME_DBELL_LSB_PAGE_MAX_INDEX 16383
+#define E830_GLQTX_TXTIME_DBELL_LSB_PAGE_QTX_TXTIME_DBELL_S 0
+#define E830_GLQTX_TXTIME_DBELL_LSB_PAGE_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLQTX_TXTIME_DBELL_MSB_PAGE(_DBQM) (0x0400000C + ((_DBQM) * 4096)) /* _i=0...16383 */ /* Reset Source: CORER */
+#define E830_GLQTX_TXTIME_DBELL_MSB_PAGE_MAX_INDEX 16383
+#define E830_GLQTX_TXTIME_DBELL_MSB_PAGE_QTX_TXTIME_DBELL_S 0
+#define E830_GLQTX_TXTIME_DBELL_MSB_PAGE_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PF0INT_OICR_PSM_PAGE_PTM_COMP_S 8
+#define E830_PF0INT_OICR_PSM_PAGE_PTM_COMP_M BIT(8)
+#define E830_PF0INT_OICR_PSM_PAGE_PQM_DBL_TO_S 9
+#define E830_PF0INT_OICR_PSM_PAGE_PQM_DBL_TO_M BIT(9)
+#define E830_PF0INT_OICR_PSM_PAGE_RSV5_S 10
+#define E830_PF0INT_OICR_PSM_PAGE_RSV5_M BIT(10)
+#define E830_GL_HIBA(_i) (0x00081000 + ((_i) * 4)) /* _i=0...1023 */ /* Reset Source: EMPR */
+#define E830_GL_HIBA_MAX_INDEX 1023
+#define E830_GL_HIBA_GL_HIBA_S 0
+#define E830_GL_HIBA_GL_HIBA_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_HICR 0x00082040 /* Reset Source: EMPR */
+#define E830_GL_HICR_C_S 1
+#define E830_GL_HICR_C_M BIT(1)
+#define E830_GL_HICR_SV_S 2
+#define E830_GL_HICR_SV_M BIT(2)
+#define E830_GL_HICR_EV_S 3
+#define E830_GL_HICR_EV_M BIT(3)
+#define E830_GL_HICR_EN 0x00082044 /* Reset Source: EMPR */
+#define E830_GL_HICR_EN_EN_S 0
+#define E830_GL_HICR_EN_EN_M BIT(0)
+#define E830_GL_HIDA(_i) (0x00082000 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: EMPR */
+#define E830_GL_HIDA_MAX_INDEX 15
+#define E830_GL_HIDA_GL_HIDB_S 0
+#define E830_GL_HIDA_GL_HIDB_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLFLXP_RXDID_FLX_WRD_0_SPARE_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_0_SPARE_M MAKEMASK(0xF, 18)
+#define E830_GLFLXP_RXDID_FLX_WRD_1_SPARE_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_1_SPARE_M MAKEMASK(0xF, 18)
+#define E830_GLFLXP_RXDID_FLX_WRD_2_SPARE_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_2_SPARE_M MAKEMASK(0xF, 18)
+#define E830_GLFLXP_RXDID_FLX_WRD_3_SPARE_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_3_SPARE_M MAKEMASK(0xF, 18)
+#define E830_GLFLXP_RXDID_FLX_WRD_4_SPARE_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_4_SPARE_M MAKEMASK(0xF, 18)
+#define E830_GLFLXP_RXDID_FLX_WRD_5_SPARE_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_5_SPARE_M MAKEMASK(0xF, 18)
+#define E830_GLFLXP_RXDID_FLX_WRD_6(_i) (0x0045CE00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define E830_GLFLXP_RXDID_FLX_WRD_6_MAX_INDEX 63
+#define E830_GLFLXP_RXDID_FLX_WRD_6_PROT_MDID_S 0
+#define E830_GLFLXP_RXDID_FLX_WRD_6_PROT_MDID_M MAKEMASK(0xFF, 0)
+#define E830_GLFLXP_RXDID_FLX_WRD_6_EXTRACTION_OFFSET_S 8
+#define E830_GLFLXP_RXDID_FLX_WRD_6_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8)
+#define E830_GLFLXP_RXDID_FLX_WRD_6_L2TAG_OVRD_EN_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_6_L2TAG_OVRD_EN_M BIT(18)
+#define E830_GLFLXP_RXDID_FLX_WRD_6_SPARE_S 19
+#define E830_GLFLXP_RXDID_FLX_WRD_6_SPARE_M MAKEMASK(0x7, 19)
+#define E830_GLFLXP_RXDID_FLX_WRD_6_RXDID_OPCODE_S 30
+#define E830_GLFLXP_RXDID_FLX_WRD_6_RXDID_OPCODE_M MAKEMASK(0x3, 30)
+#define E830_GLFLXP_RXDID_FLX_WRD_7(_i) (0x0045CF00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define E830_GLFLXP_RXDID_FLX_WRD_7_MAX_INDEX 63
+#define E830_GLFLXP_RXDID_FLX_WRD_7_PROT_MDID_S 0
+#define E830_GLFLXP_RXDID_FLX_WRD_7_PROT_MDID_M MAKEMASK(0xFF, 0)
+#define E830_GLFLXP_RXDID_FLX_WRD_7_EXTRACTION_OFFSET_S 8
+#define E830_GLFLXP_RXDID_FLX_WRD_7_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8)
+#define E830_GLFLXP_RXDID_FLX_WRD_7_L2TAG_OVRD_EN_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_7_L2TAG_OVRD_EN_M BIT(18)
+#define E830_GLFLXP_RXDID_FLX_WRD_7_SPARE_S 19
+#define E830_GLFLXP_RXDID_FLX_WRD_7_SPARE_M MAKEMASK(0x7, 19)
+#define E830_GLFLXP_RXDID_FLX_WRD_7_RXDID_OPCODE_S 30
+#define E830_GLFLXP_RXDID_FLX_WRD_7_RXDID_OPCODE_M MAKEMASK(0x3, 30)
+#define E830_GLFLXP_RXDID_FLX_WRD_8(_i) (0x0045D500 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define E830_GLFLXP_RXDID_FLX_WRD_8_MAX_INDEX 63
+#define E830_GLFLXP_RXDID_FLX_WRD_8_PROT_MDID_S 0
+#define E830_GLFLXP_RXDID_FLX_WRD_8_PROT_MDID_M MAKEMASK(0xFF, 0)
+#define E830_GLFLXP_RXDID_FLX_WRD_8_EXTRACTION_OFFSET_S 8
+#define E830_GLFLXP_RXDID_FLX_WRD_8_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8)
+#define E830_GLFLXP_RXDID_FLX_WRD_8_L2TAG_OVRD_EN_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_8_L2TAG_OVRD_EN_M BIT(18)
+#define E830_GLFLXP_RXDID_FLX_WRD_8_SPARE_S 19
+#define E830_GLFLXP_RXDID_FLX_WRD_8_SPARE_M MAKEMASK(0x7, 19)
+#define E830_GLFLXP_RXDID_FLX_WRD_8_RXDID_OPCODE_S 30
+#define E830_GLFLXP_RXDID_FLX_WRD_8_RXDID_OPCODE_M MAKEMASK(0x3, 30)
+#define E830_GL_FW_LOGS(_i) (0x00082800 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: POR */
+#define E830_GL_FW_LOGS_MAX_INDEX 255
+#define E830_GL_FW_LOGS_GL_FW_LOGS_S 0
+#define E830_GL_FW_LOGS_GL_FW_LOGS_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_FWSTS_FWABS_S 10
+#define E830_GL_FWSTS_FWABS_M MAKEMASK(0x3, 10)
+#define E830_GL_FWSTS_FW_FAILOVER_TRIG_S 12
+#define E830_GL_FWSTS_FW_FAILOVER_TRIG_M BIT(12)
+#define E830_GLGEN_RSTAT_EMPR_WO_GLOBR_CNT_S 19
+#define E830_GLGEN_RSTAT_EMPR_WO_GLOBR_CNT_M MAKEMASK(0x3, 19)
+#define E830_GLGEN_RSTAT_EMPR_TYPE_S 21
+#define E830_GLGEN_RSTAT_EMPR_TYPE_M BIT(21)
+#define E830_GLPCI_PLATFORM_INFO 0x0009DDC4 /* Reset Source: POR */
+#define E830_GLPCI_PLATFORM_INFO_PLATFORM_TYPE_S 0
+#define E830_GLPCI_PLATFORM_INFO_PLATFORM_TYPE_M MAKEMASK(0xFF, 0)
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN_PKT_FROM_Q_NOT_ALLOWED_S 21
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN_PKT_FROM_Q_NOT_ALLOWED_M BIT(21)
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN_PKT_RANGE_VIOLATION_S 22
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN_PKT_RANGE_VIOLATION_M BIT(22)
+#define E830_GL_MDCK_TDAT_TCLAN_DESC_TYPE_ACL_DTYPE_NOT_ALLOWED_S 23
+#define E830_GL_MDCK_TDAT_TCLAN_DESC_TYPE_ACL_DTYPE_NOT_ALLOWED_M BIT(23)
+#define E830_GL_TPB_LOCAL_TOPO 0x000996F4 /* Reset Source: CORER */
+#define E830_GL_TPB_LOCAL_TOPO_ALLOW_TOPO_OVERRIDE_S 0
+#define E830_GL_TPB_LOCAL_TOPO_ALLOW_TOPO_OVERRIDE_M BIT(0)
+#define E830_GL_TPB_LOCAL_TOPO_TOPO_VAL_S 1
+#define E830_GL_TPB_LOCAL_TOPO_TOPO_VAL_M MAKEMASK(0x3, 1)
+#define E830_GL_TPB_PM_RESET 0x000996F0 /* Reset Source: CORER */
+#define E830_GL_TPB_PM_RESET_MAC_PM_RESET_S 0
+#define E830_GL_TPB_PM_RESET_MAC_PM_RESET_M BIT(0)
+#define E830_GL_TPB_PM_RESET_RPB_PM_RESET_S 1
+#define E830_GL_TPB_PM_RESET_RPB_PM_RESET_M BIT(1)
+#define E830_GLTPB_100G_MAC_FC_THRESH1 0x00099724 /* Reset Source: CORER */
+#define E830_GLTPB_100G_MAC_FC_THRESH1_PORT2_FC_THRESH_S 0
+#define E830_GLTPB_100G_MAC_FC_THRESH1_PORT2_FC_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_GLTPB_100G_MAC_FC_THRESH1_PORT3_FC_THRESH_S 16
+#define E830_GLTPB_100G_MAC_FC_THRESH1_PORT3_FC_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_GLTPB_100G_RPB_FC_THRESH0 0x0009963C /* Reset Source: CORER */
+#define E830_GLTPB_100G_RPB_FC_THRESH0_PORT0_FC_THRESH_S 0
+#define E830_GLTPB_100G_RPB_FC_THRESH0_PORT0_FC_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_GLTPB_100G_RPB_FC_THRESH0_PORT1_FC_THRESH_S 16
+#define E830_GLTPB_100G_RPB_FC_THRESH0_PORT1_FC_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_GLTPB_100G_RPB_FC_THRESH1 0x00099728 /* Reset Source: CORER */
+#define E830_GLTPB_100G_RPB_FC_THRESH1_PORT2_FC_THRESH_S 0
+#define E830_GLTPB_100G_RPB_FC_THRESH1_PORT2_FC_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_GLTPB_100G_RPB_FC_THRESH1_PORT3_FC_THRESH_S 16
+#define E830_GLTPB_100G_RPB_FC_THRESH1_PORT3_FC_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_GL_UFUSE_SOC_MAX_PORT_SPEED_S 12
+#define E830_GL_UFUSE_SOC_MAX_PORT_SPEED_M MAKEMASK(0xFFFF, 12)
+#define E830_PF0INT_OICR_PSM_PTM_COMP_S 8
+#define E830_PF0INT_OICR_PSM_PTM_COMP_M BIT(8)
+#define E830_PF0INT_OICR_PSM_PQM_DBL_TO_S 9
+#define E830_PF0INT_OICR_PSM_PQM_DBL_TO_M BIT(9)
+#define E830_PF0INT_OICR_PSM_RSV5_S 10
+#define E830_PF0INT_OICR_PSM_RSV5_M BIT(10)
+#define E830_PFINT_OICR_PTM_COMP_S 8
+#define E830_PFINT_OICR_PTM_COMP_M BIT(8)
+#define E830_PFINT_OICR_PQM_DBL_TO_S 9
+#define E830_PFINT_OICR_PQM_DBL_TO_M BIT(9)
+#define E830_PFINT_OICR_RSV5_S 10
+#define E830_PFINT_OICR_RSV5_M BIT(10)
+#define E830_QRX_CTRL_IDE_S 27
+#define E830_QRX_CTRL_IDE_M BIT(27)
+#define E830_PRTMAC_200G_CL01_PAUSE_QUANTA 0x001E3854 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_S 0
+#define E830_PRTMAC_200G_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_CL01_PAUSE_QUANTA_CL1_PAUSE_QUANTA_S 16
+#define E830_PRTMAC_200G_CL01_PAUSE_QUANTA_CL1_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_CL01_QUANTA_THRESH 0x001E3864 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_S 0
+#define E830_PRTMAC_200G_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_CL01_QUANTA_THRESH_CL1_QUANTA_THRESH_S 16
+#define E830_PRTMAC_200G_CL01_QUANTA_THRESH_CL1_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_CL23_PAUSE_QUANTA 0x001E3858 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CL23_PAUSE_QUANTA_CL2_PAUSE_QUANTA_S 0
+#define E830_PRTMAC_200G_CL23_PAUSE_QUANTA_CL2_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_CL23_PAUSE_QUANTA_CL3_PAUSE_QUANTA_S 16
+#define E830_PRTMAC_200G_CL23_PAUSE_QUANTA_CL3_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_CL23_QUANTA_THRESH 0x001E3868 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CL23_QUANTA_THRESH_CL2_QUANTA_THRESH_S 0
+#define E830_PRTMAC_200G_CL23_QUANTA_THRESH_CL2_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_CL23_QUANTA_THRESH_CL3_QUANTA_THRESH_S 16
+#define E830_PRTMAC_200G_CL23_QUANTA_THRESH_CL3_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_CL45_PAUSE_QUANTA 0x001E385C /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CL45_PAUSE_QUANTA_CL4_PAUSE_QUANTA_S 0
+#define E830_PRTMAC_200G_CL45_PAUSE_QUANTA_CL4_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_CL45_PAUSE_QUANTA_CL5_PAUSE_QUANTA_S 16
+#define E830_PRTMAC_200G_CL45_PAUSE_QUANTA_CL5_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_CL45_QUANTA_THRESH 0x001E386C /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CL45_QUANTA_THRESH_CL4_QUANTA_THRESH_S 0
+#define E830_PRTMAC_200G_CL45_QUANTA_THRESH_CL4_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_CL45_QUANTA_THRESH_CL5_QUANTA_THRESH_S 16
+#define E830_PRTMAC_200G_CL45_QUANTA_THRESH_CL5_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_CL67_PAUSE_QUANTA 0x001E3860 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CL67_PAUSE_QUANTA_CL6_PAUSE_QUANTA_S 0
+#define E830_PRTMAC_200G_CL67_PAUSE_QUANTA_CL6_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_CL67_PAUSE_QUANTA_CL7_PAUSE_QUANTA_S 16
+#define E830_PRTMAC_200G_CL67_PAUSE_QUANTA_CL7_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_CL67_QUANTA_THRESH 0x001E3870 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CL67_QUANTA_THRESH_CL6_QUANTA_THRESH_S 0
+#define E830_PRTMAC_200G_CL67_QUANTA_THRESH_CL6_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_CL67_QUANTA_THRESH_CL7_QUANTA_THRESH_S 16
+#define E830_PRTMAC_200G_CL67_QUANTA_THRESH_CL7_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_COMMAND_CONFIG 0x001E3808 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_ENA_S 0
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_ENA_M BIT(0)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_RX_ENA_S 1
+#define E830_PRTMAC_200G_COMMAND_CONFIG_RX_ENA_M BIT(1)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PROMIS_EN_S 4
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PROMIS_EN_M BIT(4)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PAD_EN_S 5
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PAD_EN_M BIT(5)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_CRC_FWD_S 6
+#define E830_PRTMAC_200G_COMMAND_CONFIG_CRC_FWD_M BIT(6)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_FWD_S 7
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_FWD_M BIT(7)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_IGNORE_S 8
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_IGNORE_M BIT(8)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_ADDR_INS_S 9
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_ADDR_INS_M BIT(9)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_LOOPBACK_EN_S 10
+#define E830_PRTMAC_200G_COMMAND_CONFIG_LOOPBACK_EN_M BIT(10)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_PAD_EN_S 11
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_PAD_EN_M BIT(11)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_SW_RESET_S 12
+#define E830_PRTMAC_200G_COMMAND_CONFIG_SW_RESET_M BIT(12)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_CNTL_FRM_ENA_S 13
+#define E830_PRTMAC_200G_COMMAND_CONFIG_CNTL_FRM_ENA_M BIT(13)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_RX_ERR_DISC_S 14
+#define E830_PRTMAC_200G_COMMAND_CONFIG_RX_ERR_DISC_M BIT(14)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PHY_TXENA_S 15
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PHY_TXENA_M BIT(15)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_SEND_IDLE_S 16
+#define E830_PRTMAC_200G_COMMAND_CONFIG_SEND_IDLE_M BIT(16)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_NO_LGTH_CHECK_S 17
+#define E830_PRTMAC_200G_COMMAND_CONFIG_NO_LGTH_CHECK_M BIT(17)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PFC_MODE_S 19
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PFC_MODE_M BIT(19)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_PFC_COMP_S 20
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_PFC_COMP_M BIT(20)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_RX_SFD_ANY_S 21
+#define E830_PRTMAC_200G_COMMAND_CONFIG_RX_SFD_ANY_M BIT(21)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_FLUSH_S 22
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_FLUSH_M BIT(22)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_FLT_TX_STOP_S 25
+#define E830_PRTMAC_200G_COMMAND_CONFIG_FLT_TX_STOP_M BIT(25)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_FIFO_RESET_S 26
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_FIFO_RESET_M BIT(26)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_FLT_HDL_DIS_S 27
+#define E830_PRTMAC_200G_COMMAND_CONFIG_FLT_HDL_DIS_M BIT(27)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_INV_LOOP_S 31
+#define E830_PRTMAC_200G_COMMAND_CONFIG_INV_LOOP_M BIT(31)
+#define E830_PRTMAC_200G_CRC_INV_M 0x001E384C /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CRC_INV_MASK_CRC_INV_MASK_S 0
+#define E830_PRTMAC_200G_CRC_INV_MASK_CRC_INV_MASK_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_200G_FRM_LENGTH 0x001E3814 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_FRM_LENGTH_FRM_LENGTH_S 0
+#define E830_PRTMAC_200G_FRM_LENGTH_FRM_LENGTH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_FRM_LENGTH_TX_MTU_S 16
+#define E830_PRTMAC_200G_FRM_LENGTH_TX_MTU_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_HASHTABLE_LOAD 0x001E382C /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_HASHTABLE_LOAD_HASH_TABLE_ADDR_S 0
+#define E830_PRTMAC_200G_HASHTABLE_LOAD_HASH_TABLE_ADDR_M MAKEMASK(0x3F, 0)
+#define E830_PRTMAC_200G_HASHTABLE_LOAD_MCAST_EN_S 8
+#define E830_PRTMAC_200G_HASHTABLE_LOAD_MCAST_EN_M BIT(8)
+#define E830_PRTMAC_200G_MAC_ADDR_0 0x001E380C /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_MAC_ADDR_0_MAC_ADDR_0_S 0
+#define E830_PRTMAC_200G_MAC_ADDR_0_MAC_ADDR_0_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_200G_MAC_ADDR_1 0x001E3810 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_MAC_ADDR_1_MAC_ADDR_1_S 0
+#define E830_PRTMAC_200G_MAC_ADDR_1_MAC_ADDR_1_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS 0x001E3830 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_BUSY_S 0
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_BUSY_M BIT(0)
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_RD_ERR_S 1
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_RD_ERR_M BIT(1)
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_HOLD_TIME_S 2
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_HOLD_TIME_M MAKEMASK(0x7, 2)
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_DIS_PREAMBLE_S 5
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_DIS_PREAMBLE_M BIT(5)
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_CLS_45_EN_S 6
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_CLS_45_EN_M BIT(6)
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_CLK_DIVISOR_S 7
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_CLK_DIVISOR_M MAKEMASK(0x1FF, 7)
+#define E830_PRTMAC_200G_MDIO_COMMAND 0x001E3834 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_MDIO_COMMAND_MDIO_COMMAND_S 0
+#define E830_PRTMAC_200G_MDIO_COMMAND_MDIO_COMMAND_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_MDIO_COMMAND_RESERVED_2_S 16
+#define E830_PRTMAC_200G_MDIO_COMMAND_RESERVED_2_M MAKEMASK(0x7FFF, 16)
+#define E830_PRTMAC_200G_MDIO_COMMAND_MDIO_BUSY_S 31
+#define E830_PRTMAC_200G_MDIO_COMMAND_MDIO_BUSY_M BIT(31)
+#define E830_PRTMAC_200G_MDIO_DATA 0x001E3838 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_MDIO_DATA_MDIO_DATA_S 0
+#define E830_PRTMAC_200G_MDIO_DATA_MDIO_DATA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_MDIO_DATA_RESERVED_2_S 16
+#define E830_PRTMAC_200G_MDIO_DATA_RESERVED_2_M MAKEMASK(0x7FFF, 16)
+#define E830_PRTMAC_200G_MDIO_DATA_MDIO_BUSY_S 31
+#define E830_PRTMAC_200G_MDIO_DATA_MDIO_BUSY_M BIT(31)
+#define E830_PRTMAC_200G_MDIO_REGADDR 0x001E383C /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_MDIO_REGADDR_MDIO_REGADDR_S 0
+#define E830_PRTMAC_200G_MDIO_REGADDR_MDIO_REGADDR_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_200G_REVISION 0x001E3800 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_REVISION_CORE_REVISION_S 0
+#define E830_PRTMAC_200G_REVISION_CORE_REVISION_M MAKEMASK(0xFF, 0)
+#define E830_PRTMAC_200G_REVISION_CORE_VERSION_S 8
+#define E830_PRTMAC_200G_REVISION_CORE_VERSION_M MAKEMASK(0xFF, 8)
+#define E830_PRTMAC_200G_REVISION_CUSTOMER_VERSION_S 16
+#define E830_PRTMAC_200G_REVISION_CUSTOMER_VERSION_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_RX_PAUSE_STATUS 0x001E3874 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_RX_PAUSE_STATUS_RX_PAUSE_STATUS_S 0
+#define E830_PRTMAC_200G_RX_PAUSE_STATUS_RX_PAUSE_STATUS_M MAKEMASK(0xFF, 0)
+#define E830_PRTMAC_200G_SCRATCH 0x001E3804 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_SCRATCH_SCRATCH_S 0
+#define E830_PRTMAC_200G_SCRATCH_SCRATCH_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_200G_STATUS 0x001E3840 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_STATUS_RX_LOC_FAULT_S 0
+#define E830_PRTMAC_200G_STATUS_RX_LOC_FAULT_M BIT(0)
+#define E830_PRTMAC_200G_STATUS_RX_REM_FAULT_S 1
+#define E830_PRTMAC_200G_STATUS_RX_REM_FAULT_M BIT(1)
+#define E830_PRTMAC_200G_STATUS_PHY_LOS_S 2
+#define E830_PRTMAC_200G_STATUS_PHY_LOS_M BIT(2)
+#define E830_PRTMAC_200G_STATUS_TS_AVAIL_S 3
+#define E830_PRTMAC_200G_STATUS_TS_AVAIL_M BIT(3)
+#define E830_PRTMAC_200G_STATUS_RESERVED_5_S 4
+#define E830_PRTMAC_200G_STATUS_RESERVED_5_M BIT(4)
+#define E830_PRTMAC_200G_STATUS_TX_EMPTY_S 5
+#define E830_PRTMAC_200G_STATUS_TX_EMPTY_M BIT(5)
+#define E830_PRTMAC_200G_STATUS_RX_EMPTY_S 6
+#define E830_PRTMAC_200G_STATUS_RX_EMPTY_M BIT(6)
+#define E830_PRTMAC_200G_STATUS_RESERVED1_S 7
+#define E830_PRTMAC_200G_STATUS_RESERVED1_M BIT(7)
+#define E830_PRTMAC_200G_STATUS_TX_ISIDLE_S 8
+#define E830_PRTMAC_200G_STATUS_TX_ISIDLE_M BIT(8)
+#define E830_PRTMAC_200G_STATUS_RESERVED2_S 9
+#define E830_PRTMAC_200G_STATUS_RESERVED2_M MAKEMASK(0x7FFFFF, 9)
+#define E830_PRTMAC_200G_TS_TIMESTAMP 0x001E387C /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_TS_TIMESTAMP_TS_TIMESTAMP_S 0
+#define E830_PRTMAC_200G_TS_TIMESTAMP_TS_TIMESTAMP_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_200G_TX_FIFO_SECTIONS 0x001E3820 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_TX_FIFO_SECTIONS_TX_SECTION_AVAIL_THRESHOLD_S 0
+#define E830_PRTMAC_200G_TX_FIFO_SECTIONS_TX_SECTION_AVAIL_THRESHOLD_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_THRESHOLD_S 16
+#define E830_PRTMAC_200G_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_THRESHOLD_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_TX_IPG_LENGTH 0x001E3844 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_TX_IPG_LENGTH_AVG_IPG_LEN_S 0
+#define E830_PRTMAC_200G_TX_IPG_LENGTH_AVG_IPG_LEN_M MAKEMASK(0x7F, 0)
+#define E830_PRTMAC_200G_TX_IPG_LENGTH_IPG_COMP_12_0_S 19
+#define E830_PRTMAC_200G_TX_IPG_LENGTH_IPG_COMP_12_0_M MAKEMASK(0x1FFF, 19)
+#define E830_PRTMAC_200G_XIF_MODE 0x001E3880 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_XIF_MODE_RESERVED_1_S 0
+#define E830_PRTMAC_200G_XIF_MODE_RESERVED_1_M MAKEMASK(0x1F, 0)
+#define E830_PRTMAC_200G_XIF_MODE_ONE_STEP_ENA_S 5
+#define E830_PRTMAC_200G_XIF_MODE_ONE_STEP_ENA_M BIT(5)
+#define E830_PRTMAC_200G_XIF_MODE_PFC_PULSE_MODE_S 17
+#define E830_PRTMAC_200G_XIF_MODE_PFC_PULSE_MODE_M BIT(17)
+#define E830_PRTMAC_200G_XIF_MODE_PFC_LP_MODE_S 18
+#define E830_PRTMAC_200G_XIF_MODE_PFC_LP_MODE_M BIT(18)
+#define E830_PRTMAC_200G_XIF_MODE_PFC_LP_16PRI_S 19
+#define E830_PRTMAC_200G_XIF_MODE_PFC_LP_16PRI_M BIT(19)
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_0 0x001E3C00 /* Reset Source: GLOBR */
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_0_APPROVED_SW_ADDR_MAC_100G_0_S 0
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_0_APPROVED_SW_ADDR_MAC_100G_0_M MAKEMASK(0x3F, 0)
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_1 0x001E3C20 /* Reset Source: GLOBR */
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_1_APPROVED_SW_ADDR_MAC_100G_1_S 0
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_1_APPROVED_SW_ADDR_MAC_100G_1_M MAKEMASK(0x3F, 0)
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_2 0x001E3C40 /* Reset Source: GLOBR */
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_2_APPROVED_SW_ADDR_MAC_100G_2_S 0
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_2_APPROVED_SW_ADDR_MAC_100G_2_M MAKEMASK(0x3F, 0)
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_3 0x001E3C60 /* Reset Source: GLOBR */
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_3_APPROVED_SW_ADDR_MAC_100G_3_S 0
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_3_APPROVED_SW_ADDR_MAC_100G_3_M MAKEMASK(0x3F, 0)
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_0 0x001E3C80 /* Reset Source: GLOBR */
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_0_APPROVED_SW_ADDR_MAC_200G_0_S 0
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_0_APPROVED_SW_ADDR_MAC_200G_0_M MAKEMASK(0xFF, 0)
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_1 0x001E3CA0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_1_APPROVED_SW_ADDR_MAC_200G_1_S 0
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_1_APPROVED_SW_ADDR_MAC_200G_1_M MAKEMASK(0xFF, 0)
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_2 0x001E3CC0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_2_APPROVED_SW_ADDR_MAC_200G_2_S 0
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_2_APPROVED_SW_ADDR_MAC_200G_2_M MAKEMASK(0xFF, 0)
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_3 0x001E3CE0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_3_APPROVED_SW_ADDR_MAC_200G_3_S 0
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_3_APPROVED_SW_ADDR_MAC_200G_3_M MAKEMASK(0xFF, 0)
+#define E830_PRTMAC_CF_GEN_STATUS 0x001E33C0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CF_GEN_STATUS_CF_GEN_SENT_S 0
+#define E830_PRTMAC_CF_GEN_STATUS_CF_GEN_SENT_M BIT(0)
+#define E830_PRTMAC_CL01_PAUSE_QUANTA 0x001E32A0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_S 0
+#define E830_PRTMAC_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_CL01_PAUSE_QUANTA_CL1_PAUSE_QUANTA_S 16
+#define E830_PRTMAC_CL01_PAUSE_QUANTA_CL1_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_CL01_QUANTA_THRESH 0x001E3320 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_S 0
+#define E830_PRTMAC_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_CL01_QUANTA_THRESH_CL1_QUANTA_THRESH_S 16
+#define E830_PRTMAC_CL01_QUANTA_THRESH_CL1_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_CL23_PAUSE_QUANTA 0x001E32C0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CL23_PAUSE_QUANTA_CL2_PAUSE_QUANTA_S 0
+#define E830_PRTMAC_CL23_PAUSE_QUANTA_CL2_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_CL23_PAUSE_QUANTA_CL3_PAUSE_QUANTA_S 16
+#define E830_PRTMAC_CL23_PAUSE_QUANTA_CL3_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_CL23_QUANTA_THRESH 0x001E3340 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CL23_QUANTA_THRESH_CL2_QUANTA_THRESH_S 0
+#define E830_PRTMAC_CL23_QUANTA_THRESH_CL2_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_CL23_QUANTA_THRESH_CL3_QUANTA_THRESH_S 16
+#define E830_PRTMAC_CL23_QUANTA_THRESH_CL3_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_CL45_PAUSE_QUANTA 0x001E32E0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CL45_PAUSE_QUANTA_CL4_PAUSE_QUANTA_S 0
+#define E830_PRTMAC_CL45_PAUSE_QUANTA_CL4_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_CL45_PAUSE_QUANTA_CL5_PAUSE_QUANTA_S 16
+#define E830_PRTMAC_CL45_PAUSE_QUANTA_CL5_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_CL45_QUANTA_THRESH 0x001E3360 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CL45_QUANTA_THRESH_CL4_QUANTA_THRESH_S 0
+#define E830_PRTMAC_CL45_QUANTA_THRESH_CL4_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_CL45_QUANTA_THRESH_CL5_QUANTA_THRESH_S 16
+#define E830_PRTMAC_CL45_QUANTA_THRESH_CL5_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_CL67_PAUSE_QUANTA 0x001E3300 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CL67_PAUSE_QUANTA_CL6_PAUSE_QUANTA_S 0
+#define E830_PRTMAC_CL67_PAUSE_QUANTA_CL6_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_CL67_PAUSE_QUANTA_CL7_PAUSE_QUANTA_S 16
+#define E830_PRTMAC_CL67_PAUSE_QUANTA_CL7_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_CL67_QUANTA_THRESH 0x001E3380 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CL67_QUANTA_THRESH_CL6_QUANTA_THRESH_S 0
+#define E830_PRTMAC_CL67_QUANTA_THRESH_CL6_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_CL67_QUANTA_THRESH_CL7_QUANTA_THRESH_S 16
+#define E830_PRTMAC_CL67_QUANTA_THRESH_CL7_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_COMMAND_CONFIG 0x001E3040 /* Reset Source: GLOBR */
+#define E830_PRTMAC_COMMAND_CONFIG_TX_ENA_S 0
+#define E830_PRTMAC_COMMAND_CONFIG_TX_ENA_M BIT(0)
+#define E830_PRTMAC_COMMAND_CONFIG_RX_ENA_S 1
+#define E830_PRTMAC_COMMAND_CONFIG_RX_ENA_M BIT(1)
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED1_S 3
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED1_M BIT(3)
+#define E830_PRTMAC_COMMAND_CONFIG_PROMIS_EN_S 4
+#define E830_PRTMAC_COMMAND_CONFIG_PROMIS_EN_M BIT(4)
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED2_S 5
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED2_M BIT(5)
+#define E830_PRTMAC_COMMAND_CONFIG_CRC_FWD_S 6
+#define E830_PRTMAC_COMMAND_CONFIG_CRC_FWD_M BIT(6)
+#define E830_PRTMAC_COMMAND_CONFIG_PAUSE_FWD_S 7
+#define E830_PRTMAC_COMMAND_CONFIG_PAUSE_FWD_M BIT(7)
+#define E830_PRTMAC_COMMAND_CONFIG_PAUSE_IGNORE_S 8
+#define E830_PRTMAC_COMMAND_CONFIG_PAUSE_IGNORE_M BIT(8)
+#define E830_PRTMAC_COMMAND_CONFIG_TX_ADDR_INS_S 9
+#define E830_PRTMAC_COMMAND_CONFIG_TX_ADDR_INS_M BIT(9)
+#define E830_PRTMAC_COMMAND_CONFIG_LOOP_ENA_S 10
+#define E830_PRTMAC_COMMAND_CONFIG_LOOP_ENA_M BIT(10)
+#define E830_PRTMAC_COMMAND_CONFIG_TX_PAD_EN_S 11
+#define E830_PRTMAC_COMMAND_CONFIG_TX_PAD_EN_M BIT(11)
+#define E830_PRTMAC_COMMAND_CONFIG_SW_RESET_S 12
+#define E830_PRTMAC_COMMAND_CONFIG_SW_RESET_M BIT(12)
+#define E830_PRTMAC_COMMAND_CONFIG_CNTL_FRM_ENA_S 13
+#define E830_PRTMAC_COMMAND_CONFIG_CNTL_FRM_ENA_M BIT(13)
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED3_S 14
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED3_M BIT(14)
+#define E830_PRTMAC_COMMAND_CONFIG_PHY_TXENA_S 15
+#define E830_PRTMAC_COMMAND_CONFIG_PHY_TXENA_M BIT(15)
+#define E830_PRTMAC_COMMAND_CONFIG_FORCE_SEND__S 16
+#define E830_PRTMAC_COMMAND_CONFIG_FORCE_SEND__M BIT(16)
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED4_S 17
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED4_M BIT(17)
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED5_S 18
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED5_M BIT(18)
+#define E830_PRTMAC_COMMAND_CONFIG_PFC_MODE_S 19
+#define E830_PRTMAC_COMMAND_CONFIG_PFC_MODE_M BIT(19)
+#define E830_PRTMAC_COMMAND_CONFIG_PAUSE_PFC_COMP_S 20
+#define E830_PRTMAC_COMMAND_CONFIG_PAUSE_PFC_COMP_M BIT(20)
+#define E830_PRTMAC_COMMAND_CONFIG_RX_SFD_ANY_S 21
+#define E830_PRTMAC_COMMAND_CONFIG_RX_SFD_ANY_M BIT(21)
+#define E830_PRTMAC_COMMAND_CONFIG_TX_FLUSH_S 22
+#define E830_PRTMAC_COMMAND_CONFIG_TX_FLUSH_M BIT(22)
+#define E830_PRTMAC_COMMAND_CONFIG_TX_LOWP_ENA_S 23
+#define E830_PRTMAC_COMMAND_CONFIG_TX_LOWP_ENA_M BIT(23)
+#define E830_PRTMAC_COMMAND_CONFIG_REG_LOWP_RXEMPTY_S 24
+#define E830_PRTMAC_COMMAND_CONFIG_REG_LOWP_RXEMPTY_M BIT(24)
+#define E830_PRTMAC_COMMAND_CONFIG_FLT_TX_STOP_S 25
+#define E830_PRTMAC_COMMAND_CONFIG_FLT_TX_STOP_M BIT(25)
+#define E830_PRTMAC_COMMAND_CONFIG_TX_FIFO_RESET_S 26
+#define E830_PRTMAC_COMMAND_CONFIG_TX_FIFO_RESET_M BIT(26)
+#define E830_PRTMAC_COMMAND_CONFIG_FLT_HDL_DIS_S 27
+#define E830_PRTMAC_COMMAND_CONFIG_FLT_HDL_DIS_M BIT(27)
+#define E830_PRTMAC_COMMAND_CONFIG_TX_PAUSE_DIS_S 28
+#define E830_PRTMAC_COMMAND_CONFIG_TX_PAUSE_DIS_M BIT(28)
+#define E830_PRTMAC_COMMAND_CONFIG_RX_PAUSE_DIS_S 29
+#define E830_PRTMAC_COMMAND_CONFIG_RX_PAUSE_DIS_M BIT(29)
+#define E830_PRTMAC_COMMAND_CONFIG_SHORT_PREAM_S 30
+#define E830_PRTMAC_COMMAND_CONFIG_SHORT_PREAM_M BIT(30)
+#define E830_PRTMAC_COMMAND_CONFIG_NO_PREAM_S 31
+#define E830_PRTMAC_COMMAND_CONFIG_NO_PREAM_M BIT(31)
+#define E830_PRTMAC_CRC_INV_M 0x001E3260 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CRC_INV_MASK_CRC_INV_MASK_S 0
+#define E830_PRTMAC_CRC_INV_MASK_CRC_INV_MASK_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_CRC_MODE 0x001E3240 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CRC_MODE_DISABLE_RX_CRC_CHECKING_S 16
+#define E830_PRTMAC_CRC_MODE_DISABLE_RX_CRC_CHECKING_M BIT(16)
+#define E830_PRTMAC_CRC_MODE_ONE_BYTE_CRC_S 18
+#define E830_PRTMAC_CRC_MODE_ONE_BYTE_CRC_M BIT(18)
+#define E830_PRTMAC_CRC_MODE_TWO_BYTES_CRC_S 19
+#define E830_PRTMAC_CRC_MODE_TWO_BYTES_CRC_M BIT(19)
+#define E830_PRTMAC_CRC_MODE_ZERO_BYTE_CRC_S 20
+#define E830_PRTMAC_CRC_MODE_ZERO_BYTE_CRC_M BIT(20)
+#define E830_PRTMAC_CSR_TIMEOUT_CFG 0x001E3D00 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CSR_TIMEOUT_CFG_CSR_TIMEOUT_EN_S 0
+#define E830_PRTMAC_CSR_TIMEOUT_CFG_CSR_TIMEOUT_EN_M BIT(0)
+#define E830_PRTMAC_CTL_RX_CFG 0x001E2160 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CTL_RX_CFG_SUB_CRC_STAT_S 0
+#define E830_PRTMAC_CTL_RX_CFG_SUB_CRC_STAT_M BIT(0)
+#define E830_PRTMAC_CTL_RX_CFG_FRM_DROP_FOR_STAT_MODE_S 1
+#define E830_PRTMAC_CTL_RX_CFG_FRM_DROP_FOR_STAT_MODE_M MAKEMASK(0x3, 1)
+#define E830_PRTMAC_CTL_RX_CFG_MAC_PAC_AFULL_TRSH_S 3
+#define E830_PRTMAC_CTL_RX_CFG_MAC_PAC_AFULL_TRSH_M MAKEMASK(0x7, 3)
+#define E830_PRTMAC_CTL_RX_PAUSE_ENABLE 0x001E2180 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_S 0
+#define E830_PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0)
+#define E830_PRTMAC_CTL_TX_PAUSE_ENABLE 0x001E21A0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_S 0
+#define E830_PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0)
+#define E830_PRTMAC_FRM_LENGTH 0x001E30A0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_FRM_LENGTH_FRM_LENGTH_S 0
+#define E830_PRTMAC_FRM_LENGTH_FRM_LENGTH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_FRM_LENGTH_TX_MTU_S 16
+#define E830_PRTMAC_FRM_LENGTH_TX_MTU_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_MAC_ADDR_0 0x001E3060 /* Reset Source: GLOBR */
+#define E830_PRTMAC_MAC_ADDR_0_MAC_ADDR_0_S 0
+#define E830_PRTMAC_MAC_ADDR_0_MAC_ADDR_0_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_MAC_ADDR_1 0x001E3080 /* Reset Source: GLOBR */
+#define E830_PRTMAC_MAC_ADDR_1_MAC_ADDR_1_S 0
+#define E830_PRTMAC_MAC_ADDR_1_MAC_ADDR_1_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_MDIO_CFG_STATUS 0x001E3180 /* Reset Source: GLOBR */
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_BUSY_S 0
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_BUSY_M BIT(0)
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_RD_ERR_S 1
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_RD_ERR_M BIT(1)
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_HOLD_TIME_S 2
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_HOLD_TIME_M MAKEMASK(0x7, 2)
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_DIS_PREAMBLE_S 5
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_DIS_PREAMBLE_M BIT(5)
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_CLS_45_EN_S 6
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_CLS_45_EN_M BIT(6)
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_CLK_DIVISOR_S 7
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_CLK_DIVISOR_M MAKEMASK(0x1FF, 7)
+#define E830_PRTMAC_MDIO_COMMAND 0x001E31A0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_MDIO_COMMAND_MDIO_COMMAND_S 0
+#define E830_PRTMAC_MDIO_COMMAND_MDIO_COMMAND_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_MDIO_COMMAND_RESERVED_2_S 16
+#define E830_PRTMAC_MDIO_COMMAND_RESERVED_2_M MAKEMASK(0x7FFF, 16)
+#define E830_PRTMAC_MDIO_COMMAND_MDIO_BUSY_S 31
+#define E830_PRTMAC_MDIO_COMMAND_MDIO_BUSY_M BIT(31)
+#define E830_PRTMAC_MDIO_DATA 0x001E31C0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_MDIO_DATA_MDIO_DATA_S 0
+#define E830_PRTMAC_MDIO_DATA_MDIO_DATA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_MDIO_DATA_RESERVED_2_S 16
+#define E830_PRTMAC_MDIO_DATA_RESERVED_2_M MAKEMASK(0x7FFF, 16)
+#define E830_PRTMAC_MDIO_DATA_MDIO_BUSY_S 31
+#define E830_PRTMAC_MDIO_DATA_MDIO_BUSY_M BIT(31)
+#define E830_PRTMAC_MDIO_REGADDR 0x001E31E0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_MDIO_REGADDR_MDIO_REGADDR_S 0
+#define E830_PRTMAC_MDIO_REGADDR_MDIO_REGADDR_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_REVISION 0x001E3000 /* Reset Source: GLOBR */
+#define E830_PRTMAC_REVISION_CORE_REVISION_S 0
+#define E830_PRTMAC_REVISION_CORE_REVISION_M MAKEMASK(0xFF, 0)
+#define E830_PRTMAC_REVISION_CORE_VERSION_S 8
+#define E830_PRTMAC_REVISION_CORE_VERSION_M MAKEMASK(0xFF, 8)
+#define E830_PRTMAC_REVISION_CUSTOMER_VERSION_S 16
+#define E830_PRTMAC_REVISION_CUSTOMER_VERSION_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_RX_OFLOW_PKT_DRP_BSOP_CNT 0x001E24C0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_RX_OFLOW_PKT_DRP_BSOP_CNT_RX_OFLOW_PKT_DRP_BSOP_CNT_S 0
+#define E830_PRTMAC_RX_OFLOW_PKT_DRP_BSOP_CNT_RX_OFLOW_PKT_DRP_BSOP_CNT_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_RX_PAUSE_STATUS 0x001E33A0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_RX_PAUSE_STATUS_RX_PAUSE_STATUS_S 0
+#define E830_PRTMAC_RX_PAUSE_STATUS_RX_PAUSE_STATUS_M MAKEMASK(0xFF, 0)
+#define E830_PRTMAC_RX_PKT_DRP_CNT_RX_OFLOW_PKT_DRP_CNT_S 12
+#define E830_PRTMAC_RX_PKT_DRP_CNT_RX_OFLOW_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 12)
+#define E830_PRTMAC_SCRATCH 0x001E3020 /* Reset Source: GLOBR */
+#define E830_PRTMAC_SCRATCH_SCRATCH_S 0
+#define E830_PRTMAC_SCRATCH_SCRATCH_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_STATUS 0x001E3200 /* Reset Source: GLOBR */
+#define E830_PRTMAC_STATUS_RX_LOC_FAULT_S 0
+#define E830_PRTMAC_STATUS_RX_LOC_FAULT_M BIT(0)
+#define E830_PRTMAC_STATUS_RX_REM_FAULT_S 1
+#define E830_PRTMAC_STATUS_RX_REM_FAULT_M BIT(1)
+#define E830_PRTMAC_STATUS_PHY_LOS_S 2
+#define E830_PRTMAC_STATUS_PHY_LOS_M BIT(2)
+#define E830_PRTMAC_STATUS_TS_AVAIL_S 3
+#define E830_PRTMAC_STATUS_TS_AVAIL_M BIT(3)
+#define E830_PRTMAC_STATUS_RX_LOWP_S 4
+#define E830_PRTMAC_STATUS_RX_LOWP_M BIT(4)
+#define E830_PRTMAC_STATUS_TX_EMPTY_S 5
+#define E830_PRTMAC_STATUS_TX_EMPTY_M BIT(5)
+#define E830_PRTMAC_STATUS_RX_EMPTY_S 6
+#define E830_PRTMAC_STATUS_RX_EMPTY_M BIT(6)
+#define E830_PRTMAC_STATUS_RX_LINT_FAULT_S 7
+#define E830_PRTMAC_STATUS_RX_LINT_FAULT_M BIT(7)
+#define E830_PRTMAC_STATUS_TX_ISIDLE_S 8
+#define E830_PRTMAC_STATUS_TX_ISIDLE_M BIT(8)
+#define E830_PRTMAC_STATUS_RESERVED_10_S 9
+#define E830_PRTMAC_STATUS_RESERVED_10_M MAKEMASK(0x7FFFFF, 9)
+#define E830_PRTMAC_STATUS_SPARE 0x001E2740 /* Reset Source: GLOBR */
+#define E830_PRTMAC_STATUS_SPARE_DFD_STATUS_SPARE_S 0
+#define E830_PRTMAC_STATUS_SPARE_DFD_STATUS_SPARE_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_TS_RX_PCS_LATENCY 0x001E2220 /* Reset Source: GLOBR */
+#define E830_PRTMAC_TS_RX_PCS_LATENCY_TS_RX_PCS_LATENCY_S 0
+#define E830_PRTMAC_TS_RX_PCS_LATENCY_TS_RX_PCS_LATENCY_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_TS_TIMESTAMP 0x001E33E0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_TS_TIMESTAMP_TS_TIMESTAMP_S 0
+#define E830_PRTMAC_TS_TIMESTAMP_TS_TIMESTAMP_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_TS_TX_MEM_VALID_H 0x001E2020 /* Reset Source: GLOBR */
+#define E830_PRTMAC_TS_TX_MEM_VALID_H_TIMESTAMP_TX_VALID_ARR_H_S 0
+#define E830_PRTMAC_TS_TX_MEM_VALID_H_TIMESTAMP_TX_VALID_ARR_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_TS_TX_MEM_VALID_L 0x001E2000 /* Reset Source: GLOBR */
+#define E830_PRTMAC_TS_TX_MEM_VALID_L_TIMESTAMP_TX_VALID_ARR_L_S 0
+#define E830_PRTMAC_TS_TX_MEM_VALID_L_TIMESTAMP_TX_VALID_ARR_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_TS_TX_PCS_LATENCY 0x001E2200 /* Reset Source: GLOBR */
+#define E830_PRTMAC_TS_TX_PCS_LATENCY_TS_TX_PCS_LATENCY_S 0
+#define E830_PRTMAC_TS_TX_PCS_LATENCY_TS_TX_PCS_LATENCY_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_TX_FIFO_SECTIONS 0x001E3100 /* Reset Source: GLOBR */
+#define E830_PRTMAC_TX_FIFO_SECTIONS_TX_SECTION_AVAIL_THRESHOLD_S 0
+#define E830_PRTMAC_TX_FIFO_SECTIONS_TX_SECTION_AVAIL_THRESHOLD_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_THRESHOLD_S 16
+#define E830_PRTMAC_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_THRESHOLD_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_TX_IPG_LENGTH 0x001E3220 /* Reset Source: GLOBR */
+#define E830_PRTMAC_TX_IPG_LENGTH_AVG_IPG_LEN_S 0
+#define E830_PRTMAC_TX_IPG_LENGTH_AVG_IPG_LEN_M MAKEMASK(0x3F, 0)
+#define E830_PRTMAC_TX_IPG_LENGTH_IPG_COMP_23_16_S 8
+#define E830_PRTMAC_TX_IPG_LENGTH_IPG_COMP_23_16_M MAKEMASK(0xFF, 8)
+#define E830_PRTMAC_TX_IPG_LENGTH_IPG_COMP_15_0_S 16
+#define E830_PRTMAC_TX_IPG_LENGTH_IPG_COMP_15_0_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_USER_TX_PAUSE_CNT 0x001E2760 /* Reset Source: GLOBR */
+#define E830_PRTMAC_USER_TX_PAUSE_CNT_USER_TX_PAUSE_CNT_S 0
+#define E830_PRTMAC_USER_TX_PAUSE_CNT_USER_TX_PAUSE_CNT_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_XIF_MODE 0x001E3400 /* Reset Source: GLOBR */
+#define E830_PRTMAC_XIF_MODE_XGMII_ENA_S 0
+#define E830_PRTMAC_XIF_MODE_XGMII_ENA_M BIT(0)
+#define E830_PRTMAC_XIF_MODE_RESERVED_2_S 1
+#define E830_PRTMAC_XIF_MODE_RESERVED_2_M MAKEMASK(0x7, 1)
+#define E830_PRTMAC_XIF_MODE_PAUSETIMERX8_S 4
+#define E830_PRTMAC_XIF_MODE_PAUSETIMERX8_M BIT(4)
+#define E830_PRTMAC_XIF_MODE_ONE_STEP_ENA_S 5
+#define E830_PRTMAC_XIF_MODE_ONE_STEP_ENA_M BIT(5)
+#define E830_PRTMAC_XIF_MODE_RX_PAUSE_BYPASS_S 6
+#define E830_PRTMAC_XIF_MODE_RX_PAUSE_BYPASS_M BIT(6)
+#define E830_PRTMAC_XIF_MODE_RESERVED1_S 7
+#define E830_PRTMAC_XIF_MODE_RESERVED1_M BIT(7)
+#define E830_PRTMAC_XIF_MODE_TX_MAC_RS_ERR_S 8
+#define E830_PRTMAC_XIF_MODE_TX_MAC_RS_ERR_M BIT(8)
+#define E830_PRTMAC_XIF_MODE_TS_DELTA_MODE_S 9
+#define E830_PRTMAC_XIF_MODE_TS_DELTA_MODE_M BIT(9)
+#define E830_PRTMAC_XIF_MODE_TS_DELAY_MODE_S 10
+#define E830_PRTMAC_XIF_MODE_TS_DELAY_MODE_M BIT(10)
+#define E830_PRTMAC_XIF_MODE_TS_BINARY_MODE_S 11
+#define E830_PRTMAC_XIF_MODE_TS_BINARY_MODE_M BIT(11)
+#define E830_PRTMAC_XIF_MODE_TS_UPD64_MODE_S 12
+#define E830_PRTMAC_XIF_MODE_TS_UPD64_MODE_M BIT(12)
+#define E830_PRTMAC_XIF_MODE_RESERVED2_S 13
+#define E830_PRTMAC_XIF_MODE_RESERVED2_M MAKEMASK(0x7, 13)
+#define E830_PRTMAC_XIF_MODE_RX_CNT_MODE_S 16
+#define E830_PRTMAC_XIF_MODE_RX_CNT_MODE_M BIT(16)
+#define E830_PRTMAC_XIF_MODE_PFC_PULSE_MODE_S 17
+#define E830_PRTMAC_XIF_MODE_PFC_PULSE_MODE_M BIT(17)
+#define E830_PRTMAC_XIF_MODE_PFC_LP_MODE_S 18
+#define E830_PRTMAC_XIF_MODE_PFC_LP_MODE_M BIT(18)
+#define E830_PRTMAC_XIF_MODE_PFC_LP_16PRI_S 19
+#define E830_PRTMAC_XIF_MODE_PFC_LP_16PRI_M BIT(19)
+#define E830_PRTMAC_XIF_MODE_TS_SFD_ENA_S 20
+#define E830_PRTMAC_XIF_MODE_TS_SFD_ENA_M BIT(20)
+#define E830_PRTMAC_XIF_MODE_RESERVED3_S 21
+#define E830_PRTMAC_XIF_MODE_RESERVED3_M MAKEMASK(0x7FF, 21)
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF 0x001E2700 /* Reset Source: GLOBR */
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF0_S 0
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF0_M MAKEMASK(0xF, 0)
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF1_S 4
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF1_M MAKEMASK(0xF, 4)
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF2_S 8
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF2_M MAKEMASK(0xF, 8)
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF3_S 12
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF3_M MAKEMASK(0xF, 12)
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF4_S 16
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF4_M MAKEMASK(0xF, 16)
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF5_S 20
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF5_M MAKEMASK(0xF, 20)
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF6_S 24
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF6_M MAKEMASK(0xF, 24)
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF7_S 28
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF7_M MAKEMASK(0xF, 28)
+#define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_SW_ABOVE_HW_TAIL_S 28
+#define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_SW_ABOVE_HW_TAIL_M BIT(28)
+#define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_SAME_TAIL_S 29
+#define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_SAME_TAIL_M BIT(29)
+#define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_TAIL_GE_QLEN_S 30
+#define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_TAIL_GE_QLEN_M BIT(30)
+#define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_UR_S 31
+#define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_UR_M BIT(31)
+#define E830_GL_MDET_HIF_UR_FIFO 0x00096844 /* Reset Source: CORER */
+#define E830_GL_MDET_HIF_UR_FIFO_FUNC_NUM_S 0
+#define E830_GL_MDET_HIF_UR_FIFO_FUNC_NUM_M MAKEMASK(0x3FF, 0)
+#define E830_GL_MDET_HIF_UR_FIFO_PF_NUM_S 10
+#define E830_GL_MDET_HIF_UR_FIFO_PF_NUM_M MAKEMASK(0x7, 10)
+#define E830_GL_MDET_HIF_UR_FIFO_FUNC_TYPE_S 13
+#define E830_GL_MDET_HIF_UR_FIFO_FUNC_TYPE_M MAKEMASK(0x3, 13)
+#define E830_GL_MDET_HIF_UR_FIFO_MAL_TYPE_S 15
+#define E830_GL_MDET_HIF_UR_FIFO_MAL_TYPE_M MAKEMASK(0x1F, 15)
+#define E830_GL_MDET_HIF_UR_FIFO_FIFO_FULL_S 20
+#define E830_GL_MDET_HIF_UR_FIFO_FIFO_FULL_M BIT(20)
+#define E830_GL_MDET_HIF_UR_FIFO_VALID_S 21
+#define E830_GL_MDET_HIF_UR_FIFO_VALID_M BIT(21)
+#define E830_GL_MDET_HIF_UR_FIFO_EVENT_CNT_S 24
+#define E830_GL_MDET_HIF_UR_FIFO_EVENT_CNT_M MAKEMASK(0xFF, 24)
+#define E830_GL_MDET_HIF_UR_PF_CNT(_i) (0x00096804 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_HIF_UR_PF_CNT_MAX_INDEX 7
+#define E830_GL_MDET_HIF_UR_PF_CNT_CNT_S 0
+#define E830_GL_MDET_HIF_UR_PF_CNT_CNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MDET_HIF_UR_VF(_i) (0x00096824 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_HIF_UR_VF_MAX_INDEX 7
+#define E830_GL_MDET_HIF_UR_VF_VF_MAL_EVENT_S 0
+#define E830_GL_MDET_HIF_UR_VF_VF_MAL_EVENT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PF_MDET_HIF_UR 0x00096880 /* Reset Source: CORER */
+#define E830_PF_MDET_HIF_UR_VALID_S 0
+#define E830_PF_MDET_HIF_UR_VALID_M BIT(0)
+#define E830_VM_MDET_TX_TCLAN(_i) (0x000FC348 + ((_i) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define E830_VM_MDET_TX_TCLAN_MAX_INDEX 767
+#define E830_VM_MDET_TX_TCLAN_VALID_S 0
+#define E830_VM_MDET_TX_TCLAN_VALID_M BIT(0)
+#define E830_VP_MDET_HIF_UR(_VF) (0x00096C00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_VP_MDET_HIF_UR_MAX_INDEX 255
+#define E830_VP_MDET_HIF_UR_VALID_S 0
+#define E830_VP_MDET_HIF_UR_VALID_M BIT(0)
+#define E830_GLNVM_FLA_GLOBAL_LOCKED_S 7
+#define E830_GLNVM_FLA_GLOBAL_LOCKED_M BIT(7)
+#define E830_DMA_AGENT_AT0 0x000BE268 /* Reset Source: PCIR */
+#define E830_DMA_AGENT_AT0_RLAN_PASID_SELECTED_S 0
+#define E830_DMA_AGENT_AT0_RLAN_PASID_SELECTED_M MAKEMASK(0x3, 0)
+#define E830_DMA_AGENT_AT0_TCLAN_PASID_SELECTED_S 2
+#define E830_DMA_AGENT_AT0_TCLAN_PASID_SELECTED_M MAKEMASK(0x3, 2)
+#define E830_DMA_AGENT_AT0_PQM_DBL_PASID_SELECTED_S 4
+#define E830_DMA_AGENT_AT0_PQM_DBL_PASID_SELECTED_M MAKEMASK(0x3, 4)
+#define E830_DMA_AGENT_AT0_PQM_DESC_PASID_SELECTED_S 6
+#define E830_DMA_AGENT_AT0_PQM_DESC_PASID_SELECTED_M MAKEMASK(0x3, 6)
+#define E830_DMA_AGENT_AT0_PQM_TS_DESC_PASID_SELECTED_S 8
+#define E830_DMA_AGENT_AT0_PQM_TS_DESC_PASID_SELECTED_M MAKEMASK(0x3, 8)
+#define E830_DMA_AGENT_AT0_RDPU_PASID_SELECTED_S 10
+#define E830_DMA_AGENT_AT0_RDPU_PASID_SELECTED_M MAKEMASK(0x3, 10)
+#define E830_DMA_AGENT_AT0_TDPU_PASID_SELECTED_S 12
+#define E830_DMA_AGENT_AT0_TDPU_PASID_SELECTED_M MAKEMASK(0x3, 12)
+#define E830_DMA_AGENT_AT0_MBX_PASID_SELECTED_S 14
+#define E830_DMA_AGENT_AT0_MBX_PASID_SELECTED_M MAKEMASK(0x3, 14)
+#define E830_DMA_AGENT_AT0_MNG_PASID_SELECTED_S 16
+#define E830_DMA_AGENT_AT0_MNG_PASID_SELECTED_M MAKEMASK(0x3, 16)
+#define E830_DMA_AGENT_AT0_TEP_PMAT_PASID_SELECTED_S 18
+#define E830_DMA_AGENT_AT0_TEP_PMAT_PASID_SELECTED_M MAKEMASK(0x3, 18)
+#define E830_DMA_AGENT_AT0_RX_PE_PASID_SELECTED_S 20
+#define E830_DMA_AGENT_AT0_RX_PE_PASID_SELECTED_M MAKEMASK(0x3, 20)
+#define E830_DMA_AGENT_AT0_TX_PE_PASID_SELECTED_S 22
+#define E830_DMA_AGENT_AT0_TX_PE_PASID_SELECTED_M MAKEMASK(0x3, 22)
+#define E830_DMA_AGENT_AT0_PEPMAT_PASID_SELECTED_S 24
+#define E830_DMA_AGENT_AT0_PEPMAT_PASID_SELECTED_M MAKEMASK(0x3, 24)
+#define E830_DMA_AGENT_AT0_FPMAT_PASID_SELECTED_S 26
+#define E830_DMA_AGENT_AT0_FPMAT_PASID_SELECTED_M MAKEMASK(0x3, 26)
+#define E830_DMA_AGENT_AT1 0x000BE26C /* Reset Source: PCIR */
+#define E830_DMA_AGENT_AT1_RLAN_PASID_SELECTED_S 0
+#define E830_DMA_AGENT_AT1_RLAN_PASID_SELECTED_M MAKEMASK(0x3, 0)
+#define E830_DMA_AGENT_AT1_TCLAN_PASID_SELECTED_S 2
+#define E830_DMA_AGENT_AT1_TCLAN_PASID_SELECTED_M MAKEMASK(0x3, 2)
+#define E830_DMA_AGENT_AT1_PQM_DBL_PASID_SELECTED_S 4
+#define E830_DMA_AGENT_AT1_PQM_DBL_PASID_SELECTED_M MAKEMASK(0x3, 4)
+#define E830_DMA_AGENT_AT1_PQM_DESC_PASID_SELECTED_S 6
+#define E830_DMA_AGENT_AT1_PQM_DESC_PASID_SELECTED_M MAKEMASK(0x3, 6)
+#define E830_DMA_AGENT_AT1_PQM_TS_DESC_PASID_SELECTED_S 8
+#define E830_DMA_AGENT_AT1_PQM_TS_DESC_PASID_SELECTED_M MAKEMASK(0x3, 8)
+#define E830_DMA_AGENT_AT1_RDPU_PASID_SELECTED_S 10
+#define E830_DMA_AGENT_AT1_RDPU_PASID_SELECTED_M MAKEMASK(0x3, 10)
+#define E830_DMA_AGENT_AT1_TDPU_PASID_SELECTED_S 12
+#define E830_DMA_AGENT_AT1_TDPU_PASID_SELECTED_M MAKEMASK(0x3, 12)
+#define E830_DMA_AGENT_AT1_MBX_PASID_SELECTED_S 14
+#define E830_DMA_AGENT_AT1_MBX_PASID_SELECTED_M MAKEMASK(0x3, 14)
+#define E830_DMA_AGENT_AT1_MNG_PASID_SELECTED_S 16
+#define E830_DMA_AGENT_AT1_MNG_PASID_SELECTED_M MAKEMASK(0x3, 16)
+#define E830_DMA_AGENT_AT1_TEP_PMAT_PASID_SELECTED_S 18
+#define E830_DMA_AGENT_AT1_TEP_PMAT_PASID_SELECTED_M MAKEMASK(0x3, 18)
+#define E830_DMA_AGENT_AT1_RX_PE_PASID_SELECTED_S 20
+#define E830_DMA_AGENT_AT1_RX_PE_PASID_SELECTED_M MAKEMASK(0x3, 20)
+#define E830_DMA_AGENT_AT1_TX_PE_PASID_SELECTED_S 22
+#define E830_DMA_AGENT_AT1_TX_PE_PASID_SELECTED_M MAKEMASK(0x3, 22)
+#define E830_DMA_AGENT_AT1_PEPMAT_PASID_SELECTED_S 24
+#define E830_DMA_AGENT_AT1_PEPMAT_PASID_SELECTED_M MAKEMASK(0x3, 24)
+#define E830_DMA_AGENT_AT1_FPMAT_PASID_SELECTED_S 26
+#define E830_DMA_AGENT_AT1_FPMAT_PASID_SELECTED_M MAKEMASK(0x3, 26)
+#define E830_GLPCI_CAPSUP_DOE_EN_S 1
+#define E830_GLPCI_CAPSUP_DOE_EN_M BIT(1)
+#define E830_GLPCI_CAPSUP_GEN5_EXT_EN_S 12
+#define E830_GLPCI_CAPSUP_GEN5_EXT_EN_M BIT(12)
+#define E830_GLPCI_CAPSUP_PTM_EN_S 13
+#define E830_GLPCI_CAPSUP_PTM_EN_M BIT(13)
+#define E830_GLPCI_CAPSUP_SNPS_RAS_EN_S 14
+#define E830_GLPCI_CAPSUP_SNPS_RAS_EN_M BIT(14)
+#define E830_GLPCI_CAPSUP_SIOV_EN_S 15
+#define E830_GLPCI_CAPSUP_SIOV_EN_M BIT(15)
+#define E830_GLPCI_CAPSUP_PTM_VSEC_EN_S 22
+#define E830_GLPCI_CAPSUP_PTM_VSEC_EN_M BIT(22)
+#define E830_GLPCI_CAPSUP_SNPS_RAS_PROT_EN_S 23
+#define E830_GLPCI_CAPSUP_SNPS_RAS_PROT_EN_M BIT(23)
+#define E830_GLPCI_DOE_BUSY_STATUS 0x0009DF70 /* Reset Source: PCIR */
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_REQ_S 0
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_REQ_M BIT(0)
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_EMPR_S 1
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_EMPR_M BIT(1)
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_PCIER_S 2
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_PCIER_M BIT(2)
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_FLR_S 3
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_FLR_M BIT(3)
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_CFG_ABORT_S 4
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_CFG_ABORT_M BIT(4)
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_FW_S 5
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_FW_M BIT(5)
+#define E830_GLPCI_DOE_CFG 0x0009DF54 /* Reset Source: PCIR */
+#define E830_GLPCI_DOE_CFG_ENABLE_S 0
+#define E830_GLPCI_DOE_CFG_ENABLE_M BIT(0)
+#define E830_GLPCI_DOE_CFG_ITR_SUPPORT_S 1
+#define E830_GLPCI_DOE_CFG_ITR_SUPPORT_M BIT(1)
+#define E830_GLPCI_DOE_CFG_POISON_CFGWR_PIOSF_EP_BIT_S 2
+#define E830_GLPCI_DOE_CFG_POISON_CFGWR_PIOSF_EP_BIT_M BIT(2)
+#define E830_GLPCI_DOE_CFG_POISON_CFGWR_SBIOSF_AER_MSG_S 3
+#define E830_GLPCI_DOE_CFG_POISON_CFGWR_SBIOSF_AER_MSG_M BIT(3)
+#define E830_GLPCI_DOE_CFG_MSIX_VECTOR_S 8
+#define E830_GLPCI_DOE_CFG_MSIX_VECTOR_M MAKEMASK(0x7FF, 8)
+#define E830_GLPCI_DOE_CTRL 0x0009DF60 /* Reset Source: PCIR */
+#define E830_GLPCI_DOE_CTRL_BUSY_FW_SET_S 0
+#define E830_GLPCI_DOE_CTRL_BUSY_FW_SET_M BIT(0)
+#define E830_GLPCI_DOE_CTRL_DOE_CFG_ERR_SET_S 1
+#define E830_GLPCI_DOE_CTRL_DOE_CFG_ERR_SET_M BIT(1)
+#define E830_GLPCI_DOE_DBG 0x0009DF6C /* Reset Source: PCIR */
+#define E830_GLPCI_DOE_DBG_CFG_BUSY_S 0
+#define E830_GLPCI_DOE_DBG_CFG_BUSY_M BIT(0)
+#define E830_GLPCI_DOE_DBG_CFG_DATA_OBJECT_READY_S 1
+#define E830_GLPCI_DOE_DBG_CFG_DATA_OBJECT_READY_M BIT(1)
+#define E830_GLPCI_DOE_DBG_CFG_ERROR_S 2
+#define E830_GLPCI_DOE_DBG_CFG_ERROR_M BIT(2)
+#define E830_GLPCI_DOE_DBG_CFG_INTERRUPT_ENABLE_S 3
+#define E830_GLPCI_DOE_DBG_CFG_INTERRUPT_ENABLE_M BIT(3)
+#define E830_GLPCI_DOE_DBG_CFG_INTERRUPT_STATUS_S 4
+#define E830_GLPCI_DOE_DBG_CFG_INTERRUPT_STATUS_M BIT(4)
+#define E830_GLPCI_DOE_DBG_REQ_BUF_SW_WR_PTR_S 8
+#define E830_GLPCI_DOE_DBG_REQ_BUF_SW_WR_PTR_M MAKEMASK(0x1FF, 8)
+#define E830_GLPCI_DOE_DBG_RESP_BUF_SW_RD_PTR_S 20
+#define E830_GLPCI_DOE_DBG_RESP_BUF_SW_RD_PTR_M MAKEMASK(0x1FF, 20)
+#define E830_GLPCI_DOE_ERR_EN 0x0009DF64 /* Reset Source: PCIR */
+#define E830_GLPCI_DOE_ERR_EN_RD_REQ_BUF_ECC_ERR_EN_S 0
+#define E830_GLPCI_DOE_ERR_EN_RD_REQ_BUF_ECC_ERR_EN_M BIT(0)
+#define E830_GLPCI_DOE_ERR_EN_RD_RESP_BUF_ECC_ERR_EN_S 1
+#define E830_GLPCI_DOE_ERR_EN_RD_RESP_BUF_ECC_ERR_EN_M BIT(1)
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_CFG_POISONED_EN_S 2
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_CFG_POISONED_EN_M BIT(2)
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_REQ_EN_S 3
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_REQ_EN_M BIT(3)
+#define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_REQ_EN_S 4
+#define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_REQ_EN_M BIT(4)
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_FW_EN_S 5
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_FW_EN_M BIT(5)
+#define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_FW_EN_S 6
+#define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_FW_EN_M BIT(6)
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_OVERFLOW_EN_S 7
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_OVERFLOW_EN_M BIT(7)
+#define E830_GLPCI_DOE_ERR_EN_SW_GO_REQ_BUF_EMPTY_EN_S 8
+#define E830_GLPCI_DOE_ERR_EN_SW_GO_REQ_BUF_EMPTY_EN_M BIT(8)
+#define E830_GLPCI_DOE_ERR_EN_SW_RD_RESP_BUF_ON_READY_LOW_EN_S 9
+#define E830_GLPCI_DOE_ERR_EN_SW_RD_RESP_BUF_ON_READY_LOW_EN_M BIT(9)
+#define E830_GLPCI_DOE_ERR_EN_SW_REQ_DURING_MNG_RST_EN_S 10
+#define E830_GLPCI_DOE_ERR_EN_SW_REQ_DURING_MNG_RST_EN_M BIT(10)
+#define E830_GLPCI_DOE_ERR_EN_FW_SET_ERROR_EN_S 11
+#define E830_GLPCI_DOE_ERR_EN_FW_SET_ERROR_EN_M BIT(11)
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_ABORT_EN_S 12
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_ABORT_EN_M BIT(12)
+#define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_ABORT_EN_S 13
+#define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_ABORT_EN_M BIT(13)
+#define E830_GLPCI_DOE_ERR_EN_SW_RD_RESP_BUF_ON_BUSY_DUE_ABORT_EN_S 14
+#define E830_GLPCI_DOE_ERR_EN_SW_RD_RESP_BUF_ON_BUSY_DUE_ABORT_EN_M BIT(14)
+#define E830_GLPCI_DOE_ERR_STATUS 0x0009DF68 /* Reset Source: PCIR */
+#define E830_GLPCI_DOE_ERR_STATUS_RD_REQ_BUF_ECC_ERR_S 0
+#define E830_GLPCI_DOE_ERR_STATUS_RD_REQ_BUF_ECC_ERR_M BIT(0)
+#define E830_GLPCI_DOE_ERR_STATUS_RD_RESP_BUF_ECC_ERR_S 1
+#define E830_GLPCI_DOE_ERR_STATUS_RD_RESP_BUF_ECC_ERR_M BIT(1)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_CFG_POISONED_S 2
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_CFG_POISONED_M BIT(2)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_REQ_S 3
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_REQ_M BIT(3)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_REQ_S 4
+#define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_REQ_M BIT(4)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_FW_S 5
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_FW_M BIT(5)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_FW_S 6
+#define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_FW_M BIT(6)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_OVERFLOW_S 7
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_OVERFLOW_M BIT(7)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_GO_REQ_BUF_EMPTY_S 8
+#define E830_GLPCI_DOE_ERR_STATUS_SW_GO_REQ_BUF_EMPTY_M BIT(8)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_RD_RESP_BUF_ON_READY_LOW_S 9
+#define E830_GLPCI_DOE_ERR_STATUS_SW_RD_RESP_BUF_ON_READY_LOW_M BIT(9)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_REQ_DURING_MNG_RST_S 10
+#define E830_GLPCI_DOE_ERR_STATUS_SW_REQ_DURING_MNG_RST_M BIT(10)
+#define E830_GLPCI_DOE_ERR_STATUS_FW_SET_ERROR_S 11
+#define E830_GLPCI_DOE_ERR_STATUS_FW_SET_ERROR_M BIT(11)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_ABORT_S 12
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_ABORT_M BIT(12)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_ABORT_S 13
+#define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_ABORT_M BIT(13)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_RD_RESP_BUF_ON_BUSY_DUE_ABORT_S 14
+#define E830_GLPCI_DOE_ERR_STATUS_SW_RD_RESP_BUF_ON_BUSY_DUE_ABORT_M BIT(14)
+#define E830_GLPCI_DOE_ERR_STATUS_CFG_ERR_IDX_S 24
+#define E830_GLPCI_DOE_ERR_STATUS_CFG_ERR_IDX_M MAKEMASK(0x1F, 24)
+#define E830_GLPCI_DOE_REQ_MSG_NUM_DWS 0x0009DF58 /* Reset Source: PCIR */
+#define E830_GLPCI_DOE_REQ_MSG_NUM_DWS_GLPCI_DOE_REQ_MSG_NUM_DWS_S 0
+#define E830_GLPCI_DOE_REQ_MSG_NUM_DWS_GLPCI_DOE_REQ_MSG_NUM_DWS_M MAKEMASK(0x1FF, 0)
+#define E830_GLPCI_DOE_RESP 0x0009DF5C /* Reset Source: PCIR */
+#define E830_GLPCI_DOE_RESP_MSG_NUM_DWS_S 0
+#define E830_GLPCI_DOE_RESP_MSG_NUM_DWS_M MAKEMASK(0x1FF, 0)
+#define E830_GLPCI_DOE_RESP_READY_SET_S 16
+#define E830_GLPCI_DOE_RESP_READY_SET_M BIT(16)
+#define E830_GLPCI_ERR_DBG 0x0009DF84 /* Reset Source: PCIR */
+#define E830_GLPCI_ERR_DBG_ERR_MIFO_FULL_DROP_CTR_S 0
+#define E830_GLPCI_ERR_DBG_ERR_MIFO_FULL_DROP_CTR_M MAKEMASK(0x3, 0)
+#define E830_GLPCI_ERR_DBG_PCIE2SB_AER_MSG_SM_S 2
+#define E830_GLPCI_ERR_DBG_PCIE2SB_AER_MSG_SM_M BIT(2)
+#define E830_GLPCI_ERR_DBG_PCIE2SB_AER_MSG_FIFO_NUM_ENTRIES_S 3
+#define E830_GLPCI_ERR_DBG_PCIE2SB_AER_MSG_FIFO_NUM_ENTRIES_M MAKEMASK(0x7, 3)
+#define E830_GLPCI_ERR_DBG_ERR_MIFO_NUM_ENTRIES_S 6
+#define E830_GLPCI_ERR_DBG_ERR_MIFO_NUM_ENTRIES_M MAKEMASK(0xF, 6)
+#define E830_GLPCI_NPQ_CFG_HIGH_TO_S 20
+#define E830_GLPCI_NPQ_CFG_HIGH_TO_M BIT(20)
+#define E830_GLPCI_NPQ_CFG_INC_150MS_TO_S 21
+#define E830_GLPCI_NPQ_CFG_INC_150MS_TO_M BIT(21)
+#define E830_GLPCI_PUSH_PQM_CTRL 0x0009DF74 /* Reset Source: POR */
+#define E830_GLPCI_PUSH_PQM_CTRL_PF_LEGACY_RANGE_EN_S 0
+#define E830_GLPCI_PUSH_PQM_CTRL_PF_LEGACY_RANGE_EN_M BIT(0)
+#define E830_GLPCI_PUSH_PQM_CTRL_PF_TXTIME_RANGE_EN_S 1
+#define E830_GLPCI_PUSH_PQM_CTRL_PF_TXTIME_RANGE_EN_M BIT(1)
+#define E830_GLPCI_PUSH_PQM_CTRL_PF_4K_RANGE_EN_S 2
+#define E830_GLPCI_PUSH_PQM_CTRL_PF_4K_RANGE_EN_M BIT(2)
+#define E830_GLPCI_PUSH_PQM_CTRL_VF_LEGACY_RANGE_EN_S 3
+#define E830_GLPCI_PUSH_PQM_CTRL_VF_LEGACY_RANGE_EN_M BIT(3)
+#define E830_GLPCI_PUSH_PQM_CTRL_VF_TXTIME_RANGE_EN_S 4
+#define E830_GLPCI_PUSH_PQM_CTRL_VF_TXTIME_RANGE_EN_M BIT(4)
+#define E830_GLPCI_PUSH_PQM_CTRL_PUSH_PQM_IF_TO_VAL_S 8
+#define E830_GLPCI_PUSH_PQM_CTRL_PUSH_PQM_IF_TO_VAL_M MAKEMASK(0xF, 8)
+#define E830_GLPCI_PUSH_PQM_CTRL_PUSH_PQM_IF_TO_DIS_S 12
+#define E830_GLPCI_PUSH_PQM_CTRL_PUSH_PQM_IF_TO_DIS_M BIT(12)
+#define E830_GLPCI_PUSH_PQM_CTRL_RD_COMP_LEN_2DWS_ONE_CHUNK_EN_S 16
+#define E830_GLPCI_PUSH_PQM_CTRL_RD_COMP_LEN_2DWS_ONE_CHUNK_EN_M BIT(16)
+#define E830_GLPCI_PUSH_PQM_CTRL_RD_COMP_LEN_1DW_ON_XLR_S 17
+#define E830_GLPCI_PUSH_PQM_CTRL_RD_COMP_LEN_1DW_ON_XLR_M BIT(17)
+#define E830_GLPCI_PUSH_PQM_DBG 0x0009DF7C /* Reset Source: PCIR */
+#define E830_GLPCI_PUSH_PQM_DBG_EVENTS_CTR_S 0
+#define E830_GLPCI_PUSH_PQM_DBG_EVENTS_CTR_M MAKEMASK(0xFF, 0)
+#define E830_GLPCI_PUSH_PQM_DBG_DROP_CTR_S 8
+#define E830_GLPCI_PUSH_PQM_DBG_DROP_CTR_M MAKEMASK(0xFF, 8)
+#define E830_GLPCI_PUSH_PQM_DBG_ASYNC_FIFO_USED_SPACE_S 16
+#define E830_GLPCI_PUSH_PQM_DBG_ASYNC_FIFO_USED_SPACE_M MAKEMASK(0xF, 16)
+#define E830_GLPCI_PUSH_PQM_DBG_CDT_FIFO_USED_SPACE_S 20
+#define E830_GLPCI_PUSH_PQM_DBG_CDT_FIFO_USED_SPACE_M MAKEMASK(0x1F, 20)
+#define E830_GLPCI_PUSH_PQM_DBG_CDT_FIFO_PUSH_WHEN_FULL_ERR_S 25
+#define E830_GLPCI_PUSH_PQM_DBG_CDT_FIFO_PUSH_WHEN_FULL_ERR_M BIT(25)
+#define E830_GLPCI_PUSH_PQM_IF_TO_STATUS 0x0009DF78 /* Reset Source: PCIR */
+#define E830_GLPCI_PUSH_PQM_IF_TO_STATUS_GLPCI_PUSH_PQM_IF_TO_STATUS_S 0
+#define E830_GLPCI_PUSH_PQM_IF_TO_STATUS_GLPCI_PUSH_PQM_IF_TO_STATUS_M BIT(0)
+#define E830_GLPCI_RDPU_CMD_DBG 0x000BE264 /* Reset Source: PCIR */
+#define E830_GLPCI_RDPU_CMD_DBG_RDPU0_CMD_POP_CNT_S 0
+#define E830_GLPCI_RDPU_CMD_DBG_RDPU0_CMD_POP_CNT_M MAKEMASK(0xFF, 0)
+#define E830_GLPCI_RDPU_CMD_DBG_RDPU1_CMD_POP_CNT_S 8
+#define E830_GLPCI_RDPU_CMD_DBG_RDPU1_CMD_POP_CNT_M MAKEMASK(0xFF, 8)
+#define E830_GLPCI_RDPU_CMD_DBG_RDPU2_CMD_POP_CNT_S 16
+#define E830_GLPCI_RDPU_CMD_DBG_RDPU2_CMD_POP_CNT_M MAKEMASK(0xFF, 16)
+#define E830_GLPCI_RDPU_CMD_DBG_RDPU3_CMD_POP_CNT_S 24
+#define E830_GLPCI_RDPU_CMD_DBG_RDPU3_CMD_POP_CNT_M MAKEMASK(0xFF, 24)
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG0 0x000BE25C /* Reset Source: PCIR */
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG0_RDPU0_CMD_NUM_ENTRIES_S 0
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG0_RDPU0_CMD_NUM_ENTRIES_M MAKEMASK(0x1FF, 0)
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG0_RDPU1_CMD_NUM_ENTRIES_S 16
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG0_RDPU1_CMD_NUM_ENTRIES_M MAKEMASK(0x1FF, 16)
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG1 0x000BE260 /* Reset Source: PCIR */
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG1_RDPU2_CMD_NUM_ENTRIES_S 0
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG1_RDPU2_CMD_NUM_ENTRIES_M MAKEMASK(0x1FF, 0)
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG1_RDPU3_CMD_NUM_ENTRIES_S 16
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG1_RDPU3_CMD_NUM_ENTRIES_M MAKEMASK(0x1FF, 16)
+#define E830_GLPCI_RDPU_TAG 0x000BE258 /* Reset Source: PCIR */
+#define E830_GLPCI_RDPU_TAG_OVERRIDE_DELAY_S 0
+#define E830_GLPCI_RDPU_TAG_OVERRIDE_DELAY_M MAKEMASK(0xFF, 0)
+#define E830_GLPCI_RDPU_TAG_EXPECTED_TAG_S 8
+#define E830_GLPCI_RDPU_TAG_EXPECTED_TAG_M MAKEMASK(0x3FF, 8)
+#define E830_GLPCI_SB_AER_MSG_OUT 0x0009DF80 /* Reset Source: PCIR */
+#define E830_GLPCI_SB_AER_MSG_OUT_EN_S 0
+#define E830_GLPCI_SB_AER_MSG_OUT_EN_M BIT(0)
+#define E830_GLPCI_SB_AER_MSG_OUT_ANF_SET_EN_S 1
+#define E830_GLPCI_SB_AER_MSG_OUT_ANF_SET_EN_M BIT(1)
+#define E830_PF_FUNC_RID_HOST_S 16
+#define E830_PF_FUNC_RID_HOST_M MAKEMASK(0x3, 16)
+#define E830_GLPES_PFRXNPECNMARKEDPKTSHI(_i) (0x00553004 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define E830_GLPES_PFRXNPECNMARKEDPKTSHI_MAX_INDEX 127
+#define E830_GLPES_PFRXNPECNMARKEDPKTSHI_RXNPECNMARKEDPKTSHI_S 0
+#define E830_GLPES_PFRXNPECNMARKEDPKTSHI_RXNPECNMARKEDPKTSHI_M MAKEMASK(0xFFFFFF, 0)
+#define E830_GLPES_PFRXNPECNMARKEDPKTSLO(_i) (0x00553000 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define E830_GLPES_PFRXNPECNMARKEDPKTSLO_MAX_INDEX 127
+#define E830_GLPES_PFRXNPECNMARKEDPKTSLO_RXNPECNMARKEDPKTSLO_S 0
+#define E830_GLPES_PFRXNPECNMARKEDPKTSLO_RXNPECNMARKEDPKTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLPES_PFRXRPCNPHANDLED(_i) (0x00552C00 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define E830_GLPES_PFRXRPCNPHANDLED_MAX_INDEX 127
+#define E830_GLPES_PFRXRPCNPHANDLED_RXRPCNPHANDLED_S 0
+#define E830_GLPES_PFRXRPCNPHANDLED_RXRPCNPHANDLED_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLPES_PFRXRPCNPIGNORED(_i) (0x00552800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define E830_GLPES_PFRXRPCNPIGNORED_MAX_INDEX 127
+#define E830_GLPES_PFRXRPCNPIGNORED_RXRPCNPIGNORED_S 0
+#define E830_GLPES_PFRXRPCNPIGNORED_RXRPCNPIGNORED_M MAKEMASK(0xFFFFFF, 0)
+#define E830_GLPES_PFTXNPCNPSENT(_i) (0x00553800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define E830_GLPES_PFTXNPCNPSENT_MAX_INDEX 127
+#define E830_GLPES_PFTXNPCNPSENT_TXNPCNPSENT_S 0
+#define E830_GLPES_PFTXNPCNPSENT_TXNPCNPSENT_M MAKEMASK(0xFFFFFF, 0)
+#define E830_GLQF_FLAT_HLUT(_i) (0x004C0000 + ((_i) * 4)) /* _i=0...8191 */ /* Reset Source: CORER */
+#define E830_GLQF_FLAT_HLUT_MAX_INDEX 8191
+#define E830_GLQF_FLAT_HLUT_LUT0_S 0
+#define E830_GLQF_FLAT_HLUT_LUT0_M MAKEMASK(0xFF, 0)
+#define E830_GLQF_FLAT_HLUT_LUT1_S 8
+#define E830_GLQF_FLAT_HLUT_LUT1_M MAKEMASK(0xFF, 8)
+#define E830_GLQF_FLAT_HLUT_LUT2_S 16
+#define E830_GLQF_FLAT_HLUT_LUT2_M MAKEMASK(0xFF, 16)
+#define E830_GLQF_FLAT_HLUT_LUT3_S 24
+#define E830_GLQF_FLAT_HLUT_LUT3_M MAKEMASK(0xFF, 24)
+#define E830_GLQF_QGRP_CNTX(_i) (0x00490000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define E830_GLQF_QGRP_CNTX_MAX_INDEX 2047
+#define E830_GLQF_QGRP_CNTX_QG_LUT_BASE_S 0
+#define E830_GLQF_QGRP_CNTX_QG_LUT_BASE_M MAKEMASK(0x7FFF, 0)
+#define E830_GLQF_QGRP_CNTX_QG_LUT_SIZE_S 16
+#define E830_GLQF_QGRP_CNTX_QG_LUT_SIZE_M MAKEMASK(0xF, 16)
+#define E830_GLQF_QGRP_CNTX_VSI_S 20
+#define E830_GLQF_QGRP_CNTX_VSI_M MAKEMASK(0x3FF, 20)
+#define E830_GLQF_QGRP_PF_OWNER(_i) (0x00484000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define E830_GLQF_QGRP_PF_OWNER_MAX_INDEX 2047
+#define E830_GLQF_QGRP_PF_OWNER_OWNER_PF_S 0
+#define E830_GLQF_QGRP_PF_OWNER_OWNER_PF_M MAKEMASK(0x7, 0)
+#define E830_PFQF_LUT_ALLOC 0x0048E000 /* Reset Source: CORER */
+#define E830_PFQF_LUT_ALLOC_LUT_BASE_S 0
+#define E830_PFQF_LUT_ALLOC_LUT_BASE_M MAKEMASK(0x7FFF, 0)
+#define E830_PFQF_LUT_ALLOC_LUT_SIZE_S 16
+#define E830_PFQF_LUT_ALLOC_LUT_SIZE_M MAKEMASK(0xF, 16)
+#define E830_VSIQF_DEF_QGRP(_VSI) (0x00486000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
+#define E830_VSIQF_DEF_QGRP_MAX_INDEX 767
+#define E830_VSIQF_DEF_QGRP_DEF_QGRP_S 0
+#define E830_VSIQF_DEF_QGRP_DEF_QGRP_M MAKEMASK(0x7FF, 0)
+#define E830_GLPRT_BPRCH_BPRCH_S 0
+#define E830_GLPRT_BPRCH_BPRCH_M MAKEMASK(0xFF, 0)
+#define E830_GLPRT_BPRCL_BPRCL_S 0
+#define E830_GLPRT_BPRCL_BPRCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLPRT_BPTCH_BPTCH_S 0
+#define E830_GLPRT_BPTCH_BPTCH_M MAKEMASK(0xFF, 0)
+#define E830_GLPRT_BPTCL_BPTCL_S 0
+#define E830_GLPRT_BPTCL_BPTCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLPRT_UPTCL_UPTCL_S 0
+#define E830_GLPRT_UPTCL_UPTCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLPTM_ART_CTL 0x00088B50 /* Reset Source: POR */
+#define E830_GLPTM_ART_CTL_ACTIVE_S 0
+#define E830_GLPTM_ART_CTL_ACTIVE_M BIT(0)
+#define E830_GLPTM_ART_CTL_TIME_OUT_S 1
+#define E830_GLPTM_ART_CTL_TIME_OUT_M BIT(1)
+#define E830_GLPTM_ART_CTL_PTM_READY_S 2
+#define E830_GLPTM_ART_CTL_PTM_READY_M BIT(2)
+#define E830_GLPTM_ART_CTL_PTM_AUTO_S 3
+#define E830_GLPTM_ART_CTL_PTM_AUTO_M BIT(3)
+#define E830_GLPTM_ART_CTL_PTM_AUTO_LATCH_S 4
+#define E830_GLPTM_ART_CTL_PTM_AUTO_LATCH_M BIT(4)
+#define E830_GLPTM_ART_CTL_LATCH_PTP_T1_S 5
+#define E830_GLPTM_ART_CTL_LATCH_PTP_T1_M BIT(5)
+#define E830_GLPTM_ART_CTL_AUTO_POURSE_S 6
+#define E830_GLPTM_ART_CTL_AUTO_POURSE_M BIT(6)
+#define E830_GLPTM_ART_TIME_H 0x00088B54 /* Reset Source: POR */
+#define E830_GLPTM_ART_TIME_H_ART_TIME_H_S 0
+#define E830_GLPTM_ART_TIME_H_ART_TIME_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLPTM_ART_TIME_L 0x00088B58 /* Reset Source: POR */
+#define E830_GLPTM_ART_TIME_L_ART_TIME_L_S 0
+#define E830_GLPTM_ART_TIME_L_ART_TIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_PTMTIME_H(_i) (0x00088B48 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define E830_GLTSYN_PTMTIME_H_MAX_INDEX 1
+#define E830_GLTSYN_PTMTIME_H_TSYNEVNT_H_S 0
+#define E830_GLTSYN_PTMTIME_H_TSYNEVNT_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_PTMTIME_L(_i) (0x00088B40 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define E830_GLTSYN_PTMTIME_L_MAX_INDEX 1
+#define E830_GLTSYN_PTMTIME_L_TSYNEVNT_L_S 0
+#define E830_GLTSYN_PTMTIME_L_TSYNEVNT_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_TIME_H_0_AL 0x0008A004 /* Reset Source: CORER */
+#define E830_GLTSYN_TIME_H_0_AL_TSYNTIME_L_S 0
+#define E830_GLTSYN_TIME_H_0_AL_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_TIME_H_1_AL 0x0008B004 /* Reset Source: CORER */
+#define E830_GLTSYN_TIME_H_1_AL_TSYNTIME_L_S 0
+#define E830_GLTSYN_TIME_H_1_AL_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_TIME_L_0_AL 0x0008A000 /* Reset Source: CORER */
+#define E830_GLTSYN_TIME_L_0_AL_TSYNTIME_L_S 0
+#define E830_GLTSYN_TIME_L_0_AL_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_TIME_L_1_AL 0x0008B000 /* Reset Source: CORER */
+#define E830_GLTSYN_TIME_L_1_AL_TSYNTIME_L_S 0
+#define E830_GLTSYN_TIME_L_1_AL_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PFPTM_SEM 0x00088B00 /* Reset Source: PFR */
+#define E830_PFPTM_SEM_BUSY_S 0
+#define E830_PFPTM_SEM_BUSY_M BIT(0)
+#define E830_PFPTM_SEM_PF_OWNER_S 4
+#define E830_PFPTM_SEM_PF_OWNER_M MAKEMASK(0x7, 4)
+#define E830_VSI_PASID_1(_VSI) (0x00094000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
+#define E830_VSI_PASID_1_MAX_INDEX 767
+#define E830_VSI_PASID_1_PASID_S 0
+#define E830_VSI_PASID_1_PASID_M MAKEMASK(0xFFFFF, 0)
+#define E830_VSI_PASID_1_EN_S 31
+#define E830_VSI_PASID_1_EN_M BIT(31)
+#define E830_VSI_PASID_2(_VSI) (0x00095000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
+#define E830_VSI_PASID_2_MAX_INDEX 767
+#define E830_VSI_PASID_2_PASID_S 0
+#define E830_VSI_PASID_2_PASID_M MAKEMASK(0xFFFFF, 0)
+#define E830_VSI_PASID_2_EN_S 31
+#define E830_VSI_PASID_2_EN_M BIT(31)
+#define E830_GLPE_CQM_FUNC_INVALIDATE_PMF_ID_S 15
+#define E830_GLPE_CQM_FUNC_INVALIDATE_PMF_ID_M MAKEMASK(0x3F, 15)
+#define E830_GLPE_CQM_FUNC_INVALIDATE_INVALIDATE_TYPE_S 29
+#define E830_GLPE_CQM_FUNC_INVALIDATE_INVALIDATE_TYPE_M MAKEMASK(0x3, 29)
+#define E830_VFPE_MRTEIDXMASK_MAX_INDEX 255
+#define E830_VSIQF_QGRP_CFG(_VSI) (0x00492000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define E830_VSIQF_QGRP_CFG_MAX_INDEX 767
+#define E830_VSIQF_QGRP_CFG_VSI_QGRP_ENABLE_S 0
+#define E830_VSIQF_QGRP_CFG_VSI_QGRP_ENABLE_M BIT(0)
+#define E830_VSIQF_QGRP_CFG_VSI_QGRP_GEN_INDEX_S 1
+#define E830_VSIQF_QGRP_CFG_VSI_QGRP_GEN_INDEX_M MAKEMASK(0x7, 1)
+#define E830_GLDCB_RTC_BLOCKED 0x0012274C /* Reset Source: CORER */
+#define E830_GLDCB_RTC_BLOCKED_BLOCKED_S 0
+#define E830_GLDCB_RTC_BLOCKED_BLOCKED_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLDCB_RTCID 0x00122900 /* Reset Source: CORER */
+#define E830_GLDCB_RTCID_IMM_DROP_TC_S 0
+#define E830_GLDCB_RTCID_IMM_DROP_TC_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLDCB_RTCTI_CDS_SET 0x00122748 /* Reset Source: CORER */
+#define E830_GLDCB_RTCTI_CDS_SET_CDS_SET_S 0
+#define E830_GLDCB_RTCTI_CDS_SET_CDS_SET_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLDCB_RTCTQ_PD(_i) (0x00122700 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GLDCB_RTCTQ_PD_MAX_INDEX 7
+#define E830_GLDCB_RTCTQ_PD_RXQNUM_S 0
+#define E830_GLDCB_RTCTQ_PD_RXQNUM_M MAKEMASK(0x7FF, 0)
+#define E830_GLDCB_RTCTQ_PD_IS_PF_Q_S 16
+#define E830_GLDCB_RTCTQ_PD_IS_PF_Q_M BIT(16)
+#define E830_GLDCB_RTCTQ_SET 0x00122750 /* Reset Source: CORER */
+#define E830_GLDCB_RTCTQ_SET_RTCTQ_VALID_S 0
+#define E830_GLDCB_RTCTQ_SET_RTCTQ_VALID_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLDCB_RTCTQ_STICKY_EN 0x00122754 /* Reset Source: CORER */
+#define E830_GLDCB_RTCTQ_STICKY_EN_EN_S 0
+#define E830_GLDCB_RTCTQ_STICKY_EN_EN_M BIT(0)
+#define E830_GLDCB_RTCTS_PD(_i) (0x00122720 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GLDCB_RTCTS_PD_MAX_INDEX 7
+#define E830_GLDCB_RTCTS_PD_PFCTIMER_S 0
+#define E830_GLDCB_RTCTS_PD_PFCTIMER_M MAKEMASK(0x3FFF, 0)
+#define E830_GLRPB_TC_TOTAL_PC(_i) (0x000ACD00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define E830_GLRPB_TC_TOTAL_PC_MAX_INDEX 31
+#define E830_GLRPB_TC_TOTAL_PC_BYTE_CNT_S 0
+#define E830_GLRPB_TC_TOTAL_PC_BYTE_CNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_VFINT_ITRN_64(_i, _j) (0x00002C00 + ((_i) * 4 + (_j) * 256)) /* _i=0...63, _j=0...2 */ /* Reset Source: CORER */
+#define E830_VFINT_ITRN_64_MAX_INDEX 63
+#define E830_VFINT_ITRN_64_INTERVAL_S 0
+#define E830_VFINT_ITRN_64_INTERVAL_M MAKEMASK(0xFFF, 0)
+#define E830_GLQTX_TXTIME_DBELL_LSB1(_DBQM) (0x0000D000 + ((_DBQM) * 8)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_GLQTX_TXTIME_DBELL_LSB1_MAX_INDEX 255
+#define E830_GLQTX_TXTIME_DBELL_LSB1_QTX_TXTIME_DBELL_S 0
+#define E830_GLQTX_TXTIME_DBELL_LSB1_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLQTX_TXTIME_DBELL_MSB1(_DBQM) (0x0000D004 + ((_DBQM) * 8)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_GLQTX_TXTIME_DBELL_MSB1_MAX_INDEX 255
+#define E830_GLQTX_TXTIME_DBELL_MSB1_QTX_TXTIME_DBELL_S 0
+#define E830_GLQTX_TXTIME_DBELL_MSB1_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLQTX_TXTIME_LARGE_DBELL_LSB(_DBQM) (0x00040000 + ((_DBQM) * 8)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_GLQTX_TXTIME_LARGE_DBELL_LSB_MAX_INDEX 255
+#define E830_GLQTX_TXTIME_LARGE_DBELL_LSB_QTX_TXTIME_DBELL_S 0
+#define E830_GLQTX_TXTIME_LARGE_DBELL_LSB_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLQTX_TXTIME_LARGE_DBELL_MSB(_DBQM) (0x00040004 + ((_DBQM) * 8)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_GLQTX_TXTIME_LARGE_DBELL_MSB_MAX_INDEX 255
+#define E830_GLQTX_TXTIME_LARGE_DBELL_MSB_QTX_TXTIME_DBELL_S 0
+#define E830_GLQTX_TXTIME_LARGE_DBELL_MSB_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_TIME_H_0_AL1 0x00003004 /* Reset Source: CORER */
+#define E830_GLTSYN_TIME_H_0_AL1_TSYNTIME_L_S 0
+#define E830_GLTSYN_TIME_H_0_AL1_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_TIME_H_1_AL1 0x0000300C /* Reset Source: CORER */
+#define E830_GLTSYN_TIME_H_1_AL1_TSYNTIME_L_S 0
+#define E830_GLTSYN_TIME_H_1_AL1_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_TIME_L_0_AL1 0x00003000 /* Reset Source: CORER */
+#define E830_GLTSYN_TIME_L_0_AL1_TSYNTIME_L_S 0
+#define E830_GLTSYN_TIME_L_0_AL1_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_TIME_L_1_AL1 0x00003008 /* Reset Source: CORER */
+#define E830_GLTSYN_TIME_L_1_AL1_TSYNTIME_L_S 0
+#define E830_GLTSYN_TIME_L_1_AL1_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_VSI_VSI2F_LEM(_VSI) (0x006100A0 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
+#define E830_VSI_VSI2F_LEM_MAX_INDEX 767
+#define E830_VSI_VSI2F_LEM_VFVMNUMBER_S 0
+#define E830_VSI_VSI2F_LEM_VFVMNUMBER_M MAKEMASK(0x3FF, 0)
+#define E830_VSI_VSI2F_LEM_FUNCTIONTYPE_S 10
+#define E830_VSI_VSI2F_LEM_FUNCTIONTYPE_M MAKEMASK(0x3, 10)
+#define E830_VSI_VSI2F_LEM_PFNUMBER_S 12
+#define E830_VSI_VSI2F_LEM_PFNUMBER_M MAKEMASK(0x7, 12)
+#define E830_VSI_VSI2F_LEM_BUFFERNUMBER_S 16
+#define E830_VSI_VSI2F_LEM_BUFFERNUMBER_M MAKEMASK(0x7, 16)
+#define E830_VSI_VSI2F_LEM_VSI_NUMBER_S 20
+#define E830_VSI_VSI2F_LEM_VSI_NUMBER_M MAKEMASK(0x3FF, 20)
+#define E830_VSI_VSI2F_LEM_VSI_ENABLE_S 31
+#define E830_VSI_VSI2F_LEM_VSI_ENABLE_M BIT(31)
#endif /* !_ICE_HW_AUTOGEN_H_ */
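The E830 register defines above follow the driver's usual _S/_M convention: each field has a shift (_S) giving its starting bit and a mask (_M) built with MAKEMASK() or BIT(). As a minimal sketch of how such a pair is typically consumed (a hypothetical helper, assuming the driver's existing rd32() register accessor; not part of this commit):

	/* Hypothetical example: extract the PF owner field of the PTM semaphore. */
	static inline u32
	ice_e830_ptm_sem_owner(struct ice_hw *hw)
	{
		u32 reg = rd32(hw, E830_PFPTM_SEM);

		/* Mask the field, then shift it down to bit 0. */
		return ((reg & E830_PFPTM_SEM_PF_OWNER_M) >> E830_PFPTM_SEM_PF_OWNER_S);
	}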
diff --git a/sys/dev/ice/ice_iflib.h b/sys/dev/ice/ice_iflib.h
index 4ac5fffe5b7e..3a5dc201189a 100644
--- a/sys/dev/ice/ice_iflib.h
+++ b/sys/dev/ice/ice_iflib.h
@@ -146,7 +146,6 @@ struct ice_tx_queue {
struct ice_tx_desc *tx_base;
bus_addr_t tx_paddr;
struct tx_stats stats;
- u64 tso;
u16 desc_count;
u32 tail;
struct ice_irq_vector *irqv;
diff --git a/sys/dev/ice/ice_lan_tx_rx.h b/sys/dev/ice/ice_lan_tx_rx.h
index 860958bffbaf..693e0ca5efc6 100644
--- a/sys/dev/ice/ice_lan_tx_rx.h
+++ b/sys/dev/ice/ice_lan_tx_rx.h
@@ -948,10 +948,9 @@ struct ice_tx_ctx_desc {
__le64 qw1;
};
-#define ICE_TX_GCS_DESC_START 0 /* 7 BITS */
-#define ICE_TX_GCS_DESC_OFFSET 7 /* 4 BITS */
-#define ICE_TX_GCS_DESC_TYPE 11 /* 2 BITS */
-#define ICE_TX_GCS_DESC_ENA 13 /* 1 BIT */
+#define ICE_TX_GCS_DESC_START 0 /* 8 BITS */
+#define ICE_TX_GCS_DESC_OFFSET 8 /* 4 BITS */
+#define ICE_TX_GCS_DESC_TYPE 12 /* 3 BITS */
#define ICE_TXD_CTX_QW1_DTYPE_S 0
#define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S)
@@ -2375,4 +2374,5 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
#define ICE_LINK_SPEED_40000MBPS 40000
#define ICE_LINK_SPEED_50000MBPS 50000
#define ICE_LINK_SPEED_100000MBPS 100000
+#define ICE_LINK_SPEED_200000MBPS 200000
#endif /* _ICE_LAN_TX_RX_H_ */
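The reworked ICE_TX_GCS_DESC_* values above are bit offsets within the Tx descriptor's GCS (generic checksum) field: the checksum start now occupies bits 0-7, the offset bits 8-11, and the type field begins at bit 12, with the separate enable bit removed. A minimal sketch of how the new layout might be packed, using only the defines from this header (hypothetical helper, not part of this commit; the field is assumed to fit in 16 bits):

	/* Hypothetical packing helper for the new GCS descriptor layout. */
	static inline u16
	ice_tx_gcs_field(u16 csum_start, u16 csum_offset, u16 csum_type)
	{
		return ((csum_start << ICE_TX_GCS_DESC_START) |
			(csum_offset << ICE_TX_GCS_DESC_OFFSET) |
			(csum_type << ICE_TX_GCS_DESC_TYPE));
	}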
diff --git a/sys/dev/ice/ice_lib.c b/sys/dev/ice/ice_lib.c
index 7077859cc877..ef55df061f3c 100644
--- a/sys/dev/ice/ice_lib.c
+++ b/sys/dev/ice/ice_lib.c
@@ -79,6 +79,7 @@ static void ice_process_link_event(struct ice_softc *sc, struct ice_rq_event_inf
static void ice_process_ctrlq_event(struct ice_softc *sc, const char *qname,
struct ice_rq_event_info *event);
static void ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf);
+static void ice_update_port_oversize(struct ice_softc *sc, u64 rx_errors);
static void ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf);
static void ice_os_pkg_version_str(struct ice_hw *hw, struct sbuf *buf);
static bool ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info);
@@ -181,7 +182,22 @@ static bool ice_dscp_is_mapped(struct ice_dcbx_cfg *dcbcfg);
static void ice_start_dcbx_agent(struct ice_softc *sc);
static u16 ice_fw_debug_dump_print_cluster(struct ice_softc *sc,
struct sbuf *sbuf, u16 cluster_id);
+static void ice_fw_debug_dump_print_clusters(struct ice_softc *sc,
+ struct sbuf *sbuf);
static void ice_remove_vsi_mirroring(struct ice_vsi *vsi);
+static int ice_get_tx_rx_equalizations(struct ice_hw *hw, u8 serdes_num,
+ struct ice_serdes_equalization *ptr);
+static int ice_fec_counter_read(struct ice_hw *hw, u32 receiver_id,
+ u32 reg_offset, u16 *output);
+static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+ struct ice_fec_stats_to_sysctl *fec_stats);
+static bool ice_is_serdes_muxed(struct ice_hw *hw);
+static int ice_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed);
+static int ice_update_port_topology(u8 lport,
+ struct ice_port_topology *port_topology,
+ bool is_muxed);
+static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
+ struct ice_port_topology *port_topology);
static int ice_module_init(void);
static int ice_module_exit(void);
@@ -231,6 +247,7 @@ static int ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_rx_errors_stat(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_dump_dcbx_cfg(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_dump_vsi_cfg(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_dump_phy_stats(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_ets_min_rate(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_up2tc_map(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_pfc_config(SYSCTL_HANDLER_ARGS);
@@ -313,6 +330,10 @@ ice_set_ctrlq_len(struct ice_hw *hw)
hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
+ hw->sbq.num_rq_entries = ICE_SBQ_LEN;
+ hw->sbq.num_sq_entries = ICE_SBQ_LEN;
+ hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
+ hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
}
/**
@@ -628,7 +649,7 @@ ice_setup_vsi_mirroring(struct ice_vsi *vsi)
struct ice_softc *sc = vsi->sc;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
u16 rule_id, dest_vsi;
u16 count = 1;
@@ -674,7 +695,7 @@ static void
ice_remove_vsi_mirroring(struct ice_vsi *vsi)
{
struct ice_hw *hw = &vsi->sc->hw;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
bool keep_alloc = false;
if (vsi->rule_mir_ingress != ICE_INVAL_MIRROR_RULE_ID)
@@ -684,7 +705,7 @@ ice_remove_vsi_mirroring(struct ice_vsi *vsi)
device_printf(vsi->sc->dev, "Could not remove mirror VSI ingress rule, err %s aq_err %s\n",
ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
- status = ICE_SUCCESS;
+ status = 0;
if (vsi->rule_mir_egress != ICE_INVAL_MIRROR_RULE_ID)
status = ice_aq_delete_mir_rule(hw, vsi->rule_mir_egress, keep_alloc, NULL);
@@ -709,7 +730,7 @@ ice_initialize_vsi(struct ice_vsi *vsi)
struct ice_vsi_ctx ctx = { 0 };
struct ice_hw *hw = &vsi->sc->hw;
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- enum ice_status status;
+ int status;
int err;
/* For now, we only have code supporting PF VSIs */
@@ -783,7 +804,7 @@ ice_deinit_vsi(struct ice_vsi *vsi)
struct ice_vsi_ctx ctx = { 0 };
struct ice_softc *sc = vsi->sc;
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
/* Assert that the VSI pointer matches in the list */
MPASS(vsi == sc->all_vsi[vsi->idx]);
@@ -863,6 +884,8 @@ uint64_t
ice_aq_speed_to_rate(struct ice_port_info *pi)
{
switch (pi->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_200GB:
+ return IF_Gbps(200);
case ICE_AQ_LINK_SPEED_100GB:
return IF_Gbps(100);
case ICE_AQ_LINK_SPEED_50GB:
@@ -901,6 +924,8 @@ static const char *
ice_aq_speed_to_str(struct ice_port_info *pi)
{
switch (pi->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_200GB:
+ return "200 Gbps";
case ICE_AQ_LINK_SPEED_100GB:
return "100 Gbps";
case ICE_AQ_LINK_SPEED_50GB:
@@ -1098,6 +1123,26 @@ ice_get_phy_type_high(uint64_t phy_type_high)
return IFM_100G_AUI2_AC;
case ICE_PHY_TYPE_HIGH_100G_AUI2:
return IFM_100G_AUI2;
+ case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
+ return IFM_200G_CR4_PAM4;
+ case ICE_PHY_TYPE_HIGH_200G_SR4:
+ return IFM_200G_SR4;
+ case ICE_PHY_TYPE_HIGH_200G_FR4:
+ return IFM_200G_FR4;
+ case ICE_PHY_TYPE_HIGH_200G_LR4:
+ return IFM_200G_LR4;
+ case ICE_PHY_TYPE_HIGH_200G_DR4:
+ return IFM_200G_DR4;
+ case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
+ return IFM_200G_KR4_PAM4;
+ case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
+ return IFM_200G_AUI4_AC;
+ case ICE_PHY_TYPE_HIGH_200G_AUI4:
+ return IFM_200G_AUI4;
+ case ICE_PHY_TYPE_HIGH_200G_AUI8_AOC_ACC:
+ return IFM_200G_AUI8_AC;
+ case ICE_PHY_TYPE_HIGH_200G_AUI8:
+ return IFM_200G_AUI8;
default:
return IFM_UNKNOWN;
}
@@ -1192,7 +1237,17 @@ ice_phy_types_to_max_rate(struct ice_port_info *pi)
IF_Gbps(100ULL),
IF_Gbps(100ULL),
IF_Gbps(100ULL),
- IF_Gbps(100ULL)
+ IF_Gbps(100ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
};
/* coverity[address_of] */
@@ -1226,12 +1281,12 @@ ice_phy_types_to_max_rate(struct ice_port_info *pi)
* @pre this function must be protected from being called while another thread
* is accessing the ifmedia types.
*/
-enum ice_status
+int
ice_add_media_types(struct ice_softc *sc, struct ifmedia *media)
{
struct ice_aqc_get_phy_caps_data pcaps = { 0 };
struct ice_port_info *pi = sc->hw.port_info;
- enum ice_status status;
+ int status;
uint64_t phy_low, phy_high;
int bit;
@@ -1248,7 +1303,7 @@ ice_add_media_types(struct ice_softc *sc, struct ifmedia *media)
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
&pcaps, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(sc->dev,
"%s: ice_aq_get_phy_caps (ACTIVE) failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -1305,7 +1360,7 @@ ice_add_media_types(struct ice_softc *sc, struct ifmedia *media)
ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(media, IFM_ETHER | IFM_AUTO);
- return (ICE_SUCCESS);
+ return (0);
}
/**
@@ -1589,7 +1644,7 @@ ice_cfg_vsi_for_tx(struct ice_vsi *vsi)
struct ice_aqc_add_tx_qgrp *qg;
struct ice_hw *hw = &vsi->sc->hw;
device_t dev = vsi->sc->dev;
- enum ice_status status;
+ int status;
int i;
int err = 0;
u16 qg_size, pf_q;
@@ -1654,7 +1709,7 @@ ice_setup_rx_ctx(struct ice_rx_queue *rxq)
struct ice_vsi *vsi = rxq->vsi;
struct ice_softc *sc = vsi->sc;
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
u32 rxdid = ICE_RXDID_FLEX_NIC;
u32 regval;
u16 pf_q;
@@ -1936,7 +1991,7 @@ ice_add_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr)
struct ice_list_head mac_addr_list;
struct ice_hw *hw = &vsi->sc->hw;
device_t dev = vsi->sc->dev;
- enum ice_status status;
+ int status;
int err = 0;
INIT_LIST_HEAD(&mac_addr_list);
@@ -2006,7 +2061,7 @@ ice_remove_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr)
struct ice_list_head mac_addr_list;
struct ice_hw *hw = &vsi->sc->hw;
device_t dev = vsi->sc->dev;
- enum ice_status status;
+ int status;
int err = 0;
INIT_LIST_HEAD(&mac_addr_list);
@@ -2132,7 +2187,7 @@ ice_process_link_event(struct ice_softc *sc,
struct ice_port_info *pi = sc->hw.port_info;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
/* Sanity check that the data length isn't too small */
MPASS(le16toh(e->desc.datalen) >= ICE_GET_LINK_STATUS_DATALEN_V1);
@@ -2167,7 +2222,7 @@ ice_process_link_event(struct ice_softc *sc,
if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
if (!ice_testandset_state(&sc->state, ICE_STATE_NO_MEDIA)) {
status = ice_aq_set_link_restart_an(pi, false, NULL);
- if (status != ICE_SUCCESS && hw->adminq.sq_last_status != ICE_AQ_RC_EMODE)
+ if (status && hw->adminq.sq_last_status != ICE_AQ_RC_EMODE)
device_printf(dev,
"%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -2237,7 +2292,7 @@ ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending)
struct ice_rq_event_info event = { { 0 } };
struct ice_hw *hw = &sc->hw;
struct ice_ctl_q_info *cq;
- enum ice_status status;
+ int status;
const char *qname;
int loop = 0;
@@ -2246,6 +2301,10 @@ ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending)
cq = &hw->adminq;
qname = "Admin";
break;
+ case ICE_CTL_Q_SB:
+ cq = &hw->sbq;
+ qname = "Sideband";
+ break;
case ICE_CTL_Q_MAILBOX:
cq = &hw->mailboxq;
qname = "Mailbox";
@@ -2277,14 +2336,9 @@ ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending)
if (status == ICE_ERR_AQ_NO_WORK)
break;
if (status) {
- if (q_type == ICE_CTL_Q_ADMIN)
- device_printf(sc->dev,
- "%s Receive Queue event error %s\n",
- qname, ice_status_str(status));
- else
- device_printf(sc->dev,
- "%s Receive Queue event error %s\n",
- qname, ice_status_str(status));
+ device_printf(sc->dev,
+ "%s Receive Queue event error %s\n",
+ qname, ice_status_str(status));
free(event.msg_buf, M_ICE);
return (EIO);
}
@@ -2479,6 +2533,22 @@ ice_print_nvm_version(struct ice_softc *sc)
}
/**
+ * ice_update_port_oversize - Update port oversize stats
+ * @sc: device private structure
+ * @rx_errors: VSI error drops
+ *
+ * Add ERROR_CNT from GLV_REPC VSI register and rx_oversize stats counter
+ */
+static void
+ice_update_port_oversize(struct ice_softc *sc, u64 rx_errors)
+{
+ struct ice_hw_port_stats *cur_ps;
+ cur_ps = &sc->stats.cur;
+
+ sc->soft_stats.rx_roc_error = rx_errors + cur_ps->rx_oversize;
+}
+
+/**
* ice_update_vsi_hw_stats - Update VSI-specific ethernet statistics counters
* @vsi: the VSI to be updated
*
@@ -2522,7 +2592,7 @@ ice_update_vsi_hw_stats(struct ice_vsi *vsi)
ice_stat_update_repc(hw, vsi->idx, vsi->hw_stats.offsets_loaded,
cur_es);
-
+ ice_update_port_oversize(vsi->sc, cur_es->rx_errors);
#undef ICE_VSI_STAT40
#undef ICE_VSI_STAT32
@@ -2704,7 +2774,7 @@ ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS)
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
u8 pba_string[32] = "";
- enum ice_status status;
+ int status;
UNREFERENCED_PARAMETER(arg2);
@@ -2896,7 +2966,17 @@ static const uint16_t phy_link_speeds[] = {
ICE_AQ_LINK_SPEED_100GB,
ICE_AQ_LINK_SPEED_100GB,
ICE_AQ_LINK_SPEED_100GB,
- ICE_AQ_LINK_SPEED_100GB
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
};
#define ICE_SYSCTL_HELP_ADVERTISE_SPEED \
@@ -2914,6 +2994,7 @@ static const uint16_t phy_link_speeds[] = {
"\n\t 0x100 - 40G" \
"\n\t 0x200 - 50G" \
"\n\t 0x400 - 100G" \
+"\n\t 0x800 - 200G" \
"\n\t0x8000 - Unknown" \
"\n\t" \
"\nUse \"sysctl -x\" to view flags properly."
@@ -2997,6 +3078,17 @@ static const uint16_t phy_link_speeds[] = {
ICE_PHY_TYPE_HIGH_100G_CAUI2 | \
ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \
ICE_PHY_TYPE_HIGH_100G_AUI2)
+#define ICE_PHYS_200GB \
+ (ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 | \
+ ICE_PHY_TYPE_HIGH_200G_SR4 | \
+ ICE_PHY_TYPE_HIGH_200G_FR4 | \
+ ICE_PHY_TYPE_HIGH_200G_LR4 | \
+ ICE_PHY_TYPE_HIGH_200G_DR4 | \
+ ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 | \
+ ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC | \
+ ICE_PHY_TYPE_HIGH_200G_AUI4 | \
+ ICE_PHY_TYPE_HIGH_200G_AUI8_AOC_ACC | \
+ ICE_PHY_TYPE_HIGH_200G_AUI8)
/**
* ice_aq_phy_types_to_link_speeds - Convert the PHY Types to speeds
@@ -3063,6 +3155,8 @@ ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low,
*phy_type_low |= ICE_PHYS_100GB_LOW;
*phy_type_high |= ICE_PHYS_100GB_HIGH;
}
+ if (sysctl_speeds & ICE_AQ_LINK_SPEED_200GB)
+ *phy_type_high |= ICE_PHYS_200GB;
}
/**
@@ -3103,7 +3197,7 @@ ice_intersect_phy_types_and_speeds(struct ice_softc *sc,
"DFLT" };
struct ice_hw *hw = &sc->hw;
struct ice_port_info *pi = hw->port_info;
- enum ice_status status;
+ int status;
u16 report_speeds, temp_speeds;
u8 report_type;
bool apply_speed_filter = false;
@@ -3132,7 +3226,7 @@ ice_intersect_phy_types_and_speeds(struct ice_softc *sc,
apply_speed_filter = true;
status = ice_aq_get_phy_caps(pi, false, phy_data->report_mode, &pcaps, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(sc->dev,
"%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n",
__func__, report_types[report_type],
@@ -3205,7 +3299,7 @@ ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS)
if ((ret) || (req->newptr == NULL))
return (ret);
- if (sysctl_speeds > 0x7FF) {
+ if (sysctl_speeds > ICE_SYSCTL_SPEEDS_VALID_RANGE) {
device_printf(dev,
"%s: \"%u\" is outside of the range of acceptable values.\n",
__func__, sysctl_speeds);
@@ -3355,7 +3449,7 @@ ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS)
enum ice_fc_mode old_mode, new_mode;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
int ret, fc_num;
bool mode_set = false;
struct sbuf buf;
@@ -3369,7 +3463,7 @@ ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS)
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
&pcaps, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -3512,7 +3606,7 @@ __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, bool is_phy_type_high)
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
uint64_t types;
int ret;
@@ -3523,7 +3617,7 @@ __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, bool is_phy_type_high)
status = ice_aq_get_phy_caps(hw->port_info, false, ICE_AQC_REPORT_ACTIVE_CFG,
&pcaps, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -3549,7 +3643,7 @@ __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, bool is_phy_type_high)
cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
status = ice_aq_set_phy_cfg(hw, hw->port_info, &cfg, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -3612,7 +3706,7 @@ ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode)
struct ice_hw *hw = &sc->hw;
struct ice_port_info *pi = hw->port_info;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
int ret;
UNREFERENCED_PARAMETER(arg2);
@@ -3625,7 +3719,7 @@ ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode)
return (ESHUTDOWN);
status = ice_aq_get_phy_caps(pi, true, report_mode, &pcaps, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -3711,7 +3805,7 @@ ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS)
struct ice_aqc_get_link_status *resp;
struct ice_aq_desc desc;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
int ret;
UNREFERENCED_PARAMETER(arg2);
@@ -3732,7 +3826,7 @@ ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS)
resp->lport_num = pi->lport;
status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_aq_send_cmd failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -3762,7 +3856,7 @@ ice_sysctl_fw_cur_lldp_persist_status(SYSCTL_HANDLER_ARGS)
struct ice_softc *sc = (struct ice_softc *)arg1;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
struct sbuf *sbuf;
u32 lldp_state;
@@ -3803,7 +3897,7 @@ ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS)
struct ice_softc *sc = (struct ice_softc *)arg1;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
struct sbuf *sbuf;
u32 lldp_state;
@@ -3868,7 +3962,7 @@ ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS)
struct ice_dcbx_cfg *local_dcbx_cfg;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
int ret;
u32 old_state;
u8 fw_lldp_enabled;
@@ -4004,7 +4098,7 @@ ice_sysctl_ets_min_rate(SYSCTL_HANDLER_ARGS)
struct ice_port_info *pi;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
struct sbuf *sbuf;
int ret;
@@ -4114,7 +4208,7 @@ ice_sysctl_up2tc_map(SYSCTL_HANDLER_ARGS)
struct ice_port_info *pi;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
struct sbuf *sbuf;
int ret;
@@ -4157,7 +4251,8 @@ ice_sysctl_up2tc_map(SYSCTL_HANDLER_ARGS)
if (!hw->port_info->qos_cfg.is_sw_lldp)
return (EPERM);
- ret = ice_ets_str_to_tbl(up2tc_user_buf, new_up2tc, 7);
+ ret = ice_ets_str_to_tbl(up2tc_user_buf, new_up2tc,
+ ICE_MAX_TRAFFIC_CLASS - 1);
if (ret) {
device_printf(dev, "%s: Could not parse input priority assignment table: %s\n",
__func__, up2tc_user_buf);
@@ -4202,7 +4297,7 @@ ice_config_pfc(struct ice_softc *sc, u8 new_mode)
struct ice_hw *hw = &sc->hw;
struct ice_port_info *pi;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
pi = hw->port_info;
local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
@@ -4326,7 +4421,7 @@ ice_sysctl_pfc_mode(SYSCTL_HANDLER_ARGS)
struct ice_port_info *pi;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
u8 user_pfc_mode, aq_pfc_mode;
int ret;
@@ -4565,7 +4660,7 @@ ice_add_device_sysctls(struct ice_softc *sc)
hw_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "hw", CTLFLAG_RD,
NULL, "Port Hardware Statistics");
- ice_add_sysctls_mac_stats(ctx, hw_node, &sc->stats.cur);
+ ice_add_sysctls_mac_stats(ctx, hw_node, sc);
/* Add the main PF VSI stats now. Other VSIs will add their own stats
* during creation
@@ -4817,7 +4912,6 @@ ice_sysctl_rx_errors_stat(SYSCTL_HANDLER_ARGS)
stat += hs->rx_fragments;
stat += hs->rx_oversize;
stat += hs->rx_jabber;
- stat += hs->rx_len_errors;
stat += hs->crc_errors;
stat += hs->illegal_bytes;
@@ -5057,17 +5151,18 @@ ice_add_sysctls_mac_pfc_stats(struct sysctl_ctx_list *ctx,
* ice_add_sysctls_mac_stats - Add sysctls for global MAC statistics
* @ctx: the sysctl ctx to use
* @parent: parent node to add the sysctls under
- * @stats: the hw ports stat structure to pull values from
+ * @sc: device private structure
*
* Add global MAC statistics sysctls.
*/
void
ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid *parent,
- struct ice_hw_port_stats *stats)
+ struct ice_softc *sc)
{
struct sysctl_oid *mac_node;
struct sysctl_oid_list *parent_list, *mac_list;
+ struct ice_hw_port_stats *stats = &sc->stats.cur;
parent_list = SYSCTL_CHILDREN(parent);
@@ -5092,9 +5187,7 @@ ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
- {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
- {&stats->rx_len_errors, "rx_length_errors", "Receive Length Errors"},
{&stats->eth.rx_discards, "rx_discards",
"Discarded Rx Packets by Port (shortage of storage space)"},
/* Packet Transmission Stats */
@@ -5127,6 +5220,11 @@ ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
entry->description);
entry++;
}
+ /* Port oversize packet stats */
+ SYSCTL_ADD_U64(ctx, mac_list, OID_AUTO, "rx_oversized",
+ CTLFLAG_RD | CTLFLAG_STATS, &sc->soft_stats.rx_roc_error,
+ 0, "Oversized packets received");
+
}
/**
@@ -5168,6 +5266,9 @@ ice_configure_misc_interrupts(struct ice_softc *sc)
/* Associate the Mailbox interrupt with ITR 0, and enable it */
wr32(hw, PFINT_MBX_CTL, PFINT_MBX_CTL_CAUSE_ENA_M);
+ /* Associate the SB Queue interrupt with ITR 0, and enable it */
+ wr32(hw, PFINT_SB_CTL, PFINT_SB_CTL_CAUSE_ENA_M);
+
/* Associate the AdminQ interrupt with ITR 0, and enable it */
wr32(hw, PFINT_FW_CTL, PFINT_FW_CTL_CAUSE_ENA_M);
}
@@ -5313,7 +5414,7 @@ ice_sync_multicast_filters(struct ice_softc *sc)
struct ice_fltr_mgmt_list_entry *itr;
struct ice_mcast_sync_data data = {};
struct ice_list_head *rules, remove_list;
- enum ice_status status;
+ int status;
int err = 0;
INIT_LIST_HEAD(&data.add_list);
@@ -5399,13 +5500,13 @@ free_filter_lists:
*
* Programs HW filters so that the given VSI will receive the specified VLANs.
*/
-enum ice_status
+int
ice_add_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid, u16 length)
{
struct ice_hw *hw = &vsi->sc->hw;
struct ice_list_head vlan_list;
struct ice_fltr_list_entry *vlan_entries;
- enum ice_status status;
+ int status;
MPASS(length > 0);
@@ -5450,7 +5551,7 @@ done:
*
* Programs a HW filter so that the given VSI will receive the specified VLAN.
*/
-enum ice_status
+int
ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid)
{
return ice_add_vlan_hw_filters(vsi, &vid, 1);
@@ -5464,13 +5565,13 @@ ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid)
*
* Removes previously programmed HW filters for the specified VSI.
*/
-enum ice_status
+int
ice_remove_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid, u16 length)
{
struct ice_hw *hw = &vsi->sc->hw;
struct ice_list_head vlan_list;
struct ice_fltr_list_entry *vlan_entries;
- enum ice_status status;
+ int status;
MPASS(length > 0);
@@ -5515,7 +5616,7 @@ done:
*
* Removes a previously programmed HW filter for the specified VSI.
*/
-enum ice_status
+int
ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid)
{
return ice_remove_vlan_hw_filters(vsi, &vid, 1);
@@ -6123,35 +6224,35 @@ ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS)
}
#define ICE_SYSCTL_DEBUG_MASK_HELP \
-"\nSelect debug statements to print to kernel messages" \
+"\nSelect debug statements to print to kernel message log" \
"\nFlags:" \
-"\n\t 0x1 - Function Tracing" \
-"\n\t 0x2 - Driver Initialization" \
-"\n\t 0x4 - Release" \
-"\n\t 0x8 - FW Logging" \
-"\n\t 0x10 - Link" \
-"\n\t 0x20 - PHY" \
-"\n\t 0x40 - Queue Context" \
-"\n\t 0x80 - NVM" \
-"\n\t 0x100 - LAN" \
-"\n\t 0x200 - Flow" \
-"\n\t 0x400 - DCB" \
-"\n\t 0x800 - Diagnostics" \
-"\n\t 0x1000 - Flow Director" \
-"\n\t 0x2000 - Switch" \
-"\n\t 0x4000 - Scheduler" \
-"\n\t 0x8000 - RDMA" \
-"\n\t 0x10000 - DDP Package" \
-"\n\t 0x20000 - Resources" \
-"\n\t 0x40000 - ACL" \
-"\n\t 0x80000 - PTP" \
-"\n\t 0x100000 - Admin Queue messages" \
-"\n\t 0x200000 - Admin Queue descriptors" \
-"\n\t 0x400000 - Admin Queue descriptor buffers" \
-"\n\t 0x800000 - Admin Queue commands" \
-"\n\t 0x1000000 - Parser" \
-"\n\t ..." \
-"\n\t 0x8000000 - (Reserved for user)" \
+"\n\t 0x1 - Function Tracing" \
+"\n\t 0x2 - Driver Initialization" \
+"\n\t 0x4 - Release" \
+"\n\t 0x8 - FW Logging" \
+"\n\t 0x10 - Link" \
+"\n\t 0x20 - PHY" \
+"\n\t 0x40 - Queue Context" \
+"\n\t 0x80 - NVM" \
+"\n\t 0x100 - LAN" \
+"\n\t 0x200 - Flow" \
+"\n\t 0x400 - DCB" \
+"\n\t 0x800 - Diagnostics" \
+"\n\t 0x1000 - Flow Director" \
+"\n\t 0x2000 - Switch" \
+"\n\t 0x4000 - Scheduler" \
+"\n\t 0x8000 - RDMA" \
+"\n\t 0x10000 - DDP Package" \
+"\n\t 0x20000 - Resources" \
+"\n\t 0x40000 - ACL" \
+"\n\t 0x80000 - PTP" \
+"\n\t 0x100000 - Admin Queue messages" \
+"\n\t 0x200000 - Admin Queue descriptors" \
+"\n\t 0x400000 - Admin Queue descriptor buffers" \
+"\n\t 0x800000 - Admin Queue commands" \
+"\n\t 0x1000000 - Parser" \
+"\n\t ..." \
+"\n\t 0x80000000 - (Reserved for user)" \
"\n\t" \
"\nUse \"sysctl -x\" to view flags properly."
@@ -6244,7 +6345,7 @@ ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS)
{
struct ice_softc *sc = (struct ice_softc *)arg1;
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
enum ice_reset_req reset_type = ICE_RESET_INVAL;
const char *reset_message;
int ret;
@@ -6349,15 +6450,16 @@ ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS)
#define ICE_SYSCTL_HELP_FW_DEBUG_DUMP_CLUSTER_SETTING \
"\nSelect clusters to dump with \"dump\" sysctl" \
"\nFlags:" \
+"\n\t 0 - All clusters (default)" \
"\n\t 0x1 - Switch" \
"\n\t 0x2 - ACL" \
"\n\t 0x4 - Tx Scheduler" \
-"\n\t 0x8 - Profile Configuration" \
+"\n\t 0x8 - Profile Configuration" \
"\n\t 0x20 - Link" \
"\n\t 0x80 - DCB" \
"\n\t 0x100 - L2P" \
-"\n\t 0x400000 - Manageability Transactions" \
-"\n\t" \
+"\n\t 0x400000 - Manageability Transactions (excluding E830)" \
+"\n" \
"\nUse \"sysctl -x\" to view flags properly."
/**
@@ -6391,7 +6493,13 @@ ice_sysctl_fw_debug_dump_cluster_setting(SYSCTL_HANDLER_ARGS)
if ((ret) || (req->newptr == NULL))
return (ret);
- if (clusters & ~(ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK)) {
+ u32 valid_cluster_mask;
+ if (ice_is_e830(&sc->hw))
+ valid_cluster_mask = ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK_E830;
+ else
+ valid_cluster_mask = ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK_E810;
+
+ if (clusters & ~(valid_cluster_mask)) {
device_printf(dev,
"%s: ERROR: Incorrect settings requested\n",
__func__);
@@ -6426,7 +6534,7 @@ ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 clu
device_t dev = sc->dev;
u16 data_buf_size = ICE_AQ_MAX_BUF_LEN;
const u8 reserved_buf[8] = {};
- enum ice_status status;
+ int status;
int counter = 0;
u8 *data_buf;
@@ -6497,7 +6605,13 @@ ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 clu
/* Adjust loop variables */
memset(data_buf, 0, data_buf_size);
bool same_table_next = (table_id == ret_next_table);
- bool last_table_next = (ret_next_table == 0xff || ret_next_table == 0xffff);
+ bool last_table_next;
+ if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_NEXT_CLUSTER_ID))
+ last_table_next =
+ (ret_next_table == 0xffff);
+ else
+ last_table_next =
+ (ret_next_table == 0xff || ret_next_table == 0xffff);
bool last_offset_next = (ret_next_index == 0xffffffff || ret_next_index == 0);
if ((!same_table_next && !last_offset_next) ||
@@ -6533,8 +6647,59 @@ ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 clu
return ret_next_cluster;
}
+/**
+ * ice_fw_debug_dump_print_clusters - Print data from FW clusters to sbuf
+ * @sc: the device softc
+ * @sbuf: initialized sbuf to print data to
+ *
+ * Handles dumping all of the requested clusters to the indicated sbuf. The
+ * clusters to dump are determined by the value in the
+ * fw_debug_dump_cluster_mask field in the sc argument.
+ *
+ * @remark Only intended to be used by the sysctl handler
+ * ice_sysctl_fw_debug_dump_do_dump
+ */
+static void
+ice_fw_debug_dump_print_clusters(struct ice_softc *sc, struct sbuf *sbuf)
+{
+ u16 next_cluster_id, max_cluster_id, start_cluster_id;
+ u32 cluster_mask = sc->fw_debug_dump_cluster_mask;
+ struct ice_hw *hw = &sc->hw;
+ int bit;
+
+ ice_debug(hw, ICE_DBG_DIAG, "%s: Debug Dump running...\n", __func__);
+
+ if (ice_is_e830(hw)) {
+ max_cluster_id = ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG_E830;
+ start_cluster_id = ICE_AQC_DBG_DUMP_CLUSTER_ID_SW_E830;
+ } else {
+ max_cluster_id = ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG_E810;
+ start_cluster_id = ICE_AQC_DBG_DUMP_CLUSTER_ID_SW_E810;
+ }
+
+ if (cluster_mask != 0) {
+ for_each_set_bit(bit, &cluster_mask,
+ sizeof(cluster_mask) * BITS_PER_BYTE) {
+ ice_fw_debug_dump_print_cluster(sc, sbuf,
+ bit + start_cluster_id);
+ }
+ } else {
+ next_cluster_id = start_cluster_id;
+
+ /* We don't support QUEUE_MNG and FULL_CSR_SPACE */
+ do {
+ next_cluster_id =
+ ice_fw_debug_dump_print_cluster(sc, sbuf, next_cluster_id);
+ } while ((next_cluster_id != 0) &&
+ (next_cluster_id < max_cluster_id));
+ }
+
+}
+
#define ICE_SYSCTL_HELP_FW_DEBUG_DUMP_DO_DUMP \
-"\nWrite 1 to output a FW debug dump containing the clusters specified by the \"clusters\" sysctl" \
+"\nWrite 1 to output a FW debug dump containing the clusters specified by the" \
+"\n\"clusters\" sysctl." \
+"\n" \
"\nThe \"-b\" flag must be used in order to dump this data as binary data because" \
"\nthis data is opaque and not a string."
@@ -6565,7 +6730,7 @@ ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS)
struct ice_softc *sc = (struct ice_softc *)arg1;
device_t dev = sc->dev;
struct sbuf *sbuf;
- int bit, ret;
+ int ret;
UNREFERENCED_PARAMETER(arg2);
@@ -6634,19 +6799,7 @@ ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS)
sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
sbuf_clear_flags(sbuf, SBUF_INCLUDENUL);
- ice_debug(&sc->hw, ICE_DBG_DIAG, "%s: Debug Dump running...\n", __func__);
-
- if (sc->fw_debug_dump_cluster_mask) {
- for_each_set_bit(bit, &sc->fw_debug_dump_cluster_mask,
- sizeof(sc->fw_debug_dump_cluster_mask) * 8)
- ice_fw_debug_dump_print_cluster(sc, sbuf, bit);
- } else {
- u16 next_cluster_id = 0;
- /* We don't support QUEUE_MNG and FULL_CSR_SPACE */
- do {
- next_cluster_id = ice_fw_debug_dump_print_cluster(sc, sbuf, next_cluster_id);
- } while (next_cluster_id != 0 && next_cluster_id < ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG);
- }
+ ice_fw_debug_dump_print_clusters(sc, sbuf);
sbuf_finish(sbuf);
sbuf_delete(sbuf);
@@ -6777,6 +6930,13 @@ ice_add_debug_sysctls(struct ice_softc *sc)
ice_sysctl_negotiated_fc, "A",
"Current Negotiated Flow Control mode");
+ if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_PHY_STATISTICS)) {
+ SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_statistics",
+ CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_dump_phy_stats, "A",
+ "Dumps PHY statistics from firmware");
+ }
+
SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "local_dcbx_cfg",
CTLTYPE_STRING | CTLFLAG_RD, sc, ICE_AQ_LLDP_MIB_LOCAL,
ice_sysctl_dump_dcbx_cfg, "A",
@@ -6795,6 +6955,10 @@ ice_add_debug_sysctls(struct ice_softc *sc)
sc, 0, ice_sysctl_query_port_ets, "A",
"Prints selected output from Query Port ETS AQ command");
+ SYSCTL_ADD_U64(ctx, debug_list, OID_AUTO, "rx_length_errors",
+ CTLFLAG_RD | CTLFLAG_STATS, &sc->stats.cur.rx_len_errors, 0,
+ "Receive Length Errors (SNAP packets)");
+
sw_node = SYSCTL_ADD_NODE(ctx, debug_list, OID_AUTO, "switch",
ICE_CTLFLAG_DEBUG | CTLFLAG_RD, NULL,
"Switch Configuration");
@@ -6848,7 +7012,7 @@ ice_vsi_disable_tx(struct ice_vsi *vsi)
{
struct ice_softc *sc = vsi->sc;
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
u32 *q_teids;
u16 *q_ids, *q_handles;
size_t q_teids_size, q_ids_size, q_handles_size;
@@ -7074,6 +7238,7 @@ ice_add_txq_sysctls(struct ice_tx_queue *txq)
{ &txq->stats.tx_packets, "tx_packets", "Queue Packets Transmitted" },
{ &txq->stats.tx_bytes, "tx_bytes", "Queue Bytes Transmitted" },
{ &txq->stats.mss_too_small, "mss_too_small", "TSO sends with an MSS less than 64" },
+ { &txq->stats.tso, "tso", "TSO packets" },
{ 0, 0, 0 }
};
@@ -7185,7 +7350,7 @@ ice_set_rss_key(struct ice_vsi *vsi)
struct ice_aqc_get_set_rss_keys keydata = { .standard_rss_key = {0} };
struct ice_softc *sc = vsi->sc;
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
/*
* If the RSS kernel interface is disabled, this will return the
@@ -7220,7 +7385,7 @@ ice_set_rss_flow_flds(struct ice_vsi *vsi)
struct ice_hw *hw = &sc->hw;
struct ice_rss_hash_cfg rss_cfg = { 0, 0, ICE_RSS_ANY_HEADERS, false };
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
u_int rss_hash_config;
rss_hash_config = rss_gethashconfig();
@@ -7306,7 +7471,7 @@ ice_set_rss_lut(struct ice_vsi *vsi)
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
struct ice_aq_get_set_rss_lut_params lut_params;
- enum ice_status status;
+ int status;
int i, err = 0;
u8 *lut;
@@ -7516,14 +7681,14 @@ ice_log_pkg_init(struct ice_softc *sc, enum ice_ddp_state pkg_status)
* ice_deinit_hw(). This allows the firmware reference to be immediately
* released using firmware_put.
*/
-enum ice_status
+int
ice_load_pkg_file(struct ice_softc *sc)
{
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
enum ice_ddp_state state;
const struct firmware *pkg;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 cached_layer_count;
u8 *buf_copy;
@@ -7548,7 +7713,7 @@ ice_load_pkg_file(struct ice_softc *sc)
status = ice_cfg_tx_topo(&sc->hw, buf_copy, pkg->datasize);
free(buf_copy, M_ICE);
/* Success indicates a change was made */
- if (status == ICE_SUCCESS) {
+ if (!status) {
/* 9 -> 5 */
if (cached_layer_count == 9)
device_printf(dev,
@@ -7562,6 +7727,12 @@ ice_load_pkg_file(struct ice_softc *sc)
/* Status is ICE_ERR_CFG when DDP does not support transmit balancing */
device_printf(dev,
"DDP package does not support transmit balancing feature - please update to the latest DDP package and try again\n");
+ } else if (status == ICE_ERR_ALREADY_EXISTS) {
+ /* Requested config already loaded */
+ } else if (status == ICE_ERR_AQ_ERROR) {
+ device_printf(dev,
+ "Error configuring transmit balancing: %s\n",
+ ice_status_str(status));
}
}
@@ -7612,8 +7783,8 @@ ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter)
case IFCOUNTER_IERRORS:
return (hs->crc_errors + hs->illegal_bytes +
hs->mac_local_faults + hs->mac_remote_faults +
- hs->rx_len_errors + hs->rx_undersize +
- hs->rx_oversize + hs->rx_fragments + hs->rx_jabber);
+ hs->rx_undersize + hs->rx_oversize + hs->rx_fragments +
+ hs->rx_jabber);
case IFCOUNTER_OPACKETS:
return (es->tx_unicast + es->tx_multicast + es->tx_broadcast);
case IFCOUNTER_OERRORS:
@@ -7671,7 +7842,7 @@ int
ice_replay_all_vsi_cfg(struct ice_softc *sc)
{
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
int i;
for (i = 0 ; i < sc->num_available_vsi; i++) {
@@ -7712,7 +7883,7 @@ ice_clean_vsi_rss_cfg(struct ice_vsi *vsi)
struct ice_softc *sc = vsi->sc;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
status = ice_rem_vsi_rss_cfg(hw, vsi->idx);
if (status)
@@ -7761,7 +7932,7 @@ static const char *
ice_requested_fec_mode(struct ice_port_info *pi)
{
struct ice_aqc_get_phy_caps_data pcaps = { 0 };
- enum ice_status status;
+ int status;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
&pcaps, NULL);
@@ -7871,7 +8042,7 @@ ice_update_laa_mac(struct ice_softc *sc)
{
const u8 *lladdr = (const u8 *)if_getlladdr(sc->ifp);
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
/* If the address is the same, then there is nothing to update */
if (!memcmp(lladdr, hw->port_info->mac.lan_addr, ETHER_ADDR_LEN))
@@ -7900,7 +8071,9 @@ ice_update_laa_mac(struct ice_softc *sc)
* @sc: device softc
*
* This will potentially print out a warning message if bus bandwidth
- * is insufficient for full-speed operation.
+ * is insufficient for full-speed operation. This will not print out anything
+ * for E82x devices since those are in SoCs, do not report valid PCIe info,
+ * and cannot be moved to a different slot.
*
* This should only be called once, during the attach process, after
* hw->port_info has been filled out with port link topology information
@@ -7914,6 +8087,9 @@ ice_get_and_print_bus_info(struct ice_softc *sc)
u16 pci_link_status;
int offset;
+ if (!ice_is_e810(hw) && !ice_is_e830(hw))
+ return;
+
pci_find_cap(dev, PCIY_EXPRESS, &offset);
pci_link_status = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
@@ -7936,7 +8112,7 @@ ice_get_and_print_bus_info(struct ice_softc *sc)
* a 64-bit baudrate.
* @speed: enum value to convert
*
- * This only goes up to PCIE Gen 4.
+ * This only goes up to PCIE Gen 5.
*/
static uint64_t
ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed)
@@ -7953,6 +8129,8 @@ ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed)
return IF_Gbps(8);
case ice_pcie_speed_16_0GT:
return IF_Gbps(16);
+ case ice_pcie_speed_32_0GT:
+ return IF_Gbps(32);
case ice_pcie_speed_unknown:
default:
return 0;
@@ -8011,10 +8189,12 @@ ice_pcie_bandwidth_check(struct ice_softc *sc)
pcie_width = ice_pcie_lnk_width_to_int(hw->bus.width);
/*
- * If 2x100, clamp ports to 1 -- 2nd port is intended for
- * failover.
+ * If 2x100 on E810 or 2x200 on E830, clamp ports to 1 -- 2nd port is
+ * intended for failover.
*/
- if (port_speed == IF_Gbps(100))
+ if ((port_speed >= IF_Gbps(100)) &&
+ ((port_speed == IF_Gbps(100) && ice_is_e810(hw)) ||
+ (port_speed == IF_Gbps(200) && ice_is_e830(hw))))
num_ports = 1;
return !!((num_ports * port_speed) > pcie_speed * pcie_width);
@@ -8028,18 +8208,19 @@ ice_pcie_bandwidth_check(struct ice_softc *sc)
static void
ice_print_bus_link_data(device_t dev, struct ice_hw *hw)
{
- device_printf(dev, "PCI Express Bus: Speed %s %s\n",
- ((hw->bus.speed == ice_pcie_speed_16_0GT) ? "16.0GT/s" :
+ device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
+ ((hw->bus.speed == ice_pcie_speed_32_0GT) ? "32.0GT/s" :
+ (hw->bus.speed == ice_pcie_speed_16_0GT) ? "16.0GT/s" :
(hw->bus.speed == ice_pcie_speed_8_0GT) ? "8.0GT/s" :
(hw->bus.speed == ice_pcie_speed_5_0GT) ? "5.0GT/s" :
(hw->bus.speed == ice_pcie_speed_2_5GT) ? "2.5GT/s" : "Unknown"),
- (hw->bus.width == ice_pcie_lnk_x32) ? "Width x32" :
- (hw->bus.width == ice_pcie_lnk_x16) ? "Width x16" :
- (hw->bus.width == ice_pcie_lnk_x12) ? "Width x12" :
- (hw->bus.width == ice_pcie_lnk_x8) ? "Width x8" :
- (hw->bus.width == ice_pcie_lnk_x4) ? "Width x4" :
- (hw->bus.width == ice_pcie_lnk_x2) ? "Width x2" :
- (hw->bus.width == ice_pcie_lnk_x1) ? "Width x1" : "Width Unknown");
+ (hw->bus.width == ice_pcie_lnk_x32) ? "x32" :
+ (hw->bus.width == ice_pcie_lnk_x16) ? "x16" :
+ (hw->bus.width == ice_pcie_lnk_x12) ? "x12" :
+ (hw->bus.width == ice_pcie_lnk_x8) ? "x8" :
+ (hw->bus.width == ice_pcie_lnk_x4) ? "x4" :
+ (hw->bus.width == ice_pcie_lnk_x2) ? "x2" :
+ (hw->bus.width == ice_pcie_lnk_x1) ? "x1" : "Unknown");
}
/**
@@ -8080,6 +8261,7 @@ ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status)
case ice_pcie_speed_5_0GT:
case ice_pcie_speed_8_0GT:
case ice_pcie_speed_16_0GT:
+ case ice_pcie_speed_32_0GT:
hw->bus.speed = (enum ice_pcie_bus_speed)reg;
break;
default:
@@ -8100,7 +8282,7 @@ int
ice_init_link_events(struct ice_softc *sc)
{
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
u16 wanted_events;
/* Set the bits for the events that we want to be notified by */
@@ -8129,6 +8311,11 @@ ice_init_link_events(struct ice_softc *sc)
return (0);
}
+#ifndef GL_MDET_TX_TCLAN
+/* Temporarily use this redefinition until the definition is fixed */
+#define GL_MDET_TX_TCLAN E800_GL_MDET_TX_TCLAN
+#define PF_MDET_TX_TCLAN E800_PF_MDET_TX_TCLAN
+#endif /* !defined(GL_MDET_TX_TCLAN) */
/**
* ice_handle_mdd_event - Handle possibly malicious events
* @sc: the device softc
@@ -8256,7 +8443,7 @@ ice_start_dcbx_agent(struct ice_softc *sc)
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
bool dcbx_agent_status;
- enum ice_status status;
+ int status;
hw->port_info->qos_cfg.dcbx_status = ice_get_dcbx_status(hw);
@@ -8292,7 +8479,7 @@ ice_init_dcb_setup(struct ice_softc *sc)
struct ice_dcbx_cfg *local_dcbx_cfg;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
u8 pfcmode_ret;
/* Don't do anything if DCB isn't supported */
@@ -8668,7 +8855,7 @@ ice_pf_vsi_cfg_tc(struct ice_softc *sc, u8 tc_map)
struct ice_hw *hw = &sc->hw;
struct ice_vsi_ctx ctx = { 0 };
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
u8 num_tcs = 0;
int i = 0;
@@ -8858,7 +9045,7 @@ ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib)
struct ice_hw *hw = &sc->hw;
struct ice_port_info *pi;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
pi = sc->hw.port_info;
local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
@@ -8896,7 +9083,7 @@ ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib)
/* Query ETS configuration and update SW Tx scheduler info */
status = ice_query_port_ets(pi, &port_ets, sizeof(port_ets), NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"Query Port ETS AQ call failed, err %s aq_err %s\n",
ice_status_str(status),
@@ -8932,7 +9119,7 @@ ice_handle_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *even
device_t dev = sc->dev;
struct ice_hw *hw = &sc->hw;
bool needs_reconfig, mib_is_pending;
- enum ice_status status;
+ int status;
u8 mib_type, bridge_type;
ASSERT_CFG_LOCKED(sc);
@@ -9014,7 +9201,7 @@ ice_send_version(struct ice_softc *sc)
struct ice_driver_ver driver_version = {0};
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
driver_version.major_ver = ice_major_version;
driver_version.minor_ver = ice_minor_version;
@@ -9110,7 +9297,7 @@ ice_cfg_pf_ethertype_filters(struct ice_softc *sc)
struct ice_vsi *vsi = &sc->pf_vsi;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
int err = 0;
INIT_LIST_HEAD(&ethertype_list);
@@ -9168,7 +9355,7 @@ ice_add_rx_lldp_filter(struct ice_softc *sc)
struct ice_vsi *vsi = &sc->pf_vsi;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
int err;
u16 vsi_num;
@@ -9236,7 +9423,7 @@ ice_del_rx_lldp_filter(struct ice_softc *sc)
struct ice_vsi *vsi = &sc->pf_vsi;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
int err;
u16 vsi_num;
@@ -9306,11 +9493,11 @@ ice_init_link_configuration(struct ice_softc *sc)
struct ice_port_info *pi = sc->hw.port_info;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
pi->phy.get_link_info = true;
status = ice_get_link_status(pi, &sc->link_up);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_get_link_status failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -9332,7 +9519,7 @@ ice_init_link_configuration(struct ice_softc *sc)
*/
ice_set_state(&sc->state, ICE_STATE_NO_MEDIA);
status = ice_aq_set_link_restart_an(pi, false, NULL);
- if (status != ICE_SUCCESS && hw->adminq.sq_last_status != ICE_AQ_RC_EMODE)
+ if (status && hw->adminq.sq_last_status != ICE_AQ_RC_EMODE)
device_printf(dev,
"%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -9472,7 +9659,7 @@ ice_apply_saved_fec_req_to_cfg(struct ice_softc *sc,
struct ice_aqc_set_phy_cfg_data *cfg)
{
struct ice_port_info *pi = sc->hw.port_info;
- enum ice_status status;
+ int status;
cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
status = ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req);
@@ -9538,7 +9725,7 @@ ice_apply_saved_phy_cfg(struct ice_softc *sc, u8 settings)
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
u64 phy_low, phy_high;
- enum ice_status status;
+ int status;
enum ice_fec_mode dflt_fec_mode;
u16 dflt_user_speed;
@@ -9549,7 +9736,7 @@ ice_apply_saved_phy_cfg(struct ice_softc *sc, u8 settings)
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
&pcaps, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_aq_get_phy_caps (ACTIVE) failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -9588,7 +9775,7 @@ ice_apply_saved_phy_cfg(struct ice_softc *sc, u8 settings)
cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
/* Don't indicate failure if there's no media in the port.
* The settings have been saved and will apply when media
* is inserted.
@@ -9649,7 +9836,7 @@ ice_set_link_management_mode(struct ice_softc *sc)
struct ice_port_info *pi = sc->hw.port_info;
device_t dev = sc->dev;
struct ice_link_default_override_tlv tlv = { 0 };
- enum ice_status status;
+ int status;
/* Port must be in strict mode if FW version is below a certain
* version. (i.e. Don't set lenient mode features)
@@ -9658,7 +9845,7 @@ ice_set_link_management_mode(struct ice_softc *sc)
return;
status = ice_get_link_default_override(&tlv, pi);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_get_link_default_override failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -9709,7 +9896,7 @@ ice_set_link(struct ice_softc *sc, bool enabled)
{
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
if (ice_driver_is_detaching(sc))
return;
@@ -9721,7 +9908,7 @@ ice_set_link(struct ice_softc *sc, bool enabled)
ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC);
else {
status = ice_aq_set_link_restart_an(hw->port_info, false, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
device_printf(dev,
"%s: Link control not enabled in current device mode\n",
@@ -9755,14 +9942,14 @@ ice_init_saved_phy_cfg(struct ice_softc *sc)
struct ice_aqc_get_phy_caps_data pcaps = { 0 };
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
u64 phy_low, phy_high;
u8 report_mode = ICE_AQC_REPORT_TOPO_CAP_MEDIA;
if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LINK_MGMT_VER_2))
report_mode = ICE_AQC_REPORT_DFLT_CFG;
status = ice_aq_get_phy_caps(pi, false, report_mode, &pcaps, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n",
__func__,
@@ -9850,7 +10037,7 @@ ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
size_t ifd_len = ifd->ifd_len, malloc_len;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
u8 *nvm_buffer;
int err;
@@ -9929,7 +10116,7 @@ ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
/* Convert private status to an error code for proper ioctl response */
switch (status) {
- case ICE_SUCCESS:
+ case 0:
err = (0);
break;
case ICE_ERR_NO_MEMORY:
@@ -9966,7 +10153,7 @@ ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u1
{
struct ice_hw *hw = &sc->hw;
int ret = 0, retries = 0;
- enum ice_status status;
+ int status;
if (length > 16)
return (EINVAL);
@@ -10149,6 +10336,13 @@ ice_alloc_intr_tracking(struct ice_softc *sc)
device_t dev = sc->dev;
int err;
+ if (hw->func_caps.common_cap.num_msix_vectors > ICE_MAX_MSIX_VECTORS) {
+ device_printf(dev, "%s: Invalid num_msix_vectors value (%u) received from FW.\n",
+ __func__,
+ hw->func_caps.common_cap.num_msix_vectors);
+ return (EINVAL);
+ }
+
/* Initialize the interrupt allocation manager */
err = ice_resmgr_init_contig_only(&sc->dev_imgr,
hw->func_caps.common_cap.num_msix_vectors);
@@ -10263,7 +10457,8 @@ ice_apply_supported_speed_filter(u16 report_speeds, u8 mod_type)
if (module == IS_QSFP)
speed_mask = ~((u16)ICE_AQ_LINK_SPEED_10GB - 1);
}
- if (report_speeds & ICE_AQ_LINK_SPEED_100GB)
+ if ((report_speeds & ICE_AQ_LINK_SPEED_100GB) ||
+ (report_speeds & ICE_AQ_LINK_SPEED_200GB))
speed_mask = ~((u16)ICE_AQ_LINK_SPEED_25GB - 1);
return (report_speeds & speed_mask);
}
@@ -10278,11 +10473,11 @@ ice_apply_supported_speed_filter(u16 report_speeds, u8 mod_type)
void
ice_init_health_events(struct ice_softc *sc)
{
- enum ice_status status;
+ int status;
u8 health_mask;
if ((!ice_is_bit_set(sc->feat_cap, ICE_FEATURE_HEALTH_STATUS)) ||
- (!sc->enable_health_events))
+ (!sc->enable_health_events))
return;
health_mask = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK |
@@ -10404,7 +10599,8 @@ ice_print_health_status_string(device_t dev,
device_printf(dev, "Possible Solution: Change the module or use Intel(R) Ethernet Port Configuration Tool to configure the port option to match the current module speed.\n");
break;
case ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT:
- device_printf(dev, "A parallel fault was detected.\n");
+ device_printf(dev, "All configured link modes were attempted but failed to establish link.\n");
+ device_printf(dev, "The device will restart the process to establish link.\n");
device_printf(dev, "Possible Solution: Check link partner connection and configuration.\n");
break;
case ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED:
@@ -10490,7 +10686,7 @@ ice_set_default_local_lldp_mib(struct ice_softc *sc)
struct ice_hw *hw = &sc->hw;
struct ice_port_info *pi;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
/* Set Local MIB can disrupt flow control settings for
* non-DCB-supported devices.
@@ -10567,7 +10763,7 @@ ice_sysctl_dump_dcbx_cfg(SYSCTL_HANDLER_ARGS)
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
struct sbuf *sbuf;
- enum ice_status status;
+ int status;
u8 maxtcs, dcbx_status, is_sw_lldp;
UNREFERENCED_PARAMETER(oidp);
@@ -10602,7 +10798,7 @@ ice_sysctl_dump_dcbx_cfg(SYSCTL_HANDLER_ARGS)
}
status = ice_aq_get_cee_dcb_cfg(hw, &cee_cfg, NULL);
- if (status == ICE_SUCCESS)
+ if (!status)
dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE;
else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
dcbcfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
@@ -10685,7 +10881,7 @@ ice_sysctl_dump_vsi_cfg(SYSCTL_HANDLER_ARGS)
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
struct sbuf *sbuf;
- enum ice_status status;
+ int status;
UNREFERENCED_PARAMETER(oidp);
UNREFERENCED_PARAMETER(arg2);
@@ -10697,7 +10893,7 @@ ice_sysctl_dump_vsi_cfg(SYSCTL_HANDLER_ARGS)
ctx.vsi_num = ice_get_hw_vsi_num(hw, sc->pf_vsi.idx);
status = ice_aq_get_vsi_params(hw, &ctx, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"Get VSI AQ call failed, err %s aq_err %s\n",
ice_status_str(status),
@@ -10744,6 +10940,520 @@ ice_sysctl_dump_vsi_cfg(SYSCTL_HANDLER_ARGS)
}
/**
+ * ice_get_tx_rx_equalizations - read serdes tx rx equalization params
+ * @hw: pointer to the HW struct
+ * @serdes_num: represents the serdes number
+ * @ptr: structure to hold all serdes parameters for the given serdes
+ *
+ * Returns all serdes equalization parameters supported for the given serdes number
+ */
+static int
+ice_get_tx_rx_equalizations(struct ice_hw *hw, u8 serdes_num,
+ struct ice_serdes_equalization *ptr)
+{
+ int err = 0;
+
+ if (!ptr)
+ return (EOPNOTSUPP);
+
+#define ICE_GET_PHY_EQUALIZATION(equ, dir, value) \
+ ice_aq_get_phy_equalization(hw, equ, dir, serdes_num, &(ptr->value))
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_PRE1,
+ ICE_AQC_OP_CODE_RX_EQU, rx_equalization_pre1);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_PRE2,
+ ICE_AQC_OP_CODE_RX_EQU, rx_equalization_pre2);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_POST1,
+ ICE_AQC_OP_CODE_RX_EQU, rx_equalization_post1);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_BFLF,
+ ICE_AQC_OP_CODE_RX_EQU, rx_equalization_bflf);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_BFHF,
+ ICE_AQC_OP_CODE_RX_EQU, rx_equalization_bfhf);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_DRATE,
+ ICE_AQC_OP_CODE_RX_EQU, rx_equalization_drate);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_TX_EQU_PRE1,
+ ICE_AQC_OP_CODE_TX_EQU, tx_equalization_pre1);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_TX_EQU_PRE2,
+ ICE_AQC_OP_CODE_TX_EQU, tx_equalization_pre2);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_TX_EQU_PRE3,
+ ICE_AQC_OP_CODE_TX_EQU, tx_equalization_pre3);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_TX_EQU_ATTEN,
+ ICE_AQC_OP_CODE_TX_EQU, tx_equalization_atten);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_TX_EQU_POST1,
+ ICE_AQC_OP_CODE_TX_EQU, tx_equalization_post1);
+ if (err)
+ return err;
+
+ return (0);
+}
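For reference, a minimal expansion of the ICE_GET_PHY_EQUALIZATION() helper used above (illustrative only, not part of the patch): each invocation is one admin-queue query per equalization parameter, so the first call is equivalent to

	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE1,
	    ICE_AQC_OP_CODE_RX_EQU, serdes_num,
	    &(ptr->rx_equalization_pre1));
	if (err)
		return err;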
+
+/**
+ * ice_fec_counter_read - reads FEC stats from PHY
+ * @hw: pointer to the HW struct
+ * @receiver_id: receiver ID encoding the PCS quad at the register level
+ * @reg_offset: register for the current request
+ * @output: pointer to the caller-supplied buffer to return requested fec stats
+ *
+ * Returns fec stats from phy
+ */
+static int
+ice_fec_counter_read(struct ice_hw *hw, u32 receiver_id, u32 reg_offset,
+ u16 *output)
+{
+ u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI);
+ struct ice_sbq_msg_input msg = {};
+ int err = 0;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_addr_low = ICE_LO_WORD(reg_offset);
+ msg.msg_addr_high = ICE_LO_DWORD(receiver_id);
+ msg.opcode = ice_sbq_msg_rd;
+ msg.dest_dev = rmn_0;
+
+ err = ice_sbq_rw_reg(hw, &msg, flag);
+ if (err) {
+ return err;
+ }
+ *output = ICE_LO_WORD(msg.data);
+ return (0);
+}
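A note on the sideband message built above (explanatory, not part of the patch): the register offset travels in the low address word, the receiver ID in the high address dword, the destination device is rmn_0, and only the low 16 bits of the returned data are kept. A typical call, mirroring ice_get_port_fec_stats() below and assuming hw and a u16 corr_low_val are in scope:

	/* read the port 0 corrected-codeword low counter on PCS quad 0 */
	err = ice_fec_counter_read(hw, ICE_RS_FEC_RECEIVER_ID_PCS0,
	    ICE_RS_FEC_CORR_LOW_REG_PORT0, &corr_low_val);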
+
+/**
+ * ice_get_port_fec_stats - returns FEC correctable/uncorrectable stats for the given PCS quad and PCS port
+ * @hw: pointer to the HW struct
+ * @pcs_quad: pcsquad for input port
+ * @pcs_port: pcsport for input port
+ * @fec_stats: buffer to hold fec statistics for given port
+ *
+ * Returns fec stats
+ */
+static int
+ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+ struct ice_fec_stats_to_sysctl *fec_stats)
+{
+ u32 uncorr_low_reg = 0, uncorr_high_reg = 0;
+ u16 uncorr_low_val = 0, uncorr_high_val = 0;
+ u32 corr_low_reg = 0, corr_high_reg = 0;
+ u16 corr_low_val = 0, corr_high_val = 0;
+ u32 receiver_id = 0;
+ int err;
+
+ switch (pcs_port) {
+ case 0:
+ corr_low_reg = ICE_RS_FEC_CORR_LOW_REG_PORT0;
+ corr_high_reg = ICE_RS_FEC_CORR_HIGH_REG_PORT0;
+ uncorr_low_reg = ICE_RS_FEC_UNCORR_LOW_REG_PORT0;
+ uncorr_high_reg = ICE_RS_FEC_UNCORR_HIGH_REG_PORT0;
+ break;
+ case 1:
+ corr_low_reg = ICE_RS_FEC_CORR_LOW_REG_PORT1;
+ corr_high_reg = ICE_RS_FEC_CORR_HIGH_REG_PORT1;
+ uncorr_low_reg = ICE_RS_FEC_UNCORR_LOW_REG_PORT1;
+ uncorr_high_reg = ICE_RS_FEC_UNCORR_HIGH_REG_PORT1;
+ break;
+ case 2:
+ corr_low_reg = ICE_RS_FEC_CORR_LOW_REG_PORT2;
+ corr_high_reg = ICE_RS_FEC_CORR_HIGH_REG_PORT2;
+ uncorr_low_reg = ICE_RS_FEC_UNCORR_LOW_REG_PORT2;
+ uncorr_high_reg = ICE_RS_FEC_UNCORR_HIGH_REG_PORT2;
+ break;
+ case 3:
+ corr_low_reg = ICE_RS_FEC_CORR_LOW_REG_PORT3;
+ corr_high_reg = ICE_RS_FEC_CORR_HIGH_REG_PORT3;
+ uncorr_low_reg = ICE_RS_FEC_UNCORR_LOW_REG_PORT3;
+ uncorr_high_reg = ICE_RS_FEC_UNCORR_HIGH_REG_PORT3;
+ break;
+ default:
+ return (EINVAL);
+ }
+ if (pcs_quad == 0)
+ receiver_id = ICE_RS_FEC_RECEIVER_ID_PCS0; /* MTIP PCS Quad 0 - FEC */
+ else if (pcs_quad == 1)
+ receiver_id = ICE_RS_FEC_RECEIVER_ID_PCS1; /* MTIP PCS Quad 1 - FEC */
+ else
+ return (EINVAL);
+
+ err = ice_fec_counter_read(hw, receiver_id, corr_low_reg,
+ &corr_low_val);
+ if (err)
+ return err;
+
+ err = ice_fec_counter_read(hw, receiver_id, corr_high_reg,
+ &corr_high_val);
+ if (err)
+ return err;
+
+ err = ice_fec_counter_read(hw, receiver_id, uncorr_low_reg,
+ &uncorr_low_val);
+ if (err)
+ return err;
+
+ err = ice_fec_counter_read(hw, receiver_id, uncorr_high_reg,
+ &uncorr_high_val);
+ if (err)
+ return err;
+
+ fec_stats->fec_corr_cnt_low = corr_low_val;
+ fec_stats->fec_corr_cnt_high = corr_high_val;
+ fec_stats->fec_uncorr_cnt_low = uncorr_low_val;
+ fec_stats->fec_uncorr_cnt_high = uncorr_high_val;
+
+ return (0);
+}
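The low/high members filled in above are raw 16-bit register reads; the full 32-bit codeword counts are reassembled as (high << 16) | low, which is the arithmetic ice_sysctl_dump_phy_stats() uses when printing. A hypothetical helper expressing the same thing:

static inline u32
ice_fec_cnt32(u16 high, u16 low)
{
	return (((u32)high << 16) | low);
}

For example, high = 0x0001 and low = 0x0002 yield 0x10002 (65538 codewords).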
+
+/**
+ * ice_is_serdes_muxed - returns whether serdes is muxed in hardware
+ * @hw: pointer to the HW struct
+ *
+ * Returns true when the serdes is muxed and false when it is not.
+ */
+static bool
+ice_is_serdes_muxed(struct ice_hw *hw)
+{
+ return (rd32(hw, 0xB81E0) & 0x4);
+}
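For clarity (informational, not in the patch): the check reads hardware register 0xB81E0 and tests bit 2; ice_update_port_topology() below uses the result only to select the primary serdes lane for lports 1 and 3. An equivalent explicit form, assuming the driver's BIT() macro:

	bool muxed = (rd32(hw, 0xB81E0) & BIT(2)) != 0;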
+
+/**
+ * ice_get_maxspeed - Get the max speed for given lport
+ * @hw: pointer to the HW struct
+ * @lport: logical port for which max speed is requested
+ * @max_speed: return max speed for input lport
+ */
+static int
+ice_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = {};
+ u8 option_count = ICE_AQC_PORT_OPT_MAX;
+ bool active_valid, pending_valid;
+ u8 active_idx, pending_idx;
+ int status;
+
+ status = ice_aq_get_port_options(hw, options, &option_count,
+ lport, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+
+ if (status || active_idx >= ICE_AQC_PORT_OPT_MAX) {
+ ice_debug(hw, ICE_DBG_PHY, "Port split read err: %d\n", status);
+ return (EIO);
+ }
+
+ if (active_valid) {
+ ice_debug(hw, ICE_DBG_PHY, "Active idx: %d\n", active_idx);
+ } else {
+ ice_debug(hw, ICE_DBG_PHY, "No valid Active option\n");
+ return (EINVAL);
+ }
+ *max_speed = options[active_idx].max_lane_speed;
+
+ return (0);
+}
+
+/**
+ * ice_update_port_topology - update port topology
+ * @lport: logical port for which physical info is requested
+ * @port_topology: buffer to hold port topology
+ * @is_muxed: serdes is muxed in hardware
+ */
+static int
+ice_update_port_topology(u8 lport, struct ice_port_topology *port_topology,
+ bool is_muxed)
+{
+ switch (lport) {
+ case 0:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 0;
+ port_topology->primary_serdes_lane = 0;
+ break;
+ case 1:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 0;
+ if (is_muxed == true)
+ port_topology->primary_serdes_lane = 2;
+ else
+ port_topology->primary_serdes_lane = 4;
+ break;
+ case 2:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 1;
+ port_topology->primary_serdes_lane = 1;
+ break;
+ case 3:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 1;
+ if (is_muxed == true)
+ port_topology->primary_serdes_lane = 3;
+ else
+ port_topology->primary_serdes_lane = 5;
+ break;
+ case 4:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 2;
+ break;
+ case 5:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 6;
+ break;
+ case 6:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 3;
+ break;
+ case 7:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 7;
+ break;
+ default:
+ return (EINVAL);
+ }
+ return 0;
+}
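Summarized for reference (derived from the switch above, illustrative only), the lport-to-topology mapping is:

/*
 *	lport	pcs_quad	pcs_port	primary serdes lane
 *	0	0		0		0
 *	1	1		0		4 (2 when muxed)
 *	2	0		1		1
 *	3	1		1		5 (3 when muxed)
 *	4	0		2		2
 *	5	1		2		6
 *	6	0		3		3
 *	7	1		3		7
 */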
+
+/**
+ * ice_get_port_topology - returns physical topology
+ * @hw: pointer to the HW struct
+ * @lport: logical port for which physical info is requested
+ * @port_topology: buffer to hold port topology
+ *
+ * Returns the physical components associated with the port: PCS quad, PCS port, and serdes number.
+ */
+static int
+ice_get_port_topology(struct ice_hw *hw, u8 lport,
+ struct ice_port_topology *port_topology)
+{
+ struct ice_aqc_get_link_topo cmd;
+ bool is_muxed = false;
+ u8 cage_type = 0;
+ u16 node_handle;
+ u8 ctx = 0;
+ int err;
+
+ if (!hw || !port_topology)
+ return (EINVAL);
+
+ if (hw->device_id >= ICE_DEV_ID_E810_XXV_BACKPLANE) {
+ port_topology->serdes_lane_count = 1;
+ if (lport == 0) {
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 0;
+ port_topology->primary_serdes_lane = 0;
+ } else if (lport == 1) {
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 0;
+ port_topology->primary_serdes_lane = 1;
+ } else {
+ return (EINVAL);
+ }
+ return (0);
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
+ ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
+ cmd.addr.topo_params.node_type_ctx = ctx;
+ cmd.addr.topo_params.index = 0;
+ cmd.addr.topo_params.lport_num = 0;
+ cmd.addr.topo_params.lport_num_valid = 0;
+
+ err = ice_aq_get_netlist_node(hw, &cmd, &cage_type, &node_handle);
+ if (err)
+ return (EINVAL);
+
+ is_muxed = ice_is_serdes_muxed(hw);
+
+ err = ice_update_port_topology(lport, port_topology, is_muxed);
+ if (err)
+ return err;
+
+ if (cage_type == 0x11 || /* SFP */
+ cage_type == 0x12) { /* SFP28 */
+ port_topology->serdes_lane_count = 1;
+ } else if (cage_type == 0x13 || /* QSFP */
+ cage_type == 0x14) { /* QSFP28 */
+ u8 max_speed = 0;
+
+ err = ice_get_maxspeed(hw, port_topology->primary_serdes_lane,
+ &max_speed);
+ if (err)
+ return err;
+
+ if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_M)
+ device_printf(ice_hw_to_dev(hw),
+ "%s: WARNING: reported max_lane_speed is N/A\n",
+ __func__);
+
+ if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
+ port_topology->serdes_lane_count = 4;
+ else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G)
+ port_topology->serdes_lane_count = 2;
+ else
+ port_topology->serdes_lane_count = 1;
+ } else
+ return (EINVAL);
+
+ ice_debug(hw, ICE_DBG_PHY, "%s: Port Topology (lport %d):\n",
+ __func__, lport);
+ ice_debug(hw, ICE_DBG_PHY, "serdes lane count %d\n",
+ port_topology->serdes_lane_count);
+ ice_debug(hw, ICE_DBG_PHY, "pcs quad select %d\n",
+ port_topology->pcs_quad_select);
+ ice_debug(hw, ICE_DBG_PHY, "pcs port %d\n",
+ port_topology->pcs_port);
+ ice_debug(hw, ICE_DBG_PHY, "primary serdes lane %d\n",
+ port_topology->primary_serdes_lane);
+
+ return (0);
+}
+
+/**
+ * ice_sysctl_dump_phy_stats - print PHY stats
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ */
+static int
+ice_sysctl_dump_phy_stats(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_regdump_to_sysctl ice_prv_regs_buf = {};
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_port_topology port_topology;
+ struct ice_hw *hw = &sc->hw;
+ struct ice_port_info *pi;
+ device_t dev = sc->dev;
+ u8 serdes_num = 0;
+ unsigned int i;
+ int err = 0;
+ struct sbuf *sbuf;
+
+ pi = hw->port_info;
+
+ if (!pi) {
+ device_printf(dev, "Port info structure is null\n");
+ return (EINVAL);
+ }
+
+ UNREFERENCED_PARAMETER(oidp);
+ UNREFERENCED_PARAMETER(arg2);
+ UNREFERENCED_PARAMETER(req);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ if (ice_get_port_topology(hw, pi->lport, &port_topology) != 0) {
+ device_printf(dev,
+ "Extended register dump failed for Lport %d\n",
+ pi->lport);
+ return (EIO);
+ }
+
+ if (port_topology.serdes_lane_count > ICE_MAX_SERDES_LANE_COUNT) {
+ device_printf(dev,
+ "Extended register dump failed: Lport %d Serdes count %d\n",
+ pi->lport,
+ port_topology.serdes_lane_count);
+ return (EINVAL);
+ }
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ /* Get serdes equalization parameter for available serdes */
+ for (i = 0; i < port_topology.serdes_lane_count; i++) {
+ serdes_num = port_topology.primary_serdes_lane + i;
+ err = ice_get_tx_rx_equalizations(hw, serdes_num,
+ &(ice_prv_regs_buf.equalization[i]));
+ if (err) {
+ device_printf(dev,
+ "Serdes equalization get failed Lport %d Serdes %d Err %d\n",
+ pi->lport, serdes_num, err);
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+ return (EIO);
+ }
+ sbuf_printf(sbuf, "\nSerdes lane: %d\n", i);
+ sbuf_printf(sbuf, "RX PRE1 = %d\n",
+ ice_prv_regs_buf.equalization[i].rx_equalization_pre1);
+ sbuf_printf(sbuf, "RX PRE2 = %d\n",
+ (s16)ice_prv_regs_buf.equalization[i].rx_equalization_pre2);
+ sbuf_printf(sbuf, "RX POST1 = %d\n",
+ ice_prv_regs_buf.equalization[i].rx_equalization_post1);
+ sbuf_printf(sbuf, "RX BFLF = %d\n",
+ ice_prv_regs_buf.equalization[i].rx_equalization_bflf);
+ sbuf_printf(sbuf, "RX BFHF = %d\n",
+ ice_prv_regs_buf.equalization[i].rx_equalization_bfhf);
+ sbuf_printf(sbuf, "RX DRATE = %d\n",
+ (s16)ice_prv_regs_buf.equalization[i].rx_equalization_drate);
+ sbuf_printf(sbuf, "TX PRE1 = %d\n",
+ ice_prv_regs_buf.equalization[i].tx_equalization_pre1);
+ sbuf_printf(sbuf, "TX PRE2 = %d\n",
+ ice_prv_regs_buf.equalization[i].tx_equalization_pre2);
+ sbuf_printf(sbuf, "TX PRE3 = %d\n",
+ ice_prv_regs_buf.equalization[i].tx_equalization_pre3);
+ sbuf_printf(sbuf, "TX POST1 = %d\n",
+ ice_prv_regs_buf.equalization[i].tx_equalization_post1);
+ sbuf_printf(sbuf, "TX ATTEN = %d\n",
+ ice_prv_regs_buf.equalization[i].tx_equalization_atten);
+ }
+
+ /* Get FEC correctable and uncorrectable counters */
+ err = ice_get_port_fec_stats(hw, port_topology.pcs_quad_select,
+ port_topology.pcs_port,
+ &(ice_prv_regs_buf.stats));
+ if (err) {
+ device_printf(dev, "failed to get FEC stats Lport %d Err %d\n",
+ pi->lport, err);
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+ return (EIO);
+ }
+
+ sbuf_printf(sbuf, "\nRS FEC Corrected codeword count = %d\n",
+ ((u32)ice_prv_regs_buf.stats.fec_corr_cnt_high << 16) |
+ ice_prv_regs_buf.stats.fec_corr_cnt_low);
+ sbuf_printf(sbuf, "RS FEC Uncorrected codeword count = %d\n",
+ ((u32)ice_prv_regs_buf.stats.fec_uncorr_cnt_high << 16) |
+ ice_prv_regs_buf.stats.fec_uncorr_cnt_low);
+
+ /* Finish */
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ return (0);
+}
+
+/**
* ice_ets_str_to_tbl - Parse string into ETS table
* @str: input string to parse
* @table: output eight values used for ETS values
@@ -10826,7 +11536,7 @@ ice_sysctl_query_port_ets(SYSCTL_HANDLER_ARGS)
struct ice_port_info *pi;
device_t dev = sc->dev;
struct sbuf *sbuf;
- enum ice_status status;
+ int status;
int i = 0;
UNREFERENCED_PARAMETER(oidp);
@@ -10838,7 +11548,7 @@ ice_sysctl_query_port_ets(SYSCTL_HANDLER_ARGS)
pi = hw->port_info;
status = ice_aq_query_port_ets(pi, &port_ets, sizeof(port_ets), NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"Query Port ETS AQ call failed, err %s aq_err %s\n",
ice_status_str(status),
@@ -10897,7 +11607,7 @@ ice_sysctl_dscp2tc_map(SYSCTL_HANDLER_ARGS)
struct ice_port_info *pi;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
struct sbuf *sbuf;
int ret;
@@ -10944,7 +11654,8 @@ ice_sysctl_dscp2tc_map(SYSCTL_HANDLER_ARGS)
* needs to be done for ETS settings, so this function can be re-used
* for that purpose.
*/
- ret = ice_ets_str_to_tbl(dscp_user_buf, new_dscp_table_seg, 8);
+ ret = ice_ets_str_to_tbl(dscp_user_buf, new_dscp_table_seg,
+ ICE_MAX_TRAFFIC_CLASS - 1);
if (ret) {
device_printf(dev, "%s: Could not parse input DSCP2TC table: %s\n",
__func__, dscp_user_buf);
@@ -10982,7 +11693,7 @@ ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
struct ice_debug_dump_cmd *ddc;
- enum ice_status status;
+ int status;
int err = 0;
/* Returned arguments from the Admin Queue */
@@ -11083,7 +11794,7 @@ ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
aq_error:
/* Convert private status to an error code for proper ioctl response */
switch (status) {
- case ICE_SUCCESS:
+ case 0:
err = (0);
break;
case ICE_ERR_NO_MEMORY:
@@ -11165,6 +11876,44 @@ ice_sysctl_allow_no_fec_mod_in_auto(SYSCTL_HANDLER_ARGS)
}
/**
+ * ice_print_dual_nac_info - Print NAC status/ID information
+ * @sc: device softc structure
+ *
+ * Prints out information about the NAC mode if the device is capable of
+ * being part of a system with multiple NACs.
+ *
+ * @pre Must be called after ice_init_hw() and ice_init_device_features()
+ * sometime during driver load.
+ */
+void
+ice_print_dual_nac_info(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ bool is_dual_nac, is_primary_nac;
+ u8 cpk_id;
+
+ is_dual_nac = (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
+ is_primary_nac = (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M);
+ cpk_id = hw->dev_caps.nac_topo.id;
+
+ if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_DUAL_NAC)) {
+ log(LOG_INFO, "%s: In %s NAC mode\n",
+ device_get_nameunit(dev),
+ is_dual_nac ? "Dual" : "Single");
+
+ if (is_dual_nac) {
+ ice_set_bit(ICE_FEATURE_DUAL_NAC, sc->feat_en);
+ log(LOG_INFO,
+ "%s: PF is configured in %s mode with IP instance ID %u\n",
+ device_get_nameunit(dev),
+ is_primary_nac ? "primary" : "secondary",
+ cpk_id);
+ }
+ }
+}
+
+/**
* ice_sysctl_temperature - Retrieve NIC temp via AQ command
* @oidp: sysctl oid structure
* @arg1: pointer to private data structure
@@ -11181,7 +11930,7 @@ ice_sysctl_temperature(SYSCTL_HANDLER_ARGS)
struct ice_softc *sc = (struct ice_softc *)arg1;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
UNREFERENCED_PARAMETER(oidp);
UNREFERENCED_PARAMETER(arg2);
@@ -11191,7 +11940,7 @@ ice_sysctl_temperature(SYSCTL_HANDLER_ARGS)
status = ice_aq_get_sensor_reading(hw, ICE_AQC_INT_TEMP_SENSOR,
ICE_AQC_INT_TEMP_FORMAT, &resp, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"Get Sensor Reading AQ call failed, err %s aq_err %s\n",
ice_status_str(status),
diff --git a/sys/dev/ice/ice_lib.h b/sys/dev/ice/ice_lib.h
index 6c010cffc0fd..b524db61403c 100644
--- a/sys/dev/ice/ice_lib.h
+++ b/sys/dev/ice/ice_lib.h
@@ -155,6 +155,7 @@ struct ice_bar_info {
#define ICE_MAX_TSO_HDR_SEGS 3
#define ICE_MSIX_BAR 3
+#define ICE_MAX_MSIX_VECTORS (GLINT_DYN_CTL_MAX_INDEX + 1)
#define ICE_DEFAULT_DESC_COUNT 1024
#define ICE_MAX_DESC_COUNT 8160
@@ -287,6 +288,12 @@ struct ice_bar_info {
#define ICE_APPLY_FEC_FC (ICE_APPLY_FEC | ICE_APPLY_FC)
#define ICE_APPLY_LS_FEC_FC (ICE_APPLY_LS_FEC | ICE_APPLY_FC)
+/*
+ * Mask of valid flags that can be used as an input for the
+ * advertise_speed sysctl.
+ */
+#define ICE_SYSCTL_SPEEDS_VALID_RANGE 0xFFF
+
/**
* @enum ice_dyn_idx_t
* @brief Dynamic Control ITR indexes
@@ -313,6 +320,28 @@ enum ice_dyn_idx_t {
#define ICE_DFLT_TX_ITR 50
#define ICE_DFLT_RX_ITR 50
+/* RS FEC register values */
+#define ICE_RS_FEC_REG_SHIFT 2
+#define ICE_RS_FEC_RECV_ID_SHIFT 4
+#define ICE_RS_FEC_CORR_LOW_REG_PORT0 (0x02 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_CORR_HIGH_REG_PORT0 (0x03 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_UNCORR_LOW_REG_PORT0 (0x04 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_UNCORR_HIGH_REG_PORT0 (0x05 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_CORR_LOW_REG_PORT1 (0x42 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_CORR_HIGH_REG_PORT1 (0x43 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_UNCORR_LOW_REG_PORT1 (0x44 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_UNCORR_HIGH_REG_PORT1 (0x45 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_CORR_LOW_REG_PORT2 (0x4A << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_CORR_HIGH_REG_PORT2 (0x4B << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_UNCORR_LOW_REG_PORT2 (0x4C << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_UNCORR_HIGH_REG_PORT2 (0x4D << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_CORR_LOW_REG_PORT3 (0x52 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_CORR_HIGH_REG_PORT3 (0x53 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_UNCORR_LOW_REG_PORT3 (0x54 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_UNCORR_HIGH_REG_PORT3 (0x55 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_RECEIVER_ID_PCS0 (0x33 << ICE_RS_FEC_RECV_ID_SHIFT)
+#define ICE_RS_FEC_RECEIVER_ID_PCS1 (0x34 << ICE_RS_FEC_RECV_ID_SHIFT)
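A worked expansion of the encoding above (illustrative): with ICE_RS_FEC_REG_SHIFT = 2 and ICE_RS_FEC_RECV_ID_SHIFT = 4,

	ICE_RS_FEC_CORR_LOW_REG_PORT1 = 0x42 << 2 = 0x108
	ICE_RS_FEC_RECEIVER_ID_PCS0   = 0x33 << 4 = 0x330

and these are the reg_offset and receiver_id values passed to ice_fec_counter_read() in ice_lib.c.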
+
/**
* ice_itr_to_reg - Convert an ITR setting into its register equivalent
* @hw: The device HW structure
@@ -374,10 +403,11 @@ enum ice_rx_dtype {
#define ICE_START_LLDP_RETRY_WAIT (2 * hz)
/*
- * Only certain cluster IDs are valid for the FW debug dump functionality,
- * so define a mask of those here.
+ * Which clusters are valid for the FW debug dump functionality depends on
+ * the device, so define per-device masks of those here.
*/
-#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK 0x4001AF
+#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK_E810 0x4001AF
+#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK_E830 0x1AF
struct ice_softc;
@@ -428,6 +458,7 @@ struct tx_stats {
u64 tx_bytes;
u64 tx_packets;
u64 mss_too_small;
+ u64 tso;
u64 cso[ICE_CSO_STAT_TX_COUNT];
};
@@ -485,6 +516,9 @@ struct ice_pf_sw_stats {
/* # of detected MDD events for Tx and Rx */
u32 tx_mdd_count;
u32 rx_mdd_count;
+
+ u64 rx_roc_error; /* port oversize packet stats: error_cnt
+ from GLV_REPC VSI register + RxOversize */
};
/**
@@ -581,6 +615,58 @@ struct ice_debug_dump_cmd {
};
/**
+ * @struct ice_serdes_equalization
+ * @brief serdes equalization info
+ */
+struct ice_serdes_equalization {
+ int rx_equalization_pre1;
+ int rx_equalization_pre2;
+ int rx_equalization_post1;
+ int rx_equalization_bflf;
+ int rx_equalization_bfhf;
+ int rx_equalization_drate;
+ int tx_equalization_pre1;
+ int tx_equalization_pre2;
+ int tx_equalization_pre3;
+ int tx_equalization_atten;
+ int tx_equalization_post1;
+};
+
+/**
+ * @struct ice_fec_stats_to_sysctl
+ * @brief FEC statistics register values for a port
+ */
+struct ice_fec_stats_to_sysctl {
+ u16 fec_corr_cnt_low;
+ u16 fec_corr_cnt_high;
+ u16 fec_uncorr_cnt_low;
+ u16 fec_uncorr_cnt_high;
+};
+
+#define ICE_MAX_SERDES_LANE_COUNT 4
+
+/**
+ * @struct ice_regdump_to_sysctl
+ * @brief PHY stats of port
+ */
+struct ice_regdump_to_sysctl {
+ /* A multilane port can have max 4 serdes */
+ struct ice_serdes_equalization equalization[ICE_MAX_SERDES_LANE_COUNT];
+ struct ice_fec_stats_to_sysctl stats;
+};
+
+/**
+ * @struct ice_port_topology
+ * @brief Port topology for a given lport, i.e. serdes mapping, PCS quad, MAC port, cage
+ */
+struct ice_port_topology {
+ u16 pcs_port;
+ u16 primary_serdes_lane;
+ u16 serdes_lane_count;
+ u16 pcs_quad_select;
+};
+
+/**
* @enum ice_state
* @brief Driver state flags
*
@@ -713,7 +799,7 @@ struct ice_str_buf {
};
struct ice_str_buf _ice_aq_str(enum ice_aq_err aq_err);
-struct ice_str_buf _ice_status_str(enum ice_status status);
+struct ice_str_buf _ice_status_str(int status);
struct ice_str_buf _ice_err_str(int err);
struct ice_str_buf _ice_fltr_flag_str(u16 flag);
struct ice_str_buf _ice_log_sev_str(u8 log_level);
@@ -838,7 +924,7 @@ void ice_deinit_vsi(struct ice_vsi *vsi);
uint64_t ice_aq_speed_to_rate(struct ice_port_info *pi);
int ice_get_phy_type_low(uint64_t phy_type_low);
int ice_get_phy_type_high(uint64_t phy_type_high);
-enum ice_status ice_add_media_types(struct ice_softc *sc, struct ifmedia *media);
+int ice_add_media_types(struct ice_softc *sc, struct ifmedia *media);
void ice_configure_rxq_interrupt(struct ice_hw *hw, u16 rxqid, u16 vector, u8 itr_idx);
void ice_configure_all_rxq_interrupts(struct ice_vsi *vsi);
void ice_configure_txq_interrupt(struct ice_hw *hw, u16 txqid, u16 vector, u8 itr_idx);
@@ -864,15 +950,15 @@ void ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
void ice_add_vsi_sysctls(struct ice_vsi *vsi);
void ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid *parent,
- struct ice_hw_port_stats *stats);
+ struct ice_softc *sc);
void ice_configure_misc_interrupts(struct ice_softc *sc);
int ice_sync_multicast_filters(struct ice_softc *sc);
-enum ice_status ice_add_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid,
+int ice_add_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid,
u16 length);
-enum ice_status ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
-enum ice_status ice_remove_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid,
+int ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
+int ice_remove_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid,
u16 length);
-enum ice_status ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
+int ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
void ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent);
void ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi);
void ice_add_device_tunables(struct ice_softc *sc);
@@ -887,7 +973,7 @@ void ice_add_txq_sysctls(struct ice_tx_queue *txq);
void ice_add_rxq_sysctls(struct ice_rx_queue *rxq);
int ice_config_rss(struct ice_vsi *vsi);
void ice_clean_all_vsi_rss_cfg(struct ice_softc *sc);
-enum ice_status ice_load_pkg_file(struct ice_softc *sc);
+int ice_load_pkg_file(struct ice_softc *sc);
void ice_log_pkg_init(struct ice_softc *sc, enum ice_ddp_state pkg_status);
uint64_t ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter);
void ice_save_pci_info(struct ice_hw *hw, device_t dev);
@@ -924,6 +1010,7 @@ void ice_init_health_events(struct ice_softc *sc);
void ice_cfg_pba_num(struct ice_softc *sc);
int ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd);
u8 ice_dcb_get_tc_map(const struct ice_dcbx_cfg *dcbcfg);
+void ice_print_dual_nac_info(struct ice_softc *sc);
void ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib);
int ice_setup_vsi_mirroring(struct ice_vsi *vsi);
diff --git a/sys/dev/ice/ice_nvm.c b/sys/dev/ice/ice_nvm.c
index 5234cb265f9b..9a41f30386c0 100644
--- a/sys/dev/ice/ice_nvm.c
+++ b/sys/dev/ice/ice_nvm.c
@@ -46,7 +46,7 @@
*
* Read the NVM using the admin queue commands (0x0701)
*/
-enum ice_status
+int
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
void *data, bool last_command, bool read_shadow_ram,
struct ice_sq_cd *cd)
@@ -92,14 +92,14 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
* Returns a status code on failure. Note that the data pointer may be
* partially updated if some reads succeed before a failure.
*/
-enum ice_status
+int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram)
{
- enum ice_status status;
u32 inlen = *length;
u32 bytes_read = 0;
bool last_cmd;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -157,7 +157,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
*
* Update the NVM using the admin queue commands (0x0703)
*/
-enum ice_status
+int
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd)
@@ -198,12 +198,11 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
*
* Erase the NVM sector using the admin queue commands (0x0702)
*/
-enum ice_status
-ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
+int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
struct ice_aqc_nvm *cmd;
- enum ice_status status;
+ int status;
__le16 len;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -240,13 +239,13 @@ ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
*
* Reads single or multiple feature/field ID and data (0x0704)
*/
-enum ice_status
+int
ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data,
u16 buf_size, u16 *elem_count, struct ice_sq_cd *cd)
{
struct ice_aqc_nvm_cfg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -275,7 +274,7 @@ ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data,
*
* Writes single or multiple feature/field ID and data (0x0705)
*/
-enum ice_status
+int
ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
u16 elem_count, struct ice_sq_cd *cd)
{
@@ -301,7 +300,7 @@ ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
* @offset: offset in words from module start
* @words: number of words to access
*/
-static enum ice_status
+static int
ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
{
if ((offset + words) > hw->flash.sr_words) {
@@ -323,7 +322,7 @@ ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -334,11 +333,11 @@ ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
*
* Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
*/
-enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
+int ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
u32 bytes = sizeof(u16);
- enum ice_status status;
__le16 data_local;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -352,7 +351,7 @@ enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
return status;
*data = LE16_TO_CPU(data_local);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -365,11 +364,11 @@ enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
*
* Writes a 16 bit words buffer to the Shadow RAM using the admin command.
*/
-static enum ice_status
+static int
ice_write_sr_aq(struct ice_hw *hw, u32 offset, u16 words, __le16 *data,
bool last_command)
{
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -391,11 +390,11 @@ ice_write_sr_aq(struct ice_hw *hw, u32 offset, u16 words, __le16 *data,
* Reads 16 bit words (data buf) from the Shadow RAM. Ownership of the NVM is
* taken before reading the buffer and later released.
*/
-static enum ice_status
+static int
ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
{
u32 bytes = *words * 2, i;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -421,12 +420,11 @@ ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
*
* This function will request NVM ownership.
*/
-enum ice_status
-ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
+int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
if (hw->flash.blank_nvm_mode)
- return ICE_SUCCESS;
+ return 0;
return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
}
@@ -532,11 +530,11 @@ static u32 ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select ban
* hw->flash.banks data being setup by ice_determine_active_flash_banks()
* during initialization.
*/
-static enum ice_status
+static int
ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
u32 offset, u8 *data, u32 length)
{
- enum ice_status status;
+ int status;
u32 start;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -569,11 +567,11 @@ ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
* Read the specified word from the active NVM module. This includes the CSS
* header at the start of the NVM module.
*/
-static enum ice_status
+static int
ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- enum ice_status status;
__le16 data_local;
+ int status;
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16),
(_FORCE_ u8 *)&data_local, sizeof(u16));
@@ -592,13 +590,13 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
* Read the CSS header length from the NVM CSS header and add the Authentication
* header size, and then convert to words.
*/
-static enum ice_status
+static int
ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
u32 *hdr_len)
{
u16 hdr_len_l, hdr_len_h;
- enum ice_status status;
u32 hdr_len_dword;
+ int status;
status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L,
&hdr_len_l);
@@ -616,7 +614,7 @@ ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
*hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -629,11 +627,11 @@ ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
* Read the specified word from the copy of the Shadow RAM found in the
* specified NVM module.
*/
-static enum ice_status
+static int
ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- enum ice_status status;
u32 hdr_len;
+ int status;
status = ice_get_nvm_css_hdr_len(hw, bank, &hdr_len);
if (status)
@@ -655,11 +653,11 @@ ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u
* Note that unlike the NVM module, the CSS data is stored at the end of the
* module instead of at the beginning.
*/
-static enum ice_status
+static int
ice_read_orom_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- enum ice_status status;
__le16 data_local;
+ int status;
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, offset * sizeof(u16),
(_FORCE_ u8 *)&data_local, sizeof(u16));
@@ -678,11 +676,11 @@ ice_read_orom_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u
*
* Read a word from the specified netlist bank.
*/
-static enum ice_status
+static int
ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- enum ice_status status;
__le16 data_local;
+ int status;
status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16),
(_FORCE_ u8 *)&data_local, sizeof(u16));
@@ -700,9 +698,9 @@ ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
*/
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (!status) {
@@ -724,21 +722,21 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
* Area (PFA) and returns the TLV pointer and length. The caller can
* use these to read the variable length TLV value.
*/
-enum ice_status
+int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
- enum ice_status status;
u16 pfa_len, pfa_ptr;
- u16 next_tlv;
+ u32 next_tlv;
+ int status;
status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
- if (status != ICE_SUCCESS) {
+ if (status) {
ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n");
return status;
}
status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
- if (status != ICE_SUCCESS) {
+ if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
return status;
}
@@ -746,27 +744,32 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
* of TLVs to find the requested one.
*/
next_tlv = pfa_ptr + 1;
- while (next_tlv < pfa_ptr + pfa_len) {
+ while (next_tlv < ((u32)pfa_ptr + pfa_len)) {
u16 tlv_sub_module_type;
u16 tlv_len;
/* Read TLV type */
- status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
- if (status != ICE_SUCCESS) {
+ status = ice_read_sr_word(hw, (u16)next_tlv,
+ &tlv_sub_module_type);
+ if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
break;
}
/* Read TLV length */
- status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
- if (status != ICE_SUCCESS) {
+ status = ice_read_sr_word(hw, (u16)(next_tlv + 1), &tlv_len);
+ if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
break;
}
+ if (tlv_len > pfa_len) {
+ ice_debug(hw, ICE_DBG_INIT, "Invalid TLV length.\n");
+ return ICE_ERR_INVAL_SIZE;
+ }
if (tlv_sub_module_type == module_type) {
if (tlv_len) {
- *module_tlv = next_tlv;
+ *module_tlv = (u16)next_tlv;
*module_tlv_len = tlv_len;
- return ICE_SUCCESS;
+ return 0;
}
return ICE_ERR_INVAL_SIZE;
}
@@ -787,24 +790,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
*
* Reads the part number string from the NVM.
*/
-enum ice_status
-ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
+int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
{
u16 pba_tlv, pba_tlv_len;
- enum ice_status status;
u16 pba_word, pba_size;
+ int status;
u16 i;
status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
ICE_SR_PBA_BLOCK_PTR);
- if (status != ICE_SUCCESS) {
+ if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n");
return status;
}
/* pba_size is the next word */
status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size);
- if (status != ICE_SUCCESS) {
+ if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n");
return status;
}
@@ -825,7 +827,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
for (i = 0; i < pba_size; i++) {
status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
- if (status != ICE_SUCCESS) {
+ if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i);
return status;
}
@@ -847,10 +849,10 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
* Read the security revision out of the CSS header of the active NVM module
* bank.
*/
-static enum ice_status ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev)
+static int ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev)
{
- enum ice_status status;
u16 srev_l, srev_h;
+ int status;
status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_SREV_L, &srev_l);
if (status)
@@ -862,7 +864,7 @@ static enum ice_status ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select
*srev = srev_h << 16 | srev_l;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -874,11 +876,11 @@ static enum ice_status ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select
* Read the NVM EETRACK ID and map version of the main NVM image bank, filling
* in the NVM info structure.
*/
-static enum ice_status
+static int
ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm)
{
u16 eetrack_lo, eetrack_hi, ver;
- enum ice_status status;
+ int status;
status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver);
if (status) {
@@ -906,7 +908,7 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv
if (status)
ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM security revision.\n");
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -918,7 +920,7 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv
* inactive NVM bank. Used to access version data for a pending update that
* has not yet been activated.
*/
-enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
+int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
{
return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm);
}
@@ -932,13 +934,13 @@ enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info
* Read the security revision out of the CSS header of the active OROM module
* bank.
*/
-static enum ice_status ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev)
+static int ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev)
{
u32 orom_size_word = hw->flash.banks.orom_size / 2;
- enum ice_status status;
u16 srev_l, srev_h;
u32 css_start;
u32 hdr_len;
+ int status;
status = ice_get_nvm_css_hdr_len(hw, bank, &hdr_len);
if (status)
@@ -964,7 +966,7 @@ static enum ice_status ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select
*srev = srev_h << 16 | srev_l;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -976,13 +978,14 @@ static enum ice_status ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select
* Searches through the Option ROM flash contents to locate the CIVD data for
* the image.
*/
-static enum ice_status
+static int
ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_orom_civd_info *civd)
{
- u8 *orom_data;
- enum ice_status status;
+ struct ice_orom_civd_info civd_data_section;
+ int status;
u32 offset;
+ u32 tmp;
/* The CIVD section is located in the Option ROM aligned to 512 bytes.
* The first 4 bytes must contain the ASCII characters "$CIV".
@@ -993,38 +996,37 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
* usually somewhere in the middle of the bank. We need to scan the
* Option ROM bank to locate it.
*
- * It's significantly faster to read the entire Option ROM up front
- * using the maximum page size, than to read each possible location
- * with a separate firmware command.
*/
- orom_data = (u8 *)ice_calloc(hw, hw->flash.banks.orom_size, sizeof(u8));
- if (!orom_data)
- return ICE_ERR_NO_MEMORY;
-
- status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0,
- orom_data, hw->flash.banks.orom_size);
- if (status) {
- ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
- goto exit_error;
- }
/* Scan the memory buffer to locate the CIVD data section */
for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) {
- struct ice_orom_civd_info *tmp;
u8 sum = 0, i;
- tmp = (struct ice_orom_civd_info *)&orom_data[offset];
+ status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR,
+ offset, (u8 *)&tmp, sizeof(tmp));
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
+ return status;
+ }
/* Skip forward until we find a matching signature */
- if (memcmp("$CIV", tmp->signature, sizeof(tmp->signature)) != 0)
+ if (memcmp("$CIV", &tmp, sizeof(tmp)) != 0)
continue;
ice_debug(hw, ICE_DBG_NVM, "Found CIVD section at offset %u\n",
offset);
+ status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR,
+ offset, (u8 *)&civd_data_section,
+ sizeof(civd_data_section));
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Unable to read CIVD data\n");
+ goto exit_error;
+ }
+
/* Verify that the simple checksum is zero */
- for (i = 0; i < sizeof(*tmp); i++)
- sum += ((u8 *)tmp)[i];
+ for (i = 0; i < sizeof(civd_data_section); i++)
+ sum += ((u8 *)&civd_data_section)[i];
if (sum) {
ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
@@ -1033,16 +1035,15 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
goto exit_error;
}
- *civd = *tmp;
- ice_free(hw, orom_data);
- return ICE_SUCCESS;
+ *civd = civd_data_section;
+
+ return 0;
}
status = ICE_ERR_NVM;
ice_debug(hw, ICE_DBG_NVM, "Unable to locate CIVD data within the Option ROM\n");
exit_error:
- ice_free(hw, orom_data);
return status;
}
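A note on the rework above (explanatory, not part of the patch): instead of allocating a buffer and reading the entire Option ROM bank up front, the loop now reads a 4-byte probe at each 512-byte-aligned offset to test for the "$CIV" signature and fetches the full CIVD structure only on a match, trading one large flash read for several small ones while avoiding the large temporary allocation.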
@@ -1055,12 +1056,12 @@ exit_error:
* Read Option ROM version and security revision from the Option ROM flash
* section.
*/
-static enum ice_status
+static int
ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom)
{
struct ice_orom_civd_info civd;
- enum ice_status status;
u32 combo_ver;
+ int status;
status = ice_get_orom_civd_data(hw, bank, &civd);
if (status) {
@@ -1080,7 +1081,7 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o
return status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1092,7 +1093,7 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o
* section of flash. Used to access version data for a pending update that has
* not yet been activated.
*/
-enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
+int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
{
return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom);
}
@@ -1107,13 +1108,13 @@ enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_inf
* Topology section to find the Netlist ID block and extract the relevant
* information into the netlist version structure.
*/
-static enum ice_status
+static int
ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_netlist_info *netlist)
{
u16 module_id, length, node_count, i;
- enum ice_status status;
u16 *id_blk;
+ int status;
status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id);
if (status)
@@ -1181,7 +1182,7 @@ exit_error:
*
* Get the netlist version information
*/
-enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *netlist)
+int ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *netlist)
{
return ice_get_netlist_info(hw, ICE_ACTIVE_FLASH_BANK, netlist);
}
@@ -1195,7 +1196,7 @@ enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_i
* extract version data of a pending flash update in order to display the
* version data.
*/
-enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
+int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
{
return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist);
}
@@ -1208,10 +1209,10 @@ enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netli
* the actual size is smaller. Use bisection to determine the accessible size
* of flash memory.
*/
-static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
+static int ice_discover_flash_size(struct ice_hw *hw)
{
u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -1229,7 +1230,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
__func__, offset);
- status = ICE_SUCCESS;
+ status = 0;
max_size = offset;
} else if (!status) {
ice_debug(hw, ICE_DBG_NVM, "%s: New lower bound of %u bytes\n",
@@ -1265,10 +1266,9 @@ err_read_flat_nvm:
* sector size by using the highest bit. The reported pointer value will be in
* bytes, intended for flat NVM reads.
*/
-static enum ice_status
-ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
+static int ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
{
- enum ice_status status;
+ int status;
u16 value;
status = ice_read_sr_word(hw, offset, &value);
@@ -1281,7 +1281,7 @@ ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
else
*pointer = value * 2;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1297,10 +1297,9 @@ ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
* Each area size word is specified in 4KB sector units. This function reports
* the size in bytes, intended for flat NVM reads.
*/
-static enum ice_status
-ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
+static int ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
{
- enum ice_status status;
+ int status;
u16 value;
status = ice_read_sr_word(hw, offset, &value);
@@ -1310,7 +1309,7 @@ ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
/* Area sizes are always specified in 4KB units */
*size = value * 4 * 1024;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1323,12 +1322,11 @@ ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
* structure for later use in order to calculate the correct offset to read
* from the active module.
*/
-static enum ice_status
-ice_determine_active_flash_banks(struct ice_hw *hw)
+static int ice_determine_active_flash_banks(struct ice_hw *hw)
{
struct ice_bank_info *banks = &hw->flash.banks;
- enum ice_status status;
u16 ctrl_word;
+ int status;
status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word);
if (status) {
@@ -1393,7 +1391,7 @@ ice_determine_active_flash_banks(struct ice_hw *hw)
return status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1403,12 +1401,12 @@ ice_determine_active_flash_banks(struct ice_hw *hw)
* This function reads and populates NVM settings such as Shadow RAM size,
* max_timeout, and blank_nvm_mode
*/
-enum ice_status ice_init_nvm(struct ice_hw *hw)
+int ice_init_nvm(struct ice_hw *hw)
{
struct ice_flash_info *flash = &hw->flash;
- enum ice_status status;
u32 fla, gens_stat;
u8 sr_size;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -1459,7 +1457,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
if (status)
ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1473,10 +1471,10 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
* method. The buf read is preceded by the NVM ownership take
* and followed by the release.
*/
-enum ice_status
+int
ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
{
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (!status) {
@@ -1498,7 +1496,7 @@ ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
* reception) by caller. To commit SR to NVM update checksum function
* should be called.
*/
-enum ice_status
+int
__ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data)
{
__le16 data_local = CPU_TO_LE16(*data);
@@ -1521,11 +1519,11 @@ __ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data)
* on ARQ completion event reception by caller. To commit SR to NVM update
* checksum function should be called.
*/
-enum ice_status
+int
__ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data)
{
- enum ice_status status;
__le16 *data_local;
+ int status;
void *vmem;
u32 i;
@@ -1559,12 +1557,12 @@ __ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data)
* is customer specific and unknown. Therefore, this function skips all maximum
* possible size of VPD (1kB).
*/
-static enum ice_status ice_calc_sr_checksum(struct ice_hw *hw, u16 *checksum)
+static int ice_calc_sr_checksum(struct ice_hw *hw, u16 *checksum)
{
- enum ice_status status = ICE_SUCCESS;
u16 pcie_alt_module = 0;
u16 checksum_local = 0;
u16 vpd_module;
+ int status = 0;
void *vmem;
u16 *data;
u16 i;
@@ -1596,7 +1594,7 @@ static enum ice_status ice_calc_sr_checksum(struct ice_hw *hw, u16 *checksum)
u16 words = ICE_SR_SECTOR_SIZE_IN_WORDS;
status = ice_read_sr_buf_aq(hw, i, &words, data);
- if (status != ICE_SUCCESS)
+ if (status)
goto ice_calc_sr_checksum_exit;
}
@@ -1630,11 +1628,11 @@ ice_calc_sr_checksum_exit:
* on ARQ completion event reception by caller.
* This function will commit SR to NVM.
*/
-enum ice_status ice_update_sr_checksum(struct ice_hw *hw)
+int ice_update_sr_checksum(struct ice_hw *hw)
{
- enum ice_status status;
__le16 le_sum;
u16 checksum;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -1655,11 +1653,11 @@ enum ice_status ice_update_sr_checksum(struct ice_hw *hw)
* Performs checksum calculation and validates the Shadow RAM SW checksum.
* If the caller does not need checksum, the value can be NULL.
*/
-enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum)
+int ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum)
{
- enum ice_status status;
u16 checksum_local;
u16 checksum_sr;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -1694,11 +1692,11 @@ enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum)
*
* Verify NVM PFA checksum validity (0x0706)
*/
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
+int ice_nvm_validate_checksum(struct ice_hw *hw)
{
struct ice_aqc_nvm_checksum *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
@@ -1725,11 +1723,11 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
*
* Recalculate NVM PFA checksum (0x0706)
*/
-enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw)
+int ice_nvm_recalculate_checksum(struct ice_hw *hw)
{
struct ice_aqc_nvm_checksum *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
@@ -1767,12 +1765,11 @@ enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw)
* is updated with the flags reported by firmware indicating certain status,
* such as whether EMP reset is enabled.
*/
-enum ice_status
-ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
+int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{
struct ice_aqc_nvm *cmd;
struct ice_aq_desc desc;
- enum ice_status err;
+ int err;
cmd = &desc.params.nvm;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
@@ -1795,11 +1792,11 @@ ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
* Read the Minimum Security Revision TLV and extract the revision values from
* the flash image into a readable structure for processing.
*/
-enum ice_status
+int
ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
{
struct ice_aqc_nvm_minsrev data;
- enum ice_status status;
+ int status;
u16 valid;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -1840,7 +1837,7 @@ ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
minsrevs->orom_valid = true;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1853,11 +1850,11 @@ ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
* fields to determine what update is being requested. If the valid bit is not
* set for that module, then the associated minsrev will be left as is.
*/
-enum ice_status
+int
ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
{
struct ice_aqc_nvm_minsrev data;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -1911,7 +1908,7 @@ exit_release_res:
* Fill in the data section of the NVM access request with a copy of the NVM
* features structure.
*/
-enum ice_status
+int
ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data)
{
@@ -1932,7 +1929,7 @@ ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
data->drv_features.size = sizeof(struct ice_nvm_features);
data->drv_features.features[0] = ICE_NVM_FEATURES_0_REG_ACCESS;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1977,7 +1974,7 @@ u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd)
* register offset. First validates that the module and flags are correct, and
* then ensures that the register offset is one of the accepted registers.
*/
-static enum ice_status
+static int
ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
{
u32 module, flags, offset;
@@ -2005,18 +2002,18 @@ ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
case GLNVM_GENS:
case GLNVM_FLA:
case PF_FUNC_RID:
- return ICE_SUCCESS;
+ return 0;
default:
break;
}
for (i = 0; i <= GL_HIDA_MAX_INDEX; i++)
if (offset == (u32)GL_HIDA(i))
- return ICE_SUCCESS;
+ return 0;
for (i = 0; i <= GL_HIBA_MAX_INDEX; i++)
if (offset == (u32)GL_HIBA(i))
- return ICE_SUCCESS;
+ return 0;
/* All other register offsets are not valid */
return ICE_ERR_OUT_OF_RANGE;
@@ -2030,11 +2027,11 @@ ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
*
* Process an NVM access request to read a register.
*/
-enum ice_status
+int
ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data)
{
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -2052,7 +2049,7 @@ ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
/* Read the register and store the contents in the data field */
data->regval = rd32(hw, cmd->offset);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2063,11 +2060,11 @@ ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
*
* Process an NVM access request to write a register.
*/
-enum ice_status
+int
ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data)
{
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -2077,21 +2074,24 @@ ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
return status;
/* Reject requests to write to read-only registers */
- switch (cmd->offset) {
- case GL_HICR_EN:
- case GLGEN_RSTAT:
- return ICE_ERR_OUT_OF_RANGE;
- default:
- break;
+ if (hw->mac_type == ICE_MAC_E830) {
+ if (cmd->offset == E830_GL_HICR_EN)
+ return ICE_ERR_OUT_OF_RANGE;
+ } else {
+ if (cmd->offset == GL_HICR_EN)
+ return ICE_ERR_OUT_OF_RANGE;
}
+ if (cmd->offset == GLGEN_RSTAT)
+ return ICE_ERR_OUT_OF_RANGE;
+
ice_debug(hw, ICE_DBG_NVM, "NVM access: writing register %08x with value %08x\n",
cmd->offset, data->regval);
/* Write the data field to the specified register */
wr32(hw, cmd->offset, data->regval);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2107,7 +2107,7 @@ ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
* For valid commands, perform the necessary function, copying the data into
* the provided data buffer.
*/
-enum ice_status
+int
ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data)
{
@@ -2146,3 +2146,59 @@ ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
}
}
+/**
+ * ice_nvm_sanitize_operate - Clear the user data
+ * @hw: pointer to the HW struct
+ *
+ * Clear user data from NVM using AQ command (0x070C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ice_nvm_sanitize_operate(struct ice_hw *hw)
+{
+ s32 status;
+ u8 values;
+
+ u8 cmd_flags = ICE_AQ_NVM_SANITIZE_REQ_OPERATE |
+ ICE_AQ_NVM_SANITIZE_OPERATE_SUBJECT_CLEAR;
+
+ status = ice_nvm_sanitize(hw, cmd_flags, &values);
+ if (status)
+ return status;
+ if ((!(values & ICE_AQ_NVM_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
+ !(values & ICE_AQ_NVM_SANITIZE_OPERATE_BMC_CLEAN_DONE)) ||
+ ((values & ICE_AQ_NVM_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
+ !(values & ICE_AQ_NVM_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS)) ||
+ ((values & ICE_AQ_NVM_SANITIZE_OPERATE_BMC_CLEAN_DONE) &&
+ !(values & ICE_AQ_NVM_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS)))
+ return ICE_ERR_AQ_ERROR;
+
+ return ICE_SUCCESS;
+}
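
To spell out the success criterion above (sketch only; the example_* helper is hypothetical): the sanitize result is accepted only if at least one agent reports a completed clean, and every agent that reports DONE also reports SUCCESS. A per-agent helper expressing the same test with the flag names from this hunk:

	/* Sketch: an agent (host or BMC) passes if it did not run a clean,
	 * or if it ran one and reported success. Overall success further
	 * requires at least one of the two DONE bits to be set.
	 */
	static bool example_sanitize_agent_ok(u8 values, u8 done, u8 success)
	{
		return !(values & done) || (values & success);
	}

	/* e.g. example_sanitize_agent_ok(values,
	 *         ICE_AQ_NVM_SANITIZE_OPERATE_HOST_CLEAN_DONE,
	 *         ICE_AQ_NVM_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS);
	 */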
+
+/**
+ * ice_nvm_sanitize - Sanitize NVM
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flags for the AQ command
+ * @values: values returned from the command
+ *
+ * Sanitize NVM using AQ command (0x070C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ice_nvm_sanitize(struct ice_hw *hw, u8 cmd_flags, u8 *values)
+{
+ struct ice_aqc_nvm_sanitization *cmd;
+ struct ice_aq_desc desc;
+ s32 status;
+
+ cmd = &desc.params.sanitization;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_sanitization);
+ cmd->cmd_flags = cmd_flags;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (values)
+ *values = cmd->values;
+
+ return status;
+}
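
A brief, hypothetical usage note (not part of this change): a driver entry point that wants a full user-data clear would call ice_nvm_sanitize_operate() and treat any non-zero return as failure; the surrounding sysctl/ioctl plumbing is assumed here.

	/* Hypothetical caller: request the clear and log failures. */
	static void example_request_nvm_sanitize(struct ice_hw *hw)
	{
		s32 status = ice_nvm_sanitize_operate(hw);

		if (status)
			ice_debug(hw, ICE_DBG_NVM,
				  "NVM sanitize failed, status %d\n",
				  (int)status);
	}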
diff --git a/sys/dev/ice/ice_nvm.h b/sys/dev/ice/ice_nvm.h
index f43381c10ac5..310e17260d12 100644
--- a/sys/dev/ice/ice_nvm.h
+++ b/sys/dev/ice/ice_nvm.h
@@ -96,65 +96,67 @@ union ice_nvm_access_data {
u32 ice_nvm_access_get_module(struct ice_nvm_access_cmd *cmd);
u32 ice_nvm_access_get_flags(struct ice_nvm_access_cmd *cmd);
u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd);
-enum ice_status
+int
ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data);
-enum ice_status
+int
ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data);
-enum ice_status
+int
ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data);
-enum ice_status
+int
ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data);
-enum ice_status
+int
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
-enum ice_status
+int
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
void *data, bool last_command, bool read_shadow_ram,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
-enum ice_status
+int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type);
-enum ice_status
+int
ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs);
-enum ice_status
+int
ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs);
-enum ice_status
+int
ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom);
-enum ice_status
+int
ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm);
-enum ice_status
+int
ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist);
-enum ice_status
+int
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
-enum ice_status ice_init_nvm(struct ice_hw *hw);
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
-enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data);
-enum ice_status
+int ice_init_nvm(struct ice_hw *hw);
+int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
+int ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data);
+int
ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
-enum ice_status
+int
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data,
u16 buf_size, u16 *elem_count, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
u16 elem_count, struct ice_sq_cd *cd);
-enum ice_status ice_update_sr_checksum(struct ice_hw *hw);
-enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum);
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
-enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw);
-enum ice_status
+int ice_update_sr_checksum(struct ice_hw *hw);
+int ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum);
+int ice_nvm_validate_checksum(struct ice_hw *hw);
+int ice_nvm_recalculate_checksum(struct ice_hw *hw);
+int
ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags);
+s32 ice_nvm_sanitize_operate(struct ice_hw *hw);
+s32 ice_nvm_sanitize(struct ice_hw *hw, u8 cmd_flags, u8 *values);
#endif /* _ICE_NVM_H_ */
diff --git a/sys/dev/ice/ice_rdma.c b/sys/dev/ice/ice_rdma.c
index 3fe12cec7adc..79bc675b570d 100644
--- a/sys/dev/ice/ice_rdma.c
+++ b/sys/dev/ice/ice_rdma.c
@@ -168,7 +168,7 @@ ice_rdma_qset_register_request(struct ice_rdma_peer *peer, struct ice_rdma_qset_
struct ice_vsi *vsi = NULL;
struct ice_dcbx_cfg *dcbx_cfg;
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
int count, i, ret = 0;
uint32_t *qset_teid;
uint16_t *qs_handle;
diff --git a/sys/dev/ice/ice_sbq_cmd.h b/sys/dev/ice/ice_sbq_cmd.h
new file mode 100644
index 000000000000..3f8f38c32186
--- /dev/null
+++ b/sys/dev/ice/ice_sbq_cmd.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2024, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ICE_SBQ_CMD_H_
+#define _ICE_SBQ_CMD_H_
+
+/* This header file defines the Sideband Queue commands, error codes and
+ * descriptor format. It is shared between Firmware and Software.
+ */
+
+/* Sideband Queue command structure and opcodes */
+enum ice_sbq_opc {
+ /* Sideband Queue commands */
+ ice_sbq_opc_neigh_dev_req = 0x0C00,
+ ice_sbq_opc_neigh_dev_ev = 0x0C01
+};
+
+/* Sideband Queue descriptor. Indirect command
+ * and non-posted
+ */
+struct ice_sbq_cmd_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 cmd_retval;
+
+ /* Opaque message data */
+ __le32 cookie_high;
+ __le32 cookie_low;
+
+ union {
+ __le16 cmd_len;
+ __le16 cmpl_len;
+ } param0;
+
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_sbq_evt_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 cmd_retval;
+ u8 data[24];
+};
+
+enum ice_sbq_msg_dev {
+ rmn_0 = 0x02,
+ rmn_1 = 0x03,
+ rmn_2 = 0x04,
+ cgu = 0x06
+};
+
+enum ice_sbq_msg_opcode {
+ ice_sbq_msg_rd = 0x00,
+ ice_sbq_msg_wr = 0x01
+};
+
+#define ICE_SBQ_MSG_FLAGS 0x40
+#define ICE_SBQ_MSG_SBE_FBE 0x0F
+
+struct ice_sbq_msg_req {
+ u8 dest_dev;
+ u8 src_dev;
+ u8 opcode;
+ u8 flags;
+ u8 sbe_fbe;
+ u8 func_id;
+ __le16 msg_addr_low;
+ __le32 msg_addr_high;
+ __le32 data;
+};
+
+struct ice_sbq_msg_cmpl {
+ u8 dest_dev;
+ u8 src_dev;
+ u8 opcode;
+ u8 flags;
+ __le32 data;
+};
+
+/* Internal struct */
+struct ice_sbq_msg_input {
+ u8 dest_dev;
+ u8 opcode;
+ u16 msg_addr_low;
+ u32 msg_addr_high;
+ u32 data;
+};
+#endif /* _ICE_SBQ_CMD_H_ */
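
As an orientation aid (sketch only; the example_* helper is hypothetical): ice_sbq_msg_input is the driver-facing shape of a sideband request. A minimal sketch fills one in for a CGU register read using the enums above; the routine that actually posts the message to the sideband queue is not defined in this header and is assumed to live elsewhere in the driver.

	/* Sketch: describe a CGU register read using the types above. */
	static void example_fill_cgu_read(struct ice_sbq_msg_input *msg, u16 addr)
	{
		msg->dest_dev = cgu;
		msg->opcode = ice_sbq_msg_rd;
		msg->msg_addr_low = addr;
		msg->msg_addr_high = 0;
		msg->data = 0;	/* unused for a read */
	}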
diff --git a/sys/dev/ice/ice_sched.c b/sys/dev/ice/ice_sched.c
index cd0d7de62b33..d57733dbfa7e 100644
--- a/sys/dev/ice/ice_sched.c
+++ b/sys/dev/ice/ice_sched.c
@@ -39,7 +39,7 @@
* This function inserts the root node of the scheduling tree topology
* to the SW DB.
*/
-static enum ice_status
+static int
ice_sched_add_root_node(struct ice_port_info *pi,
struct ice_aqc_txsched_elem_data *info)
{
@@ -62,9 +62,9 @@ ice_sched_add_root_node(struct ice_port_info *pi,
return ICE_ERR_NO_MEMORY;
}
- ice_memcpy(&root->info, info, sizeof(*info), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&root->info, info, sizeof(*info), ICE_NONDMA_TO_NONDMA);
pi->root = root;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -83,6 +83,9 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
{
u16 i;
+ if (!start_node)
+ return NULL;
+
 /* The TEID is the same as that of the start_node */
if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
return start_node;
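
A short usage sketch of the new NULL guard (illustrative only): lookups may now be attempted before the port's root node exists, so a caller can pass pi->root unconditionally.

	/* Sketch: TEID lookup from the port root; safe even if pi->root
	 * has not been populated yet thanks to the check added above.
	 */
	struct ice_sched_node *node;

	node = ice_sched_find_node_by_teid(pi->root, teid);
	if (!node)
		ice_debug(pi->hw, ICE_DBG_SCHED, "TEID %u not found\n", teid);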
@@ -123,14 +126,14 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
*
* This function sends a scheduling elements cmd (cmd_opc)
*/
-static enum ice_status
+static int
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
u16 elems_req, void *buf, u16 buf_size,
u16 *elems_resp, struct ice_sq_cd *cd)
{
struct ice_aqc_sched_elem_cmd *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.sched_elem_cmd;
ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
@@ -154,7 +157,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
*
* Query scheduling elements (0x0404)
*/
-enum ice_status
+int
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
@@ -173,7 +176,7 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
*
* This function inserts a scheduler node to the SW DB.
*/
-enum ice_status
+int
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info,
struct ice_sched_node *prealloc_node)
@@ -181,8 +184,8 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data elem;
struct ice_sched_node *parent;
struct ice_sched_node *node;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi)
return ICE_ERR_PARAM;
@@ -226,7 +229,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
node->tx_sched_layer = layer;
parent->children[parent->num_children++] = node;
node->info = elem;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -240,7 +243,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
*
* Delete scheduling elements (0x040F)
*/
-static enum ice_status
+static int
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_delete_elem *buf, u16 buf_size,
u16 *grps_del, struct ice_sq_cd *cd)
@@ -259,14 +262,14 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
*
 * This function removes nodes from HW
*/
-static enum ice_status
+static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
u16 num_nodes, u32 *node_teids)
{
struct ice_aqc_delete_elem *buf;
u16 i, num_groups_removed = 0;
- enum ice_status status;
u16 buf_size;
+ int status;
buf_size = ice_struct_size(buf, teid, num_nodes);
buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size);
@@ -280,7 +283,7 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
&num_groups_removed, NULL);
- if (status != ICE_SUCCESS || num_groups_removed != 1)
+ if (status || num_groups_removed != 1)
ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
hw->adminq.sq_last_status);
@@ -400,14 +403,14 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
*
 * Get default scheduler topology (0x0400)
*/
-static enum ice_status
+static int
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
struct ice_aqc_get_topo_elem *buf, u16 buf_size,
u8 *num_branches, struct ice_sq_cd *cd)
{
struct ice_aqc_get_topo *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.get_topo;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
@@ -430,7 +433,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
*
* Add scheduling elements (0x0401)
*/
-static enum ice_status
+static int
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_add_elem *buf, u16 buf_size,
u16 *grps_added, struct ice_sq_cd *cd)
@@ -451,7 +454,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* Configure scheduling elements (0x0403)
*/
-static enum ice_status
+static int
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_cfgd, struct ice_sq_cd *cd)
@@ -472,7 +475,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
*
* Move scheduling elements (0x0408)
*/
-enum ice_status
+int
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd)
@@ -493,7 +496,7 @@ ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* Suspend scheduling elements (0x0409)
*/
-static enum ice_status
+static int
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
@@ -513,7 +516,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
*
 * Resume scheduling elements (0x040A)
*/
-static enum ice_status
+static int
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
@@ -531,7 +534,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
*
* Query scheduler resource allocation (0x0412)
*/
-static enum ice_status
+static int
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
struct ice_aqc_query_txsched_res_resp *buf,
struct ice_sq_cd *cd)
@@ -551,13 +554,13 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
*
* This function suspends or resumes HW nodes
*/
-static enum ice_status
+static int
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
bool suspend)
{
u16 i, buf_size, num_elem_ret = 0;
- enum ice_status status;
__le32 *buf;
+ int status;
buf_size = sizeof(*buf) * num_nodes;
buf = (__le32 *)ice_malloc(hw, buf_size);
@@ -575,7 +578,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
buf_size, &num_elem_ret,
NULL);
- if (status != ICE_SUCCESS || num_elem_ret != num_nodes)
+ if (status || num_elem_ret != num_nodes)
ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");
ice_free(hw, buf);
@@ -589,7 +592,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
* @tc: TC number
* @new_numqs: number of queues
*/
-static enum ice_status
+static int
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
@@ -605,7 +608,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
if (!vsi_ctx->lan_q_ctx[tc])
return ICE_ERR_NO_MEMORY;
vsi_ctx->num_lan_q_entries[tc] = new_numqs;
- return ICE_SUCCESS;
+ return 0;
}
/* num queues are increased, update the queue contexts */
if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
@@ -621,7 +624,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
vsi_ctx->lan_q_ctx[tc] = q_ctx;
vsi_ctx->num_lan_q_entries[tc] = new_numqs;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -631,7 +634,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
* @tc: TC number
* @new_numqs: number of queues
*/
-static enum ice_status
+static int
ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
@@ -647,7 +650,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
if (!vsi_ctx->rdma_q_ctx[tc])
return ICE_ERR_NO_MEMORY;
vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
- return ICE_SUCCESS;
+ return 0;
}
/* num queues are increased, update the queue contexts */
if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {
@@ -663,7 +666,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
vsi_ctx->rdma_q_ctx[tc] = q_ctx;
vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -678,14 +681,14 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
*
* RL profile function to add, query, or remove profile(s)
*/
-static enum ice_status
+static int
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
struct ice_aqc_rl_profile *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.rl_profile;
@@ -709,7 +712,7 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
*
* Add RL profile (0x0410)
*/
-static enum ice_status
+static int
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
u16 *num_profiles_added, struct ice_sq_cd *cd)
@@ -728,7 +731,7 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
*
* Query RL profile (0x0411)
*/
-enum ice_status
+int
ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -748,7 +751,7 @@ ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
*
* Remove RL profile (0x0415)
*/
-static enum ice_status
+static int
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
u16 *num_profiles_removed, struct ice_sq_cd *cd)
@@ -767,14 +770,14 @@ ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
 * its associated parameters from HW DB, and locally. The caller needs to
* hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_del_rl_profile(struct ice_hw *hw,
struct ice_aqc_rl_profile_info *rl_info)
{
struct ice_aqc_rl_profile_elem *buf;
u16 num_profiles_removed;
- enum ice_status status;
u16 num_profiles = 1;
+ int status;
if (rl_info->prof_id_ref != 0)
return ICE_ERR_IN_USE;
@@ -810,7 +813,7 @@ static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
&hw->rl_prof_list[ln],
ice_aqc_rl_profile_info, list_entry) {
- enum ice_status status;
+ int status;
rl_prof_elem->prof_id_ref = 0;
status = ice_sched_del_rl_profile(hw, rl_prof_elem);
@@ -923,7 +926,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
*
* Configure Node Attributes (0x0417)
*/
-enum ice_status
+int
ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes,
struct ice_aqc_node_attr_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -950,7 +953,7 @@ ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes,
*
* Configure L2 Node CGD (0x0414)
*/
-enum ice_status
+int
ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
struct ice_aqc_cfg_l2_node_cgd_elem *buf,
u16 buf_size, struct ice_sq_cd *cd)
@@ -979,7 +982,7 @@ ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
*
 * This function adds nodes to HW as well as to SW DB for a given layer
*/
-enum ice_status
+int
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer, u16 num_nodes,
u16 *num_nodes_added, u32 *first_node_teid,
@@ -988,8 +991,8 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
struct ice_sched_node *prev, *new_node;
struct ice_aqc_add_elem *buf;
u16 i, num_groups_added = 0;
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u16 buf_size;
u32 teid;
@@ -1019,7 +1022,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
&num_groups_added, NULL);
- if (status != ICE_SUCCESS || num_groups_added != 1) {
+ if (status || num_groups_added != 1) {
ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
hw->adminq.sq_last_status);
ice_free(hw, buf);
@@ -1034,7 +1037,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
else
status = ice_sched_add_node(pi, layer, &buf->generic[i], NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
status);
break;
@@ -1083,7 +1086,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
*
* Add nodes into specific hw layer.
*/
-static enum ice_status
+static int
ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
@@ -1095,7 +1098,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
*num_nodes_added = 0;
if (!num_nodes)
- return ICE_SUCCESS;
+ return 0;
if (!parent || layer < pi->hw->sw_entry_point_layer)
return ICE_ERR_PARAM;
@@ -1127,7 +1130,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
*
 * This function adds nodes to a given layer.
*/
-static enum ice_status
+static int
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
@@ -1136,7 +1139,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
{
u32 *first_teid_ptr = first_node_teid;
u16 new_num_nodes = num_nodes;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u32 temp;
*num_nodes_added = 0;
@@ -1147,7 +1150,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
layer, new_num_nodes,
first_teid_ptr,
&num_added);
- if (status == ICE_SUCCESS)
+ if (!status)
*num_nodes_added += num_added;
/* added more nodes than requested ? */
if (*num_nodes_added > num_nodes) {
@@ -1157,10 +1160,10 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
break;
}
/* break if all the nodes are added successfully */
- if (status == ICE_SUCCESS && (*num_nodes_added == num_nodes))
+ if (!status && (*num_nodes_added == num_nodes))
break;
/* break if the error is not max limit */
- if (status != ICE_SUCCESS && status != ICE_ERR_MAX_LIMIT)
+ if (status && status != ICE_ERR_MAX_LIMIT)
break;
/* Exceeded the max children */
max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
@@ -1255,7 +1258,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
}
if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
u32 teid = LE32_TO_CPU(node->info.node_teid);
- enum ice_status status;
+ int status;
/* remove the default leaf node */
status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
@@ -1301,13 +1304,13 @@ static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
* resources, default topology created by firmware and storing the information
* in SW DB.
*/
-enum ice_status ice_sched_init_port(struct ice_port_info *pi)
+int ice_sched_init_port(struct ice_port_info *pi)
{
struct ice_aqc_get_topo_elem *buf;
- enum ice_status status;
struct ice_hw *hw;
u8 num_branches;
u16 num_elems;
+ int status;
u8 i, j;
if (!pi)
@@ -1430,12 +1433,12 @@ struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid)
*
* query FW for allocated scheduler resources and store in HW struct
*/
-enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
+int ice_sched_query_res_alloc(struct ice_hw *hw)
{
struct ice_aqc_query_txsched_res_resp *buf;
- enum ice_status status = ICE_SUCCESS;
__le16 max_sibl;
- u8 i;
+ int status = 0;
+ u16 i;
if (hw->layer_info)
return status;
@@ -1721,12 +1724,12 @@ ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
static bool ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node)
{
struct ice_aqc_txsched_elem_data buf;
- enum ice_status status;
u32 node_teid;
+ int status;
node_teid = LE32_TO_CPU(node->info.node_teid);
status = ice_sched_query_elem(hw, node_teid, &buf);
- if (status != ICE_SUCCESS)
+ if (status)
return false;
if (memcmp(&buf, &node->info, sizeof(buf))) {
@@ -1777,7 +1780,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
 * This function adds the VSI child nodes to the tree. It gets called for
* LAN and RDMA separately.
*/
-static enum ice_status
+static int
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes,
u8 owner)
@@ -1792,7 +1795,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
vsil = ice_sched_get_vsi_layer(hw);
parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
- enum ice_status status;
+ int status;
if (!parent)
return ICE_ERR_CFG;
@@ -1801,7 +1804,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
num_nodes[i],
&first_node_teid,
&num_added);
- if (status != ICE_SUCCESS || num_nodes[i] != num_added)
+ if (status || num_nodes[i] != num_added)
return ICE_ERR_CFG;
/* The newly added node can be a new parent for the next
@@ -1820,7 +1823,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
}
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1882,7 +1885,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
* This function adds the VSI supported nodes into Tx tree including the
* VSI, its parent and intermediate nodes in below layers
*/
-static enum ice_status
+static int
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
@@ -1896,13 +1899,13 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
vsil = ice_sched_get_vsi_layer(pi->hw);
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
- enum ice_status status;
+ int status;
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
i, num_nodes[i],
&first_node_teid,
&num_added);
- if (status != ICE_SUCCESS || num_nodes[i] != num_added)
+ if (status || num_nodes[i] != num_added)
return ICE_ERR_CFG;
/* The newly added node can be a new parent for the next
@@ -1921,7 +1924,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
parent->vsi_handle = vsi_handle;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1932,7 +1935,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
*
* This function adds a new VSI into scheduler tree
*/
-static enum ice_status
+static int
ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
@@ -1960,7 +1963,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
*
* This function updates the VSI child nodes based on the number of queues
*/
-static enum ice_status
+static int
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, u16 new_numqs, u8 owner)
{
@@ -1968,8 +1971,8 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *vsi_node;
struct ice_sched_node *tc_node;
struct ice_vsi_ctx *vsi_ctx;
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u16 prev_numqs;
tc_node = ice_sched_get_tc_node(pi, tc);
@@ -2019,7 +2022,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
else
vsi_ctx->sched.max_rdmaq[tc] = new_numqs;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2035,14 +2038,14 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
* enabled and VSI is in suspended state then resume the VSI back. If TC is
* disabled then suspend the VSI if it is not already.
*/
-enum ice_status
+int
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable)
{
struct ice_sched_node *vsi_node, *tc_node;
struct ice_vsi_ctx *vsi_ctx;
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = pi->hw;
+ int status = 0;
ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
tc_node = ice_sched_get_tc_node(pi, tc);
@@ -2160,11 +2163,11 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
* This function removes the VSI and its LAN or RDMA children nodes from the
* scheduler tree.
*/
-static enum ice_status
+static int
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_vsi_ctx *vsi_ctx;
+ int status = ICE_ERR_PARAM;
u8 i;
ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
@@ -2217,7 +2220,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
else
vsi_ctx->sched.max_rdmaq[i] = 0;
}
- status = ICE_SUCCESS;
+ status = 0;
exit_sched_rm_vsi_cfg:
ice_release_lock(&pi->sched_lock);
@@ -2232,7 +2235,7 @@ exit_sched_rm_vsi_cfg:
* This function clears the VSI and its LAN children nodes from scheduler tree
* for all TCs.
*/
-enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
+int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}
@@ -2245,7 +2248,7 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
* This function clears the VSI and its RDMA children nodes from scheduler tree
* for all TCs.
*/
-enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
+int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
}
@@ -2285,7 +2288,7 @@ bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node)
* This function retrieves the tree topology from the firmware for a given
* node TEID to the root node.
*/
-enum ice_status
+int
ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -2405,15 +2408,15 @@ ice_sched_update_parent(struct ice_sched_node *new_parent,
*
* This function move the child nodes to a given parent.
*/
-enum ice_status
+int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
struct ice_aqc_move_elem *buf;
struct ice_sched_node *node;
- enum ice_status status = ICE_SUCCESS;
u16 i, grps_movd = 0;
struct ice_hw *hw;
+ int status = 0;
u16 buf_len;
hw = pi->hw;
@@ -2468,16 +2471,16 @@ move_err_exit:
* This function moves a VSI to an aggregator node or its subtree.
* Intermediate nodes may be created if required.
*/
-static enum ice_status
+static int
ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
u8 tc)
{
struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
u32 first_node_teid, vsi_teid;
- enum ice_status status;
u16 num_nodes_added;
u8 aggl, vsil, i;
+ int status;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
@@ -2493,7 +2496,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
/* Is this VSI already part of given aggregator? */
if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
- return ICE_SUCCESS;
+ return 0;
aggl = ice_sched_get_agg_layer(pi->hw);
vsil = ice_sched_get_vsi_layer(pi->hw);
@@ -2518,7 +2521,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
num_nodes[i],
&first_node_teid,
&num_nodes_added);
- if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
+ if (status || num_nodes[i] != num_nodes_added)
return ICE_ERR_CFG;
/* The newly added node can be a new parent for the next
@@ -2550,14 +2553,14 @@ move_nodes:
* aggregator VSI info based on passed in boolean parameter rm_vsi_info. The
* caller holds the scheduler lock.
*/
-static enum ice_status
+static int
ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
struct ice_sched_agg_info *agg_info, u8 tc,
bool rm_vsi_info)
{
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_sched_agg_vsi_info *tmp;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
ice_sched_agg_vsi_info, list_entry) {
@@ -2614,7 +2617,7 @@ ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
* This function removes the aggregator node and intermediate nodes if any
* from the given TC
*/
-static enum ice_status
+static int
ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
struct ice_sched_node *tc_node, *agg_node;
@@ -2648,7 +2651,7 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
}
ice_free_sched_node(pi, agg_node);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2662,11 +2665,11 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
* the aggregator configuration completely for requested TC. The caller needs
* to hold the scheduler lock.
*/
-static enum ice_status
+static int
ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
u8 tc, bool rm_vsi_info)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
/* If nothing to remove - return success */
if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
@@ -2695,7 +2698,7 @@ exit_rm_agg_cfg_tc:
* Save aggregator TC bitmap. This function needs to be called with scheduler
* lock held.
*/
-static enum ice_status
+static int
ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
ice_bitmap_t *tc_bitmap)
{
@@ -2706,7 +2709,7 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
return ICE_ERR_PARAM;
ice_cp_bitmap(agg_info->replay_tc_bitmap, tc_bitmap,
ICE_MAX_TRAFFIC_CLASS);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2718,15 +2721,15 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
* This function creates an aggregator node and intermediate nodes if required
* for the given TC
*/
-static enum ice_status
+static int
ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
struct ice_sched_node *parent, *agg_node, *tc_node;
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = pi->hw;
u32 first_node_teid;
u16 num_nodes_added;
+ int status = 0;
u8 i, aggl;
tc_node = ice_sched_get_tc_node(pi, tc);
@@ -2772,7 +2775,7 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
num_nodes[i],
&first_node_teid,
&num_nodes_added);
- if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
+ if (status || num_nodes[i] != num_nodes_added)
return ICE_ERR_CFG;
/* The newly added node can be a new parent for the next
@@ -2789,7 +2792,7 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
}
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2808,13 +2811,13 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
* resources and remove aggregator ID.
* This function needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
enum ice_agg_type agg_type, ice_bitmap_t *tc_bitmap)
{
struct ice_sched_agg_info *agg_info;
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u8 tc;
agg_info = ice_get_agg_info(hw, agg_id);
@@ -2870,12 +2873,12 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
*
* This function configures aggregator node(s).
*/
-enum ice_status
+int
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
u8 tc_bitmap)
{
ice_bitmap_t bitmap = tc_bitmap;
- enum ice_status status;
+ int status;
ice_acquire_lock(&pi->sched_lock);
status = ice_sched_cfg_agg(pi, agg_id, agg_type,
@@ -2943,7 +2946,7 @@ ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
 * Save VSI to aggregator TC bitmap. This function needs to be called with the scheduler
* lock held.
*/
-static enum ice_status
+static int
ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
ice_bitmap_t *tc_bitmap)
{
@@ -2959,7 +2962,7 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
return ICE_ERR_PARAM;
ice_cp_bitmap(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
ICE_MAX_TRAFFIC_CLASS);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2973,14 +2976,14 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
 * already associated with the aggregator node, then no operation is performed on
* the tree. This function needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
u16 vsi_handle, ice_bitmap_t *tc_bitmap)
{
struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
struct ice_sched_agg_info *agg_info, *old_agg_info;
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u8 tc;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
@@ -3071,14 +3074,14 @@ static void ice_sched_rm_unused_rl_prof(struct ice_hw *hw)
* returns success or error on config sched element failure. The caller
* needs to hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_aqc_txsched_elem_data *info)
{
struct ice_aqc_txsched_elem_data buf;
- enum ice_status status;
u16 elem_cfgd = 0;
u16 num_elems = 1;
+ int status;
buf = *info;
/* For TC nodes, CIR config is not supported */
@@ -3116,13 +3119,13 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
*
* This function configures node element's BW allocation.
*/
-static enum ice_status
+static int
ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
enum ice_rl_type rl_type, u16 bw_alloc)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
- enum ice_status status;
+ int status;
buf = node->info;
data = &buf.data;
@@ -3150,12 +3153,12 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
*
* Move or associate VSI to a new or default aggregator node.
*/
-enum ice_status
+int
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
u8 tc_bitmap)
{
ice_bitmap_t bitmap = tc_bitmap;
- enum ice_status status;
+ int status;
ice_acquire_lock(&pi->sched_lock);
status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
@@ -3175,10 +3178,10 @@ ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
* This function removes aggregator reference to VSI and delete aggregator ID
* info. It removes the aggregator configuration completely.
*/
-enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id)
+int ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id)
{
struct ice_sched_agg_info *agg_info;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 tc;
ice_acquire_lock(&pi->sched_lock);
@@ -3257,7 +3260,7 @@ ice_set_clear_eir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc)
*
* Save BW alloc information of VSI type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u16 bw_alloc)
{
@@ -3280,7 +3283,7 @@ ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
default:
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3350,7 +3353,7 @@ static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
*
* Save BW information of VSI type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
@@ -3374,7 +3377,7 @@ ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
default:
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3402,7 +3405,7 @@ static void ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio)
*
* Save priority information of VSI type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 prio)
{
@@ -3416,7 +3419,7 @@ ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
if (tc >= ICE_MAX_TRAFFIC_CLASS)
return ICE_ERR_PARAM;
ice_set_clear_prio(&vsi_ctx->sched.bw_t_info[tc], prio);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3429,7 +3432,7 @@ ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
*
* Save BW alloc information of AGG type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc,
enum ice_rl_type rl_type, u16 bw_alloc)
{
@@ -3450,7 +3453,7 @@ ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc,
default:
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3463,7 +3466,7 @@ ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc,
*
* Save BW information of AGG type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
@@ -3487,7 +3490,7 @@ ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
default:
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3501,11 +3504,11 @@ ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
* This function configures BW limit of VSI scheduling node based on TC
* information.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status;
+ int status;
status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
ICE_AGG_TYPE_VSI,
@@ -3528,11 +3531,11 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
* This function configures default BW limit of VSI scheduling node based on TC
* information.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type)
{
- enum ice_status status;
+ int status;
status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
ICE_AGG_TYPE_VSI,
@@ -3558,11 +3561,11 @@ ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
* This function applies BW limit to aggregator scheduling node based on TC
* information.
*/
-enum ice_status
+int
ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status;
+ int status;
status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
tc, rl_type, bw);
@@ -3584,11 +3587,11 @@ ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
* This function applies default BW limit to aggregator scheduling node based
* on TC information.
*/
-enum ice_status
+int
ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
enum ice_rl_type rl_type)
{
- enum ice_status status;
+ int status;
status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
tc, rl_type,
@@ -3613,7 +3616,7 @@ ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
* Configure shared rate limiter(SRL) of all VSI type nodes across all traffic
* classes for VSI matching handle.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw,
u32 max_bw, u32 shared_bw)
{
@@ -3629,7 +3632,7 @@ ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw,
* This function removes the shared rate limiter(SRL) of all VSI type nodes
* across all traffic classes for VSI matching handle.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle,
@@ -3649,7 +3652,7 @@ ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle)
* This function configures the shared rate limiter(SRL) of all aggregator type
* nodes across all traffic classes for aggregator matching agg_id.
*/
-enum ice_status
+int
ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw,
u32 max_bw, u32 shared_bw)
{
@@ -3665,7 +3668,7 @@ ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw,
* This function removes the shared rate limiter(SRL) of all aggregator type
* nodes across all traffic classes for aggregator matching agg_id.
*/
-enum ice_status
+int
ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id)
{
return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW,
@@ -3685,7 +3688,7 @@ ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id)
* This function configures the shared rate limiter(SRL) of all aggregator type
* nodes across all traffic classes for aggregator matching agg_id.
*/
-enum ice_status
+int
ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
u32 min_bw, u32 max_bw, u32 shared_bw)
{
@@ -3702,7 +3705,7 @@ ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
 * This function removes the shared rate limiter(SRL) of all aggregator type
* nodes across all traffic classes for aggregator matching agg_id.
*/
-enum ice_status
+int
ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc,
@@ -3721,11 +3724,11 @@ ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc)
* This function configures the queue node priority (Sibling Priority) of the
* passed in VSI's queue(s) for a given traffic class (TC).
*/
-enum ice_status
+int
ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
u8 *q_prio)
{
- enum ice_status status = ICE_ERR_PARAM;
+ int status = ICE_ERR_PARAM;
u16 i;
ice_acquire_lock(&pi->sched_lock);
@@ -3761,17 +3764,17 @@ ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
* This function configures the node priority (Sibling Priority) of the
 * passed in VSIs for a given traffic class (TC) of an Aggregator ID.
*/
-enum ice_status
+int
ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id,
u16 num_vsis, u16 *vsi_handle_arr,
u8 *node_prio, u8 tc)
{
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_sched_node *tc_node, *agg_node;
- enum ice_status status = ICE_ERR_PARAM;
struct ice_sched_agg_info *agg_info;
bool agg_id_present = false;
struct ice_hw *hw = pi->hw;
+ int status = ICE_ERR_PARAM;
u16 i;
ice_acquire_lock(&pi->sched_lock);
@@ -3848,11 +3851,11 @@ exit_agg_priority_per_tc:
* This function configures the BW allocation of the passed in VSI's
* node(s) for enabled traffic class.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
enum ice_rl_type rl_type, u8 *bw_alloc)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 tc;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
@@ -3900,14 +3903,14 @@ ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
* This function configures the BW allocation of passed in aggregator for
* enabled traffic class(s).
*/
-enum ice_status
+int
ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap,
enum ice_rl_type rl_type, u8 *bw_alloc)
{
struct ice_sched_agg_info *agg_info;
bool agg_id_present = false;
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u8 tc;
ice_acquire_lock(&pi->sched_lock);
@@ -4002,12 +4005,12 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
*
* This function converts the BW to profile structure format.
*/
-static enum ice_status
+static int
ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
struct ice_aqc_rl_profile_elem *profile)
{
- enum ice_status status = ICE_ERR_PARAM;
s64 bytes_per_sec, ts_rate, mv_tmp;
+ int status = ICE_ERR_PARAM;
bool found = false;
s32 encode = 0;
s64 mv = 0;
@@ -4052,7 +4055,7 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
profile->rl_multiply = CPU_TO_LE16(mv);
profile->wake_up_calc = CPU_TO_LE16(wm);
profile->rl_encode = CPU_TO_LE16(encode);
- status = ICE_SUCCESS;
+ status = 0;
} else {
status = ICE_ERR_DOES_NOT_EXIST;
}
@@ -4080,8 +4083,8 @@ ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
struct ice_aqc_rl_profile_info *rl_prof_elem;
u16 profiles_added = 0, num_profiles = 1;
struct ice_aqc_rl_profile_elem *buf;
- enum ice_status status;
u8 profile_type;
+ int status;
if (!hw || layer_num >= hw->num_tx_sched_layers)
return NULL;
@@ -4114,7 +4117,7 @@ ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
return NULL;
status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
- if (status != ICE_SUCCESS)
+ if (status)
goto exit_add_rl_prof;
rl_prof_elem->bw = bw;
@@ -4149,7 +4152,7 @@ exit_add_rl_prof:
*
* This function configures node element's BW limit.
*/
-static enum ice_status
+static int
ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
enum ice_rl_type rl_type, u16 rl_prof_id)
{
@@ -4293,12 +4296,12 @@ ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
* 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
* scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
u16 profile_id)
{
struct ice_aqc_rl_profile_info *rl_prof_elem;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
if (!hw || layer_num >= hw->num_tx_sched_layers)
return ICE_ERR_PARAM;
@@ -4319,7 +4322,7 @@ ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
break;
}
if (status == ICE_ERR_IN_USE)
- status = ICE_SUCCESS;
+ status = 0;
return status;
}
@@ -4334,15 +4337,15 @@ ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
* type CIR, EIR, or SRL to default. This function needs to be called
* with the scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
struct ice_sched_node *node,
enum ice_rl_type rl_type, u8 layer_num)
{
- enum ice_status status;
struct ice_hw *hw;
u8 profile_type;
u16 rl_prof_id;
+ int status;
u16 old_id;
hw = pi->hw;
@@ -4373,7 +4376,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
/* Remove stale RL profile ID */
if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
old_id == ICE_SCHED_INVAL_PROF_ID)
- return ICE_SUCCESS;
+ return 0;
return ice_sched_rm_rl_profile(hw, layer_num, profile_type, old_id);
}
@@ -4390,14 +4393,14 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
* node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
* ID from local database. The caller needs to hold scheduler lock.
*/
-enum ice_status
+int
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
struct ice_aqc_rl_profile_info *rl_prof_info;
- enum ice_status status = ICE_ERR_PARAM;
struct ice_hw *hw = pi->hw;
u16 old_id, rl_prof_id;
+ int status = ICE_ERR_PARAM;
rl_prof_info = ice_sched_add_rl_profile(hw, rl_type, bw, layer_num);
if (!rl_prof_info)
@@ -4419,7 +4422,7 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
/* Check for old ID removal */
if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
- return ICE_SUCCESS;
+ return 0;
return ice_sched_rm_rl_profile(hw, layer_num,
rl_prof_info->profile.flags &
@@ -4434,7 +4437,7 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
*
 * This function sets the priority of a node among its siblings.
*/
-enum ice_status
+int
ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
u16 priority)
{
@@ -4459,7 +4462,7 @@ ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *nod
*
 * This function sets the weight of the node for the WFQ algorithm.
*/
-enum ice_status
+int
ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight)
{
struct ice_aqc_txsched_elem_data buf;
@@ -4491,7 +4494,7 @@ ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node,
* NOTE: Caller provides the correct SRL node in case of shared profile
* settings.
*/
-enum ice_status
+int
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw)
{
@@ -4524,7 +4527,7 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
* type CIR, EIR, or SRL to default. This function needs to be called
* with the scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
struct ice_sched_node *node,
enum ice_rl_type rl_type)
@@ -4542,7 +4545,7 @@ ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
* behalf of the requested node (first argument). This function needs to be
* called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
{
/* SRL profiles are not available on all layers. Check if the
@@ -4555,7 +4558,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
node->num_children == 1) ||
((sel_layer == node->tx_sched_layer - 1) &&
(node->parent && node->parent->num_children == 1)))
- return ICE_SUCCESS;
+ return 0;
return ICE_ERR_CFG;
}
@@ -4568,7 +4571,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
*
* Save BW information of queue type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
{
switch (rl_type) {
@@ -4584,7 +4587,7 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
default:
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -4598,13 +4601,13 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
*
* This function sets BW limit of queue scheduling node.
*/
-static enum ice_status
+static int
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_sched_node *node;
struct ice_q_ctx *q_ctx;
+ int status = ICE_ERR_PARAM;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
return ICE_ERR_PARAM;
@@ -4661,7 +4664,7 @@ exit_q_bw_lmt:
*
* This function configures BW limit of queue scheduling node.
*/
-enum ice_status
+int
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
@@ -4679,7 +4682,7 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
*
* This function configures BW default limit of queue scheduling node.
*/
-enum ice_status
+int
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type)
{
@@ -4697,7 +4700,7 @@ ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
* This function saves the modified values of bandwidth settings for later
* replay purpose (restore) after reset.
*/
-static enum ice_status
+static int
ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
@@ -4716,9 +4719,12 @@ ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc,
default:
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
+#define ICE_SCHED_GENERIC_STRICT_MODE BIT(4)
+#define ICE_SCHED_GENERIC_PRIO_S 1
+
/**
* ice_sched_set_tc_node_bw_lmt - sets TC node BW limit
* @pi: port information structure
@@ -4728,12 +4734,14 @@ ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc,
*
* This function configures bandwidth limit of TC node.
*/
-static enum ice_status
+static int
ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status = ICE_ERR_PARAM;
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
struct ice_sched_node *tc_node;
+ int status = ICE_ERR_PARAM;
if (tc >= ICE_MAX_TRAFFIC_CLASS)
return status;
@@ -4741,6 +4749,17 @@ ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
goto exit_set_tc_node_bw;
+
+ /* update node's generic field */
+ buf = tc_node->info;
+ data = &buf.data;
+ data->valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
+ data->generic = (tc << ICE_SCHED_GENERIC_PRIO_S) |
+ ICE_SCHED_GENERIC_STRICT_MODE;
+ status = ice_sched_update_elem(pi->hw, tc_node, &buf);
+ if (status)
+ goto exit_set_tc_node_bw;
+
if (bw == ICE_SCHED_DFLT_BW)
status = ice_sched_set_node_bw_dflt_lmt(pi, tc_node, rl_type);
else
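An illustrative sketch (not part of the patch) of the generic-field encoding introduced above for TC nodes: the TC index is placed in the sibling-priority bits and the strict-priority bit is set, so for tc = 3 the byte becomes (3 << 1) | BIT(4) = 0x16.

/* Illustrative only -- mirrors the encoding used in
 * ice_sched_set_tc_node_bw_lmt() above; not part of the patch. */
static inline u8
example_tc_node_generic(u8 tc)
{
	/* tc = 3 -> (3 << 1) | BIT(4) = 0x06 | 0x10 = 0x16 */
	return (u8)((tc << ICE_SCHED_GENERIC_PRIO_S) |
		    ICE_SCHED_GENERIC_STRICT_MODE);
}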
@@ -4763,7 +4782,7 @@ exit_set_tc_node_bw:
* This function configures BW limit of TC node.
* Note: The minimum guaranteed reservation is done via DCBX.
*/
-enum ice_status
+int
ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
@@ -4778,7 +4797,7 @@ ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
*
* This function configures BW default limit of TC node.
*/
-enum ice_status
+int
ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type)
{
@@ -4794,7 +4813,7 @@ ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc,
*
* Save BW alloc information of VSI type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u16 bw_alloc)
{
@@ -4812,7 +4831,7 @@ ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
default:
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -4826,12 +4845,12 @@ ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
* changed settings for replay purpose, and return success if it succeeds
* in modifying bandwidth alloc setting.
*/
-static enum ice_status
+static int
ice_sched_set_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u8 bw_alloc)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_sched_node *tc_node;
+ int status = ICE_ERR_PARAM;
if (tc >= ICE_MAX_TRAFFIC_CLASS)
return status;
@@ -4860,7 +4879,7 @@ exit_set_tc_node_bw_alloc:
* This function configures BW limit of TC node.
* Note: The minimum guaranteed reservation is done via DCBX.
*/
-enum ice_status
+int
ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u8 bw_alloc)
{
@@ -4876,11 +4895,11 @@ ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
* and sets node's BW limit to default. This function needs to be
* called with the scheduler lock held.
*/
-enum ice_status
+int
ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle)
{
struct ice_vsi_ctx *vsi_ctx;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 tc;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
@@ -4992,13 +5011,13 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
* This function sets BW limit of VSI or Aggregator scheduling node
* based on TC information from passed in argument BW.
*/
-enum ice_status
+int
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
enum ice_agg_type agg_type, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_sched_node *node;
+ int status = ICE_ERR_PARAM;
if (!pi)
return status;
@@ -5031,7 +5050,7 @@ exit_set_node_bw_lmt_per_tc:
 * different than the VSI node layer on all TC(s). This function needs to be
* called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
{
u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
@@ -5044,7 +5063,7 @@ ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
ice_for_each_traffic_class(tc) {
struct ice_sched_node *tc_node, *vsi_node;
enum ice_rl_type rl_type = ICE_SHARED_BW;
- enum ice_status status;
+ int status;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
@@ -5070,7 +5089,7 @@ ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
if (status)
return status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -5086,12 +5105,12 @@ ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
* class, and saves those value for later use for replaying purposes. The
* caller holds the scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, struct ice_sched_node *srl_node,
enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status;
+ int status;
if (bw == ICE_SCHED_DFLT_BW) {
status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type);
@@ -5118,13 +5137,13 @@ ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info *pi, u16 vsi_handle,
* is passed, it removes the corresponding bw from the node. The caller
* holds scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw)
{
struct ice_sched_node *tc_node, *vsi_node, *cfg_node;
- enum ice_status status;
u8 layer_num;
+ int status;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
@@ -5172,11 +5191,11 @@ ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info *pi, u16 vsi_handle,
* classes for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW is
* passed, it removes those value(s) from the node.
*/
-enum ice_status
+int
ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
u32 min_bw, u32 max_bw, u32 shared_bw)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 tc;
if (!pi)
@@ -5222,13 +5241,13 @@ exit_set_vsi_bw_shared_lmt:
 * different than the AGG node layer on all TC(s). This function needs to be
* called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id)
{
u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
struct ice_sched_agg_info *agg_info;
bool agg_id_present = false;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 tc;
LIST_FOR_EACH_ENTRY(agg_info, &pi->hw->agg_list, ice_sched_agg_info,
@@ -5277,13 +5296,13 @@ ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id)
*
* This function validates aggregator id. Caller holds the scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id)
{
struct ice_sched_agg_info *agg_info;
struct ice_sched_agg_info *tmp;
bool agg_id_present = false;
- enum ice_status status;
+ int status;
status = ice_sched_validate_agg_srl_node(pi, agg_id);
if (status)
@@ -5299,7 +5318,7 @@ ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id)
if (!agg_id_present)
return ICE_ERR_PARAM;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -5315,12 +5334,12 @@ ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id)
* requested traffic class, and saves those value for later use for
* replaying purposes. The caller holds the scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_set_save_agg_srl_node_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
struct ice_sched_node *srl_node,
enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status;
+ int status;
if (bw == ICE_SCHED_DFLT_BW) {
status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type);
@@ -5347,13 +5366,13 @@ ice_sched_set_save_agg_srl_node_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
* value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. Caller
* holds the scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_set_agg_node_srl_per_tc(struct ice_port_info *pi, u32 agg_id,
u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw)
{
struct ice_sched_node *tc_node, *agg_node, *cfg_node;
enum ice_rl_type rl_type = ICE_SHARED_BW;
- enum ice_status status = ICE_ERR_CFG;
+ int status = ICE_ERR_CFG;
u8 layer_num;
tc_node = ice_sched_get_tc_node(pi, tc);
@@ -5402,11 +5421,11 @@ ice_sched_set_agg_node_srl_per_tc(struct ice_port_info *pi, u32 agg_id,
* BW value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the
* node(s).
*/
-enum ice_status
+int
ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id,
u32 min_bw, u32 max_bw, u32 shared_bw)
{
- enum ice_status status;
+ int status;
u8 tc;
if (!pi)
@@ -5454,12 +5473,12 @@ exit_agg_bw_shared_lmt:
* node for a given traffic class for aggregator matching agg_id. When BW
* value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node.
*/
-enum ice_status
+int
ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id,
u8 tc, u32 min_bw, u32 max_bw,
u32 shared_bw)
{
- enum ice_status status;
+ int status;
if (!pi)
return ICE_ERR_PARAM;
@@ -5485,14 +5504,14 @@ exit_agg_bw_shared_lmt_per_tc:
* This function configures node element's sibling priority only. This
* function needs to be called with scheduler lock held.
*/
-enum ice_status
+int
ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi,
struct ice_sched_node *node, u8 priority)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
+ int status;
if (!hw)
return ICE_ERR_PARAM;
@@ -5518,7 +5537,7 @@ ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi,
* burst size value is used for future rate limit calls. It doesn't change the
* existing or previously created RL profiles.
*/
-enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
+int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
{
u16 burst_size_to_prog;
@@ -5547,7 +5566,7 @@ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
burst_size_to_prog |= (u16)(bytes / 1024);
}
hw->max_burst_size = burst_size_to_prog;
- return ICE_SUCCESS;
+ return 0;
}
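For reference, a hedged sketch (invented values, not part of the patch) of the kilobyte-granularity branch shown above: a request of 10240 bytes programs 10240 / 1024 = 10, and the result is cached in hw->max_burst_size for later rate-limit profile calls.

/* Sketch only, not part of the patch: the KB-granularity branch above
 * programs the burst size in 1 KB units. */
static inline u16
example_burst_kb_units(u32 bytes)
{
	return (u16)(bytes / 1024);	/* e.g. 10240 bytes -> 10 (KB) */
}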
/**
@@ -5559,13 +5578,13 @@ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
* This function configures node element's priority value. It
* needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
u8 priority)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
- enum ice_status status;
+ int status;
buf = node->info;
data = &buf.data;
@@ -5586,18 +5605,18 @@ ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
* This function restores node's BW from bw_t_info. The caller needs
* to hold the scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_bw_type_info *bw_t_info)
{
struct ice_port_info *pi = hw->port_info;
- enum ice_status status = ICE_ERR_PARAM;
+ int status = ICE_ERR_PARAM;
u16 bw_alloc;
if (!node)
return status;
if (!ice_is_any_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
- return ICE_SUCCESS;
+ return 0;
if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_PRIO)) {
status = ice_sched_replay_node_prio(hw, node,
bw_t_info->generic);
@@ -5644,11 +5663,11 @@ ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
* This function re-creates aggregator type nodes. The caller needs to hold
* the scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_replay_agg_bw(struct ice_hw *hw, struct ice_sched_agg_info *agg_info)
{
struct ice_sched_node *tc_node, *agg_node;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 tc;
if (!agg_info)
@@ -5721,7 +5740,7 @@ void ice_sched_replay_agg(struct ice_hw *hw)
ICE_MAX_TRAFFIC_CLASS)) {
ice_declare_bitmap(replay_bitmap,
ICE_MAX_TRAFFIC_CLASS);
- enum ice_status status;
+ int status;
ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
ice_sched_get_ena_tc_bitmap(pi,
@@ -5777,9 +5796,9 @@ void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
*
* Replay root node BW settings.
*/
-enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi)
+int ice_sched_replay_root_node_bw(struct ice_port_info *pi)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
if (!pi->hw)
return ICE_ERR_PARAM;
@@ -5797,9 +5816,9 @@ enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi)
*
* This function replay TC nodes.
*/
-enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
+int ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 tc;
if (!pi->hw)
@@ -5829,7 +5848,7 @@ enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
* This function replays VSI type nodes bandwidth. This function needs to be
* called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *tc_bitmap)
{
@@ -5837,7 +5856,7 @@ ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle,
struct ice_port_info *pi = hw->port_info;
struct ice_bw_type_info *bw_t_info;
struct ice_vsi_ctx *vsi_ctx;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 tc;
vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
@@ -5869,24 +5888,24 @@ ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle,
* their node bandwidth information. This function needs to be called with
* scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
ice_declare_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_port_info *pi = hw->port_info;
struct ice_sched_agg_info *agg_info;
- enum ice_status status;
+ int status;
ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
if (!agg_info)
- return ICE_SUCCESS; /* Not present in list - default Agg case */
+ return 0; /* Not present in list - default Agg case */
agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
if (!agg_vsi_info)
- return ICE_SUCCESS; /* Not present in list - default Agg case */
+ return 0; /* Not present in list - default Agg case */
ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap,
replay_bitmap);
/* Replay aggregator node associated to vsi_handle */
@@ -5920,10 +5939,10 @@ ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
* This function replays association of VSI to aggregator type nodes, and
* node bandwidth information.
*/
-enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_port_info *pi = hw->port_info;
- enum ice_status status;
+ int status;
ice_acquire_lock(&pi->sched_lock);
status = ice_sched_replay_vsi_agg(hw, vsi_handle);
@@ -5939,7 +5958,7 @@ enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
* This function replays queue type node bandwidth. This function needs to be
* called with scheduler lock held.
*/
-enum ice_status
+int
ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
{
struct ice_sched_node *q_node;
diff --git a/sys/dev/ice/ice_sched.h b/sys/dev/ice/ice_sched.h
index 490a7719a960..db79e9e23621 100644
--- a/sys/dev/ice/ice_sched.h
+++ b/sys/dev/ice/ice_sched.h
@@ -110,54 +110,54 @@ struct ice_sched_agg_info {
};
/* FW AQ command calls */
-enum ice_status
+int
ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes,
struct ice_aqc_node_attr_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_nodes,
struct ice_aqc_cfg_l2_node_cgd_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw, u8 layer_num);
-enum ice_status
+int
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer, u16 num_nodes,
u16 *num_nodes_added, u32 *first_node_teid,
struct ice_sched_node **prealloc_node);
-enum ice_status
+int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list);
-enum ice_status
+int
ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
u16 priority);
-enum ice_status
+int
ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node,
u16 weight);
-enum ice_status ice_sched_init_port(struct ice_port_info *pi);
-enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+int ice_sched_init_port(struct ice_port_info *pi);
+int ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_get_psm_clk_freq(struct ice_hw *hw);
/* Functions to cleanup scheduler SW DB */
@@ -170,7 +170,7 @@ struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid);
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
/* Add a scheduling node into SW DB for given info */
-enum ice_status
+int
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info,
struct ice_sched_node *prealloc_node);
@@ -182,112 +182,112 @@ struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner);
-enum ice_status
+int
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
-enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
-enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
+int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
u16 vsi_handle);
bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node);
-enum ice_status
+int
ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
struct ice_sq_cd *cd);
/* Tx scheduler rate limiter functions */
-enum ice_status
+int
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id,
enum ice_agg_type agg_type, u8 tc_bitmap);
-enum ice_status
+int
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
u8 tc_bitmap);
-enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id);
-enum ice_status
+int ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id);
+int
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type);
-enum ice_status
+int
ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type);
-enum ice_status
+int
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type);
-enum ice_status
+int
ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
enum ice_rl_type rl_type);
-enum ice_status
+int
ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw,
u32 max_bw, u32 shared_bw);
-enum ice_status
+int
ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle);
-enum ice_status
+int
ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw,
u32 max_bw, u32 shared_bw);
-enum ice_status
+int
ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id);
-enum ice_status
+int
ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
u32 min_bw, u32 max_bw, u32 shared_bw);
-enum ice_status
+int
ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id,
u8 tc);
-enum ice_status
+int
ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
u8 *q_prio);
-enum ice_status
+int
ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
enum ice_rl_type rl_type, u8 *bw_alloc);
-enum ice_status
+int
ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id,
u16 num_vsis, u16 *vsi_handle_arr,
u8 *node_prio, u8 tc);
-enum ice_status
+int
ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap,
enum ice_rl_type rl_type, u8 *bw_alloc);
bool
ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
struct ice_sched_node *node);
-enum ice_status
+int
ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle);
-enum ice_status
+int
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
enum ice_agg_type agg_type, u8 tc,
enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
u32 min_bw, u32 max_bw, u32 shared_bw);
-enum ice_status
+int
ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw,
u32 max_bw, u32 shared_bw);
-enum ice_status
+int
ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id,
u8 tc, u32 min_bw, u32 max_bw,
u32 shared_bw);
-enum ice_status
+int
ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi,
struct ice_sched_node *node, u8 priority);
-enum ice_status
+int
ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u8 bw_alloc);
-enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
+int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
-enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi);
-enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi);
-enum ice_status ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
+int ice_sched_replay_tc_node_bw(struct ice_port_info *pi);
+int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
+int ice_sched_replay_root_node_bw(struct ice_port_info *pi);
+int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
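A minimal usage sketch (hypothetical caller, not part of the patch) of the new return convention for the prototypes above: callers now receive an int and test for non-zero rather than comparing against ICE_SUCCESS. The bandwidth argument is assumed to be in Kbps, matching the existing ice_sched comments.

/* Hypothetical caller, not part of the patch: cap a LAN queue on TC 0 to
 * a 100000 Kbps maximum rate and propagate the int status code. */
static int
example_cap_queue_bw(struct ice_port_info *pi, u16 vsi_handle, u16 q_handle)
{
	int status;

	status = ice_cfg_q_bw_lmt(pi, vsi_handle, 0, q_handle,
				  ICE_MAX_BW, 100000);
	if (status)
		return status;		/* e.g. ICE_ERR_PARAM */
	return 0;
}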
diff --git a/sys/dev/ice/ice_strings.c b/sys/dev/ice/ice_strings.c
index b341b2815fea..5b5da737cadb 100644
--- a/sys/dev/ice/ice_strings.c
+++ b/sys/dev/ice/ice_strings.c
@@ -179,13 +179,13 @@ _ice_aq_str(enum ice_aq_err aq_err)
* Otherwise, use the scratch space to format the status code into a number.
*/
struct ice_str_buf
-_ice_status_str(enum ice_status status)
+_ice_status_str(int status)
{
struct ice_str_buf buf = { .str = "" };
const char *str = NULL;
switch (status) {
- case ICE_SUCCESS:
+ case 0:
str = "OK";
break;
case ICE_ERR_PARAM:
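A hypothetical caller (not part of the patch) showing how the int-based _ice_status_str() is consumed; struct ice_str_buf is returned by value, so the formatted string lives in the caller's frame.

/* Hypothetical usage, not part of the patch. */
static void
example_log_status(struct ice_hw *hw, int status)
{
	struct ice_str_buf buf = _ice_status_str(status);

	ice_debug(hw, ICE_DBG_INIT, "command returned %s\n", buf.str);
}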
diff --git a/sys/dev/ice/ice_switch.c b/sys/dev/ice/ice_switch.c
index e02390e10ef8..1edd39497ab5 100644
--- a/sys/dev/ice/ice_switch.c
+++ b/sys/dev/ice/ice_switch.c
@@ -72,7 +72,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle);
* Allocate memory for the entire recipe table and initialize the structures/
* entries corresponding to basic recipes.
*/
-enum ice_status
+int
ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
{
struct ice_sw_recipe *recps;
@@ -93,7 +93,7 @@ ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
*recp_list = recps;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -121,14 +121,14 @@ ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
* in response buffer. The caller of this function to use *num_elems while
* parsing the response buffer.
*/
-static enum ice_status
+static int
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
u16 buf_size, u16 *req_desc, u16 *num_elems,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sw_cfg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
cmd = &desc.params.get_sw_conf;
@@ -149,10 +149,10 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
* @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
* @global_lut_id: output parameter for the RSS global LUT's ID
*/
-enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
+int ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
- enum ice_status status;
+ int status;
u16 buf_len;
buf_len = ice_struct_size(sw_buf, elem, 1);
@@ -184,11 +184,11 @@ ice_alloc_global_lut_exit:
* @hw: pointer to the HW struct
* @global_lut_id: ID of the RSS global LUT to free
*/
-enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
+int ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
u16 buf_len, num_elems = 1;
- enum ice_status status;
+ int status;
buf_len = ice_struct_size(sw_buf, elem, num_elems);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
@@ -218,14 +218,14 @@ enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
*
* allocates switch resources (SWID and VEB counter) (0x0208)
*/
-enum ice_status
+int
ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
u16 *counter_id)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
struct ice_aqc_res_elem *sw_ele;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = ice_struct_size(sw_buf, elem, 1);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
@@ -302,10 +302,10 @@ ice_alloc_sw_exit:
* releasing other resources even after it encounters error.
* The error code returned is the last error it encountered.
*/
-enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
+int ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
{
struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
- enum ice_status status, ret_status;
+ int status, ret_status;
u16 buf_len;
buf_len = ice_struct_size(sw_buf, elem, 1);
@@ -364,14 +364,14 @@ enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
*
* Add a VSI context to the hardware (0x0210)
*/
-enum ice_status
+int
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *res;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
res = &desc.params.add_update_free_vsi_res;
@@ -408,14 +408,14 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Free VSI context info from hardware (0x0213)
*/
-enum ice_status
+int
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
resp = &desc.params.add_update_free_vsi_res;
@@ -443,14 +443,14 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Update VSI context in the hardware (0x0211)
*/
-enum ice_status
+int
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
resp = &desc.params.add_update_free_vsi_res;
@@ -528,7 +528,7 @@ ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
* @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*/
-static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
+void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_vsi_ctx *vsi;
u8 i;
@@ -590,12 +590,12 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw)
* If this function gets called after reset for existing VSIs then update
* with the new HW VSI number in the corresponding VSI handle list entry.
*/
-enum ice_status
+int
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_vsi_ctx *tmp_vsi_ctx;
- enum ice_status status;
+ int status;
if (vsi_handle >= ICE_MAX_VSI)
return ICE_ERR_PARAM;
@@ -619,7 +619,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
}
- return ICE_SUCCESS;
+ return 0;
}
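A hypothetical sketch (not part of the patch) of the add/free pair with the new int return type; vsi_ctx is assumed to have been populated by the caller beforehand.

/* Hypothetical caller, not part of the patch: vsi_ctx is assumed to be
 * fully initialized (info, flags, mapping) before the call. */
static int
example_vsi_lifecycle(struct ice_hw *hw, u16 vsi_handle,
		      struct ice_vsi_ctx *vsi_ctx)
{
	int status;

	status = ice_add_vsi(hw, vsi_handle, vsi_ctx, NULL);
	if (status)
		return status;

	/* ... configure queues, filters, etc. ... */

	return ice_free_vsi(hw, vsi_handle, vsi_ctx, false, NULL);
}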
/**
@@ -632,11 +632,11 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
*
* Free VSI context info from hardware as well as from VSI handle list
*/
-enum ice_status
+int
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
@@ -656,7 +656,7 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
*
* Update VSI context in the hardware
*/
-enum ice_status
+int
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
@@ -672,11 +672,11 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
* @vsi_handle: VSI SW index
* @enable: boolean for enable/disable
*/
-enum ice_status
+int
ice_cfg_iwarp_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
{
struct ice_vsi_ctx *ctx, *cached_ctx;
- enum ice_status status;
+ int status;
cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!cached_ctx)
@@ -715,14 +715,14 @@ ice_cfg_iwarp_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
*
* Get VSI context info from hardware (0x0212)
*/
-enum ice_status
+int
ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aqc_get_vsi_resp *resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
resp = &desc.params.get_vsi_resp;
@@ -756,16 +756,16 @@ ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Add/Update Mirror Rule (0x260).
*/
-enum ice_status
+int
ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
u16 count, struct ice_mir_rule_buf *mr_buf,
struct ice_sq_cd *cd, u16 *rule_id)
{
struct ice_aqc_add_update_mir_rule *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
__le16 *mr_list = NULL;
u16 buf_size = 0;
+ int status;
switch (rule_type) {
case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
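A hedged sketch (not part of the patch; the ice_mir_rule_buf member names are assumed from the existing driver) of adding a single-VSI ingress mirror rule and capturing the firmware-assigned rule ID.

/* Sketch only, not part of the patch; mr_buf member names are assumed. */
static int
example_add_ingress_mirror(struct ice_hw *hw, u16 dest_vsi, u16 src_vsi_num)
{
	struct ice_mir_rule_buf mr_buf = { .vsi_idx = src_vsi_num,
					   .add = true };
	u16 rule_id = 0;

	return ice_aq_add_update_mir_rule(hw, ICE_AQC_RULE_TYPE_VPORT_INGRESS,
					  dest_vsi, 1, &mr_buf, NULL,
					  &rule_id);
}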
@@ -854,7 +854,7 @@ ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
*
* Delete Mirror Rule (0x261).
*/
-enum ice_status
+int
ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
struct ice_sq_cd *cd)
{
@@ -886,15 +886,15 @@ ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
*
* allocates or free a VSI list resource
*/
-static enum ice_status
+static int
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_sw_lkup_type lkup_type,
enum ice_adminq_opc opc)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
struct ice_aqc_res_elem *vsi_ele;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = ice_struct_size(sw_buf, elem, 1);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
@@ -945,7 +945,7 @@ ice_aq_alloc_free_vsi_list_exit:
*
* Sets the storm control configuration (0x0280)
*/
-enum ice_status
+int
ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
u32 ctl_bitmask)
{
@@ -972,12 +972,12 @@ ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
*
* Gets the storm control configuration (0x0281)
*/
-enum ice_status
+int
ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
u32 *ctl_bitmask)
{
- enum ice_status status;
struct ice_aq_desc desc;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
@@ -1009,12 +1009,12 @@ ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
*
* Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
*/
-enum ice_status
+int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -1064,13 +1064,13 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
+int ice_get_initial_sw_cfg(struct ice_hw *hw)
{
struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
- enum ice_status status;
u8 num_total_ports;
u16 req_desc = 0;
u16 num_elems;
+ int status;
u8 j = 0;
u16 i;
@@ -1117,6 +1117,12 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
switch (res_type) {
+ case ICE_AQC_GET_SW_CONF_RESP_VSI:
+ if (hw->fw_vsi_num != ICE_DFLT_VSI_INVAL)
+ ice_debug(hw, ICE_DBG_SW, "fw_vsi_num %d -> %d\n",
+ hw->fw_vsi_num, vsi_port_num);
+ hw->fw_vsi_num = vsi_port_num;
+ break;
case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
if (j == num_total_ports) {
@@ -1191,8 +1197,10 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
(fi->lkup_type == ICE_SW_LKUP_MAC &&
!IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
(fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
- !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
- fi->lan_en = true;
+ !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr))) {
+ if (!fi->fltVeb_en)
+ fi->lan_en = true;
+ }
} else {
fi->lan_en = true;
}
@@ -1355,7 +1363,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
* Create a large action to hold software marker and update the switch rule
* entry pointed by m_ent with newly created large action
*/
-static enum ice_status
+static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
u16 sw_marker, u16 l_id)
{
@@ -1367,9 +1375,9 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
* 3. GENERIC VALUE action to hold the software marker ID
*/
const u16 num_lg_acts = 3;
- enum ice_status status;
u16 lg_act_size;
u16 rules_size;
+ int status;
u32 act;
u16 id;
@@ -1456,19 +1464,19 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
* @counter_id: VLAN counter ID returned as part of allocate resource
* @l_id: large action resource ID
*/
-static enum ice_status
+static int
ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
u16 counter_id, u16 l_id)
{
struct ice_sw_rule_lkup_rx_tx *rx_tx;
struct ice_sw_rule_lg_act *lg_act;
- enum ice_status status;
/* 2 actions will be added while adding a large action counter */
const int num_acts = 2;
u16 lg_act_size;
u16 rules_size;
u16 f_rule_id;
u32 act;
+ int status;
u16 id;
if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
@@ -1583,15 +1591,15 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
* Call AQ command to add a new switch rule or update existing switch rule
* using the given VSI list ID
*/
-static enum ice_status
+static int
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
enum ice_sw_lkup_type lkup_type)
{
struct ice_sw_rule_vsi_list *s_rule;
- enum ice_status status;
u16 s_rule_size;
u16 rule_type;
+ int status;
int i;
if (!num_vsi)
@@ -1646,11 +1654,11 @@ exit:
* @vsi_list_id: stores the ID of the VSI list to be created
* @lkup_type: switch rule filter's lookup type
*/
-static enum ice_status
+static int
ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
{
- enum ice_status status;
+ int status;
status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
ice_aqc_opc_alloc_res);
@@ -1673,13 +1681,13 @@ ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
* to the corresponding filter management list to track this switch rule
* and VSI mapping
*/
-static enum ice_status
+static int
ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
struct ice_fltr_list_entry *f_entry)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
struct ice_sw_rule_lkup_rx_tx *s_rule;
- enum ice_status status;
+ int status;
s_rule = (struct ice_sw_rule_lkup_rx_tx *)
ice_malloc(hw, ice_struct_size(s_rule, hdr_data,
@@ -1734,11 +1742,11 @@ ice_create_pkt_fwd_rule_exit:
* Call AQ command to update a previously created switch rule with a
* VSI list ID
*/
-static enum ice_status
+static int
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
struct ice_sw_rule_lkup_rx_tx *s_rule;
- enum ice_status status;
+ int status;
s_rule = (struct ice_sw_rule_lkup_rx_tx *)
ice_malloc(hw, ice_struct_size(s_rule, hdr_data,
@@ -1766,13 +1774,14 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
*
* Updates unicast switch filter rules based on VEB/VEPA mode
*/
-enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
+int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
- enum ice_status status = ICE_SUCCESS;
- struct ice_switch_info *sw = NULL;
struct LIST_HEAD_TYPE *rule_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
+ struct ice_switch_info *sw;
+ int status = 0;
+
sw = hw->switch_info;
rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
@@ -1824,14 +1833,15 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
* Add the new VSI to the previously created VSI list set
* using the update switch rule command
*/
-static enum ice_status
+static int
ice_add_update_vsi_list(struct ice_hw *hw,
struct ice_fltr_mgmt_list_entry *m_entry,
struct ice_fltr_info *cur_fltr,
struct ice_fltr_info *new_fltr)
{
- enum ice_status status = ICE_SUCCESS;
u16 vsi_list_id = 0;
+ int status = 0;
+
if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
return ICE_ERR_NOT_IMPL;
@@ -1851,7 +1861,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle_arr[2];
/* A rule already exists with the new VSI being added */
- if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
+ if (cur_fltr->vsi_handle == new_fltr->vsi_handle)
return ICE_ERR_ALREADY_EXISTS;
vsi_handle_arr[0] = cur_fltr->vsi_handle;
@@ -1899,7 +1909,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
- return ICE_SUCCESS;
+ return ICE_ERR_ALREADY_EXISTS;
/* Update the previously created VSI list set with
* the new VSI ID passed in
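With the change above, re-adding a VSI that is already present in the VSI list map now returns ICE_ERR_ALREADY_EXISTS instead of silently succeeding; a hypothetical caller (not part of the patch) that wants idempotent behavior can treat that code as benign:

/* Hypothetical caller, not part of the patch. */
static int
example_add_mac_idempotent(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
{
	int status = ice_add_mac(hw, m_list);

	if (status == ICE_ERR_ALREADY_EXISTS)
		status = 0;	/* filter already programmed; nothing to do */
	return status;
}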
@@ -2008,14 +2018,14 @@ ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
*
* Adds or updates the rule lists for a given recipe
*/
-static enum ice_status
+static int
ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
u8 lport, struct ice_fltr_list_entry *f_entry)
{
struct ice_fltr_info *new_fltr, *cur_fltr;
struct ice_fltr_mgmt_list_entry *m_entry;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
return ICE_ERR_PARAM;
@@ -2058,7 +2068,7 @@ exit_add_rule_internal:
* The VSI list should be emptied before this function is called to remove the
* VSI list.
*/
-static enum ice_status
+static int
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
enum ice_sw_lkup_type lkup_type)
{
@@ -2076,13 +2086,13 @@ ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
* @fm_list: filter management entry for which the VSI list management needs to
* be done
*/
-static enum ice_status
+static int
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_fltr_mgmt_list_entry *fm_list)
{
enum ice_sw_lkup_type lkup_type;
- enum ice_status status = ICE_SUCCESS;
u16 vsi_list_id;
+ int status = 0;
if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
fm_list->vsi_count == 0)
@@ -2163,14 +2173,14 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
* @recp_list: recipe list for which the rule needs to removed
* @f_entry: rule entry containing filter information
*/
-static enum ice_status
+static int
ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
struct ice_fltr_list_entry *f_entry)
{
struct ice_fltr_mgmt_list_entry *list_elem;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = ICE_SUCCESS;
bool remove_rule = false;
+ int status = 0;
u16 vsi_handle;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
@@ -2180,6 +2190,7 @@ ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
rule_lock = &recp_list->filt_rule_lock;
ice_acquire_lock(rule_lock);
+
list_elem = ice_find_rule_entry(&recp_list->filt_rules,
&f_entry->fltr_info);
if (!list_elem) {
@@ -2259,14 +2270,14 @@ exit:
* information for all resource types. Each resource type is an
* ice_aqc_get_res_resp_elem structure.
*/
-enum ice_status
+int
ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_res_alloc *resp;
- enum ice_status status;
struct ice_aq_desc desc;
+ int status;
if (!buf)
return ICE_ERR_BAD_PTR;
@@ -2296,14 +2307,14 @@ ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
* @desc_id: input - first desc ID to start; output - next desc ID
* @cd: pointer to command details structure or NULL
*/
-enum ice_status
+int
ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
{
struct ice_aqc_get_allocd_res_desc *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -2342,7 +2353,7 @@ ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
* check for duplicates in this case, removing duplicates from a given
* list should be taken care of in the caller of this function.
*/
-static enum ice_status
+static int
ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
struct ice_switch_info *sw, u8 lport)
{
@@ -2352,8 +2363,8 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
struct LIST_HEAD_TYPE *rule_head;
u16 total_elem_left, s_rule_size;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = ICE_SUCCESS;
u16 num_unicast = 0;
+ int status = 0;
u8 elem_sent;
s_rule = NULL;
@@ -2403,7 +2414,7 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
ice_acquire_lock(rule_lock);
/* Exit if no suitable entries were found for adding bulk switch rule */
if (!num_unicast) {
- status = ICE_SUCCESS;
+ status = 0;
goto ice_add_mac_exit;
}
@@ -2493,7 +2504,7 @@ ice_add_mac_exit:
*
* Function add MAC rule for logical port from HW struct
*/
-enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
+int ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
{
if (!m_list || !hw)
return ICE_ERR_PARAM;
@@ -2508,7 +2519,7 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
* @recp_list: recipe list for which rule has to be added
* @f_entry: filter entry containing one VLAN information
*/
-static enum ice_status
+static int
ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
struct ice_fltr_list_entry *f_entry)
{
@@ -2517,7 +2528,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
enum ice_sw_lkup_type lkup_type;
u16 vsi_list_id = 0, vsi_handle;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
return ICE_ERR_PARAM;
@@ -2662,7 +2673,7 @@ exit:
* @v_list: list of VLAN entries and forwarding information
* @sw: pointer to switch info struct for which function add rule
*/
-static enum ice_status
+static int
ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
struct ice_switch_info *sw)
{
@@ -2680,7 +2691,7 @@ ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
if (v_list_itr->status)
return v_list_itr->status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2690,7 +2701,7 @@ ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
*
* Function add VLAN rule for logical port from HW struct
*/
-enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
+int ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
{
if (!v_list || !hw)
return ICE_ERR_PARAM;
@@ -2709,7 +2720,7 @@ enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
* the filter list with the necessary fields (including flags to
* indicate Tx or Rx rules).
*/
-static enum ice_status
+static int
ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
struct ice_switch_info *sw, u8 lport)
{
ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,