author    Piotr Kubaj <pkubaj@FreeBSD.org>	2023-02-14 01:29:44 +0000
committer Eric Joyner <erj@FreeBSD.org>	2023-02-17 22:54:35 +0000
commit    ebc914f942f6655ff4c27f8717630f81c74624cb
tree      9866d259c0314c3dc339e1c3459fc573fd6d48b6
parent    cd280c60a5b85875b8aeff8d522793914e60d19d
ice(4): Update to 1.37.7-k
Notable changes include:

- DSCP QoS Support (leveraging support added in
  rG9c950139051298831ce19d01ea5fb33ec6ea7f89)
- Improved PFC handling and TC queue assignments (now all remaining
  queues are assigned to TC 0 when more than one TC is enabled and the
  number of available queues does not evenly divide between them)
- Support for dumping the internal FW state for additional debugging
  by Intel support
- Support for allowing "No FEC" to be a valid state for the LESM to
  negotiate when using non-standard compliant modules

Also includes various bug fixes and smaller enhancements.

Signed-off-by: Eric Joyner <erj@FreeBSD.org>

Reviewed by:	erj@
Tested by:	Jeff Pieper <jeffrey.pieper@intel.com>
MFC after:	3 days
Relnotes:	yes
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D38109

(cherry picked from commit 8923de59054358980102ea5acda6c6dd58273957)
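To illustrate the TC queue-assignment behavior described above: when more
than one TC is enabled and the available queues do not divide evenly, every
leftover queue goes to TC 0. A minimal C sketch of that distribution follows;
it illustrates only the stated behavior, and the names
(sketch_assign_tc_queues, num_queues, num_tcs, qcount) are hypothetical, not
taken from the driver.

    /*
     * Hypothetical sketch: split num_queues across num_tcs enabled TCs,
     * giving each TC an even share and folding the remainder into TC 0.
     */
    static void
    sketch_assign_tc_queues(unsigned int num_queues, unsigned int num_tcs,
        unsigned int qcount[])
    {
        unsigned int base = num_queues / num_tcs; /* even share per TC */
        unsigned int rem = num_queues % num_tcs;  /* queues left over */
        unsigned int tc;

        for (tc = 0; tc < num_tcs; tc++)
            qcount[tc] = base;
        qcount[0] += rem; /* all remaining queues are assigned to TC 0 */
    }

For example, 16 queues across 3 TCs yields qcount = {6, 5, 5}: each TC gets
5, and the single leftover queue lands in TC 0.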
-rw-r--r--  sys/conf/files.amd64                  |    2
-rw-r--r--  sys/conf/files.arm64                  |    2
-rw-r--r--  sys/conf/files.powerpc                |   44
-rw-r--r--  sys/dev/ice/ice_adminq_cmd.h          |  142
-rw-r--r--  sys/dev/ice/ice_alloc.h               |    2
-rw-r--r--  sys/dev/ice/ice_bitops.h              |   14
-rw-r--r--  sys/dev/ice/ice_common.c              |  453
-rw-r--r--  sys/dev/ice/ice_common.h              |    7
-rw-r--r--  sys/dev/ice/ice_common_sysctls.h      |   17
-rw-r--r--  sys/dev/ice/ice_common_txrx.h         |    2
-rw-r--r--  sys/dev/ice/ice_controlq.c            |   31
-rw-r--r--  sys/dev/ice/ice_controlq.h            |    2
-rw-r--r--  sys/dev/ice/ice_dcb.c                 |   52
-rw-r--r--  sys/dev/ice/ice_dcb.h                 |    4
-rw-r--r--  sys/dev/ice/ice_ddp_common.c          | 2532
-rw-r--r--  sys/dev/ice/ice_ddp_common.h          |  478
-rw-r--r--  sys/dev/ice/ice_defs.h                |   71
-rw-r--r--  sys/dev/ice/ice_devids.h              |    9
-rw-r--r--  sys/dev/ice/ice_drv_info.h            |   15
-rw-r--r--  sys/dev/ice/ice_features.h            |    4
-rw-r--r--  sys/dev/ice/ice_flex_pipe.c           | 2216
-rw-r--r--  sys/dev/ice/ice_flex_pipe.h           |   44
-rw-r--r--  sys/dev/ice/ice_flex_type.h           |  291
-rw-r--r--  sys/dev/ice/ice_flow.c                |   18
-rw-r--r--  sys/dev/ice/ice_flow.h                |    2
-rw-r--r--  sys/dev/ice/ice_fw_logging.c          |    2
-rw-r--r--  sys/dev/ice/ice_fwlog.c               |    2
-rw-r--r--  sys/dev/ice/ice_fwlog.h               |    4
-rw-r--r--  sys/dev/ice/ice_hw_autogen.h          |    2
-rw-r--r--  sys/dev/ice/ice_iflib.h               |   12
-rw-r--r--  sys/dev/ice/ice_iflib_recovery_txrx.c |    2
-rw-r--r--  sys/dev/ice/ice_iflib_sysctls.h       |    2
-rw-r--r--  sys/dev/ice/ice_iflib_txrx.c          |   27
-rw-r--r--  sys/dev/ice/ice_lan_tx_rx.h           |   10
-rw-r--r--  sys/dev/ice/ice_lib.c                 | 1487
-rw-r--r--  sys/dev/ice/ice_lib.h                 |   43
-rw-r--r--  sys/dev/ice/ice_nvm.c                 |   37
-rw-r--r--  sys/dev/ice/ice_nvm.h                 |    5
-rw-r--r--  sys/dev/ice/ice_opts.h                |    2
-rw-r--r--  sys/dev/ice/ice_osdep.c               |    2
-rw-r--r--  sys/dev/ice/ice_osdep.h               |    2
-rw-r--r--  sys/dev/ice/ice_protocol_type.h       |   25
-rw-r--r--  sys/dev/ice/ice_rdma.c                |   15
-rw-r--r--  sys/dev/ice/ice_rdma.h                |    2
-rw-r--r--  sys/dev/ice/ice_resmgr.c              |    2
-rw-r--r--  sys/dev/ice/ice_resmgr.h              |    2
-rw-r--r--  sys/dev/ice/ice_rss.h                 |    2
-rw-r--r--  sys/dev/ice/ice_sbq_cmd.h             |    2
-rw-r--r--  sys/dev/ice/ice_sched.c               |   94
-rw-r--r--  sys/dev/ice/ice_sched.h               |   13
-rw-r--r--  sys/dev/ice/ice_status.h              |    2
-rw-r--r--  sys/dev/ice/ice_strings.c             |    8
-rw-r--r--  sys/dev/ice/ice_switch.c              |  257
-rw-r--r--  sys/dev/ice/ice_switch.h              |   54
-rw-r--r--  sys/dev/ice/ice_type.h                |  112
-rw-r--r--  sys/dev/ice/ice_vlan_mode.c           |    3
-rw-r--r--  sys/dev/ice/ice_vlan_mode.h           |    2
-rw-r--r--  sys/dev/ice/if_ice_iflib.c            |   66
-rw-r--r--  sys/dev/ice/irdma_di_if.m             |    2
-rw-r--r--  sys/dev/ice/irdma_if.m                |    2
-rw-r--r--  sys/dev/ice/virtchnl.h                |  186
-rw-r--r--  sys/dev/ice/virtchnl_inline_ipsec.h   |   11
-rw-r--r--  sys/dev/ice/virtchnl_lan_desc.h       |  550
-rw-r--r--  sys/modules/ice/Makefile              |    2
64 files changed, 5833 insertions(+), 3676 deletions(-)
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index d8a20e128c84..b967a3be2a4f 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -205,6 +205,8 @@ dev/ice/irdma_if.m optional ice pci \
compile-with "${NORMAL_M} -I$S/dev/ice"
dev/ice/irdma_di_if.m optional ice pci \
compile-with "${NORMAL_M} -I$S/dev/ice"
+dev/ice/ice_ddp_common.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c optional ice_ddp \
compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031e00 -mice_ddp -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
index 88cb483f178f..6b96de28128d 100644
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -235,6 +235,8 @@ dev/ice/irdma_if.m optional ice pci \
compile-with "${NORMAL_M} -I$S/dev/ice"
dev/ice/irdma_di_if.m optional ice pci \
compile-with "${NORMAL_M} -I$S/dev/ice"
+dev/ice/ice_ddp_common.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c optional ice_ddp \
compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031e00 -mice_ddp -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
diff --git a/sys/conf/files.powerpc b/sys/conf/files.powerpc
index c01e0e81ab1a..05f331e2c3e0 100644
--- a/sys/conf/files.powerpc
+++ b/sys/conf/files.powerpc
@@ -38,48 +38,50 @@ dev/iicbus/max6690.c optional max6690 powermac
dev/iicbus/ofw_iicbus.c optional iicbus aim
dev/ipmi/ipmi.c optional ipmi
dev/ipmi/ipmi_opal.c optional powernv ipmi
-dev/ice/if_ice_iflib.c optional ice pci powerpc64 \
+dev/ice/if_ice_iflib.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_lib.c optional ice pci powerpc64 \
+dev/ice/ice_lib.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_osdep.c optional ice pci powerpc64 \
+dev/ice/ice_osdep.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_resmgr.c optional ice pci powerpc64 \
+dev/ice/ice_resmgr.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_strings.c optional ice pci powerpc64 \
+dev/ice/ice_strings.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_iflib_recovery_txrx.c optional ice pci powerpc64 \
+dev/ice/ice_iflib_recovery_txrx.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_iflib_txrx.c optional ice pci powerpc64 \
+dev/ice/ice_iflib_txrx.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_common.c optional ice pci powerpc64 \
+dev/ice/ice_common.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_controlq.c optional ice pci powerpc64 \
+dev/ice/ice_controlq.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_dcb.c optional ice pci powerpc64 \
+dev/ice/ice_dcb.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_flex_pipe.c optional ice pci powerpc64 \
+dev/ice/ice_flex_pipe.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_flow.c optional ice pci powerpc64 \
+dev/ice/ice_flow.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_nvm.c optional ice pci powerpc64 \
+dev/ice/ice_nvm.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_sched.c optional ice pci powerpc64 \
+dev/ice/ice_sched.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_switch.c optional ice pci powerpc64 \
+dev/ice/ice_switch.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_vlan_mode.c optional ice pci powerpc64 \
+dev/ice/ice_vlan_mode.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_fw_logging.c optional ice pci powerpc64 \
+dev/ice/ice_fw_logging.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_fwlog.c optional ice pci powerpc64 \
+dev/ice/ice_fwlog.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_rdma.c optional ice pci powerpc64 \
+dev/ice/ice_rdma.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/irdma_if.m optional ice pci powerpc64 \
+dev/ice/irdma_if.m optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_M} -I$S/dev/ice"
-dev/ice/irdma_di_if.m optional ice pci powerpc64 \
+dev/ice/irdma_di_if.m optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_M} -I$S/dev/ice"
+dev/ice/ice_ddp_common.c optional ice pci powerpc64 | ice pci powerpc64le \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c optional ice_ddp powerpc64 | ice pci powerpc64le \
compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031e00 -mice_ddp -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
diff --git a/sys/dev/ice/ice_adminq_cmd.h b/sys/dev/ice/ice_adminq_cmd.h
index a07ca6780a3c..92ad8055b666 100644
--- a/sys/dev/ice/ice_adminq_cmd.h
+++ b/sys/dev/ice/ice_adminq_cmd.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,10 +37,19 @@
* descriptor format. It is shared between Firmware and Software.
*/
+#include "ice_osdep.h"
+#include "ice_defs.h"
+#include "ice_bitops.h"
+
#define ICE_MAX_VSI 768
#define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9
#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728
+enum ice_aq_res_access_type {
+ ICE_RES_READ = 1,
+ ICE_RES_WRITE
+};
+
struct ice_aqc_generic {
__le32 param0;
__le32 param1;
@@ -155,9 +164,6 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_TXQS 0x0042
#define ICE_AQC_CAPS_MSIX 0x0043
#define ICE_AQC_CAPS_MAX_MTU 0x0047
-#define ICE_AQC_CAPS_NVM_VER 0x0048
-#define ICE_AQC_CAPS_OROM_VER 0x004A
-#define ICE_AQC_CAPS_NET_VER 0x004C
#define ICE_AQC_CAPS_CEM 0x00F2
#define ICE_AQC_CAPS_IWARP 0x0051
#define ICE_AQC_CAPS_LED 0x0061
@@ -173,6 +179,10 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1 0x0082
#define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2 0x0083
#define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3 0x0084
+#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
+#define ICE_AQC_CAPS_NAC_TOPOLOGY 0x0087
+#define ICE_AQC_CAPS_DYN_FLATTENING 0x0090
+#define ICE_AQC_CAPS_ROCEV2_LAG 0x0092
u8 major_ver;
u8 minor_ver;
@@ -526,6 +536,7 @@ struct ice_aqc_vsi_props {
#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S 0
#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S)
#define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA BIT(0)
+#define ICE_AQ_VSI_SW_FLAG_RX_PASS_PRUNE_ENA BIT(3)
#define ICE_AQ_VSI_SW_FLAG_LAN_ENA BIT(4)
u8 veb_stat_id;
#define ICE_AQ_VSI_SW_VEB_STAT_ID_S 0
@@ -836,6 +847,8 @@ struct ice_sw_rule_lkup_rx_tx {
#define ICE_SINGLE_ACT_PTR 0x2
#define ICE_SINGLE_ACT_PTR_VAL_S 4
#define ICE_SINGLE_ACT_PTR_VAL_M (0x1FFF << ICE_SINGLE_ACT_PTR_VAL_S)
+ /* Bit 17 should be set if pointed action includes a FWD cmd */
+#define ICE_SINGLE_ACT_PTR_HAS_FWD BIT(17)
/* Bit 18 should be set to 1 */
#define ICE_SINGLE_ACT_PTR_BIT BIT(18)
@@ -1017,6 +1030,24 @@ struct ice_aqc_get_topo {
__le32 addr_low;
};
+/* Get/Set Tx Topology (indirect 0x0418/0x0417) */
+struct ice_aqc_get_set_tx_topo {
+ u8 set_flags;
+#define ICE_AQC_TX_TOPO_FLAGS_CORRER BIT(0)
+#define ICE_AQC_TX_TOPO_FLAGS_SRC_RAM BIT(1)
+#define ICE_AQC_TX_TOPO_FLAGS_SET_PSM BIT(2)
+#define ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW BIT(4)
+#define ICE_AQC_TX_TOPO_FLAGS_ISSUED BIT(5)
+ u8 get_flags;
+#define ICE_AQC_TX_TOPO_GET_NO_UPDATE 0
+#define ICE_AQC_TX_TOPO_GET_PSM 1
+#define ICE_AQC_TX_TOPO_GET_RAM 2
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* Update TSE (indirect 0x0403)
* Get TSE (indirect 0x0404)
* Add TSE (indirect 0x0401)
@@ -1170,6 +1201,22 @@ struct ice_aqc_rl_profile_elem {
__le16 rl_encode;
};
+/* Config Node Attributes (indirect 0x0419)
+ * Query Node Attributes (indirect 0x041A)
+ */
+struct ice_aqc_node_attr {
+ __le16 num_entries; /* Number of attributes structures in the buffer */
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_node_attr_elem {
+ __le32 node_teid;
+ __le16 max_children;
+ __le16 children_level;
+};
+
/* Configure L2 Node CGD (indirect 0x0414)
* This indirect command allows configuring a congestion domain for given L2
* node TEIDs in the scheduler topology.
@@ -1335,7 +1382,7 @@ struct ice_aqc_get_phy_caps {
#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
-#define ICE_PHY_TYPE_HIGH_MAX_INDEX 5
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 4
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
@@ -1376,6 +1423,7 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_PHY_FEC_25G_RS_528_REQ BIT(2)
#define ICE_AQC_PHY_FEC_25G_KR_REQ BIT(3)
#define ICE_AQC_PHY_FEC_25G_RS_544_REQ BIT(4)
+#define ICE_AQC_PHY_FEC_DIS BIT(5)
#define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
#define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
#define ICE_AQC_PHY_FEC_MASK MAKEMASK(0xdf, 0)
@@ -1484,6 +1532,12 @@ struct ice_aqc_get_link_status {
__le32 addr_low;
};
+enum ice_get_link_status_data_version {
+ ICE_GET_LINK_STATUS_DATA_V1 = 1,
+};
+
+#define ICE_GET_LINK_STATUS_DATALEN_V1 32
+
/* Get link status response data structure, also used for Link Status Event */
struct ice_aqc_get_link_status_data {
u8 topo_media_conflict;
@@ -2078,6 +2132,12 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_PERST_FLAG 1
#define ICE_AQC_NVM_EMPR_FLAG 2
#define ICE_AQC_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+ /* For Write Activate, several flags are sent as part of a separate
+ * flags2 field using a separate byte. For simplicity of the software
+ * interface, we pass the flags as a 16 bit value so these flags are
+ * all offset by 8 bits
+ */
+#define ICE_AQC_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
__le16 module_typeid;
__le16 length;
#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
@@ -2108,6 +2168,7 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */
#define ICE_AQC_NVM_MINSREV_MOD_ID 0x130
+#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B
/* Used for reading and writing MinSRev using 0x0701 and 0x0703. Note that the
* type field is excluded from the section when reading and writing from
@@ -2124,6 +2185,13 @@ struct ice_aqc_nvm_minsrev {
__le16 orom_minsrev_h;
};
+struct ice_aqc_nvm_tx_topo_user_sel {
+ __le16 length;
+ u8 data;
+#define ICE_AQC_NVM_TX_TOPO_USER_SEL BIT(4)
+ u8 reserved;
+};
+
/* Used for 0x0704 as well as for 0x0705 commands */
struct ice_aqc_nvm_cfg {
u8 cmd_flags;
@@ -2218,14 +2286,25 @@ struct ice_aqc_lldp_get_mib {
#define ICE_AQ_LLDP_TX_ACTIVE 0
#define ICE_AQ_LLDP_TX_SUSPENDED 1
#define ICE_AQ_LLDP_TX_FLUSHED 3
+/* DCBX mode */
+#define ICE_AQ_LLDP_DCBX_S 6
+#define ICE_AQ_LLDP_DCBX_M (0x3 << ICE_AQ_LLDP_DCBX_S)
+#define ICE_AQ_LLDP_DCBX_NA 0
+#define ICE_AQ_LLDP_DCBX_CEE 1
+#define ICE_AQ_LLDP_DCBX_IEEE 2
/* The following bytes are reserved for the Get LLDP MIB command (0x0A00)
* and in the LLDP MIB Change Event (0x0A01). They are valid for the
* Get LLDP MIB (0x0A00) response only.
*/
- u8 reserved1;
+ u8 state;
+#define ICE_AQ_LLDP_MIB_CHANGE_STATE_S 0
+#define ICE_AQ_LLDP_MIB_CHANGE_STATE_M \
+ (0x1 << ICE_AQ_LLDP_MIB_CHANGE_STATE_S)
+#define ICE_AQ_LLDP_MIB_CHANGE_EXECUTED 0
+#define ICE_AQ_LLDP_MIB_CHANGE_PENDING 1
__le16 local_len;
__le16 remote_len;
- u8 reserved2[2];
+ u8 reserved[2];
__le32 addr_high;
__le32 addr_low;
};
@@ -2236,6 +2315,11 @@ struct ice_aqc_lldp_set_mib_change {
u8 command;
#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1
+#define ICE_AQ_LLDP_MIB_PENDING_S 1
+#define ICE_AQ_LLDP_MIB_PENDING_M \
+ (0x1 << ICE_AQ_LLDP_MIB_PENDING_S)
+#define ICE_AQ_LLDP_MIB_PENDING_DISABLE 0
+#define ICE_AQ_LLDP_MIB_PENDING_ENABLE 1
u8 reserved[15];
};
@@ -2580,6 +2664,9 @@ struct ice_aqc_add_rdma_qset_data {
/* Move RDMA Queue Set (indirect 0x0C34) */
struct ice_aqc_move_rdma_qset_cmd {
u8 num_rdma_qset; /* Used by commands and response */
+#define ICE_AQC_PF_MODE_SAME_PF 0x0
+#define ICE_AQC_PF_MODE_GIVE_OWNERSHIP 0x1
+#define ICE_AQC_PF_MODE_KEEP_OWNERSHIP 0x2
u8 flags;
u8 reserved[6];
__le32 addr_high;
@@ -2656,8 +2743,8 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_driver_shared_params {
u8 set_or_get_op;
#define ICE_AQC_DRIVER_PARAM_OP_MASK BIT(0)
-#define ICE_AQC_DRIVER_PARAM_SET 0
-#define ICE_AQC_DRIVER_PARAM_GET 1
+#define ICE_AQC_DRIVER_PARAM_SET ((u8)0)
+#define ICE_AQC_DRIVER_PARAM_GET ((u8)1)
u8 param_indx;
#define ICE_AQC_DRIVER_PARAM_MAX_IDX 15
u8 rsvd[2];
@@ -2676,16 +2763,18 @@ struct ice_aqc_event_lan_overflow {
/* Debug Dump Internal Data (indirect 0xFF08) */
struct ice_aqc_debug_dump_internals {
u8 cluster_id;
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW 0
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED 2
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES 3
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW 0
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED 2
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES 3
/* EMP_DRAM only dumpable in device debug mode */
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_EMP_DRAM 4
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_LINK 5
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_EMP_DRAM 4
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_LINK 5
/* AUX_REGS only dumpable in device debug mode */
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_AUX_REGS 6
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_DCB 7
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P 8
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_AUX_REGS 6
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_DCB 7
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P 8
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG 9
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE 21
u8 reserved;
__le16 table_id; /* Used only for non-memory clusters */
__le32 idx; /* In table entries for tables, in bytes for memory */
@@ -2729,7 +2818,6 @@ enum ice_aqc_fw_logging_mod {
ICE_AQC_FW_LOG_ID_MAX,
};
-
/* Set Health Status (direct 0xFF20) */
struct ice_aqc_set_health_status_config {
u8 event_source;
@@ -2747,6 +2835,7 @@ struct ice_aqc_set_health_status_config {
#define ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT 0x106
#define ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED 0x107
#define ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT 0x108
+#define ICE_AQC_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE 0x109
#define ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG 0x10B
#define ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS 0x10C
#define ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE 0x10D
@@ -2768,7 +2857,16 @@ struct ice_aqc_set_health_status_config {
#define ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH 0x504
#define ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT 0x505
#define ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT 0x506
+#define ICE_AQC_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION 0x507
+#define ICE_AQC_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION 0x508
#define ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB 0x509
+#define ICE_AQC_HEALTH_STATUS_ERR_MNG_TIMEOUT 0x50A
+#define ICE_AQC_HEALTH_STATUS_ERR_BMC_RESET 0x50B
+#define ICE_AQC_HEALTH_STATUS_ERR_LAST_MNG_FAIL 0x50C
+#define ICE_AQC_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL 0x50D
+#define ICE_AQC_HEALTH_STATUS_ERR_FW_LOOP 0x1000
+#define ICE_AQC_HEALTH_STATUS_ERR_FW_PFR_FAIL 0x1001
+#define ICE_AQC_HEALTH_STATUS_ERR_LAST_FAIL_AQ 0x1002
/* Get Health Status codes (indirect 0xFF21) */
struct ice_aqc_get_supported_health_status_codes {
@@ -2923,6 +3021,7 @@ struct ice_aq_desc {
struct ice_aqc_cfg_l2_node_cgd cfg_l2_node_cgd;
struct ice_aqc_query_port_ets port_ets;
struct ice_aqc_rl_profile rl_profile;
+ struct ice_aqc_node_attr node_attr;
struct ice_aqc_nvm nvm;
struct ice_aqc_nvm_cfg nvm_cfg;
struct ice_aqc_nvm_checksum nvm_checksum;
@@ -2949,6 +3048,7 @@ struct ice_aq_desc {
struct ice_aqc_dis_txqs dis_txqs;
struct ice_aqc_move_txqs move_txqs;
struct ice_aqc_add_rdma_qset add_rdma_qset;
+ struct ice_aqc_move_rdma_qset_cmd move_rdma_qset;
struct ice_aqc_txqs_cleanup txqs_cleanup;
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
@@ -2975,6 +3075,7 @@ struct ice_aq_desc {
struct ice_aqc_clear_health_status clear_health_status;
struct ice_aqc_prog_topo_dev_nvm prog_topo_dev_nvm;
struct ice_aqc_read_topo_dev_nvm read_topo_dev_nvm;
+ struct ice_aqc_get_set_tx_topo get_set_tx_topo;
} params;
};
@@ -3125,6 +3226,10 @@ enum ice_adminq_opc {
ice_aqc_opc_query_node_to_root = 0x0413,
ice_aqc_opc_cfg_l2_node_cgd = 0x0414,
ice_aqc_opc_remove_rl_profiles = 0x0415,
+ ice_aqc_opc_set_tx_topo = 0x0417,
+ ice_aqc_opc_get_tx_topo = 0x0418,
+ ice_aqc_opc_cfg_node_attr = 0x0419,
+ ice_aqc_opc_query_node_attr = 0x041A,
/* PHY commands */
ice_aqc_opc_get_phy_caps = 0x0600,
@@ -3196,6 +3301,7 @@ enum ice_adminq_opc {
ice_aqc_opc_lldp_set_local_mib = 0x0A08,
ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
ice_aqc_opc_lldp_filter_ctrl = 0x0A0A,
+ ice_execute_pending_lldp_mib = 0x0A0B,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
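A note on the NVM Write Activate change in the header above: as the new
comment in struct ice_aqc_nvm explains, the second flags byte (flags2) is
carried in the same 16-bit software value as the first, so flags2 bits such
as ICE_AQC_NVM_ACTIV_REQ_EMPR (BIT(8)) sit offset by 8. A minimal sketch of
that packing follows; the helper and its parameter names are hypothetical,
not from the driver.

    #include <stdint.h>

    /*
     * Illustrative only: unpack the 16-bit software flags value into the
     * two one-byte descriptor fields; bits 8..15 are the flags2 byte.
     */
    static inline void
    sketch_split_nvm_flags(uint16_t sw_flags, uint8_t *flags, uint8_t *flags2)
    {
        *flags = (uint8_t)(sw_flags & 0xFF); /* first cmd_flags byte */
        *flags2 = (uint8_t)(sw_flags >> 8);  /* flags2 byte, offset by 8 */
    }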
diff --git a/sys/dev/ice/ice_alloc.h b/sys/dev/ice/ice_alloc.h
index b281958be793..bfcb376d45b2 100644
--- a/sys/dev/ice/ice_alloc.h
+++ b/sys/dev/ice/ice_alloc.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_bitops.h b/sys/dev/ice/ice_bitops.h
index 0e04cab87be9..c29963d0a318 100644
--- a/sys/dev/ice/ice_bitops.h
+++ b/sys/dev/ice/ice_bitops.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,15 +33,25 @@
#ifndef _ICE_BITOPS_H_
#define _ICE_BITOPS_H_
+#include "ice_defs.h"
+#include "ice_osdep.h"
+
/* Define the size of the bitmap chunk */
typedef u32 ice_bitmap_t;
+/* NOTE!
+ * Do not use any of the functions declared in this file
+ * on memory that was not declared with ice_declare_bitmap.
+ * Not following this rule might cause issues like split
+ * locks.
+ */
+
/* Number of bits per bitmap chunk */
#define BITS_PER_CHUNK (BITS_PER_BYTE * sizeof(ice_bitmap_t))
/* Determine which chunk a bit belongs in */
#define BIT_CHUNK(nr) ((nr) / BITS_PER_CHUNK)
/* How many chunks are required to store this many bits */
-#define BITS_TO_CHUNKS(sz) DIVIDE_AND_ROUND_UP((sz), BITS_PER_CHUNK)
+#define BITS_TO_CHUNKS(sz) (((sz) + BITS_PER_CHUNK - 1) / BITS_PER_CHUNK)
/* Which bit inside a chunk this bit corresponds to */
#define BIT_IN_CHUNK(nr) ((nr) % BITS_PER_CHUNK)
/* How many bits are valid in the last chunk, assumes nr > 0 */
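Regarding the new NOTE added to this header: the bitmap helpers assume
storage laid out by ice_declare_bitmap (an array of ice_bitmap_t chunks),
and running them on arbitrary memory risks problems such as split locks. A
hedged usage sketch follows, assuming helper signatures (ice_zero_bitmap,
ice_set_bit, ice_is_bit_set) from the driver's bitmap API that are not shown
in this hunk.

    /* Declare storage with the driver's macro, then use only the helpers. */
    ice_declare_bitmap(avail_ids, 128);  /* 128-bit bitmap */

    ice_zero_bitmap(avail_ids, 128);     /* clear all bits */
    ice_set_bit(5, avail_ids);           /* mark ID 5 in use */
    if (ice_is_bit_set(avail_ids, 5)) {
        /* ID 5 set; safe because storage came from ice_declare_bitmap */
    }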
diff --git a/sys/dev/ice/ice_common.c b/sys/dev/ice/ice_common.c
index 3ae266b72d1f..c2efddeb4f7c 100644
--- a/sys/dev/ice/ice_common.c
+++ b/sys/dev/ice/ice_common.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,118 +39,110 @@
#define ICE_PF_RESET_WAIT_COUNT 300
-/**
- * dump_phy_type - helper function that prints PHY type strings
- * @hw: pointer to the HW structure
- * @phy: 64 bit PHY type to decipher
- * @i: bit index within phy
- * @phy_string: string corresponding to bit i in phy
- * @prefix: prefix string to differentiate multiple dumps
- */
-static void
-dump_phy_type(struct ice_hw *hw, u64 phy, u8 i, const char *phy_string,
- const char *prefix)
-{
- if (phy & BIT_ULL(i))
- ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", prefix, i,
- phy_string);
-}
+static const char * const ice_link_mode_str_low[] = {
+ [0] = "100BASE_TX",
+ [1] = "100M_SGMII",
+ [2] = "1000BASE_T",
+ [3] = "1000BASE_SX",
+ [4] = "1000BASE_LX",
+ [5] = "1000BASE_KX",
+ [6] = "1G_SGMII",
+ [7] = "2500BASE_T",
+ [8] = "2500BASE_X",
+ [9] = "2500BASE_KX",
+ [10] = "5GBASE_T",
+ [11] = "5GBASE_KR",
+ [12] = "10GBASE_T",
+ [13] = "10G_SFI_DA",
+ [14] = "10GBASE_SR",
+ [15] = "10GBASE_LR",
+ [16] = "10GBASE_KR_CR1",
+ [17] = "10G_SFI_AOC_ACC",
+ [18] = "10G_SFI_C2C",
+ [19] = "25GBASE_T",
+ [20] = "25GBASE_CR",
+ [21] = "25GBASE_CR_S",
+ [22] = "25GBASE_CR1",
+ [23] = "25GBASE_SR",
+ [24] = "25GBASE_LR",
+ [25] = "25GBASE_KR",
+ [26] = "25GBASE_KR_S",
+ [27] = "25GBASE_KR1",
+ [28] = "25G_AUI_AOC_ACC",
+ [29] = "25G_AUI_C2C",
+ [30] = "40GBASE_CR4",
+ [31] = "40GBASE_SR4",
+ [32] = "40GBASE_LR4",
+ [33] = "40GBASE_KR4",
+ [34] = "40G_XLAUI_AOC_ACC",
+ [35] = "40G_XLAUI",
+ [36] = "50GBASE_CR2",
+ [37] = "50GBASE_SR2",
+ [38] = "50GBASE_LR2",
+ [39] = "50GBASE_KR2",
+ [40] = "50G_LAUI2_AOC_ACC",
+ [41] = "50G_LAUI2",
+ [42] = "50G_AUI2_AOC_ACC",
+ [43] = "50G_AUI2",
+ [44] = "50GBASE_CP",
+ [45] = "50GBASE_SR",
+ [46] = "50GBASE_FR",
+ [47] = "50GBASE_LR",
+ [48] = "50GBASE_KR_PAM4",
+ [49] = "50G_AUI1_AOC_ACC",
+ [50] = "50G_AUI1",
+ [51] = "100GBASE_CR4",
+ [52] = "100GBASE_SR4",
+ [53] = "100GBASE_LR4",
+ [54] = "100GBASE_KR4",
+ [55] = "100G_CAUI4_AOC_ACC",
+ [56] = "100G_CAUI4",
+ [57] = "100G_AUI4_AOC_ACC",
+ [58] = "100G_AUI4",
+ [59] = "100GBASE_CR_PAM4",
+ [60] = "100GBASE_KR_PAM4",
+ [61] = "100GBASE_CP2",
+ [62] = "100GBASE_SR2",
+ [63] = "100GBASE_DR",
+};
+
+static const char * const ice_link_mode_str_high[] = {
+ [0] = "100GBASE_KR2_PAM4",
+ [1] = "100G_CAUI2_AOC_ACC",
+ [2] = "100G_CAUI2",
+ [3] = "100G_AUI2_AOC_ACC",
+ [4] = "100G_AUI2",
+};
/**
- * ice_dump_phy_type_low - helper function to dump phy_type_low
+ * ice_dump_phy_type - helper function to dump phy_type
* @hw: pointer to the HW structure
* @low: 64 bit value for phy_type_low
+ * @high: 64 bit value for phy_type_high
* @prefix: prefix string to differentiate multiple dumps
*/
static void
-ice_dump_phy_type_low(struct ice_hw *hw, u64 low, const char *prefix)
+ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
+ u32 i;
+
ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix,
(unsigned long long)low);
- dump_phy_type(hw, low, 0, "100BASE_TX", prefix);
- dump_phy_type(hw, low, 1, "100M_SGMII", prefix);
- dump_phy_type(hw, low, 2, "1000BASE_T", prefix);
- dump_phy_type(hw, low, 3, "1000BASE_SX", prefix);
- dump_phy_type(hw, low, 4, "1000BASE_LX", prefix);
- dump_phy_type(hw, low, 5, "1000BASE_KX", prefix);
- dump_phy_type(hw, low, 6, "1G_SGMII", prefix);
- dump_phy_type(hw, low, 7, "2500BASE_T", prefix);
- dump_phy_type(hw, low, 8, "2500BASE_X", prefix);
- dump_phy_type(hw, low, 9, "2500BASE_KX", prefix);
- dump_phy_type(hw, low, 10, "5GBASE_T", prefix);
- dump_phy_type(hw, low, 11, "5GBASE_KR", prefix);
- dump_phy_type(hw, low, 12, "10GBASE_T", prefix);
- dump_phy_type(hw, low, 13, "10G_SFI_DA", prefix);
- dump_phy_type(hw, low, 14, "10GBASE_SR", prefix);
- dump_phy_type(hw, low, 15, "10GBASE_LR", prefix);
- dump_phy_type(hw, low, 16, "10GBASE_KR_CR1", prefix);
- dump_phy_type(hw, low, 17, "10G_SFI_AOC_ACC", prefix);
- dump_phy_type(hw, low, 18, "10G_SFI_C2C", prefix);
- dump_phy_type(hw, low, 19, "25GBASE_T", prefix);
- dump_phy_type(hw, low, 20, "25GBASE_CR", prefix);
- dump_phy_type(hw, low, 21, "25GBASE_CR_S", prefix);
- dump_phy_type(hw, low, 22, "25GBASE_CR1", prefix);
- dump_phy_type(hw, low, 23, "25GBASE_SR", prefix);
- dump_phy_type(hw, low, 24, "25GBASE_LR", prefix);
- dump_phy_type(hw, low, 25, "25GBASE_KR", prefix);
- dump_phy_type(hw, low, 26, "25GBASE_KR_S", prefix);
- dump_phy_type(hw, low, 27, "25GBASE_KR1", prefix);
- dump_phy_type(hw, low, 28, "25G_AUI_AOC_ACC", prefix);
- dump_phy_type(hw, low, 29, "25G_AUI_C2C", prefix);
- dump_phy_type(hw, low, 30, "40GBASE_CR4", prefix);
- dump_phy_type(hw, low, 31, "40GBASE_SR4", prefix);
- dump_phy_type(hw, low, 32, "40GBASE_LR4", prefix);
- dump_phy_type(hw, low, 33, "40GBASE_KR4", prefix);
- dump_phy_type(hw, low, 34, "40G_XLAUI_AOC_ACC", prefix);
- dump_phy_type(hw, low, 35, "40G_XLAUI", prefix);
- dump_phy_type(hw, low, 36, "50GBASE_CR2", prefix);
- dump_phy_type(hw, low, 37, "50GBASE_SR2", prefix);
- dump_phy_type(hw, low, 38, "50GBASE_LR2", prefix);
- dump_phy_type(hw, low, 39, "50GBASE_KR2", prefix);
- dump_phy_type(hw, low, 40, "50G_LAUI2_AOC_ACC", prefix);
- dump_phy_type(hw, low, 41, "50G_LAUI2", prefix);
- dump_phy_type(hw, low, 42, "50G_AUI2_AOC_ACC", prefix);
- dump_phy_type(hw, low, 43, "50G_AUI2", prefix);
- dump_phy_type(hw, low, 44, "50GBASE_CP", prefix);
- dump_phy_type(hw, low, 45, "50GBASE_SR", prefix);
- dump_phy_type(hw, low, 46, "50GBASE_FR", prefix);
- dump_phy_type(hw, low, 47, "50GBASE_LR", prefix);
- dump_phy_type(hw, low, 48, "50GBASE_KR_PAM4", prefix);
- dump_phy_type(hw, low, 49, "50G_AUI1_AOC_ACC", prefix);
- dump_phy_type(hw, low, 50, "50G_AUI1", prefix);
- dump_phy_type(hw, low, 51, "100GBASE_CR4", prefix);
- dump_phy_type(hw, low, 52, "100GBASE_SR4", prefix);
- dump_phy_type(hw, low, 53, "100GBASE_LR4", prefix);
- dump_phy_type(hw, low, 54, "100GBASE_KR4", prefix);
- dump_phy_type(hw, low, 55, "100G_CAUI4_AOC_ACC", prefix);
- dump_phy_type(hw, low, 56, "100G_CAUI4", prefix);
- dump_phy_type(hw, low, 57, "100G_AUI4_AOC_ACC", prefix);
- dump_phy_type(hw, low, 58, "100G_AUI4", prefix);
- dump_phy_type(hw, low, 59, "100GBASE_CR_PAM4", prefix);
- dump_phy_type(hw, low, 60, "100GBASE_KR_PAM4", prefix);
- dump_phy_type(hw, low, 61, "100GBASE_CP2", prefix);
- dump_phy_type(hw, low, 62, "100GBASE_SR2", prefix);
- dump_phy_type(hw, low, 63, "100GBASE_DR", prefix);
-}
-
-/**
- * ice_dump_phy_type_high - helper function to dump phy_type_high
- * @hw: pointer to the HW structure
- * @high: 64 bit value for phy_type_high
- * @prefix: prefix string to differentiate multiple dumps
- */
-static void
-ice_dump_phy_type_high(struct ice_hw *hw, u64 high, const char *prefix)
-{
+ for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_low); i++) {
+ if (low & BIT_ULL(i))
+ ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
+ prefix, i, ice_link_mode_str_low[i]);
+ }
+
ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix,
(unsigned long long)high);
- dump_phy_type(hw, high, 0, "100GBASE_KR2_PAM4", prefix);
- dump_phy_type(hw, high, 1, "100G_CAUI2_AOC_ACC", prefix);
- dump_phy_type(hw, high, 2, "100G_CAUI2", prefix);
- dump_phy_type(hw, high, 3, "100G_AUI2_AOC_ACC", prefix);
- dump_phy_type(hw, high, 4, "100G_AUI2", prefix);
+ for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_high); i++) {
+ if (high & BIT_ULL(i))
+ ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
+ prefix, i, ice_link_mode_str_high[i]);
+ }
}
/**
@@ -227,13 +219,23 @@ bool ice_is_e810t(struct ice_hw *hw)
{
switch (hw->device_id) {
case ICE_DEV_ID_E810C_SFP:
- if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
- hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
+ switch (hw->subsystem_device_id) {
+ case ICE_SUBDEV_ID_E810T:
+ case ICE_SUBDEV_ID_E810T2:
+ case ICE_SUBDEV_ID_E810T3:
+ case ICE_SUBDEV_ID_E810T4:
+ case ICE_SUBDEV_ID_E810T5:
+ case ICE_SUBDEV_ID_E810T7:
return true;
+ }
break;
case ICE_DEV_ID_E810C_QSFP:
- if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
+ switch (hw->subsystem_device_id) {
+ case ICE_SUBDEV_ID_E810T2:
+ case ICE_SUBDEV_ID_E810T5:
+ case ICE_SUBDEV_ID_E810T6:
return true;
+ }
break;
default:
break;
@@ -243,6 +245,31 @@ bool ice_is_e810t(struct ice_hw *hw)
}
/**
+ * ice_is_e823
+ * @hw: pointer to the hardware structure
+ *
+ * returns true if the device is E823-L or E823-C based, false if not.
+ */
+bool ice_is_e823(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -308,10 +335,10 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
ice_memcpy(hw->port_info->mac.lan_addr,
resp[i].mac_addr, ETH_ALEN,
- ICE_DMA_TO_NONDMA);
+ ICE_NONDMA_TO_NONDMA);
ice_memcpy(hw->port_info->mac.perm_addr,
resp[i].mac_addr,
- ETH_ALEN, ICE_DMA_TO_NONDMA);
+ ETH_ALEN, ICE_NONDMA_TO_NONDMA);
break;
}
return ICE_SUCCESS;
@@ -355,23 +382,30 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
cmd->param0 |= CPU_TO_LE16(report_mode);
+
status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");
- if (report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA)
+ switch (report_mode) {
+ case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
prefix = "phy_caps_media";
- else if (report_mode == ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA)
+ break;
+ case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
prefix = "phy_caps_no_media";
- else if (report_mode == ICE_AQC_REPORT_ACTIVE_CFG)
+ break;
+ case ICE_AQC_REPORT_ACTIVE_CFG:
prefix = "phy_caps_active";
- else if (report_mode == ICE_AQC_REPORT_DFLT_CFG)
+ break;
+ case ICE_AQC_REPORT_DFLT_CFG:
prefix = "phy_caps_default";
- else
+ break;
+ default:
prefix = "phy_caps_invalid";
+ }
- ice_dump_phy_type_low(hw, LE64_TO_CPU(pcaps->phy_type_low), prefix);
- ice_dump_phy_type_high(hw, LE64_TO_CPU(pcaps->phy_type_high), prefix);
+ ice_dump_phy_type(hw, LE64_TO_CPU(pcaps->phy_type_low),
+ LE64_TO_CPU(pcaps->phy_type_high), prefix);
ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
prefix, report_mode);
@@ -444,7 +478,7 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
*
* Find and return the node handle for a given node type and part number in the
* netlist. When found ICE_SUCCESS is returned, ICE_ERR_DOES_NOT_EXIST
- * otherwise. If @node_handle provided, it would be set to found node handle.
+ * otherwise. If node_handle provided, it would be set to found node handle.
*/
enum ice_status
ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
@@ -452,11 +486,12 @@ ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
{
struct ice_aqc_get_link_topo cmd;
u8 rec_node_part_number;
- enum ice_status status;
u16 rec_node_handle;
u8 idx;
for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
+ enum ice_status status;
+
memset(&cmd, 0, sizeof(cmd));
cmd.addr.topo_params.node_type_ctx =
@@ -545,7 +580,6 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_1000BASE_LX:
case ICE_PHY_TYPE_LOW_10GBASE_SR:
case ICE_PHY_TYPE_LOW_10GBASE_LR:
- case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
case ICE_PHY_TYPE_LOW_25GBASE_SR:
case ICE_PHY_TYPE_LOW_25GBASE_LR:
case ICE_PHY_TYPE_LOW_40GBASE_SR4:
@@ -602,6 +636,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_2500BASE_X:
case ICE_PHY_TYPE_LOW_5GBASE_KR:
case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
case ICE_PHY_TYPE_LOW_25GBASE_KR:
case ICE_PHY_TYPE_LOW_25GBASE_KR1:
case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
@@ -629,6 +664,8 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
return ICE_MEDIA_UNKNOWN;
}
+#define ice_get_link_status_datalen(hw) ICE_GET_LINK_STATUS_DATALEN_V1
+
/**
* ice_aq_get_link_info
* @pi: port information structure
@@ -668,8 +705,8 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
resp->cmd_flags = CPU_TO_LE16(cmd_flags);
resp->lport_num = pi->lport;
- status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
-
+ status = ice_aq_send_cmd(hw, &desc, &link_data,
+ ice_get_link_status_datalen(hw), cd);
if (status != ICE_SUCCESS)
return status;
@@ -1255,7 +1292,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
* that is occurring during a download package operation.
*/
for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
- ICE_PF_RESET_WAIT_COUNT; cnt++) {
+ ICE_PF_RESET_WAIT_COUNT; cnt++) {
reg = rd32(hw, PFGEN_CTRL);
if (!(reg & PFGEN_CTRL_PFSWR_M))
break;
@@ -2341,8 +2378,6 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
caps->msix_vector_first_id);
break;
- case ICE_AQC_CAPS_NVM_VER:
- break;
case ICE_AQC_CAPS_NVM_MGMT:
caps->sec_rev_disabled =
(number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
@@ -2369,6 +2404,11 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->iwarp = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %d\n", prefix, caps->iwarp);
break;
+ case ICE_AQC_CAPS_ROCEV2_LAG:
+ caps->roce_lag = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %d\n",
+ prefix, caps->roce_lag);
+ break;
case ICE_AQC_CAPS_LED:
if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
caps->led[phys_id] = true;
@@ -2425,7 +2465,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
{
- u8 index = cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0;
+ u8 index = (u8)(cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0);
caps->ext_topo_dev_img_ver_high[index] = number;
caps->ext_topo_dev_img_ver_low[index] = logical_id;
@@ -2458,6 +2498,14 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->ext_topo_dev_img_prog_en[index]);
break;
}
+ case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
+ caps->tx_sched_topo_comp_mode_en = (number == 1);
+ break;
+ case ICE_AQC_CAPS_DYN_FLATTENING:
+ caps->dyn_flattening_en = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT, "%s: dyn_flattening_en = %d\n",
+ prefix, caps->dyn_flattening_en);
+ break;
default:
/* Not one of the recognized common capabilities */
found = false;
@@ -2654,6 +2702,29 @@ ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
+ */
+static void
+ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ dev_p->nac_topo.mode = LE32_TO_CPU(cap->number);
+ dev_p->nac_topo.id = LE32_TO_CPU(cap->phys_id) & ICE_NAC_TOPO_ID_M;
+
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
+ !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
+ !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n",
+ dev_p->nac_topo.id);
+}
+
+/**
* ice_parse_dev_caps - Parse device capabilities
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
@@ -2695,6 +2766,9 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
case ICE_AQC_CAPS_VSI:
ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
break;
+ case ICE_AQC_CAPS_NAC_TOPOLOGY:
+ ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
default:
/* Don't list common capabilities as unknown */
if (!found)
@@ -2999,12 +3073,10 @@ ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
bool ice_is_100m_speed_supported(struct ice_hw *hw)
{
switch (hw->device_id) {
- case ICE_DEV_ID_E822C_10G_BASE_T:
case ICE_DEV_ID_E822C_SGMII:
- case ICE_DEV_ID_E822L_10G_BASE_T:
case ICE_DEV_ID_E822L_SGMII:
- case ICE_DEV_ID_E823L_10G_BASE_T:
case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823C_SGMII:
return true;
default:
return false;
@@ -3349,8 +3421,12 @@ enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
*/
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
{
- if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
- return ICE_FEC_AUTO;
+ if (caps & ICE_AQC_PHY_EN_AUTO_FEC) {
+ if (fec_options & ICE_AQC_PHY_FEC_DIS)
+ return ICE_FEC_DIS_AUTO;
+ else
+ return ICE_FEC_AUTO;
+ }
if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
@@ -3641,6 +3717,12 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
/* Clear all FEC option bits. */
cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
break;
+ case ICE_FEC_DIS_AUTO:
+ /* Set No FEC and auto FEC */
+ if (!ice_fw_supports_fec_dis_auto(hw))
+ return ICE_ERR_NOT_SUPPORTED;
+ cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS;
+ /* fall-through */
case ICE_FEC_AUTO:
/* AND auto FEC bit, and all caps bits. */
cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
@@ -3909,7 +3991,7 @@ ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);
- desc.datalen = data_size;
+ desc.datalen = CPU_TO_LE16(data_size);
ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
ICE_NONDMA_TO_NONDMA);
cmd->start_address = CPU_TO_LE32(start_address);
@@ -5932,7 +6014,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
cmd = &desc.params.read_write_gpio;
- cmd->gpio_ctrl_handle = gpio_ctrl_handle;
+ cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
cmd->gpio_num = pin_idx;
cmd->gpio_val = value ? 1 : 0;
@@ -5960,7 +6042,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
cmd = &desc.params.read_write_gpio;
- cmd->gpio_ctrl_handle = gpio_ctrl_handle;
+ cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
cmd->gpio_num = pin_idx;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
@@ -5972,20 +6054,22 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
}
/**
- * ice_fw_supports_link_override
+ * ice_is_fw_api_min_ver
* @hw: pointer to the hardware structure
+ * @maj: major version
+ * @min: minor version
+ * @patch: patch version
*
- * Checks if the firmware supports link override
+ * Checks if the firmware is minimum version
*/
-bool ice_fw_supports_link_override(struct ice_hw *hw)
+static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
- if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
+ if (hw->api_maj_ver == maj) {
+ if (hw->api_min_ver > min)
return true;
- if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
- hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
+ if (hw->api_min_ver == min && hw->api_patch >= patch)
return true;
- } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
+ } else if (hw->api_maj_ver > maj) {
return true;
}
@@ -5993,6 +6077,48 @@ bool ice_fw_supports_link_override(struct ice_hw *hw)
}
/**
+ * ice_is_fw_min_ver
+ * @hw: pointer to the hardware structure
+ * @branch: branch version
+ * @maj: major version
+ * @min: minor version
+ * @patch: patch version
+ *
+ * Checks if the firmware is minimum version
+ */
+static bool ice_is_fw_min_ver(struct ice_hw *hw, u8 branch, u8 maj, u8 min,
+ u8 patch)
+{
+ if (hw->fw_branch == branch) {
+ if (hw->fw_maj_ver > maj)
+ return true;
+ if (hw->fw_maj_ver == maj) {
+ if (hw->fw_min_ver > min)
+ return true;
+ if (hw->fw_min_ver == min && hw->fw_patch >= patch)
+ return true;
+ }
+ } else if (hw->fw_branch > branch) {
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_fw_supports_link_override
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the firmware supports link override
+ */
+bool ice_fw_supports_link_override(struct ice_hw *hw)
+{
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
+ ICE_FW_API_LINK_OVERRIDE_MIN,
+ ICE_FW_API_LINK_OVERRIDE_PATCH);
+}
+
+/**
* ice_get_link_default_override
* @ldo: pointer to the link default override struct
* @pi: pointer to the port info struct
@@ -6254,19 +6380,12 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
*/
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
- if (hw->mac_type != ICE_MAC_E810)
+ if (hw->mac_type != ICE_MAC_E810 && hw->mac_type != ICE_MAC_GENERIC)
return false;
- if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
- return true;
- if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
- hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
- return true;
- } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
- return true;
- }
- return false;
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
+ ICE_FW_API_LLDP_FLTR_MIN,
+ ICE_FW_API_LLDP_FLTR_PATCH);
}
/**
@@ -6296,6 +6415,19 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
}
/**
+ * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
+ * @hw: pointer to HW struct
+ */
+enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_execute_pending_lldp_mib);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
* ice_fw_supports_report_dflt_cfg
* @hw: pointer to the hardware structure
*
@@ -6303,19 +6435,25 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
*/
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
- if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
- return true;
- if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
- hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
- return true;
- } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
- return true;
- }
- return false;
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
+ ICE_FW_API_REPORT_DFLT_CFG_MIN,
+ ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
/**
+ * ice_fw_supports_fec_dis_auto
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the firmware supports FEC disable in Auto FEC mode
+ */
+bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw)
+{
+ return ice_is_fw_min_ver(hw, ICE_FW_FEC_DIS_AUTO_BRANCH,
+ ICE_FW_FEC_DIS_AUTO_MAJ,
+ ICE_FW_FEC_DIS_AUTO_MIN,
+ ICE_FW_FEC_DIS_AUTO_PATCH);
+}
+/**
* ice_is_fw_auto_drop_supported
* @hw: pointer to the hardware structure
*
@@ -6328,3 +6466,4 @@ bool ice_is_fw_auto_drop_supported(struct ice_hw *hw)
return true;
return false;
}
+
diff --git a/sys/dev/ice/ice_common.h b/sys/dev/ice/ice_common.h
index b113082b2394..73e051fdda67 100644
--- a/sys/dev/ice/ice_common.h
+++ b/sys/dev/ice/ice_common.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -60,7 +60,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
-void ice_shutdown_all_ctrlq(struct ice_hw *hw);
+void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading);
void ice_destroy_all_ctrlq(struct ice_hw *hw);
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
@@ -197,6 +197,7 @@ enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
bool ice_fw_supports_link_override(struct ice_hw *hw);
+bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw);
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
@@ -301,6 +302,7 @@ enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw);
void ice_print_rollback_msg(struct ice_hw *hw);
bool ice_is_e810(struct ice_hw *hw);
bool ice_is_e810t(struct ice_hw *hw);
+bool ice_is_e823(struct ice_hw *hw);
enum ice_status
ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
u32 reg_addr1, u32 reg_val1);
@@ -332,6 +334,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
enum ice_status
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
+enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw);
enum ice_status
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
diff --git a/sys/dev/ice/ice_common_sysctls.h b/sys/dev/ice/ice_common_sysctls.h
index 0d149a5bc25c..11cfc50848f5 100644
--- a/sys/dev/ice/ice_common_sysctls.h
+++ b/sys/dev/ice/ice_common_sysctls.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -95,6 +95,17 @@ bool ice_enable_tx_lldp_filter = true;
bool ice_enable_health_events = true;
/**
+ * @var ice_tx_balance_en
+ * @brief boolean permitting the 5-layer scheduler topology enablement
+ *
+ * Global sysctl variable indicating whether the driver will allow the
+ * 5-layer scheduler topology feature to be enabled. It's _not_
+ * specifically enabling the feature, just allowing it depending on what
+ * the DDP package allows.
+ */
+bool ice_tx_balance_en = true;
+
+/**
* @var ice_rdma_max_msix
* @brief maximum number of MSI-X vectors to reserve for RDMA interface
*
@@ -137,4 +148,8 @@ SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, enable_tx_lldp_filter, CTLFLAG_RDTUN,
&ice_enable_tx_lldp_filter, 0,
"Drop Ethertype 0x88cc LLDP frames originating from non-HW sources");
+SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, tx_balance_en, CTLFLAG_RWTUN,
+ &ice_tx_balance_en, 0,
+ "Enable 5-layer scheduler topology");
+
#endif /* _ICE_COMMON_SYSCTLS_H_ */
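For the new tx_balance_en knob above: SYSCTL_BOOL places it under the
hw.ice.debug sysctl tree with CTLFLAG_RWTUN, so it should be settable both
as a boot-time tunable and at runtime. Usage inferred from the declaration,
not verified against a running system:

    # sysctl hw.ice.debug.tx_balance_en=0     (at runtime)
    hw.ice.debug.tx_balance_en="0"            (in /boot/loader.conf)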
diff --git a/sys/dev/ice/ice_common_txrx.h b/sys/dev/ice/ice_common_txrx.h
index d5e6182c2212..dd2b3c5bff0d 100644
--- a/sys/dev/ice/ice_common_txrx.h
+++ b/sys/dev/ice/ice_common_txrx.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_controlq.c b/sys/dev/ice/ice_controlq.c
index 636c2da3e4a1..76de98dcfafc 100644
--- a/sys/dev/ice/ice_controlq.c
+++ b/sys/dev/ice/ice_controlq.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -508,12 +508,18 @@ static bool ice_aq_ver_check(struct ice_hw *hw)
return false;
} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
- ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ ice_info(hw, "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
- ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
} else {
/* Major API version is older than expected, log a warning */
- ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
}
return true;
}
@@ -665,10 +671,12 @@ init_ctrlq_free_sq:
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
+ * @unloading: is the driver unloading itself
*
* NOTE: this function does not destroy the control queue locks.
*/
-static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
+ bool unloading)
{
struct ice_ctl_q_info *cq;
@@ -678,7 +686,7 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
case ICE_CTL_Q_ADMIN:
cq = &hw->adminq;
if (ice_check_sq_alive(hw, cq))
- ice_aq_q_shutdown(hw, true);
+ ice_aq_q_shutdown(hw, unloading);
break;
case ICE_CTL_Q_MAILBOX:
cq = &hw->mailboxq;
@@ -694,18 +702,19 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
/**
* ice_shutdown_all_ctrlq - shutdown routine for all control queues
* @hw: pointer to the hardware structure
+ * @unloading: is the driver unloading itself
*
* NOTE: this function does not destroy the control queue locks. The driver
* may call this at runtime to shutdown and later restart control queues, such
* as in response to a reset event.
*/
-void ice_shutdown_all_ctrlq(struct ice_hw *hw)
+void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
{
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Shutdown FW admin queue */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
/* Shutdown PF-VF Mailbox */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}
/**
@@ -739,7 +748,7 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
break;
ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
@@ -809,7 +818,7 @@ static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
/* shut down all the control queues first */
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, true);
ice_destroy_ctrlq_locks(&hw->adminq);
ice_destroy_ctrlq_locks(&hw->mailboxq);
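On the unloading parameter threaded through the control-queue shutdown path
above: it lets callers distinguish a transient shutdown (such as around a
reset, after which the queues are re-initialized) from a true driver unload,
and only the latter is reported to firmware via ice_aq_q_shutdown(hw,
unloading). A sketch of the two call patterns implied by the diff,
illustrative rather than verbatim driver code:

    /* Around a reset: shut queues down without telling FW we are leaving. */
    ice_shutdown_all_ctrlq(hw, false);
    /* ... handle the reset ... */
    ice_init_all_ctrlq(hw);

    /* On detach: the driver really is unloading. */
    ice_shutdown_all_ctrlq(hw, true);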
diff --git a/sys/dev/ice/ice_controlq.h b/sys/dev/ice/ice_controlq.h
index 4ed0809f2bad..16d47ae77f8f 100644
--- a/sys/dev/ice/ice_controlq.h
+++ b/sys/dev/ice/ice_controlq.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_dcb.c b/sys/dev/ice/ice_dcb.c
index 137ffad92935..19ab0d349cf7 100644
--- a/sys/dev/ice/ice_dcb.c
+++ b/sys/dev/ice/ice_dcb.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -102,6 +102,9 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
if (!ena_update)
cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS;
+ else
+ cmd->command |= ICE_AQ_LLDP_MIB_PENDING_ENABLE <<
+ ICE_AQ_LLDP_MIB_PENDING_S;
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -857,9 +860,9 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop_start_specific_agent *cmd;
- enum ice_status status;
+ enum ice_adminq_opc opcode;
struct ice_aq_desc desc;
- u16 opcode;
+ enum ice_status status;
cmd = &desc.params.lldp_agent_ctrl;
@@ -1106,8 +1109,8 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
*/
if (!err && sync && oper) {
dcbcfg->app[app_index].priority =
- (app_prio & ice_aqc_cee_app_mask) >>
- ice_aqc_cee_app_shift;
+ (u8)((app_prio & ice_aqc_cee_app_mask) >>
+ ice_aqc_cee_app_shift);
dcbcfg->app[app_index].selector = ice_app_sel_type;
dcbcfg->app[app_index].prot_id = ice_app_prot_id_type;
app_index++;
@@ -1189,6 +1192,43 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
}
/**
+ * ice_get_dcb_cfg_from_mib_change
+ * @pi: port information structure
+ * @event: pointer to the admin queue receive event
+ *
+ * Set DCB configuration from received MIB Change event
+ */
+void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
+ struct ice_rq_event_info *event)
+{
+ struct ice_dcbx_cfg *dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
+ struct ice_aqc_lldp_get_mib *mib;
+ u8 change_type, dcbx_mode;
+
+ mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+
+ change_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
+ if (change_type == ICE_AQ_LLDP_MIB_REMOTE)
+ dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
+
+ dcbx_mode = ((mib->type & ICE_AQ_LLDP_DCBX_M) >>
+ ICE_AQ_LLDP_DCBX_S);
+
+ switch (dcbx_mode) {
+ case ICE_AQ_LLDP_DCBX_IEEE:
+ dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
+ ice_lldp_to_dcb_cfg(event->msg_buf, dcbx_cfg);
+ break;
+
+ case ICE_AQ_LLDP_DCBX_CEE:
+ pi->qos_cfg.desired_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg;
+ ice_cee_to_dcb_cfg((struct ice_aqc_get_cee_dcb_cfg_resp *)
+ event->msg_buf, pi);
+ break;
+ }
+}
+
+/**
* ice_init_dcb
* @hw: pointer to the HW struct
* @enable_mib_change: enable MIB change event
@@ -1597,7 +1637,7 @@ ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
tlv->ouisubtype = HTONL(ouisubtype);
buf[0] = dcbcfg->pfc.pfccap & 0xF;
- buf[1] = dcbcfg->pfc.pfcena & 0xF;
+ buf[1] = dcbcfg->pfc.pfcena;
}
/**
diff --git a/sys/dev/ice/ice_dcb.h b/sys/dev/ice/ice_dcb.h
index 6d624268bb74..504a356221c5 100644
--- a/sys/dev/ice/ice_dcb.h
+++ b/sys/dev/ice/ice_dcb.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -249,6 +249,8 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
+void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
+ struct ice_rq_event_info *event);
enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg);
enum ice_status
diff --git a/sys/dev/ice/ice_ddp_common.c b/sys/dev/ice/ice_ddp_common.c
new file mode 100644
index 000000000000..730b78b0f81e
--- /dev/null
+++ b/sys/dev/ice/ice_ddp_common.c
@@ -0,0 +1,2532 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2022, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#include "ice_ddp_common.h"
+#include "ice_type.h"
+#include "ice_common.h"
+#include "ice_sched.h"
+
+/**
+ * ice_aq_download_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer to transfer
+ * @buf_size: the size of the package buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Download Package (0x0C40)
+ */
+static enum ice_status
+ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == ICE_ERR_AQ_ERROR) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = LE32_TO_CPU(resp->error_offset);
+ if (error_info)
+ *error_info = LE32_TO_CPU(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_upload_section
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer which will receive the section
+ * @buf_size: the size of the package buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Upload Section (0x0C41)
+ */
+enum ice_status
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+}
+
+/**
+ * ice_aq_update_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package cmd buffer
+ * @buf_size: the size of the package cmd buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update Package (0x0C42)
+ */
+static enum ice_status
+ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
+ bool last_buf, u32 *error_offset, u32 *error_info,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == ICE_ERR_AQ_ERROR) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = LE32_TO_CPU(resp->error_offset);
+ if (error_info)
+ *error_info = LE32_TO_CPU(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * ice_find_seg_in_pkg
+ * @hw: pointer to the hardware structure
+ * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ */
+struct ice_generic_seg_hdr *
+ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+ struct ice_pkg_hdr *pkg_hdr)
+{
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
+ pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
+ pkg_hdr->pkg_format_ver.update,
+ pkg_hdr->pkg_format_ver.draft);
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
+ struct ice_generic_seg_hdr *seg;
+
+ seg = (struct ice_generic_seg_hdr *)
+ ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
+
+ if (LE32_TO_CPU(seg->seg_type) == seg_type)
+ return seg;
+ }
+
+ return NULL;
+}
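+
+/* Illustrative usage (hypothetical, not part of the driver): locate the ice
+ * configuration segment in a package that has already passed ice_verify_pkg().
+ *
+ *	struct ice_seg *seg;
+ *
+ *	seg = (struct ice_seg *)
+ *		ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_E810, pkg_hdr);
+ *	if (!seg)
+ *		return ICE_DDP_PKG_INVALID_FILE;
+ */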
+
+/**
+ * ice_get_pkg_seg_by_idx
+ * @pkg_hdr: pointer to the package header to be searched
+ * @idx: index of segment
+ */
+static struct ice_generic_seg_hdr *
+ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
+{
+ struct ice_generic_seg_hdr *seg = NULL;
+
+ if (idx < LE32_TO_CPU(pkg_hdr->seg_count))
+ seg = (struct ice_generic_seg_hdr *)
+ ((u8 *)pkg_hdr +
+ LE32_TO_CPU(pkg_hdr->seg_offset[idx]));
+
+ return seg;
+}
+
+/**
+ * ice_is_signing_seg_at_idx - determine if segment is a signing segment
+ * @pkg_hdr: pointer to package header
+ * @idx: segment index
+ */
+static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
+{
+ struct ice_generic_seg_hdr *seg;
+ bool retval = false;
+
+ seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx);
+ if (seg)
+ retval = LE32_TO_CPU(seg->seg_type) == SEGMENT_TYPE_SIGNING;
+
+ return retval;
+}
+
+/**
+ * ice_is_signing_seg_type_at_idx
+ * @pkg_hdr: pointer to package header
+ * @idx: segment index
+ * @seg_id: segment id that is expected
+ * @sign_type: signing type
+ *
+ * Determine if a segment is a signing segment of the correct type
+ */
+static bool
+ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
+ u32 seg_id, u32 sign_type)
+{
+ bool result = false;
+
+ if (ice_is_signing_seg_at_idx(pkg_hdr, idx)) {
+ struct ice_sign_seg *seg;
+
+ seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr,
+ idx);
+ if (seg && LE32_TO_CPU(seg->seg_id) == seg_id &&
+ LE32_TO_CPU(seg->sign_type) == sign_type)
+ result = true;
+ }
+
+ return result;
+}
+
+/**
+ * ice_update_pkg_no_lock
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ */
+enum ice_status
+ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_status status = ICE_SUCCESS;
+ u32 i;
+
+ for (i = 0; i < count; i++) {
+ struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
+ bool last = ((i + 1) == count);
+ u32 offset, info;
+
+ status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
+ last, &offset, &info, NULL);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
+ status, offset, info);
+ break;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ice_update_pkg
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains change lock and updates package.
+ */
+enum ice_status
+ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_status status;
+
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ice_update_pkg_no_lock(hw, bufs, count);
+
+ ice_release_change_lock(hw);
+
+ return status;
+}
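+
+/* Illustrative usage (hypothetical): push a single freshly built update
+ * buffer under the change lock; 'bld' would come from the ice_pkg_buf_*
+ * build helpers later in this file.
+ *
+ *	enum ice_status status;
+ *
+ *	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ *	if (status)
+ *		ice_debug(hw, ICE_DBG_PKG, "update failed: %d\n", status);
+ */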
+
+static enum ice_ddp_state
+ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
+{
+ switch (aq_err) {
+ case ICE_AQ_RC_ENOSEC:
+ return ICE_DDP_PKG_NO_SEC_MANIFEST;
+ case ICE_AQ_RC_EBADSIG:
+ return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
+ case ICE_AQ_RC_ESVN:
+ return ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW;
+ case ICE_AQ_RC_EBADMAN:
+ return ICE_DDP_PKG_MANIFEST_INVALID;
+ case ICE_AQ_RC_EBADBUF:
+ return ICE_DDP_PKG_BUFFER_INVALID;
+ default:
+ return ICE_DDP_PKG_ERR;
+ }
+}
+
+/**
+ * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
+ * @buf: pointer to buffer header
+ */
+static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
+{
+ bool metadata = false;
+
+ if (LE32_TO_CPU(buf->section_entry[0].type) & ICE_METADATA_BUF)
+ metadata = true;
+
+ return metadata;
+}
+
+/**
+ * ice_is_last_download_buffer
+ * @buf: pointer to current buffer header
+ * @idx: index of the buffer in the current sequence
+ * @count: the buffer count in the current sequence
+ *
+ * Note: this routine should only be called if the buffer is not the last buffer
+ */
+static bool
+ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count)
+{
+ bool last = ((idx + 1) == count);
+
+ /* A set metadata flag in the next buffer will signal that the current
+ * buffer will be the last buffer downloaded
+ */
+ if (!last) {
+ struct ice_buf *next_buf = ((struct ice_buf *)buf) + 1;
+
+ last = ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf);
+ }
+
+ return last;
+}
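+
+/* Worked example (illustrative): with count = 4 and a buffer sequence of
+ * {config, config, metadata, metadata}, the call for idx = 1 peeks at
+ * buffer 2, sees its metadata flag set, and reports buffer 1 as the last
+ * buffer that will actually be downloaded.
+ */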
+
+/**
+ * ice_dwnld_cfg_bufs_no_lock
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @start: buffer index of first buffer to download
+ * @count: the number of buffers to download
+ * @indicate_last: if true, then set last buffer flag on last buffer download
+ *
+ * Downloads package configuration buffers to the firmware. Metadata buffers
+ * are skipped, and the first metadata buffer found indicates that the rest
+ * of the buffers are all metadata buffers.
+ */
+static enum ice_ddp_state
+ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
+ u32 count, bool indicate_last)
+{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
+ struct ice_buf_hdr *bh;
+ enum ice_aq_err err;
+ u32 offset, info, i;
+
+ if (!bufs || !count)
+ return ICE_DDP_PKG_ERR;
+
+ /* If the first buffer's first section has its metadata bit set
+ * then there are no buffers to be downloaded, and the operation is
+ * considered a success.
+ */
+ bh = (struct ice_buf_hdr *)(bufs + start);
+ if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
+ return ICE_DDP_PKG_SUCCESS;
+
+ for (i = 0; i < count; i++) {
+ enum ice_status status;
+ bool last = false;
+
+ bh = (struct ice_buf_hdr *)(bufs + start + i);
+
+ if (indicate_last)
+ last = ice_is_last_download_buffer(bh, i, count);
+
+ status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
+ &offset, &info, NULL);
+
+ /* Save AQ status from download package */
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
+ status, offset, info);
+ err = hw->adminq.sq_last_status;
+ state = ice_map_aq_err_to_ddp_state(err);
+ break;
+ }
+
+ if (last)
+ break;
+ }
+
+ return state;
+}
+
+/**
+ * ice_aq_get_pkg_info_list
+ * @hw: pointer to the hardware structure
+ * @pkg_info: the buffer which will receive the information list
+ * @buf_size: the size of the pkg_info information buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get Package Info List (0x0C43)
+ */
+static enum ice_status
+ice_aq_get_pkg_info_list(struct ice_hw *hw,
+ struct ice_aqc_get_pkg_info_resp *pkg_info,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
+}
+
+/**
+ * ice_has_signing_seg - determine if package has a signing segment
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to the driver's package hdr
+ */
+static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
+{
+ struct ice_generic_seg_hdr *seg_hdr;
+
+ seg_hdr = (struct ice_generic_seg_hdr *)
+ ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr);
+
+ return seg_hdr ? true : false;
+}
+
+/**
+ * ice_get_pkg_segment_id - get correct package segment id, based on device
+ * @mac_type: MAC type of the device
+ */
+static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
+{
+ u32 seg_id;
+
+ switch (mac_type) {
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K:
+ default:
+ seg_id = SEGMENT_TYPE_ICE_E810;
+ break;
+ }
+
+ return seg_id;
+}
+
+/**
+ * ice_get_pkg_sign_type - get package segment sign type, based on device
+ * @mac_type: MAC type of the device
+ */
+static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
+{
+ u32 sign_type;
+
+ switch (mac_type) {
+ case ICE_MAC_GENERIC_3K:
+ sign_type = SEGMENT_SIGN_TYPE_RSA3K;
+ break;
+ case ICE_MAC_GENERIC:
+ default:
+ sign_type = SEGMENT_SIGN_TYPE_RSA2K;
+ break;
+ }
+
+ return sign_type;
+}
+
+/**
+ * ice_get_signing_req - get correct package requirements, based on device
+ * @hw: pointer to the hardware structure
+ */
+static void ice_get_signing_req(struct ice_hw *hw)
+{
+ hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type);
+ hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type);
+}
+
+/**
+ * ice_download_pkg_sig_seg - download a signature segment
+ * @hw: pointer to the hardware structure
+ * @seg: pointer to signature segment
+ */
+static enum ice_ddp_state
+ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
+{
+ enum ice_ddp_state state;
+
+ state = ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0,
+ LE32_TO_CPU(seg->buf_tbl.buf_count),
+ false);
+
+ return state;
+}
+
+/**
+ * ice_download_pkg_config_seg - download a config segment
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to package header
+ * @idx: segment index
+ * @start: starting buffer
+ * @count: buffer count
+ *
+ * Note: idx must reference an ICE segment
+ */
+static enum ice_ddp_state
+ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
+ u32 idx, u32 start, u32 count)
+{
+ struct ice_buf_table *bufs;
+ enum ice_ddp_state state;
+ struct ice_seg *seg;
+ u32 buf_count;
+
+ seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
+ if (!seg)
+ return ICE_DDP_PKG_ERR;
+
+ bufs = ice_find_buf_table(seg);
+ buf_count = LE32_TO_CPU(bufs->buf_count);
+
+ if (start >= buf_count || start + count > buf_count)
+ return ICE_DDP_PKG_ERR;
+
+ state = ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count,
+ true);
+
+ return state;
+}
+
+/**
+ * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to package header
+ * @idx: segment index (must be a signature segment)
+ *
+ * Note: idx must reference a signature segment
+ */
+static enum ice_ddp_state
+ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
+ u32 idx)
+{
+ enum ice_ddp_state state;
+ struct ice_sign_seg *seg;
+ u32 conf_idx;
+ u32 start;
+ u32 count;
+
+ seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
+ if (!seg) {
+ state = ICE_DDP_PKG_ERR;
+ goto exit;
+ }
+
+ conf_idx = LE32_TO_CPU(seg->signed_seg_idx);
+ start = LE32_TO_CPU(seg->signed_buf_start);
+ count = LE32_TO_CPU(seg->signed_buf_count);
+
+ state = ice_download_pkg_sig_seg(hw, seg);
+ if (state)
+ goto exit;
+
+ state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
+ count);
+
+exit:
+ return state;
+}
+
+/**
+ * ice_match_signing_seg - determine if a matching signing segment exists
+ * @pkg_hdr: pointer to package header
+ * @seg_id: segment id that is expected
+ * @sign_type: signing type
+ */
+static bool
+ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type)
+{
+ bool match = false;
+ u32 i;
+
+ for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
+ if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id,
+ sign_type)) {
+ match = true;
+ break;
+ }
+ }
+
+ return match;
+}
+
+/**
+ * ice_post_dwnld_pkg_actions - perform post download package actions
+ * @hw: pointer to the hardware structure
+ */
+static enum ice_ddp_state
+ice_post_dwnld_pkg_actions(struct ice_hw *hw)
+{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
+ enum ice_status status;
+
+ status = ice_set_vlan_mode(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
+ status);
+ state = ICE_DDP_PKG_ERR;
+ }
+
+ return state;
+}
+
+/**
+ * ice_download_pkg_with_sig_seg - download package using signature segments
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to package header
+ */
+static enum ice_ddp_state
+ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
+{
+ enum ice_aq_err aq_err = hw->adminq.sq_last_status;
+ enum ice_ddp_state state = ICE_DDP_PKG_ERR;
+ enum ice_status status;
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id);
+ ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type);
+
+ status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ if (status) {
+ if (status == ICE_ERR_AQ_NO_WORK)
+ state = ICE_DDP_PKG_ALREADY_LOADED;
+ else
+ state = ice_map_aq_err_to_ddp_state(aq_err);
+ return state;
+ }
+
+ for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
+ if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id,
+ hw->pkg_sign_type))
+ continue;
+
+ state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i);
+ if (state)
+ break;
+ }
+
+ if (!state)
+ state = ice_post_dwnld_pkg_actions(hw);
+
+ ice_release_global_cfg_lock(hw);
+
+ return state;
+}
+
+/**
+ * ice_dwnld_cfg_bufs
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains global config lock and downloads the package configuration buffers
+ * to the firmware.
+ */
+static enum ice_ddp_state
+ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
+ enum ice_status status;
+ struct ice_buf_hdr *bh;
+
+ if (!bufs || !count)
+ return ICE_DDP_PKG_ERR;
+
+ /* If the first buffer's first section has its metadata bit set
+ * then there are no buffers to be downloaded, and the operation is
+ * considered a success.
+ */
+ bh = (struct ice_buf_hdr *)bufs;
+ if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
+ return ICE_DDP_PKG_SUCCESS;
+
+ status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ if (status) {
+ if (status == ICE_ERR_AQ_NO_WORK)
+ return ICE_DDP_PKG_ALREADY_LOADED;
+ return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
+ }
+
+ state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
+ if (!state)
+ state = ice_post_dwnld_pkg_actions(hw);
+
+ ice_release_global_cfg_lock(hw);
+
+ return state;
+}
+
+/**
+ * ice_download_pkg_without_sig_seg
+ * @hw: pointer to the hardware structure
+ * @ice_seg: pointer to the segment of the package to be downloaded
+ *
+ * Handles the download of a complete package without a signature segment.
+ */
+static enum ice_ddp_state
+ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_buf_table *ice_buf_tbl;
+ enum ice_ddp_state state;
+
+ ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
+ ice_seg->hdr.seg_format_ver.major,
+ ice_seg->hdr.seg_format_ver.minor,
+ ice_seg->hdr.seg_format_ver.update,
+ ice_seg->hdr.seg_format_ver.draft);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
+ LE32_TO_CPU(ice_seg->hdr.seg_type),
+ LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
+
+ ice_buf_tbl = ice_find_buf_table(ice_seg);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
+ LE32_TO_CPU(ice_buf_tbl->buf_count));
+
+ state = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+ LE32_TO_CPU(ice_buf_tbl->buf_count));
+
+ return state;
+}
+
+/**
+ * ice_download_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to package header
+ * @ice_seg: pointer to the segment of the package to be downloaded
+ *
+ * Handles the download of a complete package.
+ */
+static enum ice_ddp_state
+ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
+ struct ice_seg *ice_seg)
+{
+ enum ice_ddp_state state;
+
+ if (hw->pkg_has_signing_seg)
+ state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
+ else
+ state = ice_download_pkg_without_sig_seg(hw, ice_seg);
+
+ ice_post_pkg_dwnld_vlan_mode_cfg(hw);
+
+ return state;
+}
+
+/**
+ * ice_init_pkg_info
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to the driver's package hdr
+ *
+ * Saves off the package details into the HW structure.
+ */
+static enum ice_ddp_state
+ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
+{
+ struct ice_generic_seg_hdr *seg_hdr;
+
+ if (!pkg_hdr)
+ return ICE_DDP_PKG_ERR;
+
+ hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr);
+ ice_get_signing_req(hw);
+
+ ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
+ hw->pkg_seg_id);
+
+ seg_hdr = (struct ice_generic_seg_hdr *)
+ ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
+ if (seg_hdr) {
+ struct ice_meta_sect *meta;
+ struct ice_pkg_enum state;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ /* Get package information from the Metadata Section */
+ meta = (struct ice_meta_sect *)
+ ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
+ ICE_SID_METADATA);
+ if (!meta) {
+ ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ hw->pkg_ver = meta->ver;
+ ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
+ ICE_NONDMA_TO_NONDMA);
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
+ meta->ver.major, meta->ver.minor, meta->ver.update,
+ meta->ver.draft, meta->name);
+
+ hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
+ ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
+ sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);
+
+ ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
+ seg_hdr->seg_format_ver.major,
+ seg_hdr->seg_format_ver.minor,
+ seg_hdr->seg_format_ver.update,
+ seg_hdr->seg_format_ver.draft,
+ seg_hdr->seg_id);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ return ICE_DDP_PKG_SUCCESS;
+}
+
+/**
+ * ice_get_pkg_info
+ * @hw: pointer to the hardware structure
+ *
+ * Store details of the package currently loaded in HW into the HW structure.
+ */
+enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
+{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
+ struct ice_aqc_get_pkg_info_resp *pkg_info;
+ u16 size;
+ u32 i;
+
+ size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
+ pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
+ if (!pkg_info)
+ return ICE_DDP_PKG_ERR;
+
+ if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
+ state = ICE_DDP_PKG_ERR;
+ goto init_pkg_free_alloc;
+ }
+
+ for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
+#define ICE_PKG_FLAG_COUNT 4
+ char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
+ u8 place = 0;
+
+ if (pkg_info->pkg_info[i].is_active) {
+ flags[place++] = 'A';
+ hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
+ hw->active_track_id =
+ LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
+ ice_memcpy(hw->active_pkg_name,
+ pkg_info->pkg_info[i].name,
+ sizeof(pkg_info->pkg_info[i].name),
+ ICE_NONDMA_TO_NONDMA);
+ hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
+ }
+ if (pkg_info->pkg_info[i].is_active_at_boot)
+ flags[place++] = 'B';
+ if (pkg_info->pkg_info[i].is_modified)
+ flags[place++] = 'M';
+ if (pkg_info->pkg_info[i].is_in_nvm)
+ flags[place++] = 'N';
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
+ i, pkg_info->pkg_info[i].ver.major,
+ pkg_info->pkg_info[i].ver.minor,
+ pkg_info->pkg_info[i].ver.update,
+ pkg_info->pkg_info[i].ver.draft,
+ pkg_info->pkg_info[i].name, flags);
+ }
+
+init_pkg_free_alloc:
+ ice_free(hw, pkg_info);
+
+ return state;
+}
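+
+/* Example of the flag string built above (illustrative): an active package
+ * that is also active at boot and present in NVM logs as "ABN"; a modified,
+ * inactive package stored in NVM logs as "MN".
+ */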
+
+/**
+ * ice_label_enum_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the label entry to be returned
+ * @offset: pointer to receive absolute offset, always zero for label sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual label entries.
+ */
+static void *
+ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_label_section *labels;
+
+ if (!section)
+ return NULL;
+
+ if (index > ICE_MAX_LABELS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ labels = (struct ice_label_section *)section;
+ if (index >= LE16_TO_CPU(labels->count))
+ return NULL;
+
+ return labels->label + index;
+}
+
+/**
+ * ice_enum_labels
+ * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
+ * @type: the section type that will contain the label (0 on subsequent calls)
+ * @state: ice_pkg_enum structure that will hold the state of the enumeration
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Enumerates a list of labels in the package. The caller will call
+ * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
+ * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
+ * the end of the list has been reached.
+ */
+static char *
+ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
+ u16 *value)
+{
+ struct ice_label *label;
+
+ /* Check for valid label section on first call */
+ if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
+ return NULL;
+
+ label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
+ NULL,
+ ice_label_enum_handler);
+ if (!label)
+ return NULL;
+
+ *value = LE16_TO_CPU(label->value);
+ return label->name;
+}
+
+/**
+ * ice_find_label_value
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @name: name of the label to search for
+ * @type: the section type that will contain the label
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Finds a label's value given the label name and the section type to search.
+ * The ice_seg parameter must not be NULL since the first call to
+ * ice_enum_labels requires a pointer to an actual ice_seg structure.
+ */
+enum ice_status
+ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
+ u16 *value)
+{
+ struct ice_pkg_enum state;
+ char *label_name;
+ u16 val;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!ice_seg)
+ return ICE_ERR_PARAM;
+
+ do {
+ label_name = ice_enum_labels(ice_seg, type, &state, &val);
+ if (label_name && !strcmp(label_name, name)) {
+ *value = val;
+ return ICE_SUCCESS;
+ }
+
+ ice_seg = NULL;
+ } while (label_name);
+
+ return ICE_ERR_CFG;
+}
+
+/**
+ * ice_verify_pkg - verify package
+ * @pkg: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * Verifies various attributes of the package file, including length, format
+ * version, and the requirement of at least one segment.
+ */
+enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+{
+ u32 seg_count;
+ u32 i;
+
+ if (len < ice_struct_size(pkg, seg_offset, 1))
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
+ pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
+ pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
+ pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ /* pkg must have at least one segment */
+ seg_count = LE32_TO_CPU(pkg->seg_count);
+ if (seg_count < 1)
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ /* make sure segment array fits in package length */
+ if (len < ice_struct_size(pkg, seg_offset, seg_count))
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ /* all segments must fit within length */
+ for (i = 0; i < seg_count; i++) {
+ u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
+ struct ice_generic_seg_hdr *seg;
+
+ /* segment header must fit */
+ if (len < off + sizeof(*seg))
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
+
+ /* segment body must fit */
+ if (len < off + LE32_TO_CPU(seg->seg_size))
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ return ICE_DDP_PKG_SUCCESS;
+}
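+
+/* Illustrative usage (hypothetical): validate a raw package image before
+ * touching any of its contents.
+ *
+ *	struct ice_pkg_hdr *pkg = (struct ice_pkg_hdr *)buf;
+ *	enum ice_ddp_state state = ice_verify_pkg(pkg, len);
+ *
+ *	if (state != ICE_DDP_PKG_SUCCESS)
+ *		return state;
+ */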
+
+/**
+ * ice_free_seg - free package segment pointer
+ * @hw: pointer to the hardware structure
+ *
+ * Frees the package segment pointer in the proper manner, depending on whether
+ * the segment was allocated or just the passed-in pointer was stored.
+ */
+void ice_free_seg(struct ice_hw *hw)
+{
+ if (hw->pkg_copy) {
+ ice_free(hw, hw->pkg_copy);
+ hw->pkg_copy = NULL;
+ hw->pkg_size = 0;
+ }
+ hw->seg = NULL;
+}
+
+/**
+ * ice_chk_pkg_version - check package version for compatibility with driver
+ * @pkg_ver: pointer to a version structure to check
+ *
+ * Check to make sure that the package about to be downloaded is compatible with
+ * the driver. To be compatible, the major and minor components of the package
+ * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
+ * definitions.
+ */
+static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
+{
+ if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
+ (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
+ pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
+ return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
+ else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
+ (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
+ pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
+ return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
+
+ return ICE_DDP_PKG_SUCCESS;
+}
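+
+/* Worked example (hypothetical supported version 1.3): a 1.4.x.y package
+ * returns ICE_DDP_PKG_FILE_VERSION_TOO_HIGH, a 1.2.x.y package returns
+ * ICE_DDP_PKG_FILE_VERSION_TOO_LOW, and any 1.3.x.y package is accepted,
+ * since the update and draft numbers are not compared.
+ */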
+
+/**
+ * ice_chk_pkg_compat
+ * @hw: pointer to the hardware structure
+ * @ospkg: pointer to the package hdr
+ * @seg: pointer to the package segment hdr
+ *
+ * This function checks the package version compatibility with the driver and NVM
+ */
+static enum ice_ddp_state
+ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
+ struct ice_seg **seg)
+{
+ struct ice_aqc_get_pkg_info_resp *pkg;
+ enum ice_ddp_state state;
+ u16 size;
+ u32 i;
+
+ /* Check package version compatibility */
+ state = ice_chk_pkg_version(&hw->pkg_ver);
+ if (state) {
+ ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
+ return state;
+ }
+
+ /* find ICE segment in given package */
+ *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
+ ospkg);
+ if (!*seg) {
+ ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ /* Check if FW is compatible with the OS package */
+ size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
+ pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
+ if (!pkg)
+ return ICE_DDP_PKG_ERR;
+
+ if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
+ state = ICE_DDP_PKG_ERR;
+ goto fw_ddp_compat_free_alloc;
+ }
+
+ for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
+ /* loop till we find the NVM package */
+ if (!pkg->pkg_info[i].is_in_nvm)
+ continue;
+ if ((*seg)->hdr.seg_format_ver.major !=
+ pkg->pkg_info[i].ver.major ||
+ (*seg)->hdr.seg_format_ver.minor >
+ pkg->pkg_info[i].ver.minor) {
+ state = ICE_DDP_PKG_FW_MISMATCH;
+ ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
+ }
+ /* done processing NVM package so break */
+ break;
+ }
+fw_ddp_compat_free_alloc:
+ ice_free(hw, pkg);
+ return state;
+}
+
+/**
+ * ice_sw_fv_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the field vector entry to be returned
+ * @offset: ptr to variable that receives the offset in the field vector table
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * This function treats the given section as of type ice_sw_fv_section and
+ * enumerates offset field. "offset" is an index into the field vector table.
+ */
+static void *
+ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_sw_fv_section *fv_section =
+ (struct ice_sw_fv_section *)section;
+
+ if (!section || sect_type != ICE_SID_FLD_VEC_SW)
+ return NULL;
+ if (index >= LE16_TO_CPU(fv_section->count))
+ return NULL;
+ if (offset)
+ /* "index" passed in to this function is relative to a given
+ * 4k block. To get to the true index into the field vector
+ * table, add the relative index to the base_offset
+ * field of this section
+ */
+ *offset = LE16_TO_CPU(fv_section->base_offset) + index;
+ return fv_section->fv + index;
+}
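+
+/* Worked example for the offset math above (values illustrative): with
+ * base_offset = 256 for this 4k block and index = 5, *offset receives the
+ * true field vector table index 256 + 5 = 261.
+ */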
+
+/**
+ * ice_get_prof_index_max - get the max profile index among profiles in use
+ * @hw: pointer to the HW struct
+ *
+ * Scans the switch block field vectors for the highest profile index that is
+ * actually in use and stores it in struct ice_switch_info *switch_info in hw
+ * for later use.
+ */
+static int ice_get_prof_index_max(struct ice_hw *hw)
+{
+ u16 prof_index = 0, j, max_prof_index = 0;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ bool flag = false;
+ struct ice_fv *fv;
+ u32 offset;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!hw->seg)
+ return ICE_ERR_PARAM;
+
+ ice_seg = hw->seg;
+
+ do {
+ fv = (struct ice_fv *)
+ ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* in a profile that is not used, the prot_id is set to 0xff
+ * and the off is set to 0x1ff for all the field vectors.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
+ fv->ew[j].off != ICE_FV_OFFSET_INVAL)
+ flag = true;
+ if (flag && prof_index > max_prof_index)
+ max_prof_index = prof_index;
+
+ prof_index++;
+ flag = false;
+ } while (fv);
+
+ hw->switch_info->max_used_prof_index = max_prof_index;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_get_ddp_pkg_state - get DDP pkg state after download
+ * @hw: pointer to the HW struct
+ * @already_loaded: indicates if pkg was already loaded onto the device
+ */
+static enum ice_ddp_state
+ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded)
+{
+ if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
+ hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
+ hw->pkg_ver.update == hw->active_pkg_ver.update &&
+ hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
+ !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
+ if (already_loaded)
+ return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
+ else
+ return ICE_DDP_PKG_SUCCESS;
+ } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
+ hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
+ return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
+ } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
+ hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
+ return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
+ } else {
+ return ICE_DDP_PKG_ERR;
+ }
+}
+
+/**
+ * ice_init_pkg_regs - initialize additional package registers
+ * @hw: pointer to the hardware structure
+ */
+static void ice_init_pkg_regs(struct ice_hw *hw)
+{
+#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
+#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
+#define ICE_SW_BLK_IDX 0
+
+ /* setup Switch block input mask, which is 48-bits in two parts */
+ wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
+ wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
+}
+
+/**
+ * ice_init_pkg - initialize/download package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function initializes a package. The package contains HW tables
+ * required to do packet processing. First, the function extracts package
+ * information such as version. Then it finds the ice configuration segment
+ * within the package; this function then saves a copy of the segment pointer
+ * within the supplied package buffer. Next, the function will cache any hints
+ * from the package, followed by downloading the package itself. Note that if
+ * a previous PF driver has already downloaded the package successfully, then
+ * the current driver will not have to download the package again.
+ *
+ * The local package contents will be used to query default behavior and to
+ * update specific sections of the HW's version of the package (e.g. to update
+ * the parse graph to understand new protocols).
+ *
+ * This function stores a pointer to the package buffer memory, and it is
+ * expected that the supplied buffer will not be freed immediately. If the
+ * package buffer needs to be freed, such as when read from a file, use
+ * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
+ * case.
+ */
+enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+{
+ bool already_loaded = false;
+ enum ice_ddp_state state;
+ struct ice_pkg_hdr *pkg;
+ struct ice_seg *seg;
+
+ if (!buf || !len)
+ return ICE_DDP_PKG_ERR;
+
+ pkg = (struct ice_pkg_hdr *)buf;
+ state = ice_verify_pkg(pkg, len);
+ if (state) {
+ ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
+ state);
+ return state;
+ }
+
+ /* initialize package info */
+ state = ice_init_pkg_info(hw, pkg);
+ if (state)
+ return state;
+
+ /* Packages with signing segments must contain a matching segment */
+ if (hw->pkg_has_signing_seg)
+ if (!ice_match_signing_seg(pkg, hw->pkg_seg_id,
+ hw->pkg_sign_type))
+ return ICE_DDP_PKG_ERR;
+
+ /* before downloading the package, check package version for
+ * compatibility with driver
+ */
+ state = ice_chk_pkg_compat(hw, pkg, &seg);
+ if (state)
+ return state;
+
+ /* initialize package hints and then download package */
+ ice_init_pkg_hints(hw, seg);
+ state = ice_download_pkg(hw, pkg, seg);
+
+ if (state == ICE_DDP_PKG_ALREADY_LOADED) {
+ ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
+ already_loaded = true;
+ }
+
+ /* Get information on the package currently loaded in HW, then make sure
+ * the driver is compatible with this version.
+ */
+ if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
+ state = ice_get_pkg_info(hw);
+ if (!state)
+ state = ice_get_ddp_pkg_state(hw, already_loaded);
+ }
+
+ if (ice_is_init_pkg_successful(state)) {
+ hw->seg = seg;
+ /* on successful package download update other required
+ * registers to support the package and fill HW tables
+ * with package content.
+ */
+ ice_init_pkg_regs(hw);
+ ice_fill_blk_tbls(hw);
+ ice_get_prof_index_max(hw);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
+ state);
+ }
+
+ return state;
+}
+
+/**
+ * ice_copy_and_init_pkg - initialize/download a copy of the package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function copies the package buffer, and then calls ice_init_pkg() to
+ * initialize the copied package contents.
+ *
+ * The copying is necessary if the package buffer supplied is constant, or if
+ * the memory may disappear shortly after calling this function.
+ *
+ * If the package buffer resides in the data segment and can be modified, the
+ * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
+ *
+ * However, if the package buffer needs to be copied first, such as when being
+ * read from a file, the caller should use ice_copy_and_init_pkg().
+ *
+ * This function will first copy the package buffer, before calling
+ * ice_init_pkg(). The caller is free to immediately destroy the original
+ * package buffer, as the new copy will be managed by this function and
+ * related routines.
+ */
+enum ice_ddp_state
+ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
+{
+ enum ice_ddp_state state;
+ u8 *buf_copy;
+
+ if (!buf || !len)
+ return ICE_DDP_PKG_ERR;
+
+ buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
+
+ state = ice_init_pkg(hw, buf_copy, len);
+ if (!ice_is_init_pkg_successful(state)) {
+ /* Free the copy, since we failed to initialize the package */
+ ice_free(hw, buf_copy);
+ } else {
+ /* Track the copied pkg so we can free it later */
+ hw->pkg_copy = buf_copy;
+ hw->pkg_size = len;
+ }
+
+ return state;
+}
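+
+/* Illustrative caller (hypothetical, firmware(9) based): the firmware image
+ * is const and may be released immediately, so the copying variant is the
+ * appropriate entry point; the image name below is illustrative.
+ *
+ *	const struct firmware *fw = firmware_get("ice_ddp");
+ *
+ *	if (fw != NULL) {
+ *		state = ice_copy_and_init_pkg(hw, (const u8 *)fw->data,
+ *					      fw->datasize);
+ *		firmware_put(fw, FIRMWARE_UNLOAD);
+ *	}
+ */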
+
+/**
+ * ice_is_init_pkg_successful - check if DDP init was successful
+ * @state: state of the DDP pkg after download
+ */
+bool ice_is_init_pkg_successful(enum ice_ddp_state state)
+{
+ switch (state) {
+ case ICE_DDP_PKG_SUCCESS:
+ case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
+ case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_pkg_buf_alloc
+ * @hw: pointer to the HW structure
+ *
+ * Allocates a package buffer and returns a pointer to the buffer header.
+ * Note: all package contents must be in Little Endian form.
+ */
+struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
+{
+ struct ice_buf_build *bld;
+ struct ice_buf_hdr *buf;
+
+ bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
+ if (!bld)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)bld;
+ buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
+ section_entry));
+ return bld;
+}
+
+static bool ice_is_gtp_u_profile(u16 prof_idx)
+{
+ return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
+ prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP) ||
+ prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
+}
+
+static bool ice_is_gtp_c_profile(u16 prof_idx)
+{
+ switch (prof_idx) {
+ case ICE_PROFID_IPV4_GTPC_TEID:
+ case ICE_PROFID_IPV4_GTPC_NO_TEID:
+ case ICE_PROFID_IPV6_GTPC_TEID:
+ case ICE_PROFID_IPV6_GTPC_NO_TEID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_get_sw_prof_type - determine switch profile type
+ * @hw: pointer to the HW structure
+ * @fv: pointer to the switch field vector
+ * @prof_idx: profile index to check
+ */
+static enum ice_prof_type
+ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx)
+{
+ bool valid_prof = false;
+ u16 i;
+
+ if (ice_is_gtp_c_profile(prof_idx))
+ return ICE_PROF_TUN_GTPC;
+
+ if (ice_is_gtp_u_profile(prof_idx))
+ return ICE_PROF_TUN_GTPU;
+
+ for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
+ if (fv->ew[i].off != ICE_NAN_OFFSET)
+ valid_prof = true;
+
+ /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
+ fv->ew[i].off == ICE_VNI_OFFSET)
+ return ICE_PROF_TUN_UDP;
+
+ /* GRE tunnel will have GRE protocol */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
+ return ICE_PROF_TUN_GRE;
+ }
+
+ return valid_prof ? ICE_PROF_NON_TUN : ICE_PROF_INVALID;
+}
+
+/**
+ * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
+ * @hw: pointer to hardware structure
+ * @req_profs: type of profiles requested
+ * @bm: pointer to memory for returning the bitmap of field vectors
+ */
+void
+ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
+ ice_bitmap_t *bm)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+ ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
+ ice_seg = hw->seg;
+ do {
+ enum ice_prof_type prof_type;
+ u32 offset;
+
+ fv = (struct ice_fv *)
+ ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ ice_seg = NULL;
+
+ if (fv) {
+ /* Determine field vector type */
+ prof_type = ice_get_sw_prof_type(hw, fv, offset);
+
+ if (req_profs & prof_type)
+ ice_set_bit((u16)offset, bm);
+ }
+ } while (fv);
+}
+
+/**
+ * ice_get_sw_fv_list
+ * @hw: pointer to the HW structure
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @bm: bitmap of field vectors to consider
+ * @fv_list: head of the list to which matching entries are added
+ *
+ * Finds all the field vector entries from switch block that contain
+ * a given protocol ID and offset and returns a list of structures of type
+ * "ice_sw_fv_list_entry". Every structure in the list has a field vector
+ * definition and profile ID information
+ * NOTE: The caller of the function is responsible for freeing the memory
+ * allocated for every list entry.
+ */
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
+ ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
+{
+ struct ice_sw_fv_list_entry *fvl;
+ struct ice_sw_fv_list_entry *tmp;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+ u32 offset;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!lkups->n_val_words || !hw->seg)
+ return ICE_ERR_PARAM;
+
+ ice_seg = hw->seg;
+ do {
+ u16 i;
+
+ fv = (struct ice_fv *)
+ ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* If the field vector is not in the bitmap, skip this
+ * profile.
+ */
+ if (!ice_is_bit_set(bm, (u16)offset))
+ continue;
+
+ for (i = 0; i < lkups->n_val_words; i++) {
+ int j;
+
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id ==
+ lkups->fv_words[i].prot_id &&
+ fv->ew[j].off == lkups->fv_words[i].off)
+ break;
+ if (j >= hw->blk[ICE_BLK_SW].es.fvw)
+ break;
+ if (i + 1 == lkups->n_val_words) {
+ fvl = (struct ice_sw_fv_list_entry *)
+ ice_malloc(hw, sizeof(*fvl));
+ if (!fvl)
+ goto err;
+ fvl->fv_ptr = fv;
+ fvl->profile_id = offset;
+ LIST_ADD(&fvl->list_entry, fv_list);
+ break;
+ }
+ }
+ } while (fv);
+ if (LIST_EMPTY(fv_list)) {
+ ice_warn(hw, "Required profiles not found in currently loaded DDP package");
+ return ICE_ERR_CFG;
+ }
+ return ICE_SUCCESS;
+
+err:
+ LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
+ list_entry) {
+ LIST_DEL(&fvl->list_entry);
+ ice_free(hw, fvl);
+ }
+
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_init_prof_result_bm - Initialize the profile result index bitmap
+ * @hw: pointer to hardware structure
+ */
+void ice_init_prof_result_bm(struct ice_hw *hw)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!hw->seg)
+ return;
+
+ ice_seg = hw->seg;
+ do {
+ u32 off;
+ u16 i;
+
+ fv = (struct ice_fv *)
+ ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &off, ice_sw_fv_handler);
+ ice_seg = NULL;
+ if (!fv)
+ break;
+
+ ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
+ ICE_MAX_FV_WORDS);
+
+ /* Determine empty field vector indices; these can be
+ * used for recipe results. Skip index 0, since it is
+ * always used for Switch ID.
+ */
+ for (i = 1; i < ICE_MAX_FV_WORDS; i++)
+ if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
+ fv->ew[i].off == ICE_FV_OFFSET_INVAL)
+ ice_set_bit(i,
+ hw->switch_info->prof_res_bm[off]);
+ } while (fv);
+}
+
+/**
+ * ice_pkg_buf_free
+ * @hw: pointer to the HW structure
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Frees a package buffer
+ */
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+{
+ ice_free(hw, bld);
+}
+
+/**
+ * ice_pkg_buf_reserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to reserve
+ *
+ * Reserves one or more section table entries in a package buffer. This routine
+ * can be called multiple times as long as all calls are made before
+ * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
+ * called, the number of reserved sections can no longer be increased; not
+ * using all reserved sections is fine, but unused reservations waste space in
+ * the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+enum ice_status
+ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
+{
+ struct ice_buf_hdr *buf;
+ u16 section_count;
+ u16 data_end;
+
+ if (!bld)
+ return ICE_ERR_PARAM;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* already an active section, can't increase table size */
+ section_count = LE16_TO_CPU(buf->section_count);
+ if (section_count > 0)
+ return ICE_ERR_CFG;
+
+ if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
+ return ICE_ERR_CFG;
+ bld->reserved_section_table_entries += count;
+
+ data_end = LE16_TO_CPU(buf->data_end) +
+ FLEX_ARRAY_SIZE(buf, section_entry, count);
+ buf->data_end = CPU_TO_LE16(data_end);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_pkg_buf_alloc_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ *
+ * Reserves memory in the buffer for a section's content and updates the
+ * buffer's status accordingly. This routine returns a pointer to the first
+ * byte of the section start within the buffer, which is used to fill in the
+ * section contents.
+ * Note: all package contents must be in Little Endian form.
+ */
+void *
+ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
+{
+ struct ice_buf_hdr *buf;
+ u16 sect_count;
+ u16 data_end;
+
+ if (!bld || !type || !size)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* check for enough space left in buffer */
+ data_end = LE16_TO_CPU(buf->data_end);
+
+ /* section start must align on 4 byte boundary */
+ data_end = ICE_ALIGN(data_end, 4);
+
+ if ((data_end + size) > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ /* check for more available section table entries */
+ sect_count = LE16_TO_CPU(buf->section_count);
+ if (sect_count < bld->reserved_section_table_entries) {
+ void *section_ptr = ((u8 *)buf) + data_end;
+
+ buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
+ buf->section_entry[sect_count].size = CPU_TO_LE16(size);
+ buf->section_entry[sect_count].type = CPU_TO_LE32(type);
+
+ data_end += size;
+ buf->data_end = CPU_TO_LE16(data_end);
+
+ buf->section_count = CPU_TO_LE16(sect_count + 1);
+ return section_ptr;
+ }
+
+ /* no free section table entries */
+ return NULL;
+}
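+
+/* Illustrative build sequence (hypothetical section type and size): reserve
+ * all section table entries first, then allocate and fill the sections, and
+ * finally hand the buffer to ice_update_pkg().
+ *
+ *	struct ice_buf_build *bld = ice_pkg_buf_alloc(hw);
+ *	void *sect;
+ *
+ *	if (bld && !ice_pkg_buf_reserve_section(bld, 1)) {
+ *		sect = ice_pkg_buf_alloc_section(bld, sect_type, size);
+ *		if (sect) {
+ *			... fill section contents, then ...
+ *			ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ *		}
+ *	}
+ *	ice_pkg_buf_free(hw, bld);
+ */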
+
+/**
+ * ice_pkg_buf_alloc_single_section
+ * @hw: pointer to the HW structure
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ * @section: returns pointer to the section
+ *
+ * Allocates a package buffer with a single section.
+ * Note: all package contents must be in Little Endian form.
+ */
+struct ice_buf_build *
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
+ void **section)
+{
+ struct ice_buf_build *buf;
+
+ if (!section)
+ return NULL;
+
+ buf = ice_pkg_buf_alloc(hw);
+ if (!buf)
+ return NULL;
+
+ if (ice_pkg_buf_reserve_section(buf, 1))
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ *section = ice_pkg_buf_alloc_section(buf, type, size);
+ if (!*section)
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ return buf;
+
+ice_pkg_buf_alloc_single_section_err:
+ ice_pkg_buf_free(hw, buf);
+ return NULL;
+}
+
+/**
+ * ice_pkg_buf_unreserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to unreserve
+ *
+ * Unreserves one or more section table entries in a package buffer, releasing
+ * space that can be used for section data. This routine can be called
+ * multiple times as long as all calls are made before
+ * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
+ * called, the reservation can no longer be changed; not using all reserved
+ * sections is fine, but unused reservations waste space in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+enum ice_status
+ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
+{
+ struct ice_buf_hdr *buf;
+ u16 section_count;
+ u16 data_end;
+
+ if (!bld)
+ return ICE_ERR_PARAM;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* already an active section, can't decrease table size */
+ section_count = LE16_TO_CPU(buf->section_count);
+ if (section_count > 0)
+ return ICE_ERR_CFG;
+
+ if (count > bld->reserved_section_table_entries)
+ return ICE_ERR_CFG;
+ bld->reserved_section_table_entries -= count;
+
+ data_end = LE16_TO_CPU(buf->data_end) -
+ FLEX_ARRAY_SIZE(buf, section_entry, count);
+ buf->data_end = CPU_TO_LE16(data_end);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_pkg_buf_get_free_space
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of free bytes remaining in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld)
+{
+ struct ice_buf_hdr *buf;
+
+ if (!bld)
+ return 0;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+ return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end);
+}
+
+/**
+ * ice_pkg_buf_get_active_sections
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of active sections. Before using the package buffer
+ * in an update package command, the caller should make sure that there is at
+ * least one active section - otherwise, the buffer is not legal and should
+ * not be used.
+ * Note: all package contents must be in Little Endian form.
+ */
+u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
+{
+ struct ice_buf_hdr *buf;
+
+ if (!bld)
+ return 0;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+ return LE16_TO_CPU(buf->section_count);
+}
+
+/**
+ * ice_pkg_buf
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Return a pointer to the buffer's header
+ */
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+{
+ if (bld)
+ return &bld->buf;
+
+ return NULL;
+}
+
+/**
+ * ice_find_buf_table
+ * @ice_seg: pointer to the ice segment
+ *
+ * Returns the address of the buffer table within the ice segment.
+ */
+struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
+{
+ struct ice_nvm_table *nvms;
+
+ nvms = (struct ice_nvm_table *)
+ (ice_seg->device_table +
+ LE32_TO_CPU(ice_seg->device_table_count));
+
+ return (_FORCE_ struct ice_buf_table *)
+ (nvms->vers + LE32_TO_CPU(nvms->table_count));
+}
+
+/**
+ * ice_pkg_val_buf
+ * @buf: pointer to the ice buffer
+ *
+ * This helper function validates a buffer's header.
+ */
+static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
+{
+ struct ice_buf_hdr *hdr;
+ u16 section_count;
+ u16 data_end;
+
+ hdr = (struct ice_buf_hdr *)buf->buf;
+ /* verify data */
+ section_count = LE16_TO_CPU(hdr->section_count);
+ if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
+ return NULL;
+
+ data_end = LE16_TO_CPU(hdr->data_end);
+ if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ return hdr;
+}
+
+/**
+ * ice_pkg_enum_buf
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This function will enumerate all the buffers in the ice segment. The first
+ * call is made with the ice_seg parameter non-NULL; on subsequent calls,
+ * ice_seg is set to NULL which continues the enumeration. When the function
+ * returns a NULL pointer, then the end of the buffers has been reached, or an
+ * unexpected value has been detected (for example an invalid section count or
+ * an invalid buffer end value).
+ */
+struct ice_buf_hdr *
+ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
+{
+ if (ice_seg) {
+ state->buf_table = ice_find_buf_table(ice_seg);
+ if (!state->buf_table)
+ return NULL;
+
+ state->buf_idx = 0;
+ return ice_pkg_val_buf(state->buf_table->buf_array);
+ }
+
+ if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
+ return ice_pkg_val_buf(state->buf_table->buf_array +
+ state->buf_idx);
+ else
+ return NULL;
+}
+
+/**
+ * ice_pkg_advance_sect
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This helper function will advance the section within the ice segment,
+ * also advancing the buffer if needed.
+ */
+bool
+ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
+{
+ if (!ice_seg && !state->buf)
+ return false;
+
+ if (!ice_seg && state->buf)
+ if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
+ return true;
+
+ state->buf = ice_pkg_enum_buf(ice_seg, state);
+ if (!state->buf)
+ return false;
+
+ /* start of new buffer, reset section index */
+ state->sect_idx = 0;
+ return true;
+}
+
+/**
+ * ice_pkg_enum_section
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ *
+ * This function will enumerate all the sections of a particular type in the
+ * ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the matching
+ * sections has been reached.
+ */
+void *
+ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type)
+{
+ u16 offset, size;
+
+ if (ice_seg)
+ state->type = sect_type;
+
+ if (!ice_pkg_advance_sect(ice_seg, state))
+ return NULL;
+
+ /* scan for next matching section */
+ while (state->buf->section_entry[state->sect_idx].type !=
+ CPU_TO_LE32(state->type))
+ if (!ice_pkg_advance_sect(NULL, state))
+ return NULL;
+
+ /* validate section */
+ offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
+ if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
+ return NULL;
+
+ size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
+ if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
+ return NULL;
+
+ /* make sure the section fits in the buffer */
+ if (offset + size > ICE_PKG_BUF_SIZE)
+ return NULL;
+
+ state->sect_type =
+ LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
+
+ /* calc pointer to this section */
+ state->sect = ((u8 *)state->buf) +
+ LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
+
+ return state->sect;
+}
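+
+/*
+ * Enumeration sketch (illustrative only; "seg" is assumed to point at the
+ * ice segment): pass the segment on the first call and NULL afterwards,
+ * starting from a zeroed state.
+ *
+ *	struct ice_pkg_enum state;
+ *	void *sect;
+ *
+ *	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+ *	sect = ice_pkg_enum_section(seg, &state, ICE_SID_RXPARSER_BOOST_TCAM);
+ *	while (sect) {
+ *		// ... process this section ...
+ *		sect = ice_pkg_enum_section(NULL, &state, 0);
+ *	}
+ */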
+
+/**
+ * ice_pkg_enum_entry
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ * @offset: pointer to variable that receives the offset in the table (optional)
+ * @handler: function that handles access to the entries into the section type
+ *
+ * This function will enumerate all the entries in a particular section type in
+ * the ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the entries has
+ * been reached.
+ *
+ * Since each section may have a different header and entry size, the handler
+ * function is needed to determine the number and location of entries in each
+ * section.
+ *
+ * The offset parameter is optional, but should be used for sections that
+ * contain an offset for each section table. For such cases, the section handler
+ * function must return the appropriate offset + index to give the absolute
+ * offset for each entry. For example, if a section's header indicates a base
+ * offset of 10, and the index for the entry is 2, then the section handler
+ * function should set the offset to 10 + 2 = 12.
+ */
+void *
+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type, u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset))
+{
+ void *entry;
+
+ if (ice_seg) {
+ if (!handler)
+ return NULL;
+
+ if (!ice_pkg_enum_section(ice_seg, state, sect_type))
+ return NULL;
+
+ state->entry_idx = 0;
+ state->handler = handler;
+ } else {
+ state->entry_idx++;
+ }
+
+ if (!state->handler)
+ return NULL;
+
+ /* get entry */
+ entry = state->handler(state->sect_type, state->sect, state->entry_idx,
+ offset);
+ if (!entry) {
+ /* end of a section, look for another section of this type */
+ if (!ice_pkg_enum_section(NULL, state, 0))
+ return NULL;
+
+ state->entry_idx = 0;
+ entry = state->handler(state->sect_type, state->sect,
+ state->entry_idx, offset);
+ }
+
+ return entry;
+}
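+
+/*
+ * Handler sketch for the offset rule above (hypothetical layout; neither
+ * "ice_foo_section" nor its fields exist in this driver):
+ *
+ *	static void *
+ *	ice_foo_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+ *	{
+ *		struct ice_foo_section *fs = (struct ice_foo_section *)section;
+ *
+ *		if (!section || index >= LE16_TO_CPU(fs->count))
+ *			return NULL;
+ *		if (offset)
+ *			*offset = LE16_TO_CPU(fs->base_offset) + index;
+ *		return fs->entry + index;
+ *	}
+ *
+ * ice_find_boost_entry() below shows the non-offset variant with a real
+ * handler.
+ */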
+
+/**
+ * ice_boost_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the boost TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual boost TCAM entries.
+ */
+static void *
+ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_boost_tcam_section *boost;
+
+ if (!section)
+ return NULL;
+
+ if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
+ return NULL;
+
+ if (index > ICE_MAX_BST_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ boost = (struct ice_boost_tcam_section *)section;
+ if (index >= LE16_TO_CPU(boost->count))
+ return NULL;
+
+ return boost->tcam + index;
+}
+
+/**
+ * ice_find_boost_entry
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @addr: Boost TCAM address of entry to search for
+ * @entry: returns pointer to the entry
+ *
+ * Finds a particular Boost TCAM entry and returns a pointer to that entry
+ * if it is found. The ice_seg parameter must not be NULL since the first call
+ * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
+ */
+static enum ice_status
+ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
+ struct ice_boost_tcam_entry **entry)
+{
+ struct ice_boost_tcam_entry *tcam;
+ struct ice_pkg_enum state;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!ice_seg)
+ return ICE_ERR_PARAM;
+
+ do {
+ tcam = (struct ice_boost_tcam_entry *)
+ ice_pkg_enum_entry(ice_seg, &state,
+ ICE_SID_RXPARSER_BOOST_TCAM, NULL,
+ ice_boost_tcam_handler);
+ if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
+ *entry = tcam;
+ return ICE_SUCCESS;
+ }
+
+ ice_seg = NULL;
+ } while (tcam);
+
+ *entry = NULL;
+ return ICE_ERR_CFG;
+}
+
+/**
+ * ice_init_pkg_hints
+ * @hw: pointer to the HW structure
+ * @ice_seg: pointer to the segment of the package scan (non-NULL)
+ *
+ * This function will scan the package and save off relevant information
+ * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
+ * since the first call to ice_enum_labels requires a pointer to an actual
+ * ice_seg structure.
+ */
+void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_pkg_enum state;
+ char *label_name;
+ u16 val;
+ int i;
+
+ ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!ice_seg)
+ return;
+
+ label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
+ &val);
+
+ while (label_name) {
+ /* TODO: Replace !strncmp() with wrappers like match_some_pre() */
+ if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
+ /* check for a tunnel entry */
+ ice_add_tunnel_hint(hw, label_name, val);
+
+ label_name = ice_enum_labels(NULL, 0, &state, &val);
+ }
+
+ /* Cache the appropriate boost TCAM entry pointers for tunnels */
+ for (i = 0; i < hw->tnl.count; i++) {
+ ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
+ &hw->tnl.tbl[i].boost_entry);
+ if (hw->tnl.tbl[i].boost_entry)
+ hw->tnl.tbl[i].valid = true;
+ }
+}
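+
+/*
+ * Label format note: the package appends the PF number to each tunnel label
+ * (for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, and so on), which is why the
+ * ICE_TNL_PRE prefix match above is enough to detect tunnel entries for any
+ * PF.
+ */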
+
+/**
+ * ice_acquire_global_cfg_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the global config lock for reading
+ * or writing of the package. When attempting to obtain write access, the
+ * caller must check for the following two return values:
+ *
+ * ICE_SUCCESS - Means the caller has acquired the global config lock
+ * and can perform writing of the package.
+ * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
+ * package or has found that no update was necessary; in
+ * this case, the caller can just skip performing any
+ * update of the package.
+ */
+enum ice_status
+ice_acquire_global_cfg_lock(struct ice_hw *hw,
+ enum ice_aq_res_access_type access)
+{
+ enum ice_status status;
+
+ status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
+ ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+
+ if (status == ICE_ERR_AQ_NO_WORK)
+ ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
+
+ return status;
+}
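+
+/*
+ * Caller sketch for the write-access protocol above (illustrative only):
+ *
+ *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ *	if (status == ICE_ERR_AQ_NO_WORK)
+ *		return ICE_SUCCESS;	// another PF already wrote the package
+ *	if (status)
+ *		return status;
+ *	// ... download or update the package ...
+ *	ice_release_global_cfg_lock(hw);
+ */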
+
+/**
+ * ice_release_global_cfg_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the global config lock.
+ */
+void ice_release_global_cfg_lock(struct ice_hw *hw)
+{
+ ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
+}
+
+/**
+ * ice_acquire_change_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the change lock.
+ */
+enum ice_status
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
+{
+ return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
+ ICE_CHANGE_LOCK_TIMEOUT);
+}
+
+/**
+ * ice_release_change_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the change lock using the proper Admin Command.
+ */
+void ice_release_change_lock(struct ice_hw *hw)
+{
+ ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
+}
+
+/**
+ * ice_get_set_tx_topo - get or set Tx topology
+ * @hw: pointer to the HW struct
+ * @buf: pointer to Tx topology buffer
+ * @buf_size: buffer size
+ * @cd: pointer to command details structure or NULL
+ * @flags: pointer to descriptor flags
+ * @set: 0-get, 1-set topology
+ *
+ * The function will get or set the Tx topology
+ */
+static enum ice_status
+ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
+ struct ice_sq_cd *cd, u8 *flags, bool set)
+{
+ struct ice_aqc_get_set_tx_topo *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.get_set_tx_topo;
+ if (set) {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
+ cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
+ /* requested to update a new topology, not a default topology */
+ if (buf)
+ cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
+ ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
+ } else {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
+ cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
+ }
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (status)
+ return status;
+ /* read the return flag values (first byte) for get operation */
+ if (!set && flags)
+ *flags = desc.params.get_set_tx_topo.set_flags;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_cfg_tx_topo - Initialize new Tx topology if available
+ * @hw: pointer to the HW struct
+ * @buf: pointer to Tx topology buffer
+ * @len: buffer size
+ *
+ * The function will apply the new Tx topology from the package buffer
+ * if available.
+ */
+enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
+{
+ u8 *current_topo, *new_topo = NULL;
+ struct ice_run_time_cfg_seg *seg;
+ struct ice_buf_hdr *section;
+ struct ice_pkg_hdr *pkg_hdr;
+ enum ice_ddp_state state;
+ u16 i, size = 0, offset;
+ enum ice_status status;
+ u32 reg = 0;
+ u8 flags;
+
+ if (!buf || !len)
+ return ICE_ERR_PARAM;
+
+ /* Does FW support the new Tx topology mode? */
+ if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) {
+ ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n");
+ return ICE_ERR_NOT_SUPPORTED;
+ }
+
+ current_topo = (u8 *)ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
+ if (!current_topo)
+ return ICE_ERR_NO_MEMORY;
+
+ /* get the current Tx topology */
+ status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL,
+ &flags, false);
+ ice_free(hw, current_topo);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
+ return status;
+ }
+
+ /* Is the default topology already applied? */
+ if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == 9) {
+ ice_debug(hw, ICE_DBG_INIT, "Loaded default topology\n");
+ /* The default topology is already loaded */
+ return ICE_ERR_ALREADY_EXISTS;
+ }
+
+ /* Is the new topology already applied? */
+ if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == 5) {
+ ice_debug(hw, ICE_DBG_INIT, "Loaded new topology\n");
+ /* The new topology is already loaded */
+ return ICE_ERR_ALREADY_EXISTS;
+ }
+
+ /* Has a set topology request already been issued? */
+ if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
+ ice_debug(hw, ICE_DBG_INIT, "Update tx topology was done by another PF\n");
+ /* add a small delay before exiting */
+ for (i = 0; i < 20; i++)
+ ice_msec_delay(100, true);
+ return ICE_ERR_ALREADY_EXISTS;
+ }
+
+ /* Change the topology from new to default (5 to 9) */
+ if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == 5) {
+ ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n");
+ goto update_topo;
+ }
+
+ pkg_hdr = (struct ice_pkg_hdr *)buf;
+ state = ice_verify_pkg(pkg_hdr, len);
+ if (state) {
+ ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
+ state);
+ return ICE_ERR_CFG;
+ }
+
+ /* find run time configuration segment */
+ seg = (struct ice_run_time_cfg_seg *)
+ ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
+ if (!seg) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
+ return ICE_ERR_CFG;
+ }
+
+ if (LE32_TO_CPU(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n",
+ seg->buf_table.buf_count);
+ return ICE_ERR_CFG;
+ }
+
+ section = ice_pkg_val_buf(seg->buf_table.buf_array);
+
+ if (!section || LE32_TO_CPU(section->section_entry[0].type) !=
+ ICE_SID_TX_5_LAYER_TOPO) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n");
+ return ICE_ERR_CFG;
+ }
+
+ size = LE16_TO_CPU(section->section_entry[0].size);
+ offset = LE16_TO_CPU(section->section_entry[0].offset);
+ if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n");
+ return ICE_ERR_CFG;
+ }
+
+ /* make sure the section fits in the buffer */
+ if (offset + size > ICE_PKG_BUF_SIZE) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n");
+ return ICE_ERR_CFG;
+ }
+
+ /* Get the new topology buffer */
+ new_topo = ((u8 *)section) + offset;
+
+update_topo:
+ /* Acquire the global lock to ensure that the set topology request is
+ * issued by only one PF
+ */
+ status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
+ ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
+ return status;
+ }
+
+ /* check reset was triggered already or not */
+ reg = rd32(hw, GLGEN_RSTAT);
+ if (reg & GLGEN_RSTAT_DEVSTATE_M) {
+ /* Reset is in progress, re-init the hw again */
+ ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. layer topology might be applied already\n");
+ ice_check_reset(hw);
+ return ICE_SUCCESS;
+ }
+
+ /* set new topology */
+ status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Set tx topology is failed\n");
+ return status;
+ }
+
+ /* new topology is updated, delay 1 second before issuing the CORER */
+ for (i = 0; i < 10; i++)
+ ice_msec_delay(100, true);
+ ice_reset(hw, ICE_RESET_CORER);
+ /* CORER will clear the global lock, so no explicit call
+ * required for release
+ */
+ return ICE_SUCCESS;
+}
diff --git a/sys/dev/ice/ice_ddp_common.h b/sys/dev/ice/ice_ddp_common.h
new file mode 100644
index 000000000000..621729b03446
--- /dev/null
+++ b/sys/dev/ice/ice_ddp_common.h
@@ -0,0 +1,478 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2022, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_DDP_H_
+#define _ICE_DDP_H_
+
+#include "ice_osdep.h"
+#include "ice_adminq_cmd.h"
+#include "ice_controlq.h"
+#include "ice_status.h"
+#include "ice_flex_type.h"
+#include "ice_protocol_type.h"
+
+/* Package minimal version supported */
+#define ICE_PKG_SUPP_VER_MAJ 1
+#define ICE_PKG_SUPP_VER_MNR 3
+
+/* Package format version */
+#define ICE_PKG_FMT_VER_MAJ 1
+#define ICE_PKG_FMT_VER_MNR 0
+#define ICE_PKG_FMT_VER_UPD 0
+#define ICE_PKG_FMT_VER_DFT 0
+
+#define ICE_PKG_CNT 4
+
+enum ice_ddp_state {
+ /* Indicates that this call to ice_init_pkg
+ * successfully loaded the requested DDP package
+ */
+ ICE_DDP_PKG_SUCCESS = 0,
+
+ /* Generic error for already-loaded cases; it is mapped later to
+ * a more specific one (one of the next 3)
+ */
+ ICE_DDP_PKG_ALREADY_LOADED = -1,
+
+ /* Indicates that a DDP package of the same version has already been
+ * loaded onto the device by a previous call or by another PF
+ */
+ ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
+
+ /* The device has a DDP package that is not supported by the driver */
+ ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
+
+ /* The device has a compatible package
+ * (but different from the request) already loaded
+ */
+ ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
+
+ /* The firmware loaded on the device is not compatible with
+ * the DDP package loaded
+ */
+ ICE_DDP_PKG_FW_MISMATCH = -5,
+
+ /* The DDP package file is invalid */
+ ICE_DDP_PKG_INVALID_FILE = -6,
+
+ /* The version of the DDP package provided is higher than
+ * the driver supports
+ */
+ ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
+
+ /* The version of the DDP package provided is lower than the
+ * driver supports
+ */
+ ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
+
+ /* Missing security manifest in DDP pkg */
+ ICE_DDP_PKG_NO_SEC_MANIFEST = -9,
+
+ /* The RSA signature of the DDP package file provided is invalid */
+ ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -10,
+
+ /* The DDP package file security revision is too low and not
+ * supported by firmware
+ */
+ ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW = -11,
+
+ /* Manifest hash mismatch */
+ ICE_DDP_PKG_MANIFEST_INVALID = -12,
+
+ /* Buffer hash mismatches manifest */
+ ICE_DDP_PKG_BUFFER_INVALID = -13,
+
+ /* Other errors */
+ ICE_DDP_PKG_ERR = -14,
+};
+
+/* Package and segment headers and tables */
+struct ice_pkg_hdr {
+ struct ice_pkg_ver pkg_format_ver;
+ __le32 seg_count;
+ __le32 seg_offset[STRUCT_HACK_VAR_LEN];
+};
+
+/* Package signing algorithm types */
+#define SEGMENT_SIGN_TYPE_INVALID 0x00000000
+#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001
+#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002
+#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 /* Secure Boot Block */
+
+/* generic segment */
+struct ice_generic_seg_hdr {
+#define SEGMENT_TYPE_INVALID 0x00000000
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_ICE_E810 0x00000010
+#define SEGMENT_TYPE_SIGNING 0x00001001
+#define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020
+ __le32 seg_type;
+ struct ice_pkg_ver seg_format_ver;
+ __le32 seg_size;
+ char seg_id[ICE_PKG_NAME_SIZE];
+};
+
+/* ice specific segment */
+
+union ice_device_id {
+ struct {
+ __le16 device_id;
+ __le16 vendor_id;
+ } dev_vend_id;
+ __le32 id;
+};
+
+struct ice_device_id_entry {
+ union ice_device_id device;
+ union ice_device_id sub_device;
+};
+
+struct ice_seg {
+ struct ice_generic_seg_hdr hdr;
+ __le32 device_table_count;
+ struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN];
+};
+
+struct ice_nvm_table {
+ __le32 table_count;
+ __le32 vers[STRUCT_HACK_VAR_LEN];
+};
+
+struct ice_buf {
+#define ICE_PKG_BUF_SIZE 4096
+ u8 buf[ICE_PKG_BUF_SIZE];
+};
+
+struct ice_buf_table {
+ __le32 buf_count;
+ struct ice_buf buf_array[STRUCT_HACK_VAR_LEN];
+};
+
+struct ice_run_time_cfg_seg {
+ struct ice_generic_seg_hdr hdr;
+ u8 rsvd[8];
+ struct ice_buf_table buf_table;
+};
+
+/* global metadata specific segment */
+struct ice_global_metadata_seg {
+ struct ice_generic_seg_hdr hdr;
+ struct ice_pkg_ver pkg_ver;
+ __le32 rsvd;
+ char pkg_name[ICE_PKG_NAME_SIZE];
+};
+
+#define ICE_MIN_S_OFF 12
+#define ICE_MAX_S_OFF 4095
+#define ICE_MIN_S_SZ 1
+#define ICE_MAX_S_SZ 4084
+
+struct ice_sign_seg {
+ struct ice_generic_seg_hdr hdr;
+ __le32 seg_id;
+ __le32 sign_type;
+ __le32 signed_seg_idx;
+ __le32 signed_buf_start;
+ __le32 signed_buf_count;
+#define ICE_SIGN_SEG_RESERVED_COUNT 44
+ u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT];
+ struct ice_buf_table buf_tbl;
+};
+
+/* section information */
+struct ice_section_entry {
+ __le32 type;
+ __le16 offset;
+ __le16 size;
+};
+
+#define ICE_MIN_S_COUNT 1
+#define ICE_MAX_S_COUNT 511
+#define ICE_MIN_S_DATA_END 12
+#define ICE_MAX_S_DATA_END 4096
+
+#define ICE_METADATA_BUF 0x80000000
+
+struct ice_buf_hdr {
+ __le16 section_count;
+ __le16 data_end;
+ struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN];
+};
+
+#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
+ ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
+ (ent_sz))
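+
+/* Usage sketch (hypothetical "foo" types, not defined in this driver):
+ * bound how many fixed-size entries fit in one 4 KB buffer after the buffer
+ * header, one section table entry, and the section's own header.
+ *
+ *	#define ICE_MAX_FOO_IN_BUF \
+ *		ICE_MAX_ENTRIES_IN_BUF(sizeof(struct ice_foo_section_hdr), \
+ *				       sizeof(struct ice_foo_entry))
+ */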
+
+/* ice package section IDs */
+#define ICE_SID_METADATA 1
+#define ICE_SID_XLT0_SW 10
+#define ICE_SID_XLT_KEY_BUILDER_SW 11
+#define ICE_SID_XLT1_SW 12
+#define ICE_SID_XLT2_SW 13
+#define ICE_SID_PROFID_TCAM_SW 14
+#define ICE_SID_PROFID_REDIR_SW 15
+#define ICE_SID_FLD_VEC_SW 16
+#define ICE_SID_CDID_KEY_BUILDER_SW 17
+#define ICE_SID_CDID_REDIR_SW 18
+
+#define ICE_SID_XLT0_ACL 20
+#define ICE_SID_XLT_KEY_BUILDER_ACL 21
+#define ICE_SID_XLT1_ACL 22
+#define ICE_SID_XLT2_ACL 23
+#define ICE_SID_PROFID_TCAM_ACL 24
+#define ICE_SID_PROFID_REDIR_ACL 25
+#define ICE_SID_FLD_VEC_ACL 26
+#define ICE_SID_CDID_KEY_BUILDER_ACL 27
+#define ICE_SID_CDID_REDIR_ACL 28
+
+#define ICE_SID_XLT0_FD 30
+#define ICE_SID_XLT_KEY_BUILDER_FD 31
+#define ICE_SID_XLT1_FD 32
+#define ICE_SID_XLT2_FD 33
+#define ICE_SID_PROFID_TCAM_FD 34
+#define ICE_SID_PROFID_REDIR_FD 35
+#define ICE_SID_FLD_VEC_FD 36
+#define ICE_SID_CDID_KEY_BUILDER_FD 37
+#define ICE_SID_CDID_REDIR_FD 38
+
+#define ICE_SID_XLT0_RSS 40
+#define ICE_SID_XLT_KEY_BUILDER_RSS 41
+#define ICE_SID_XLT1_RSS 42
+#define ICE_SID_XLT2_RSS 43
+#define ICE_SID_PROFID_TCAM_RSS 44
+#define ICE_SID_PROFID_REDIR_RSS 45
+#define ICE_SID_FLD_VEC_RSS 46
+#define ICE_SID_CDID_KEY_BUILDER_RSS 47
+#define ICE_SID_CDID_REDIR_RSS 48
+
+#define ICE_SID_RXPARSER_CAM 50
+#define ICE_SID_RXPARSER_NOMATCH_CAM 51
+#define ICE_SID_RXPARSER_IMEM 52
+#define ICE_SID_RXPARSER_XLT0_BUILDER 53
+#define ICE_SID_RXPARSER_NODE_PTYPE 54
+#define ICE_SID_RXPARSER_MARKER_PTYPE 55
+#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_RXPARSER_PROTO_GRP 57
+#define ICE_SID_RXPARSER_METADATA_INIT 58
+#define ICE_SID_RXPARSER_XLT0 59
+
+#define ICE_SID_TXPARSER_CAM 60
+#define ICE_SID_TXPARSER_NOMATCH_CAM 61
+#define ICE_SID_TXPARSER_IMEM 62
+#define ICE_SID_TXPARSER_XLT0_BUILDER 63
+#define ICE_SID_TXPARSER_NODE_PTYPE 64
+#define ICE_SID_TXPARSER_MARKER_PTYPE 65
+#define ICE_SID_TXPARSER_BOOST_TCAM 66
+#define ICE_SID_TXPARSER_PROTO_GRP 67
+#define ICE_SID_TXPARSER_METADATA_INIT 68
+#define ICE_SID_TXPARSER_XLT0 69
+
+#define ICE_SID_RXPARSER_INIT_REDIR 70
+#define ICE_SID_TXPARSER_INIT_REDIR 71
+#define ICE_SID_RXPARSER_MARKER_GRP 72
+#define ICE_SID_TXPARSER_MARKER_GRP 73
+#define ICE_SID_RXPARSER_LAST_PROTO 74
+#define ICE_SID_TXPARSER_LAST_PROTO 75
+#define ICE_SID_RXPARSER_PG_SPILL 76
+#define ICE_SID_TXPARSER_PG_SPILL 77
+#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
+#define ICE_SID_TXPARSER_NOMATCH_SPILL 79
+
+#define ICE_SID_XLT0_PE 80
+#define ICE_SID_XLT_KEY_BUILDER_PE 81
+#define ICE_SID_XLT1_PE 82
+#define ICE_SID_XLT2_PE 83
+#define ICE_SID_PROFID_TCAM_PE 84
+#define ICE_SID_PROFID_REDIR_PE 85
+#define ICE_SID_FLD_VEC_PE 86
+#define ICE_SID_CDID_KEY_BUILDER_PE 87
+#define ICE_SID_CDID_REDIR_PE 88
+
+#define ICE_SID_RXPARSER_FLAG_REDIR 97
+
+/* Label Metadata section IDs */
+#define ICE_SID_LBL_FIRST 0x80000010
+#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010
+#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011
+#define ICE_SID_LBL_RESERVED_12 0x80000012
+#define ICE_SID_LBL_RESERVED_13 0x80000013
+#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014
+#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015
+#define ICE_SID_LBL_PTYPE 0x80000016
+#define ICE_SID_LBL_PROTOCOL_ID 0x80000017
+#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
+#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019
+#define ICE_SID_LBL_RXPARSER_PG 0x8000001A
+#define ICE_SID_LBL_TXPARSER_PG 0x8000001B
+#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C
+#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D
+#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E
+#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F
+#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020
+#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021
+#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022
+#define ICE_SID_LBL_FLAG 0x80000023
+#define ICE_SID_LBL_REG 0x80000024
+#define ICE_SID_LBL_SW_PTG 0x80000025
+#define ICE_SID_LBL_ACL_PTG 0x80000026
+#define ICE_SID_LBL_PE_PTG 0x80000027
+#define ICE_SID_LBL_RSS_PTG 0x80000028
+#define ICE_SID_LBL_FD_PTG 0x80000029
+#define ICE_SID_LBL_SW_VSIG 0x8000002A
+#define ICE_SID_LBL_ACL_VSIG 0x8000002B
+#define ICE_SID_LBL_PE_VSIG 0x8000002C
+#define ICE_SID_LBL_RSS_VSIG 0x8000002D
+#define ICE_SID_LBL_FD_VSIG 0x8000002E
+#define ICE_SID_LBL_PTYPE_META 0x8000002F
+#define ICE_SID_LBL_SW_PROFID 0x80000030
+#define ICE_SID_LBL_ACL_PROFID 0x80000031
+#define ICE_SID_LBL_PE_PROFID 0x80000032
+#define ICE_SID_LBL_RSS_PROFID 0x80000033
+#define ICE_SID_LBL_FD_PROFID 0x80000034
+#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035
+#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036
+#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037
+#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038
+/* The following define MUST be updated to reflect the last label section ID */
+#define ICE_SID_LBL_LAST 0x80000038
+
+/* Label ICE runtime configuration section IDs */
+#define ICE_SID_TX_5_LAYER_TOPO 0x10
+
+enum ice_block {
+ ICE_BLK_SW = 0,
+ ICE_BLK_ACL,
+ ICE_BLK_FD,
+ ICE_BLK_RSS,
+ ICE_BLK_PE,
+ ICE_BLK_COUNT
+};
+
+enum ice_sect {
+ ICE_XLT0 = 0,
+ ICE_XLT_KB,
+ ICE_XLT1,
+ ICE_XLT2,
+ ICE_PROF_TCAM,
+ ICE_PROF_REDIR,
+ ICE_VEC_TBL,
+ ICE_CDID_KB,
+ ICE_CDID_REDIR,
+ ICE_SECT_COUNT
+};
+
+/* package buffer building */
+
+struct ice_buf_build {
+ struct ice_buf buf;
+ u16 reserved_section_table_entries;
+};
+
+struct ice_pkg_enum {
+ struct ice_buf_table *buf_table;
+ u32 buf_idx;
+
+ u32 type;
+ struct ice_buf_hdr *buf;
+ u32 sect_idx;
+ void *sect;
+ u32 sect_type;
+
+ u32 entry_idx;
+ void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
+};
+
+struct ice_hw;
+
+enum ice_status
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
+void ice_release_change_lock(struct ice_hw *hw);
+
+struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
+void *
+ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
+enum ice_status
+ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
+ ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
+enum ice_status
+ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
+u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
+u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
+
+enum ice_status
+ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
+enum ice_status
+ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
+void ice_release_global_cfg_lock(struct ice_hw *hw);
+struct ice_generic_seg_hdr *
+ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+ struct ice_pkg_hdr *pkg_hdr);
+enum ice_ddp_state
+ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len);
+enum ice_ddp_state
+ice_get_pkg_info(struct ice_hw *hw);
+void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg);
+struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg);
+enum ice_status
+ice_acquire_global_cfg_lock(struct ice_hw *hw,
+ enum ice_aq_res_access_type access);
+
+struct ice_buf_hdr *
+ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state);
+bool
+ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state);
+void *
+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type, u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset));
+void *
+ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type);
+enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
+enum ice_ddp_state
+ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
+bool ice_is_init_pkg_successful(enum ice_ddp_state state);
+void ice_free_seg(struct ice_hw *hw);
+
+struct ice_buf_build *
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
+ void **section);
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
+
+enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len);
+
+#endif /* _ICE_DDP_H_ */
diff --git a/sys/dev/ice/ice_defs.h b/sys/dev/ice/ice_defs.h
new file mode 100644
index 000000000000..8a1dda2c492c
--- /dev/null
+++ b/sys/dev/ice/ice_defs.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2022, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_DEFS_H_
+#define _ICE_DEFS_H_
+
+#define ETH_ALEN 6
+
+#define ETH_HEADER_LEN 14
+
+#define BIT(a) (1UL << (a))
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* BIT_ULL */
+
+#define BITS_PER_BYTE 8
+
+#define _FORCE_
+
+#define ICE_BYTES_PER_WORD 2
+#define ICE_BYTES_PER_DWORD 4
+#define ICE_MAX_TRAFFIC_CLASS 8
+
+#ifndef MIN_T
+#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b))
+#endif
+
+#define IS_ASCII(_ch) ((_ch) < 0x80)
+
+#define STRUCT_HACK_VAR_LEN
+/**
+ * ice_struct_size - size of struct with C99 flexible array member
+ * @ptr: pointer to structure
+ * @field: flexible array member (last member of the structure)
+ * @num: number of elements of that flexible array member
+ */
+#define ice_struct_size(ptr, field, num) \
+ (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
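+
+/*
+ * Example (illustrative only): bytes needed for a struct ice_pkg_hdr, from
+ * ice_ddp_common.h, that holds "n" segment offsets.
+ *
+ *	struct ice_pkg_hdr *hdr;
+ *	u32 sz = ice_struct_size(hdr, seg_offset, n);
+ */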
+
+#define FLEX_ARRAY_SIZE(_ptr, _mem, cnt) ((cnt) * sizeof(_ptr->_mem[0]))
+
+#endif /* _ICE_DEFS_H_ */
diff --git a/sys/dev/ice/ice_devids.h b/sys/dev/ice/ice_devids.h
index 2d092c6037c4..dc8970952b49 100644
--- a/sys/dev/ice/ice_devids.h
+++ b/sys/dev/ice/ice_devids.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,6 +34,7 @@
#define _ICE_DEVIDS_H_
/* Device IDs */
+#define ICE_DEV_ID_E822_SI_DFLT 0x1888
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
@@ -52,6 +53,11 @@
#define ICE_DEV_ID_E810C_SFP 0x1593
#define ICE_SUBDEV_ID_E810T 0x000E
#define ICE_SUBDEV_ID_E810T2 0x000F
+#define ICE_SUBDEV_ID_E810T3 0x02E9
+#define ICE_SUBDEV_ID_E810T4 0x02EA
+#define ICE_SUBDEV_ID_E810T5 0x0010
+#define ICE_SUBDEV_ID_E810T6 0x0012
+#define ICE_SUBDEV_ID_E810T7 0x0011
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
@@ -86,5 +92,4 @@
#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
/* Intel(R) Ethernet Connection E822-L 1GbE */
#define ICE_DEV_ID_E822L_SGMII 0x189A
-
#endif /* _ICE_DEVIDS_H_ */
diff --git a/sys/dev/ice/ice_drv_info.h b/sys/dev/ice/ice_drv_info.h
index 19d633554463..f47adb1572c6 100644
--- a/sys/dev/ice/ice_drv_info.h
+++ b/sys/dev/ice/ice_drv_info.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,16 +63,16 @@
* @var ice_rc_version
* @brief driver release candidate version number
*/
-const char ice_driver_version[] = "1.34.2-k";
+const char ice_driver_version[] = "1.37.7-k";
const uint8_t ice_major_version = 1;
-const uint8_t ice_minor_version = 34;
-const uint8_t ice_patch_version = 2;
+const uint8_t ice_minor_version = 37;
+const uint8_t ice_patch_version = 7;
const uint8_t ice_rc_version = 0;
#define PVIDV(vendor, devid, name) \
- PVID(vendor, devid, name " - 1.34.2-k")
+ PVID(vendor, devid, name " - 1.37.7-k")
#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
- PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.34.2-k")
+ PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.37.7-k")
/**
* @var ice_vendor_info_array
@@ -131,9 +131,6 @@ static pci_vendor_info_t ice_vendor_info_array[] = {
ICE_INTEL_VENDOR_ID, 0x0007, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-4"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
- ICE_INTEL_VENDOR_ID, 0x0008, 0,
- "Intel(R) Ethernet Network Adapter E810-XXV-2"),
- PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x000C, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-4 for OCP 3.0"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
diff --git a/sys/dev/ice/ice_features.h b/sys/dev/ice/ice_features.h
index 51c60b0dfeea..efbeb12c3ab1 100644
--- a/sys/dev/ice/ice_features.h
+++ b/sys/dev/ice/ice_features.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -68,6 +68,8 @@ enum feat_list {
ICE_FEATURE_HEALTH_STATUS,
ICE_FEATURE_FW_LOGGING,
ICE_FEATURE_HAS_PBA,
+ ICE_FEATURE_DCB,
+ ICE_FEATURE_TX_BALANCE,
/* Must be last entry */
ICE_FEATURE_COUNT
};
diff --git a/sys/dev/ice/ice_flex_pipe.c b/sys/dev/ice/ice_flex_pipe.c
index 0c956f720a92..36c420478131 100644
--- a/sys/dev/ice/ice_flex_pipe.c
+++ b/sys/dev/ice/ice_flex_pipe.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,14 +31,11 @@
/*$FreeBSD$*/
#include "ice_common.h"
+#include "ice_ddp_common.h"
#include "ice_flex_pipe.h"
#include "ice_protocol_type.h"
#include "ice_flow.h"
-/* To support tunneling entries by PF, the package will append the PF number to
- * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
- */
-#define ICE_TNL_PRE "TNL_"
static const struct ice_tunnel_type_scan tnls[] = {
{ TNL_VXLAN, "TNL_VXLAN_PF" },
{ TNL_GENEVE, "TNL_GENEVE_PF" },
@@ -126,368 +123,12 @@ static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
}
/**
- * ice_pkg_val_buf
- * @buf: pointer to the ice buffer
- *
- * This helper function validates a buffer's header.
- */
-static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
-{
- struct ice_buf_hdr *hdr;
- u16 section_count;
- u16 data_end;
-
- hdr = (struct ice_buf_hdr *)buf->buf;
- /* verify data */
- section_count = LE16_TO_CPU(hdr->section_count);
- if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
- return NULL;
-
- data_end = LE16_TO_CPU(hdr->data_end);
- if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
- return NULL;
-
- return hdr;
-}
-
-/**
- * ice_find_buf_table
- * @ice_seg: pointer to the ice segment
- *
- * Returns the address of the buffer table within the ice segment.
- */
-static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
-{
- struct ice_nvm_table *nvms;
-
- nvms = (struct ice_nvm_table *)
- (ice_seg->device_table +
- LE32_TO_CPU(ice_seg->device_table_count));
-
- return (_FORCE_ struct ice_buf_table *)
- (nvms->vers + LE32_TO_CPU(nvms->table_count));
-}
-
-/**
- * ice_pkg_enum_buf
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- *
- * This function will enumerate all the buffers in the ice segment. The first
- * call is made with the ice_seg parameter non-NULL; on subsequent calls,
- * ice_seg is set to NULL which continues the enumeration. When the function
- * returns a NULL pointer, then the end of the buffers has been reached, or an
- * unexpected value has been detected (for example an invalid section count or
- * an invalid buffer end value).
- */
-static struct ice_buf_hdr *
-ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
-{
- if (ice_seg) {
- state->buf_table = ice_find_buf_table(ice_seg);
- if (!state->buf_table)
- return NULL;
-
- state->buf_idx = 0;
- return ice_pkg_val_buf(state->buf_table->buf_array);
- }
-
- if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
- return ice_pkg_val_buf(state->buf_table->buf_array +
- state->buf_idx);
- else
- return NULL;
-}
-
-/**
- * ice_pkg_advance_sect
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- *
- * This helper function will advance the section within the ice segment,
- * also advancing the buffer if needed.
- */
-static bool
-ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
-{
- if (!ice_seg && !state->buf)
- return false;
-
- if (!ice_seg && state->buf)
- if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
- return true;
-
- state->buf = ice_pkg_enum_buf(ice_seg, state);
- if (!state->buf)
- return false;
-
- /* start of new buffer, reset section index */
- state->sect_idx = 0;
- return true;
-}
-
-/**
- * ice_pkg_enum_section
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- * @sect_type: section type to enumerate
- *
- * This function will enumerate all the sections of a particular type in the
- * ice segment. The first call is made with the ice_seg parameter non-NULL;
- * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
- * When the function returns a NULL pointer, then the end of the matching
- * sections has been reached.
- */
-static void *
-ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
- u32 sect_type)
-{
- u16 offset, size;
-
- if (ice_seg)
- state->type = sect_type;
-
- if (!ice_pkg_advance_sect(ice_seg, state))
- return NULL;
-
- /* scan for next matching section */
- while (state->buf->section_entry[state->sect_idx].type !=
- CPU_TO_LE32(state->type))
- if (!ice_pkg_advance_sect(NULL, state))
- return NULL;
-
- /* validate section */
- offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
- if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
- return NULL;
-
- size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
- if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
- return NULL;
-
- /* make sure the section fits in the buffer */
- if (offset + size > ICE_PKG_BUF_SIZE)
- return NULL;
-
- state->sect_type =
- LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
-
- /* calc pointer to this section */
- state->sect = ((u8 *)state->buf) +
- LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
-
- return state->sect;
-}
-
-/**
- * ice_pkg_enum_entry
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- * @sect_type: section type to enumerate
- * @offset: pointer to variable that receives the offset in the table (optional)
- * @handler: function that handles access to the entries into the section type
- *
- * This function will enumerate all the entries in particular section type in
- * the ice segment. The first call is made with the ice_seg parameter non-NULL;
- * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
- * When the function returns a NULL pointer, then the end of the entries has
- * been reached.
- *
- * Since each section may have a different header and entry size, the handler
- * function is needed to determine the number and location entries in each
- * section.
- *
- * The offset parameter is optional, but should be used for sections that
- * contain an offset for each section table. For such cases, the section handler
- * function must return the appropriate offset + index to give the absolution
- * offset for each entry. For example, if the base for a section's header
- * indicates a base offset of 10, and the index for the entry is 2, then
- * section handler function should set the offset to 10 + 2 = 12.
- */
-static void *
-ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
- u32 sect_type, u32 *offset,
- void *(*handler)(u32 sect_type, void *section,
- u32 index, u32 *offset))
-{
- void *entry;
-
- if (ice_seg) {
- if (!handler)
- return NULL;
-
- if (!ice_pkg_enum_section(ice_seg, state, sect_type))
- return NULL;
-
- state->entry_idx = 0;
- state->handler = handler;
- } else {
- state->entry_idx++;
- }
-
- if (!state->handler)
- return NULL;
-
- /* get entry */
- entry = state->handler(state->sect_type, state->sect, state->entry_idx,
- offset);
- if (!entry) {
- /* end of a section, look for another section of this type */
- if (!ice_pkg_enum_section(NULL, state, 0))
- return NULL;
-
- state->entry_idx = 0;
- entry = state->handler(state->sect_type, state->sect,
- state->entry_idx, offset);
- }
-
- return entry;
-}
-
-/**
- * ice_boost_tcam_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the boost TCAM entry to be returned
- * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * Handles enumeration of individual boost TCAM entries.
- */
-static void *
-ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
-{
- struct ice_boost_tcam_section *boost;
-
- if (!section)
- return NULL;
-
- if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
- return NULL;
-
- if (index > ICE_MAX_BST_TCAMS_IN_BUF)
- return NULL;
-
- if (offset)
- *offset = 0;
-
- boost = (struct ice_boost_tcam_section *)section;
- if (index >= LE16_TO_CPU(boost->count))
- return NULL;
-
- return boost->tcam + index;
-}
-
-/**
- * ice_find_boost_entry
- * @ice_seg: pointer to the ice segment (non-NULL)
- * @addr: Boost TCAM address of entry to search for
- * @entry: returns pointer to the entry
- *
- * Finds a particular Boost TCAM entry and returns a pointer to that entry
- * if it is found. The ice_seg parameter must not be NULL since the first call
- * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
- */
-static enum ice_status
-ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
- struct ice_boost_tcam_entry **entry)
-{
- struct ice_boost_tcam_entry *tcam;
- struct ice_pkg_enum state;
-
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
-
- if (!ice_seg)
- return ICE_ERR_PARAM;
-
- do {
- tcam = (struct ice_boost_tcam_entry *)
- ice_pkg_enum_entry(ice_seg, &state,
- ICE_SID_RXPARSER_BOOST_TCAM, NULL,
- ice_boost_tcam_handler);
- if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
- *entry = tcam;
- return ICE_SUCCESS;
- }
-
- ice_seg = NULL;
- } while (tcam);
-
- *entry = NULL;
- return ICE_ERR_CFG;
-}
-
-/**
- * ice_label_enum_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the label entry to be returned
- * @offset: pointer to receive absolute offset, always zero for label sections
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * Handles enumeration of individual label entries.
- */
-static void *
-ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
- u32 *offset)
-{
- struct ice_label_section *labels;
-
- if (!section)
- return NULL;
-
- if (index > ICE_MAX_LABELS_IN_BUF)
- return NULL;
-
- if (offset)
- *offset = 0;
-
- labels = (struct ice_label_section *)section;
- if (index >= LE16_TO_CPU(labels->count))
- return NULL;
-
- return labels->label + index;
-}
-
-/**
- * ice_enum_labels
- * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
- * @type: the section type that will contain the label (0 on subsequent calls)
- * @state: ice_pkg_enum structure that will hold the state of the enumeration
- * @value: pointer to a value that will return the label's value if found
- *
- * Enumerates a list of labels in the package. The caller will call
- * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
- * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
- * the end of the list has been reached.
- */
-static char *
-ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
- u16 *value)
-{
- struct ice_label *label;
-
- /* Check for valid label section on first call */
- if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
- return NULL;
-
- label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
- NULL,
- ice_label_enum_handler);
- if (!label)
- return NULL;
-
- *value = LE16_TO_CPU(label->value);
- return label->name;
-}
-
-/**
* ice_add_tunnel_hint
* @hw: pointer to the HW structure
* @label_name: label text
* @val: value of the tunnel port boost entry
*/
-static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
+void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
{
if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
u16 i;
@@ -517,49 +158,6 @@ static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
}
}
-/**
- * ice_init_pkg_hints
- * @hw: pointer to the HW structure
- * @ice_seg: pointer to the segment of the package scan (non-NULL)
- *
- * This function will scan the package and save off relevant information
- * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
- * since the first call to ice_enum_labels requires a pointer to an actual
- * ice_seg structure.
- */
-static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
-{
- struct ice_pkg_enum state;
- char *label_name;
- u16 val;
- int i;
-
- ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
-
- if (!ice_seg)
- return;
-
- label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
- &val);
-
- while (label_name) {
- if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
- /* check for a tunnel entry */
- ice_add_tunnel_hint(hw, label_name, val);
-
- label_name = ice_enum_labels(NULL, 0, &state, &val);
- }
-
- /* Cache the appropriate boost TCAM entry pointers for tunnels */
- for (i = 0; i < hw->tnl.count; i++) {
- ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
- &hw->tnl.tbl[i].boost_entry);
- if (hw->tnl.tbl[i].boost_entry)
- hw->tnl.tbl[i].valid = true;
- }
-}
-
/* Key creation */
#define ICE_DC_KEY 0x1 /* don't care */
@@ -731,1434 +329,6 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
}
/**
- * ice_acquire_global_cfg_lock
- * @hw: pointer to the HW structure
- * @access: access type (read or write)
- *
- * This function will request ownership of the global config lock for reading
- * or writing of the package. When attempting to obtain write access, the
- * caller must check for the following two return values:
- *
- * ICE_SUCCESS - Means the caller has acquired the global config lock
- * and can perform writing of the package.
- * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
- * package or has found that no update was necessary; in
- * this case, the caller can just skip performing any
- * update of the package.
- */
-static enum ice_status
-ice_acquire_global_cfg_lock(struct ice_hw *hw,
- enum ice_aq_res_access_type access)
-{
- enum ice_status status;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
- status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
- ICE_GLOBAL_CFG_LOCK_TIMEOUT);
-
- if (status == ICE_ERR_AQ_NO_WORK)
- ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
-
- return status;
-}
-
-/**
- * ice_release_global_cfg_lock
- * @hw: pointer to the HW structure
- *
- * This function will release the global config lock.
- */
-static void ice_release_global_cfg_lock(struct ice_hw *hw)
-{
- ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
-}
-
-/**
- * ice_acquire_change_lock
- * @hw: pointer to the HW structure
- * @access: access type (read or write)
- *
- * This function will request ownership of the change lock.
- */
-static enum ice_status
-ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
-{
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
- return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
- ICE_CHANGE_LOCK_TIMEOUT);
-}
-
-/**
- * ice_release_change_lock
- * @hw: pointer to the HW structure
- *
- * This function will release the change lock using the proper Admin Command.
- */
-static void ice_release_change_lock(struct ice_hw *hw)
-{
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
- ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
-}
-
-/**
- * ice_aq_download_pkg
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package buffer to transfer
- * @buf_size: the size of the package buffer
- * @last_buf: last buffer indicator
- * @error_offset: returns error offset
- * @error_info: returns error information
- * @cd: pointer to command details structure or NULL
- *
- * Download Package (0x0C40)
- */
-static enum ice_status
-ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
- u16 buf_size, bool last_buf, u32 *error_offset,
- u32 *error_info, struct ice_sq_cd *cd)
-{
- struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
- enum ice_status status;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
- if (error_offset)
- *error_offset = 0;
- if (error_info)
- *error_info = 0;
-
- cmd = &desc.params.download_pkg;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
- desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
-
- if (last_buf)
- cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
-
- status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == ICE_ERR_AQ_ERROR) {
- /* Read error from buffer only when the FW returned an error */
- struct ice_aqc_download_pkg_resp *resp;
-
- resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
- if (error_offset)
- *error_offset = LE32_TO_CPU(resp->error_offset);
- if (error_info)
- *error_info = LE32_TO_CPU(resp->error_info);
- }
-
- return status;
-}
-
-/**
- * ice_aq_upload_section
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package buffer which will receive the section
- * @buf_size: the size of the package buffer
- * @cd: pointer to command details structure or NULL
- *
- * Upload Section (0x0C41)
- */
-enum ice_status
-ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
- u16 buf_size, struct ice_sq_cd *cd)
-{
- struct ice_aq_desc desc;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
- desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
-
- return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
-}
-
-/**
- * ice_aq_update_pkg
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package cmd buffer
- * @buf_size: the size of the package cmd buffer
- * @last_buf: last buffer indicator
- * @error_offset: returns error offset
- * @error_info: returns error information
- * @cd: pointer to command details structure or NULL
- *
- * Update Package (0x0C42)
- */
-static enum ice_status
-ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
- bool last_buf, u32 *error_offset, u32 *error_info,
- struct ice_sq_cd *cd)
-{
- struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
- enum ice_status status;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
- if (error_offset)
- *error_offset = 0;
- if (error_info)
- *error_info = 0;
-
- cmd = &desc.params.download_pkg;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
- desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
-
- if (last_buf)
- cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
-
- status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == ICE_ERR_AQ_ERROR) {
- /* Read error from buffer only when the FW returned an error */
- struct ice_aqc_download_pkg_resp *resp;
-
- resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
- if (error_offset)
- *error_offset = LE32_TO_CPU(resp->error_offset);
- if (error_info)
- *error_info = LE32_TO_CPU(resp->error_info);
- }
-
- return status;
-}
-
-/**
- * ice_find_seg_in_pkg
- * @hw: pointer to the hardware structure
- * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_ICE_E810)
- * @pkg_hdr: pointer to the package header to be searched
- *
- * This function searches a package file for a particular segment type. On
- * success it returns a pointer to the segment header, otherwise it will
- * return NULL.
- */
-static struct ice_generic_seg_hdr *
-ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
- struct ice_pkg_hdr *pkg_hdr)
-{
- u32 i;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
- pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
- pkg_hdr->pkg_format_ver.update,
- pkg_hdr->pkg_format_ver.draft);
-
- /* Search all package segments for the requested segment type */
- for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
- struct ice_generic_seg_hdr *seg;
-
- seg = (struct ice_generic_seg_hdr *)
- ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
-
- if (LE32_TO_CPU(seg->seg_type) == seg_type)
- return seg;
- }
-
- return NULL;
-}
-
-/**
- * ice_update_pkg_no_lock
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @count: the number of buffers in the array
- */
-static enum ice_status
-ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
-{
- enum ice_status status = ICE_SUCCESS;
- u32 i;
-
- for (i = 0; i < count; i++) {
- struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
- bool last = ((i + 1) == count);
- u32 offset, info;
-
- status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
- last, &offset, &info, NULL);
-
- if (status) {
- ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
- status, offset, info);
- break;
- }
- }
-
- return status;
-}
-
-/**
- * ice_update_pkg
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @count: the number of buffers in the array
- *
- * Obtains change lock and updates package.
- */
-enum ice_status
-ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
-{
- enum ice_status status;
-
- status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
- if (status)
- return status;
-
- status = ice_update_pkg_no_lock(hw, bufs, count);
-
- ice_release_change_lock(hw);
-
- return status;
-}
-
-/**
- * ice_dwnld_cfg_bufs
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @count: the number of buffers in the array
- *
- * Obtains global config lock and downloads the package configuration buffers
- * to the firmware. Metadata buffers are skipped, and the first metadata buffer
- * found indicates that the rest of the buffers are all metadata buffers.
- */
-static enum ice_status
-ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
-{
- enum ice_status status;
- struct ice_buf_hdr *bh;
- u32 offset, info, i;
-
- if (!bufs || !count)
- return ICE_ERR_PARAM;
-
- /* If the first buffer's first section has its metadata bit set
- * then there are no buffers to be downloaded, and the operation is
- * considered a success.
- */
- bh = (struct ice_buf_hdr *)bufs;
- if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
- return ICE_SUCCESS;
-
- /* reset pkg_dwnld_status in case this function is called in the
- * reset/rebuild flow
- */
- hw->pkg_dwnld_status = ICE_AQ_RC_OK;
-
- status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
- if (status) {
- if (status == ICE_ERR_AQ_NO_WORK)
- hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
- else
- hw->pkg_dwnld_status = hw->adminq.sq_last_status;
- return status;
- }
-
- for (i = 0; i < count; i++) {
- bool last = ((i + 1) == count);
-
- if (!last) {
- /* check next buffer for metadata flag */
- bh = (struct ice_buf_hdr *)(bufs + i + 1);
-
- /* A set metadata flag in the next buffer will signal
- * that the current buffer will be the last buffer
- * downloaded
- */
- if (LE16_TO_CPU(bh->section_count))
- if (LE32_TO_CPU(bh->section_entry[0].type) &
- ICE_METADATA_BUF)
- last = true;
- }
-
- bh = (struct ice_buf_hdr *)(bufs + i);
-
- status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
- &offset, &info, NULL);
-
- /* Save AQ status from download package */
- hw->pkg_dwnld_status = hw->adminq.sq_last_status;
- if (status) {
- ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
- status, offset, info);
-
- break;
- }
-
- if (last)
- break;
- }
-
- if (!status) {
- status = ice_set_vlan_mode(hw);
- if (status)
- ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
- status);
- }
-
- ice_release_global_cfg_lock(hw);
-
- return status;
-}
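
(A minimal illustrative sketch, hypothetical helper: counting how many buffers
ice_dwnld_cfg_bufs() will actually send, using the same metadata look-ahead
rule as the loop above.)

    static u32 ice_count_dwnld_bufs(struct ice_buf *bufs, u32 count)
    {
    	u32 i;

    	for (i = 0; i < count; i++) {
    		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);

    		/* the first buffer whose first section carries the
    		 * metadata flag ends the download; it and all later
    		 * buffers are skipped
    		 */
    		if (LE16_TO_CPU(bh->section_count) &&
    		    (LE32_TO_CPU(bh->section_entry[0].type) &
    		     ICE_METADATA_BUF))
    			break;
    	}

    	return i;
    }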
-
-/**
- * ice_aq_get_pkg_info_list
- * @hw: pointer to the hardware structure
- * @pkg_info: the buffer which will receive the information list
- * @buf_size: the size of the pkg_info information buffer
- * @cd: pointer to command details structure or NULL
- *
- * Get Package Info List (0x0C43)
- */
-static enum ice_status
-ice_aq_get_pkg_info_list(struct ice_hw *hw,
- struct ice_aqc_get_pkg_info_resp *pkg_info,
- u16 buf_size, struct ice_sq_cd *cd)
-{
- struct ice_aq_desc desc;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
-
- return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
-}
-
-/**
- * ice_download_pkg
- * @hw: pointer to the hardware structure
- * @ice_seg: pointer to the segment of the package to be downloaded
- *
- * Handles the download of a complete package.
- */
-static enum ice_status
-ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
-{
- struct ice_buf_table *ice_buf_tbl;
- enum ice_status status;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
- ice_seg->hdr.seg_format_ver.major,
- ice_seg->hdr.seg_format_ver.minor,
- ice_seg->hdr.seg_format_ver.update,
- ice_seg->hdr.seg_format_ver.draft);
-
- ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
- LE32_TO_CPU(ice_seg->hdr.seg_type),
- LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
-
- ice_buf_tbl = ice_find_buf_table(ice_seg);
-
- ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
- LE32_TO_CPU(ice_buf_tbl->buf_count));
-
- status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
- LE32_TO_CPU(ice_buf_tbl->buf_count));
-
- ice_post_pkg_dwnld_vlan_mode_cfg(hw);
-
- return status;
-}
-
-/**
- * ice_init_pkg_info
- * @hw: pointer to the hardware structure
- * @pkg_hdr: pointer to the driver's package hdr
- *
- * Saves off the package details into the HW structure.
- */
-static enum ice_status
-ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
-{
- struct ice_generic_seg_hdr *seg_hdr;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- if (!pkg_hdr)
- return ICE_ERR_PARAM;
-
- hw->pkg_seg_id = SEGMENT_TYPE_ICE_E810;
-
- ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
- hw->pkg_seg_id);
-
- seg_hdr = (struct ice_generic_seg_hdr *)
- ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
- if (seg_hdr) {
- struct ice_meta_sect *meta;
- struct ice_pkg_enum state;
-
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
-
- /* Get package information from the Metadata Section */
- meta = (struct ice_meta_sect *)
- ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
- ICE_SID_METADATA);
- if (!meta) {
- ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
- return ICE_ERR_CFG;
- }
-
- hw->pkg_ver = meta->ver;
- ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
- ICE_NONDMA_TO_NONDMA);
-
- ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
- meta->ver.major, meta->ver.minor, meta->ver.update,
- meta->ver.draft, meta->name);
-
- hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
- ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
- sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);
-
- ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
- seg_hdr->seg_format_ver.major,
- seg_hdr->seg_format_ver.minor,
- seg_hdr->seg_format_ver.update,
- seg_hdr->seg_format_ver.draft,
- seg_hdr->seg_id);
- } else {
- ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
- return ICE_ERR_CFG;
- }
-
- return ICE_SUCCESS;
-}
-
-/**
- * ice_get_pkg_info
- * @hw: pointer to the hardware structure
- *
- * Store details of the package currently loaded in HW into the HW structure.
- */
-static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
-{
- struct ice_aqc_get_pkg_info_resp *pkg_info;
- enum ice_status status;
- u16 size;
- u32 i;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
- size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
- pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
- if (!pkg_info)
- return ICE_ERR_NO_MEMORY;
-
- status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
- if (status)
- goto init_pkg_free_alloc;
-
- for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
-#define ICE_PKG_FLAG_COUNT 4
- char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
- u8 place = 0;
-
- if (pkg_info->pkg_info[i].is_active) {
- flags[place++] = 'A';
- hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
- hw->active_track_id =
- LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
- ice_memcpy(hw->active_pkg_name,
- pkg_info->pkg_info[i].name,
- sizeof(pkg_info->pkg_info[i].name),
- ICE_NONDMA_TO_NONDMA);
- hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
- }
- if (pkg_info->pkg_info[i].is_active_at_boot)
- flags[place++] = 'B';
- if (pkg_info->pkg_info[i].is_modified)
- flags[place++] = 'M';
- if (pkg_info->pkg_info[i].is_in_nvm)
- flags[place++] = 'N';
-
- ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
- i, pkg_info->pkg_info[i].ver.major,
- pkg_info->pkg_info[i].ver.minor,
- pkg_info->pkg_info[i].ver.update,
- pkg_info->pkg_info[i].ver.draft,
- pkg_info->pkg_info[i].name, flags);
- }
-
-init_pkg_free_alloc:
- ice_free(hw, pkg_info);
-
- return status;
-}
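
(A minimal illustrative sketch, hypothetical helper: once ice_get_pkg_info()
has cached the active package details in hw, a caller can re-check driver
compatibility without another AQ round trip.)

    static bool ice_active_pkg_supported(struct ice_hw *hw)
    {
    	/* active_pkg_ver was populated by ice_get_pkg_info() above */
    	return ice_chk_pkg_version(&hw->active_pkg_ver) == ICE_SUCCESS;
    }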
-
-/**
- * ice_find_label_value
- * @ice_seg: pointer to the ice segment (non-NULL)
- * @name: name of the label to search for
- * @type: the section type that will contain the label
- * @value: pointer to a value that will return the label's value if found
- *
- * Finds a label's value given the label name and the section type to search.
- * The ice_seg parameter must not be NULL since the first call to
- * ice_enum_labels requires a pointer to an actual ice_seg structure.
- */
-enum ice_status
-ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
- u16 *value)
-{
- struct ice_pkg_enum state;
- char *label_name;
- u16 val;
-
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
-
- if (!ice_seg)
- return ICE_ERR_PARAM;
-
- do {
- label_name = ice_enum_labels(ice_seg, type, &state, &val);
- if (label_name && !strcmp(label_name, name)) {
- *value = val;
- return ICE_SUCCESS;
- }
-
- ice_seg = NULL;
- } while (label_name);
-
- return ICE_ERR_CFG;
-}
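
(An illustrative usage sketch: looking up the value of a tunnel label by
name. The label string follows the TNL_<type>_PF<n> convention noted in
ice_flex_pipe.h later in this diff; the choice of ICE_SID_LBL_RXPARSER_TMEM
as the label section is an assumption for the example.)

    u16 val;

    if (!ice_find_label_value(hw->seg, "TNL_VXLAN_PF0",
    			      ICE_SID_LBL_RXPARSER_TMEM, &val))
    	ice_debug(hw, ICE_DBG_PKG, "TNL_VXLAN_PF0 = 0x%04x\n", val);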
-
-/**
- * ice_verify_pkg - verify package
- * @pkg: pointer to the package buffer
- * @len: size of the package buffer
- *
- * Verifies various attributes of the package file, including length, format
- * version, and the requirement of at least one segment.
- */
-static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
-{
- u32 seg_count;
- u32 i;
-
- if (len < ice_struct_size(pkg, seg_offset, 1))
- return ICE_ERR_BUF_TOO_SHORT;
-
- if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
- pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
- pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
- pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
- return ICE_ERR_CFG;
-
- /* pkg must have at least one segment */
- seg_count = LE32_TO_CPU(pkg->seg_count);
- if (seg_count < 1)
- return ICE_ERR_CFG;
-
- /* make sure segment array fits in package length */
- if (len < ice_struct_size(pkg, seg_offset, seg_count))
- return ICE_ERR_BUF_TOO_SHORT;
-
- /* all segments must fit within length */
- for (i = 0; i < seg_count; i++) {
- u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
- struct ice_generic_seg_hdr *seg;
-
- /* segment header must fit */
- if (len < off + sizeof(*seg))
- return ICE_ERR_BUF_TOO_SHORT;
-
- seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
-
- /* segment body must fit */
- if (len < off + LE32_TO_CPU(seg->seg_size))
- return ICE_ERR_BUF_TOO_SHORT;
- }
-
- return ICE_SUCCESS;
-}
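
(The layout these checks imply, reconstructed from the ice_pkg_hdr and
ice_generic_seg_hdr definitions removed later in this diff:)

    struct ice_pkg_hdr                    offset 0
        pkg_format_ver
        seg_count = N
        seg_offset[0..N-1]     each entry is a byte offset into the buffer
    ...
    struct ice_generic_seg_hdr            at seg_offset[i]
        seg_type, seg_format_ver
        seg_size               header plus body must fit within len
        seg_id
        <segment body>

ice_verify_pkg() walks every seg_offset[i] and rejects the package if any
segment header or body extends past the supplied length.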
-
-/**
- * ice_free_seg - free package segment pointer
- * @hw: pointer to the hardware structure
- *
- * Frees the package segment pointer in the proper manner, depending on
- * whether the segment was allocated or just the passed-in pointer was stored.
- */
-void ice_free_seg(struct ice_hw *hw)
-{
- if (hw->pkg_copy) {
- ice_free(hw, hw->pkg_copy);
- hw->pkg_copy = NULL;
- hw->pkg_size = 0;
- }
- hw->seg = NULL;
-}
-
-/**
- * ice_init_pkg_regs - initialize additional package registers
- * @hw: pointer to the hardware structure
- */
-static void ice_init_pkg_regs(struct ice_hw *hw)
-{
-#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
-#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
-#define ICE_SW_BLK_IDX 0
-
- /* setup Switch block input mask, which is 48-bits in two parts */
- wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
- wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
-}
-
-/**
- * ice_chk_pkg_version - check package version for compatibility with driver
- * @pkg_ver: pointer to a version structure to check
- *
- * Check to make sure that the package about to be downloaded is compatible with
- * the driver. To be compatible, the major and minor components of the package
- * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
- * definitions.
- */
-static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
-{
- if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
- pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
- return ICE_ERR_NOT_SUPPORTED;
-
- return ICE_SUCCESS;
-}
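
(A worked example: with ICE_PKG_SUPP_VER_MAJ == 1 and ICE_PKG_SUPP_VER_MNR ==
3, the values removed from ice_flex_pipe.h later in this diff, a 1.3.x.y
package passes this check while a 1.2.x.y or 2.3.x.y package is rejected with
ICE_ERR_NOT_SUPPORTED; the update and draft fields are never compared.)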
-
-/**
- * ice_chk_pkg_compat
- * @hw: pointer to the hardware structure
- * @ospkg: pointer to the package hdr
- * @seg: pointer to the package segment hdr
- *
- * This function checks the package version compatibility with the driver and NVM.
- */
-static enum ice_status
-ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
- struct ice_seg **seg)
-{
- struct ice_aqc_get_pkg_info_resp *pkg;
- enum ice_status status;
- u16 size;
- u32 i;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
- /* Check package version compatibility */
- status = ice_chk_pkg_version(&hw->pkg_ver);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
- return status;
- }
-
- /* find ICE segment in given package */
- *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
- ospkg);
- if (!*seg) {
- ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
- return ICE_ERR_CFG;
- }
-
- /* Check if FW is compatible with the OS package */
- size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
- pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
- if (!pkg)
- return ICE_ERR_NO_MEMORY;
-
- status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
- if (status)
- goto fw_ddp_compat_free_alloc;
-
- for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
- /* loop till we find the NVM package */
- if (!pkg->pkg_info[i].is_in_nvm)
- continue;
- if ((*seg)->hdr.seg_format_ver.major !=
- pkg->pkg_info[i].ver.major ||
- (*seg)->hdr.seg_format_ver.minor >
- pkg->pkg_info[i].ver.minor) {
- status = ICE_ERR_FW_DDP_MISMATCH;
- ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
- }
- /* done processing NVM package so break */
- break;
- }
-fw_ddp_compat_free_alloc:
- ice_free(hw, pkg);
- return status;
-}
-
-/**
- * ice_sw_fv_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the field vector entry to be returned
- * @offset: ptr to variable that receives the offset in the field vector table
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * This function treats the given section as being of type ice_sw_fv_section
- * and enumerates its offset field. "offset" is an index into the field
- * vector table.
- */
-static void *
-ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
-{
- struct ice_sw_fv_section *fv_section =
- (struct ice_sw_fv_section *)section;
-
- if (!section || sect_type != ICE_SID_FLD_VEC_SW)
- return NULL;
- if (index >= LE16_TO_CPU(fv_section->count))
- return NULL;
- if (offset)
- /* "index" passed in to this function is relative to a given
- * 4k block. To get to the true index into the field vector
- * table need to add the relative index to the base_offset
- * field of this section
- */
- *offset = LE16_TO_CPU(fv_section->base_offset) + index;
- return fv_section->fv + index;
-}
-
-/**
- * ice_get_prof_index_max - get the max profile index of the used profiles
- * @hw: pointer to the HW struct
- *
- * Calling this function will get the max profile index of the used profiles
- * and store the index number in struct ice_switch_info *switch_info
- * in hw for later use.
- */
-static int ice_get_prof_index_max(struct ice_hw *hw)
-{
- u16 prof_index = 0, j, max_prof_index = 0;
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- bool flag = false;
- struct ice_fv *fv;
- u32 offset;
-
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
-
- if (!hw->seg)
- return ICE_ERR_PARAM;
-
- ice_seg = hw->seg;
-
- do {
- fv = (struct ice_fv *)
- ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &offset, ice_sw_fv_handler);
- if (!fv)
- break;
- ice_seg = NULL;
-
- /* In a profile that is not used, the prot_id is set to 0xff
- * and the off is set to 0x1ff for all of its field vectors.
- */
- for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
- fv->ew[j].off != ICE_FV_OFFSET_INVAL)
- flag = true;
- if (flag && prof_index > max_prof_index)
- max_prof_index = prof_index;
-
- prof_index++;
- flag = false;
- } while (fv);
-
- hw->switch_info->max_used_prof_index = max_prof_index;
-
- return ICE_SUCCESS;
-}
-
-/**
- * ice_init_pkg - initialize/download package
- * @hw: pointer to the hardware structure
- * @buf: pointer to the package buffer
- * @len: size of the package buffer
- *
- * This function initializes a package. The package contains HW tables
- * required to do packet processing. First, the function extracts package
- * information such as version. Then it finds the ice configuration segment
- * within the package; this function then saves a copy of the segment pointer
- * within the supplied package buffer. Next, the function will cache any hints
- * from the package, followed by downloading the package itself. Note that if
- * a previous PF driver has already downloaded the package successfully, then
- * the current driver will not have to download the package again.
- *
- * The local package contents will be used to query default behavior and to
- * update specific sections of the HW's version of the package (e.g. to update
- * the parse graph to understand new protocols).
- *
- * This function stores a pointer to the package buffer memory, and it is
- * expected that the supplied buffer will not be freed immediately. If the
- * package buffer needs to be freed, such as when it was read from a file, use
- * ice_copy_and_init_pkg() instead of calling ice_init_pkg() directly.
- */
-enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
-{
- struct ice_pkg_hdr *pkg;
- enum ice_status status;
- struct ice_seg *seg;
-
- if (!buf || !len)
- return ICE_ERR_PARAM;
-
- pkg = (struct ice_pkg_hdr *)buf;
- status = ice_verify_pkg(pkg, len);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
- status);
- return status;
- }
-
- /* initialize package info */
- status = ice_init_pkg_info(hw, pkg);
- if (status)
- return status;
-
- /* before downloading the package, check package version for
- * compatibility with driver
- */
- status = ice_chk_pkg_compat(hw, pkg, &seg);
- if (status)
- return status;
-
- /* initialize package hints and then download package */
- ice_init_pkg_hints(hw, seg);
- status = ice_download_pkg(hw, seg);
- if (status == ICE_ERR_AQ_NO_WORK) {
- ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
- status = ICE_SUCCESS;
- }
-
- /* Get information on the package currently loaded in HW, then make sure
- * the driver is compatible with this version.
- */
- if (!status) {
- status = ice_get_pkg_info(hw);
- if (!status)
- status = ice_chk_pkg_version(&hw->active_pkg_ver);
- }
-
- if (!status) {
- hw->seg = seg;
- /* on successful package download update other required
- * registers to support the package and fill HW tables
- * with package content.
- */
- ice_init_pkg_regs(hw);
- ice_fill_blk_tbls(hw);
- ice_get_prof_index_max(hw);
- } else {
- ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
- status);
- }
-
- return status;
-}
-
-/**
- * ice_copy_and_init_pkg - initialize/download a copy of the package
- * @hw: pointer to the hardware structure
- * @buf: pointer to the package buffer
- * @len: size of the package buffer
- *
- * This function copies the package buffer and then calls ice_init_pkg() to
- * initialize the copied package contents.
- *
- * The copy is necessary when the supplied package buffer is constant, or when
- * the memory may disappear shortly after this function is called, such as
- * when the buffer was read from a file.
- *
- * If the package buffer resides in the data segment and can be modified, the
- * caller may use ice_init_pkg() directly instead of ice_copy_and_init_pkg().
- *
- * The caller is free to destroy the original package buffer immediately after
- * this call returns, as the new copy will be managed by this function and
- * related routines.
- */
-enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
-{
- enum ice_status status;
- u8 *buf_copy;
-
- if (!buf || !len)
- return ICE_ERR_PARAM;
-
- buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
-
- status = ice_init_pkg(hw, buf_copy, len);
- if (status) {
- /* Free the copy, since we failed to initialize the package */
- ice_free(hw, buf_copy);
- } else {
- /* Track the copied pkg so we can free it later */
- hw->pkg_copy = buf_copy;
- hw->pkg_size = len;
- }
-
- return status;
-}
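
(An illustrative usage sketch, assuming a DDP image already read from a file;
the wrapper name is hypothetical. Because the source buffer may be freed right
after the call, the copying variant is the correct entry point.)

    static enum ice_status
    ice_load_ddp_sketch(struct ice_hw *hw, const u8 *pkg_data, u32 pkg_len)
    {
    	enum ice_status status;

    	status = ice_copy_and_init_pkg(hw, pkg_data, pkg_len);
    	if (status == ICE_ERR_NOT_SUPPORTED)
    		ice_debug(hw, ICE_DBG_INIT,
    			  "DDP package version not supported by driver\n");

    	/* pkg_data may be freed by the caller now; the copy is owned by
    	 * hw->pkg_copy and released later via ice_free_seg()
    	 */
    	return status;
    }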
-
-/**
- * ice_pkg_buf_alloc
- * @hw: pointer to the HW structure
- *
- * Allocates a package buffer and returns a pointer to the buffer header.
- * Note: all package contents must be in Little Endian form.
- */
-static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
-{
- struct ice_buf_build *bld;
- struct ice_buf_hdr *buf;
-
- bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
- if (!bld)
- return NULL;
-
- buf = (struct ice_buf_hdr *)bld;
- buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
- section_entry));
- return bld;
-}
-
-/**
- * ice_get_sw_prof_type - determine switch profile type
- * @hw: pointer to the HW structure
- * @fv: pointer to the switch field vector
- */
-static enum ice_prof_type
-ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
-{
- u16 i;
-
- for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
- /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
- if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
- fv->ew[i].off == ICE_VNI_OFFSET)
- return ICE_PROF_TUN_UDP;
-
- /* GRE tunnel will have GRE protocol */
- if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
- return ICE_PROF_TUN_GRE;
- }
-
- return ICE_PROF_NON_TUN;
-}
-
-/**
- * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
- * @hw: pointer to hardware structure
- * @req_profs: type of profiles requested
- * @bm: pointer to memory for returning the bitmap of field vectors
- */
-void
-ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
- ice_bitmap_t *bm)
-{
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- struct ice_fv *fv;
-
- if (req_profs == ICE_PROF_ALL) {
- ice_bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
- return;
- }
-
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
- ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
- ice_seg = hw->seg;
- do {
- enum ice_prof_type prof_type;
- u32 offset;
-
- fv = (struct ice_fv *)
- ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &offset, ice_sw_fv_handler);
- ice_seg = NULL;
-
- if (fv) {
- /* Determine field vector type */
- prof_type = ice_get_sw_prof_type(hw, fv);
-
- if (req_profs & prof_type)
- ice_set_bit((u16)offset, bm);
- }
- } while (fv);
-}
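
(An illustrative usage sketch: collecting only tunnel profiles. Combining the
two tunnel types with a bitwise OR assumes the profile-type enum is a bitmask,
which the req_profs & prof_type test above implies.)

    ice_declare_bitmap(bm, ICE_MAX_NUM_PROFILES);

    ice_get_sw_fv_bitmap(hw, (enum ice_prof_type)(ICE_PROF_TUN_UDP |
    						  ICE_PROF_TUN_GRE), bm);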
-
-/**
- * ice_get_sw_fv_list
- * @hw: pointer to the HW structure
- * @prot_ids: field vector to search for with a given protocol ID
- * @ids_cnt: lookup/protocol count
- * @bm: bitmap of field vectors to consider
- * @fv_list: Head of a list
- *
- * Finds all the field vector entries from switch block that contain
- * a given protocol ID and returns a list of structures of type
- * "ice_sw_fv_list_entry". Every structure in the list has a field vector
- * definition and profile ID information.
- *
- * NOTE: The caller of the function is responsible for freeing the memory
- * allocated for every list entry.
- */
-enum ice_status
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
- ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
-{
- struct ice_sw_fv_list_entry *fvl;
- struct ice_sw_fv_list_entry *tmp;
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- struct ice_fv *fv;
- u32 offset;
-
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
-
- if (!ids_cnt || !hw->seg)
- return ICE_ERR_PARAM;
-
- ice_seg = hw->seg;
- do {
- u16 i;
-
- fv = (struct ice_fv *)
- ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &offset, ice_sw_fv_handler);
- if (!fv)
- break;
- ice_seg = NULL;
-
- /* If field vector is not in the bitmap list, then skip this
- * profile.
- */
- if (!ice_is_bit_set(bm, (u16)offset))
- continue;
-
- for (i = 0; i < ids_cnt; i++) {
- int j;
-
- /* This code assumes that if a switch field vector line
- * has a matching protocol, then this line will contain
- * the entries necessary to represent every field in
- * that protocol header.
- */
- for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv->ew[j].prot_id == prot_ids[i])
- break;
- if (j >= hw->blk[ICE_BLK_SW].es.fvw)
- break;
- if (i + 1 == ids_cnt) {
- fvl = (struct ice_sw_fv_list_entry *)
- ice_malloc(hw, sizeof(*fvl));
- if (!fvl)
- goto err;
- fvl->fv_ptr = fv;
- fvl->profile_id = offset;
- LIST_ADD(&fvl->list_entry, fv_list);
- break;
- }
- }
- } while (fv);
- if (LIST_EMPTY(fv_list))
- return ICE_ERR_CFG;
- return ICE_SUCCESS;
-
-err:
- LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
- list_entry) {
- LIST_DEL(&fvl->list_entry);
- ice_free(hw, fvl);
- }
-
- return ICE_ERR_NO_MEMORY;
-}
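
(An illustrative usage sketch, assuming hw is in scope: finding all switch
field vectors that contain the UDP protocol ID, then releasing the list
entries, which the NOTE above makes the caller's job. ICE_PROT_UDP_OF is used
as the example protocol ID, as in ice_get_sw_prof_type() above.)

    struct ice_sw_fv_list_entry *fvl, *tmp;
    struct LIST_HEAD_TYPE fv_list;
    u8 prot_ids[] = { (u8)ICE_PROT_UDP_OF };
    ice_declare_bitmap(bm, ICE_MAX_NUM_PROFILES);

    INIT_LIST_HEAD(&fv_list);
    ice_get_sw_fv_bitmap(hw, ICE_PROF_ALL, bm);

    if (!ice_get_sw_fv_list(hw, prot_ids, 1, bm, &fv_list)) {
    	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, &fv_list,
    				 ice_sw_fv_list_entry, list_entry) {
    		/* use fvl->profile_id and fvl->fv_ptr here */
    		LIST_DEL(&fvl->list_entry);
    		ice_free(hw, fvl);
    	}
    }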
-
-/**
- * ice_init_prof_result_bm - Initialize the profile result index bitmap
- * @hw: pointer to hardware structure
- */
-void ice_init_prof_result_bm(struct ice_hw *hw)
-{
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- struct ice_fv *fv;
-
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
-
- if (!hw->seg)
- return;
-
- ice_seg = hw->seg;
- do {
- u32 off;
- u16 i;
-
- fv = (struct ice_fv *)
- ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &off, ice_sw_fv_handler);
- ice_seg = NULL;
- if (!fv)
- break;
-
- ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
- ICE_MAX_FV_WORDS);
-
- /* Determine empty field vector indices, these can be
- * used for recipe results. Skip index 0, since it is
- * always used for Switch ID.
- */
- for (i = 1; i < ICE_MAX_FV_WORDS; i++)
- if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
- fv->ew[i].off == ICE_FV_OFFSET_INVAL)
- ice_set_bit(i,
- hw->switch_info->prof_res_bm[off]);
- } while (fv);
-}
-
-/**
- * ice_pkg_buf_free
- * @hw: pointer to the HW structure
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Frees a package buffer
- */
-void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
-{
- ice_free(hw, bld);
-}
-
-/**
- * ice_pkg_buf_reserve_section
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- * @count: the number of sections to reserve
- *
- * Reserves one or more section table entries in a package buffer. This routine
- * can be called multiple times as long as all calls are made before the first
- * call to ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has
- * been called, the number of reserved sections can no longer be increased; not
- * using all reserved sections is fine, but it will result in some wasted space
- * in the buffer.
- * Note: all package contents must be in Little Endian form.
- */
-static enum ice_status
-ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
-{
- struct ice_buf_hdr *buf;
- u16 section_count;
- u16 data_end;
-
- if (!bld)
- return ICE_ERR_PARAM;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
-
- /* already an active section, can't increase table size */
- section_count = LE16_TO_CPU(buf->section_count);
- if (section_count > 0)
- return ICE_ERR_CFG;
-
- if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
- return ICE_ERR_CFG;
- bld->reserved_section_table_entries += count;
-
- data_end = LE16_TO_CPU(buf->data_end) +
- FLEX_ARRAY_SIZE(buf, section_entry, count);
- buf->data_end = CPU_TO_LE16(data_end);
-
- return ICE_SUCCESS;
-}
-
-/**
- * ice_pkg_buf_alloc_section
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- * @type: the section type value
- * @size: the size of the section to reserve (in bytes)
- *
- * Reserves memory in the buffer for a section's content and updates the
- * buffer's status accordingly. This routine returns a pointer to the first
- * byte of the section start within the buffer, which is used to fill in the
- * section contents.
- * Note: all package contents must be in Little Endian form.
- */
-static void *
-ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
-{
- struct ice_buf_hdr *buf;
- u16 sect_count;
- u16 data_end;
-
- if (!bld || !type || !size)
- return NULL;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
-
- /* check for enough space left in buffer */
- data_end = LE16_TO_CPU(buf->data_end);
-
- /* section start must align on 4 byte boundary */
- data_end = ICE_ALIGN(data_end, 4);
-
- if ((data_end + size) > ICE_MAX_S_DATA_END)
- return NULL;
-
- /* check for more available section table entries */
- sect_count = LE16_TO_CPU(buf->section_count);
- if (sect_count < bld->reserved_section_table_entries) {
- void *section_ptr = ((u8 *)buf) + data_end;
-
- buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
- buf->section_entry[sect_count].size = CPU_TO_LE16(size);
- buf->section_entry[sect_count].type = CPU_TO_LE32(type);
-
- data_end += size;
- buf->data_end = CPU_TO_LE16(data_end);
-
- buf->section_count = CPU_TO_LE16(sect_count + 1);
- return section_ptr;
- }
-
- /* no free section table entries */
- return NULL;
-}
-
-/**
- * ice_pkg_buf_alloc_single_section
- * @hw: pointer to the HW structure
- * @type: the section type value
- * @size: the size of the section to reserve (in bytes)
- * @section: returns pointer to the section
- *
- * Allocates a package buffer with a single section.
- * Note: all package contents must be in Little Endian form.
- */
-struct ice_buf_build *
-ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
- void **section)
-{
- struct ice_buf_build *buf;
-
- if (!section)
- return NULL;
-
- buf = ice_pkg_buf_alloc(hw);
- if (!buf)
- return NULL;
-
- if (ice_pkg_buf_reserve_section(buf, 1))
- goto ice_pkg_buf_alloc_single_section_err;
-
- *section = ice_pkg_buf_alloc_section(buf, type, size);
- if (!*section)
- goto ice_pkg_buf_alloc_single_section_err;
-
- return buf;
-
-ice_pkg_buf_alloc_single_section_err:
- ice_pkg_buf_free(hw, buf);
- return NULL;
-}
-
-/**
- * ice_pkg_buf_unreserve_section
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- * @count: the number of sections to unreserve
- *
- * Unreserves one or more section table entries in a package buffer, releasing
- * space that can be used for section data. This routine can be called
- * multiple times as long as all calls are made before the first call to
- * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
- * called, the number of reserved sections can no longer be changed; not using
- * all reserved sections is fine, but it will result in some wasted space in
- * the buffer.
- * Note: all package contents must be in Little Endian form.
- */
-enum ice_status
-ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
-{
- struct ice_buf_hdr *buf;
- u16 section_count;
- u16 data_end;
-
- if (!bld)
- return ICE_ERR_PARAM;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
-
- /* already an active section, can't decrease table size */
- section_count = LE16_TO_CPU(buf->section_count);
- if (section_count > 0)
- return ICE_ERR_CFG;
-
- if (count > bld->reserved_section_table_entries)
- return ICE_ERR_CFG;
- bld->reserved_section_table_entries -= count;
-
- data_end = LE16_TO_CPU(buf->data_end) -
- FLEX_ARRAY_SIZE(buf, section_entry, count);
- buf->data_end = CPU_TO_LE16(data_end);
-
- return ICE_SUCCESS;
-}
-
-/**
- * ice_pkg_buf_get_free_space
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Returns the number of free bytes remaining in the buffer.
- * Note: all package contents must be in Little Endian form.
- */
-u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld)
-{
- struct ice_buf_hdr *buf;
-
- if (!bld)
- return 0;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
- return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end);
-}
-
-/**
- * ice_pkg_buf_get_active_sections
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Returns the number of active sections. Before using the package buffer
- * in an update package command, the caller should make sure that there is at
- * least one active section - otherwise, the buffer is not legal and should
- * not be used.
- * Note: all package contents must be in Little Endian form.
- */
-static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
-{
- struct ice_buf_hdr *buf;
-
- if (!bld)
- return 0;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
- return LE16_TO_CPU(buf->section_count);
-}
-
-/**
- * ice_pkg_buf
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Return a pointer to the buffer's header
- */
-struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
-{
- if (!bld)
- return NULL;
-
- return &bld->buf;
-}
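
(An illustrative sketch of the build-buffer lifecycle using the routines
above: allocate, reserve table entries, carve out a section, then hand the raw
ice_buf to an update command. The function name, section type, and size are
placeholders for the example.)

    static enum ice_status ice_pkg_buf_demo(struct ice_hw *hw)
    {
    	struct ice_buf_build *bld;
    	enum ice_status status;
    	void *sect;

    	bld = ice_pkg_buf_alloc(hw);
    	if (!bld)
    		return ICE_ERR_NO_MEMORY;

    	/* all reservations must precede the first section allocation */
    	status = ice_pkg_buf_reserve_section(bld, 1);
    	if (status)
    		goto out;

    	sect = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
    					 64);
    	if (!sect) {
    		status = ICE_ERR_NO_MEMORY;
    		goto out;
    	}

    	/* ... fill *sect with little-endian section content here ... */

    	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
    out:
    	ice_pkg_buf_free(hw, bld);
    	return status;
    }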
-
-/**
* ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
* @hw: pointer to the HW structure
* @port: port to search for
@@ -3626,6 +1796,134 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
}
/**
+ * ice_init_flow_profs - init flow profile locks and list heads
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
+{
+ ice_init_lock(&hw->fl_profs_locks[blk_idx]);
+ INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
+}
+
+/**
+ * ice_init_hw_tbls - init hardware table memory
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
+{
+ u8 i;
+
+ ice_init_lock(&hw->rss_locks);
+ INIT_LIST_HEAD(&hw->rss_list_head);
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_tcam *prof = &hw->blk[i].prof;
+ struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
+ struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
+ struct ice_es *es = &hw->blk[i].es;
+ u16 j;
+
+ if (hw->blk[i].is_list_init)
+ continue;
+
+ ice_init_flow_profs(hw, i);
+ ice_init_lock(&es->prof_map_lock);
+ INIT_LIST_HEAD(&es->prof_map);
+ hw->blk[i].is_list_init = true;
+
+ hw->blk[i].overwrite = blk_sizes[i].overwrite;
+ es->reverse = blk_sizes[i].reverse;
+
+ xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
+ xlt1->count = blk_sizes[i].xlt1;
+
+ xlt1->ptypes = (struct ice_ptg_ptype *)
+ ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
+
+ if (!xlt1->ptypes)
+ goto err;
+
+ xlt1->ptg_tbl = (struct ice_ptg_entry *)
+ ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
+
+ if (!xlt1->ptg_tbl)
+ goto err;
+
+ xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
+ if (!xlt1->t)
+ goto err;
+
+ xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
+ xlt2->count = blk_sizes[i].xlt2;
+
+ xlt2->vsis = (struct ice_vsig_vsi *)
+ ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
+
+ if (!xlt2->vsis)
+ goto err;
+
+ xlt2->vsig_tbl = (struct ice_vsig_entry *)
+ ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
+ if (!xlt2->vsig_tbl)
+ goto err;
+
+ for (j = 0; j < xlt2->count; j++)
+ INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
+
+ xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
+ if (!xlt2->t)
+ goto err;
+
+ prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
+ prof->count = blk_sizes[i].prof_tcam;
+ prof->max_prof_id = blk_sizes[i].prof_id;
+ prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
+ prof->t = (struct ice_prof_tcam_entry *)
+ ice_calloc(hw, prof->count, sizeof(*prof->t));
+
+ if (!prof->t)
+ goto err;
+
+ prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
+ prof_redir->count = blk_sizes[i].prof_redir;
+ prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
+ sizeof(*prof_redir->t));
+
+ if (!prof_redir->t)
+ goto err;
+
+ es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
+ es->count = blk_sizes[i].es;
+ es->fvw = blk_sizes[i].fvw;
+ es->t = (struct ice_fv_word *)
+ ice_calloc(hw, (u32)(es->count * es->fvw),
+ sizeof(*es->t));
+ if (!es->t)
+ goto err;
+
+ es->ref_count = (u16 *)
+ ice_calloc(hw, es->count, sizeof(*es->ref_count));
+
+ if (!es->ref_count)
+ goto err;
+
+ es->written = (u8 *)
+ ice_calloc(hw, es->count, sizeof(*es->written));
+
+ if (!es->written)
+ goto err;
+
+ }
+ return ICE_SUCCESS;
+
+err:
+ ice_free_hw_tbls(hw);
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
* ice_fill_blk_tbls - Read package context for tables
* @hw: pointer to the hardware structure
*
@@ -3756,17 +2054,6 @@ void ice_free_hw_tbls(struct ice_hw *hw)
}
/**
- * ice_init_flow_profs - init flow profile locks and list heads
- * @hw: pointer to the hardware structure
- * @blk_idx: HW block index
- */
-static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
-{
- ice_init_lock(&hw->fl_profs_locks[blk_idx]);
- INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
-}
-
-/**
* ice_clear_hw_tbls - clear HW tables and flow profiles
* @hw: pointer to the hardware structure
*/
@@ -3788,151 +2075,59 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
ice_free_vsig_tbl(hw, (enum ice_block)i);
- ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes),
- ICE_NONDMA_MEM);
- ice_memset(xlt1->ptg_tbl, 0,
- ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl),
- ICE_NONDMA_MEM);
- ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t),
- ICE_NONDMA_MEM);
-
- ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis),
- ICE_NONDMA_MEM);
- ice_memset(xlt2->vsig_tbl, 0,
- xlt2->count * sizeof(*xlt2->vsig_tbl),
- ICE_NONDMA_MEM);
- ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t),
- ICE_NONDMA_MEM);
-
- ice_memset(prof->t, 0, prof->count * sizeof(*prof->t),
- ICE_NONDMA_MEM);
- ice_memset(prof_redir->t, 0,
- prof_redir->count * sizeof(*prof_redir->t),
- ICE_NONDMA_MEM);
-
- ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw,
- ICE_NONDMA_MEM);
- ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count),
- ICE_NONDMA_MEM);
- ice_memset(es->written, 0, es->count * sizeof(*es->written),
- ICE_NONDMA_MEM);
- }
-}
-
-/**
- * ice_init_hw_tbls - init hardware table memory
- * @hw: pointer to the hardware structure
- */
-enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
-{
- u8 i;
-
- ice_init_lock(&hw->rss_locks);
- INIT_LIST_HEAD(&hw->rss_list_head);
- for (i = 0; i < ICE_BLK_COUNT; i++) {
- struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
- struct ice_prof_tcam *prof = &hw->blk[i].prof;
- struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
- struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
- struct ice_es *es = &hw->blk[i].es;
- u16 j;
-
- if (hw->blk[i].is_list_init)
- continue;
-
- ice_init_flow_profs(hw, i);
- ice_init_lock(&es->prof_map_lock);
- INIT_LIST_HEAD(&es->prof_map);
- hw->blk[i].is_list_init = true;
-
- hw->blk[i].overwrite = blk_sizes[i].overwrite;
- es->reverse = blk_sizes[i].reverse;
-
- xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
- xlt1->count = blk_sizes[i].xlt1;
-
- xlt1->ptypes = (struct ice_ptg_ptype *)
- ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
-
- if (!xlt1->ptypes)
- goto err;
-
- xlt1->ptg_tbl = (struct ice_ptg_entry *)
- ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
-
- if (!xlt1->ptg_tbl)
- goto err;
-
- xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
- if (!xlt1->t)
- goto err;
-
- xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
- xlt2->count = blk_sizes[i].xlt2;
-
- xlt2->vsis = (struct ice_vsig_vsi *)
- ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
-
- if (!xlt2->vsis)
- goto err;
-
- xlt2->vsig_tbl = (struct ice_vsig_entry *)
- ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
- if (!xlt2->vsig_tbl)
- goto err;
+ if (xlt1->ptypes)
+ ice_memset(xlt1->ptypes, 0,
+ xlt1->count * sizeof(*xlt1->ptypes),
+ ICE_NONDMA_MEM);
- for (j = 0; j < xlt2->count; j++)
- INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
-
- xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
- if (!xlt2->t)
- goto err;
+ if (xlt1->ptg_tbl)
+ ice_memset(xlt1->ptg_tbl, 0,
+ ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl),
+ ICE_NONDMA_MEM);
- prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
- prof->count = blk_sizes[i].prof_tcam;
- prof->max_prof_id = blk_sizes[i].prof_id;
- prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
- prof->t = (struct ice_prof_tcam_entry *)
- ice_calloc(hw, prof->count, sizeof(*prof->t));
+ if (xlt1->t)
+ ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t),
+ ICE_NONDMA_MEM);
- if (!prof->t)
- goto err;
+ if (xlt2->vsis)
+ ice_memset(xlt2->vsis, 0,
+ xlt2->count * sizeof(*xlt2->vsis),
+ ICE_NONDMA_MEM);
- prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
- prof_redir->count = blk_sizes[i].prof_redir;
- prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
- sizeof(*prof_redir->t));
+ if (xlt2->vsig_tbl)
+ ice_memset(xlt2->vsig_tbl, 0,
+ xlt2->count * sizeof(*xlt2->vsig_tbl),
+ ICE_NONDMA_MEM);
- if (!prof_redir->t)
- goto err;
+ if (xlt2->t)
+ ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t),
+ ICE_NONDMA_MEM);
- es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
- es->count = blk_sizes[i].es;
- es->fvw = blk_sizes[i].fvw;
- es->t = (struct ice_fv_word *)
- ice_calloc(hw, (u32)(es->count * es->fvw),
- sizeof(*es->t));
- if (!es->t)
- goto err;
+ if (prof->t)
+ ice_memset(prof->t, 0, prof->count * sizeof(*prof->t),
+ ICE_NONDMA_MEM);
- es->ref_count = (u16 *)
- ice_calloc(hw, es->count, sizeof(*es->ref_count));
+ if (prof_redir->t)
+ ice_memset(prof_redir->t, 0,
+ prof_redir->count * sizeof(*prof_redir->t),
+ ICE_NONDMA_MEM);
- if (!es->ref_count)
- goto err;
+ if (es->t)
+ ice_memset(es->t, 0,
+ es->count * sizeof(*es->t) * es->fvw,
+ ICE_NONDMA_MEM);
- es->written = (u8 *)
- ice_calloc(hw, es->count, sizeof(*es->written));
+ if (es->ref_count)
+ ice_memset(es->ref_count, 0,
+ es->count * sizeof(*es->ref_count),
+ ICE_NONDMA_MEM);
- if (!es->written)
- goto err;
+ if (es->written)
+ ice_memset(es->written, 0,
+ es->count * sizeof(*es->written),
+ ICE_NONDMA_MEM);
}
- return ICE_SUCCESS;
-
-err:
- ice_free_hw_tbls(hw);
- return ICE_ERR_NO_MEMORY;
}
/**
@@ -4338,7 +2533,7 @@ error_tmp:
* @hw: pointer to the HW struct
* @blk: hardware block
* @id: profile tracking ID
- * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
+ * @ptypes: bitmap indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
* @es: extraction sequence (length of array is determined by the block)
*
* This function registers a profile, which matches a set of PTGs with a
@@ -4347,15 +2542,14 @@ error_tmp:
* the ID value used here.
*/
enum ice_status
-ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- struct ice_fv_word *es)
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
+ ice_bitmap_t *ptypes, struct ice_fv_word *es)
{
- u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
enum ice_status status;
- u8 byte = 0;
u8 prof_id;
+ u16 ptype;
ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
@@ -4387,42 +2581,24 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
prof->context = 0;
/* build list of ptgs */
- while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
- u8 bit;
+ ice_for_each_set_bit(ptype, ptypes, ICE_FLOW_PTYPE_MAX) {
+ u8 ptg;
- if (!ptypes[byte]) {
- bytes--;
- byte++;
+ /* The package should place all ptypes in a non-zero
+ * PTG, so the following call should never fail.
+ */
+ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
continue;
- }
-
- /* Examine 8 bits per byte */
- ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte],
- BITS_PER_BYTE) {
- u16 ptype;
- u8 ptg;
-
- ptype = byte * BITS_PER_BYTE + bit;
-
- /* The package should place all ptypes in a non-zero
- * PTG, so the following call should never fail.
- */
- if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
- continue;
-
- /* If PTG is already added, skip and continue */
- if (ice_is_bit_set(ptgs_used, ptg))
- continue;
- ice_set_bit(ptg, ptgs_used);
- prof->ptg[prof->ptg_cnt] = ptg;
+ /* If PTG is already added, skip and continue */
+ if (ice_is_bit_set(ptgs_used, ptg))
+ continue;
- if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
- break;
- }
+ ice_set_bit(ptg, ptgs_used);
+ prof->ptg[prof->ptg_cnt] = ptg;
- bytes--;
- byte++;
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
+ break;
}
LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
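
(An illustrative sketch of a caller adapted to the new signature: ptypes is
now a single bitmap of ICE_FLOW_PTYPE_MAX bits rather than a byte array.
prof_id and es are placeholders for the example.)

    ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);

    ice_zero_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
    ice_set_bit(ICE_PTYPE_MAC_PAY, ptypes);

    status = ice_add_prof(hw, ICE_BLK_RSS, prof_id, ptypes, es);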
@@ -4588,12 +2764,13 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_vsi *vsi_cur;
struct ice_vsig_prof *d, *t;
- enum ice_status status;
/* remove TCAM entries */
LIST_FOR_EACH_ENTRY_SAFE(d, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
ice_vsig_prof, list) {
+ enum ice_status status;
+
status = ice_rem_prof_id(hw, blk, d);
if (status)
return status;
@@ -4619,7 +2796,7 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
p->type = ICE_VSIG_REM;
p->orig_vsig = vsig;
p->vsig = ICE_DEFAULT_VSIG;
- p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
+ p->vsi = (u16)(vsi_cur - hw->blk[blk].xlt2.vsis);
LIST_ADD(&p->list_entry, chg);
@@ -4643,12 +2820,13 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
{
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_prof *p, *t;
- enum ice_status status;
LIST_FOR_EACH_ENTRY_SAFE(p, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
ice_vsig_prof, list)
if (p->profile_cookie == hdl) {
+ enum ice_status status;
+
if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
/* this is the last profile, remove the VSIG */
return ice_rem_vsig(hw, blk, vsig, chg);
@@ -5507,10 +3685,11 @@ enum ice_status
ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
u64 id)
{
- enum ice_status status;
u16 i;
for (i = 0; i < count; i++) {
+ enum ice_status status;
+
status = ice_add_prof_id_flow(hw, blk, vsi[i], id);
if (status)
return status;
@@ -5689,10 +3868,11 @@ enum ice_status
ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
u64 id)
{
- enum ice_status status;
u16 i;
for (i = 0; i < count; i++) {
+ enum ice_status status;
+
status = ice_rem_prof_id_flow(hw, blk, vsi[i], id);
if (status)
return status;
diff --git a/sys/dev/ice/ice_flex_pipe.h b/sys/dev/ice/ice_flex_pipe.h
index 5c1dd7157537..ada71b2d446a 100644
--- a/sys/dev/ice/ice_flex_pipe.h
+++ b/sys/dev/ice/ice_flex_pipe.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,20 +35,6 @@
#include "ice_type.h"
-/* Package minimal version supported */
-#define ICE_PKG_SUPP_VER_MAJ 1
-#define ICE_PKG_SUPP_VER_MNR 3
-
-/* Package format version */
-#define ICE_PKG_FMT_VER_MAJ 1
-#define ICE_PKG_FMT_VER_MNR 0
-#define ICE_PKG_FMT_VER_UPD 0
-#define ICE_PKG_FMT_VER_DFT 0
-
-#define ICE_PKG_CNT 4
-
-enum ice_status
-ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
enum ice_status
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off);
@@ -61,12 +47,6 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
void
ice_init_prof_result_bm(struct ice_hw *hw);
enum ice_status
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
- ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
-enum ice_status
-ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
-u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
-enum ice_status
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd);
bool
@@ -89,8 +69,8 @@ enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk);
enum ice_status
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig);
enum ice_status
-ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- struct ice_fv_word *es);
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
+ ice_bitmap_t *ptypes, struct ice_fv_word *es);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
enum ice_status
@@ -103,11 +83,7 @@ enum ice_status
ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt);
enum ice_status
ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt);
-enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
-enum ice_status
-ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
-void ice_free_seg(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
@@ -119,10 +95,14 @@ ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
u64 id);
enum ice_status
ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
-struct ice_buf_build *
-ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
- void **section);
-struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
-void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
+
+void ice_fill_blk_tbls(struct ice_hw *hw);
+
+/* To support tunneling entries by PF, the package will append the PF number to
+ * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
+ */
+#define ICE_TNL_PRE "TNL_"
+
+void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val);
#endif /* _ICE_FLEX_PIPE_H_ */
diff --git a/sys/dev/ice/ice_flex_type.h b/sys/dev/ice/ice_flex_type.h
index bd3b6ddeaf7b..145797f34b7a 100644
--- a/sys/dev/ice/ice_flex_type.h
+++ b/sys/dev/ice/ice_flex_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,6 +42,7 @@ struct ice_fv_word {
u16 off; /* Offset within the protocol header */
u8 resvrd;
};
+
#pragma pack()
#define ICE_MAX_NUM_PROFILES 256
@@ -51,251 +52,6 @@ struct ice_fv {
struct ice_fv_word ew[ICE_MAX_FV_WORDS];
};
-/* Package and segment headers and tables */
-struct ice_pkg_hdr {
- struct ice_pkg_ver pkg_format_ver;
- __le32 seg_count;
- __le32 seg_offset[STRUCT_HACK_VAR_LEN];
-};
-
-/* generic segment */
-struct ice_generic_seg_hdr {
-#define SEGMENT_TYPE_METADATA 0x00000001
-#define SEGMENT_TYPE_ICE_E810 0x00000010
- __le32 seg_type;
- struct ice_pkg_ver seg_format_ver;
- __le32 seg_size;
- char seg_id[ICE_PKG_NAME_SIZE];
-};
-
-/* ice specific segment */
-
-union ice_device_id {
- struct {
- __le16 device_id;
- __le16 vendor_id;
- } dev_vend_id;
- __le32 id;
-};
-
-struct ice_device_id_entry {
- union ice_device_id device;
- union ice_device_id sub_device;
-};
-
-struct ice_seg {
- struct ice_generic_seg_hdr hdr;
- __le32 device_table_count;
- struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN];
-};
-
-struct ice_nvm_table {
- __le32 table_count;
- __le32 vers[STRUCT_HACK_VAR_LEN];
-};
-
-struct ice_buf {
-#define ICE_PKG_BUF_SIZE 4096
- u8 buf[ICE_PKG_BUF_SIZE];
-};
-
-struct ice_buf_table {
- __le32 buf_count;
- struct ice_buf buf_array[STRUCT_HACK_VAR_LEN];
-};
-
-/* global metadata specific segment */
-struct ice_global_metadata_seg {
- struct ice_generic_seg_hdr hdr;
- struct ice_pkg_ver pkg_ver;
- __le32 rsvd;
- char pkg_name[ICE_PKG_NAME_SIZE];
-};
-
-#define ICE_MIN_S_OFF 12
-#define ICE_MAX_S_OFF 4095
-#define ICE_MIN_S_SZ 1
-#define ICE_MAX_S_SZ 4084
-
-/* section information */
-struct ice_section_entry {
- __le32 type;
- __le16 offset;
- __le16 size;
-};
-
-#define ICE_MIN_S_COUNT 1
-#define ICE_MAX_S_COUNT 511
-#define ICE_MIN_S_DATA_END 12
-#define ICE_MAX_S_DATA_END 4096
-
-#define ICE_METADATA_BUF 0x80000000
-
-struct ice_buf_hdr {
- __le16 section_count;
- __le16 data_end;
- struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN];
-};
-
-#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
- ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
- (ent_sz))
-
-/* ice package section IDs */
-#define ICE_SID_METADATA 1
-#define ICE_SID_XLT0_SW 10
-#define ICE_SID_XLT_KEY_BUILDER_SW 11
-#define ICE_SID_XLT1_SW 12
-#define ICE_SID_XLT2_SW 13
-#define ICE_SID_PROFID_TCAM_SW 14
-#define ICE_SID_PROFID_REDIR_SW 15
-#define ICE_SID_FLD_VEC_SW 16
-#define ICE_SID_CDID_KEY_BUILDER_SW 17
-#define ICE_SID_CDID_REDIR_SW 18
-
-#define ICE_SID_XLT0_ACL 20
-#define ICE_SID_XLT_KEY_BUILDER_ACL 21
-#define ICE_SID_XLT1_ACL 22
-#define ICE_SID_XLT2_ACL 23
-#define ICE_SID_PROFID_TCAM_ACL 24
-#define ICE_SID_PROFID_REDIR_ACL 25
-#define ICE_SID_FLD_VEC_ACL 26
-#define ICE_SID_CDID_KEY_BUILDER_ACL 27
-#define ICE_SID_CDID_REDIR_ACL 28
-
-#define ICE_SID_XLT0_FD 30
-#define ICE_SID_XLT_KEY_BUILDER_FD 31
-#define ICE_SID_XLT1_FD 32
-#define ICE_SID_XLT2_FD 33
-#define ICE_SID_PROFID_TCAM_FD 34
-#define ICE_SID_PROFID_REDIR_FD 35
-#define ICE_SID_FLD_VEC_FD 36
-#define ICE_SID_CDID_KEY_BUILDER_FD 37
-#define ICE_SID_CDID_REDIR_FD 38
-
-#define ICE_SID_XLT0_RSS 40
-#define ICE_SID_XLT_KEY_BUILDER_RSS 41
-#define ICE_SID_XLT1_RSS 42
-#define ICE_SID_XLT2_RSS 43
-#define ICE_SID_PROFID_TCAM_RSS 44
-#define ICE_SID_PROFID_REDIR_RSS 45
-#define ICE_SID_FLD_VEC_RSS 46
-#define ICE_SID_CDID_KEY_BUILDER_RSS 47
-#define ICE_SID_CDID_REDIR_RSS 48
-
-#define ICE_SID_RXPARSER_CAM 50
-#define ICE_SID_RXPARSER_NOMATCH_CAM 51
-#define ICE_SID_RXPARSER_IMEM 52
-#define ICE_SID_RXPARSER_XLT0_BUILDER 53
-#define ICE_SID_RXPARSER_NODE_PTYPE 54
-#define ICE_SID_RXPARSER_MARKER_PTYPE 55
-#define ICE_SID_RXPARSER_BOOST_TCAM 56
-#define ICE_SID_RXPARSER_PROTO_GRP 57
-#define ICE_SID_RXPARSER_METADATA_INIT 58
-#define ICE_SID_RXPARSER_XLT0 59
-
-#define ICE_SID_TXPARSER_CAM 60
-#define ICE_SID_TXPARSER_NOMATCH_CAM 61
-#define ICE_SID_TXPARSER_IMEM 62
-#define ICE_SID_TXPARSER_XLT0_BUILDER 63
-#define ICE_SID_TXPARSER_NODE_PTYPE 64
-#define ICE_SID_TXPARSER_MARKER_PTYPE 65
-#define ICE_SID_TXPARSER_BOOST_TCAM 66
-#define ICE_SID_TXPARSER_PROTO_GRP 67
-#define ICE_SID_TXPARSER_METADATA_INIT 68
-#define ICE_SID_TXPARSER_XLT0 69
-
-#define ICE_SID_RXPARSER_INIT_REDIR 70
-#define ICE_SID_TXPARSER_INIT_REDIR 71
-#define ICE_SID_RXPARSER_MARKER_GRP 72
-#define ICE_SID_TXPARSER_MARKER_GRP 73
-#define ICE_SID_RXPARSER_LAST_PROTO 74
-#define ICE_SID_TXPARSER_LAST_PROTO 75
-#define ICE_SID_RXPARSER_PG_SPILL 76
-#define ICE_SID_TXPARSER_PG_SPILL 77
-#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
-#define ICE_SID_TXPARSER_NOMATCH_SPILL 79
-
-#define ICE_SID_XLT0_PE 80
-#define ICE_SID_XLT_KEY_BUILDER_PE 81
-#define ICE_SID_XLT1_PE 82
-#define ICE_SID_XLT2_PE 83
-#define ICE_SID_PROFID_TCAM_PE 84
-#define ICE_SID_PROFID_REDIR_PE 85
-#define ICE_SID_FLD_VEC_PE 86
-#define ICE_SID_CDID_KEY_BUILDER_PE 87
-#define ICE_SID_CDID_REDIR_PE 88
-
-#define ICE_SID_RXPARSER_FLAG_REDIR 97
-
-/* Label Metadata section IDs */
-#define ICE_SID_LBL_FIRST 0x80000010
-#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010
-#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011
-#define ICE_SID_LBL_RESERVED_12 0x80000012
-#define ICE_SID_LBL_RESERVED_13 0x80000013
-#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014
-#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015
-#define ICE_SID_LBL_PTYPE 0x80000016
-#define ICE_SID_LBL_PROTOCOL_ID 0x80000017
-#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
-#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019
-#define ICE_SID_LBL_RXPARSER_PG 0x8000001A
-#define ICE_SID_LBL_TXPARSER_PG 0x8000001B
-#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C
-#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D
-#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E
-#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F
-#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020
-#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021
-#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022
-#define ICE_SID_LBL_FLAG 0x80000023
-#define ICE_SID_LBL_REG 0x80000024
-#define ICE_SID_LBL_SW_PTG 0x80000025
-#define ICE_SID_LBL_ACL_PTG 0x80000026
-#define ICE_SID_LBL_PE_PTG 0x80000027
-#define ICE_SID_LBL_RSS_PTG 0x80000028
-#define ICE_SID_LBL_FD_PTG 0x80000029
-#define ICE_SID_LBL_SW_VSIG 0x8000002A
-#define ICE_SID_LBL_ACL_VSIG 0x8000002B
-#define ICE_SID_LBL_PE_VSIG 0x8000002C
-#define ICE_SID_LBL_RSS_VSIG 0x8000002D
-#define ICE_SID_LBL_FD_VSIG 0x8000002E
-#define ICE_SID_LBL_PTYPE_META 0x8000002F
-#define ICE_SID_LBL_SW_PROFID 0x80000030
-#define ICE_SID_LBL_ACL_PROFID 0x80000031
-#define ICE_SID_LBL_PE_PROFID 0x80000032
-#define ICE_SID_LBL_RSS_PROFID 0x80000033
-#define ICE_SID_LBL_FD_PROFID 0x80000034
-#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035
-#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036
-#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037
-#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038
-/* The following define MUST be updated to reflect the last label section ID */
-#define ICE_SID_LBL_LAST 0x80000038
-
-enum ice_block {
- ICE_BLK_SW = 0,
- ICE_BLK_ACL,
- ICE_BLK_FD,
- ICE_BLK_RSS,
- ICE_BLK_PE,
- ICE_BLK_COUNT
-};
-
-enum ice_sect {
- ICE_XLT0 = 0,
- ICE_XLT_KB,
- ICE_XLT1,
- ICE_XLT2,
- ICE_PROF_TCAM,
- ICE_PROF_REDIR,
- ICE_VEC_TBL,
- ICE_CDID_KB,
- ICE_CDID_REDIR,
- ICE_SECT_COUNT
-};
-
/* Packet Type (PTYPE) values */
#define ICE_PTYPE_MAC_PAY 1
#define ICE_PTYPE_IPV4FRAG_PAY 22
@@ -401,10 +157,18 @@ struct ice_sw_fv_list_entry {
* fields of the packet are now little endian.
*/
struct ice_boost_key_value {
-#define ICE_BOOST_REMAINING_HV_KEY 15
+#define ICE_BOOST_REMAINING_HV_KEY 15
u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
- __le16 hv_dst_port_key;
- __le16 hv_src_port_key;
+ union {
+ struct {
+ __le16 hv_dst_port_key;
+ __le16 hv_src_port_key;
+ } /* udp_tunnel */;
+ struct {
+ __le16 hv_vlan_id_key;
+ __le16 hv_etype_key;
+ } vlan;
+ };
u8 tcam_search_key;
};
#pragma pack()
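The new union overlays the last four bytes of the boost key, so the same
TCAM storage reads as UDP tunnel port keys or as VLAN ID/ethertype keys
depending on the match type. A minimal sketch of the aliasing (field names
taken from the struct above; CPU_TO_LE16 is the driver's endianness macro):

	struct ice_boost_key_value key = {0};
	/* Program a VXLAN destination port through the udp_tunnel view */
	key.hv_dst_port_key = CPU_TO_LE16(4789);
	/* The same two bytes, reinterpreted through the vlan view */
	__le16 vlan_id_key = key.vlan.hv_vlan_id_key; /* CPU_TO_LE16(4789) */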
@@ -457,33 +221,15 @@ struct ice_prof_redir_section {
u8 redir_value[STRUCT_HACK_VAR_LEN];
};
-/* package buffer building */
-
-struct ice_buf_build {
- struct ice_buf buf;
- u16 reserved_section_table_entries;
-};
-
-struct ice_pkg_enum {
- struct ice_buf_table *buf_table;
- u32 buf_idx;
-
- u32 type;
- struct ice_buf_hdr *buf;
- u32 sect_idx;
- void *sect;
- u32 sect_type;
-
- u32 entry_idx;
- void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
-};
-
/* Tunnel enabling */
enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
+ TNL_GRETAP,
TNL_GTP,
+ TNL_GTPC,
+ TNL_GTPU,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
};
@@ -726,10 +472,13 @@ struct ice_chs_chg {
#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
enum ice_prof_type {
+ ICE_PROF_INVALID = 0x0,
ICE_PROF_NON_TUN = 0x1,
ICE_PROF_TUN_UDP = 0x2,
ICE_PROF_TUN_GRE = 0x4,
- ICE_PROF_TUN_ALL = 0x6,
+ ICE_PROF_TUN_GTPU = 0x8,
+ ICE_PROF_TUN_GTPC = 0x10,
+ ICE_PROF_TUN_ALL = 0x1E,
ICE_PROF_ALL = 0xFF,
};
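Note that ice_prof_type is used as a bitmask, and the updated
ICE_PROF_TUN_ALL is exactly the union of the four tunnel bits; a
compile-time sketch of the invariant:

	/* Sketch: ICE_PROF_TUN_ALL must cover every tunnel profile bit */
	_Static_assert((ICE_PROF_TUN_UDP | ICE_PROF_TUN_GRE |
	    ICE_PROF_TUN_GTPU | ICE_PROF_TUN_GTPC) == ICE_PROF_TUN_ALL,
	    "0x2 | 0x4 | 0x8 | 0x10 == 0x1E");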
diff --git a/sys/dev/ice/ice_flow.c b/sys/dev/ice/ice_flow.c
index 387882bfe903..73abf03d43b8 100644
--- a/sys/dev/ice/ice_flow.c
+++ b/sys/dev/ice/ice_flow.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -400,6 +400,7 @@ struct ice_flow_prof_params {
* This will give us the direction flags.
*/
struct ice_fv_word es[ICE_MAX_FV_WORDS];
+
ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
};
@@ -566,8 +567,8 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
u8 seg, enum ice_flow_field fld)
{
enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
+ u8 fv_words = (u8)hw->blk[params->blk].es.fvw;
enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
- u8 fv_words = hw->blk[params->blk].es.fvw;
struct ice_flow_fld_info *flds;
u16 cnt, ese_bits, i;
u16 off;
@@ -593,7 +594,6 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
case ICE_FLOW_FIELD_IDX_IPV4_TTL:
case ICE_FLOW_FIELD_IDX_IPV4_PROT:
prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
-
/* TTL and PROT share the same extraction seq. entry.
* Each is considered a sibling to the other in terms of sharing
* the same extraction sequence entry.
@@ -606,7 +606,6 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
case ICE_FLOW_FIELD_IDX_IPV6_TTL:
case ICE_FLOW_FIELD_IDX_IPV6_PROT:
prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
-
/* TTL and PROT share the same extraction seq. entry.
* Each is considered a sibling to the other in terms of sharing
* the same extraction sequence entry.
@@ -666,7 +665,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
*/
ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
- flds[fld].xtrct.prot_id = prot_id;
+ flds[fld].xtrct.prot_id = (u8)prot_id;
flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
ICE_FLOW_FV_EXTRACT_SZ;
flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
@@ -702,7 +701,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
else
idx = params->es_cnt;
- params->es[idx].prot_id = prot_id;
+ params->es[idx].prot_id = (u8)prot_id;
params->es[idx].off = off;
params->es_cnt++;
}
@@ -952,8 +951,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
}
/* Add a HW profile for this flow profile */
- status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
- params->es);
+ status = ice_add_prof(hw, blk, prof_id, params->ptypes, params->es);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
@@ -1286,13 +1284,13 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
{
struct ice_flow_seg_info *seg;
u64 val;
- u8 i;
+ u16 i;
/* set inner most segment */
seg = &segs[seg_cnt - 1];
ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
- ICE_FLOW_FIELD_IDX_MAX)
+ (u16)ICE_FLOW_FIELD_IDX_MAX)
ice_flow_set_fld(seg, (enum ice_flow_field)i,
ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);
diff --git a/sys/dev/ice/ice_flow.h b/sys/dev/ice/ice_flow.h
index a66c773b8d77..07e16e3bc4d1 100644
--- a/sys/dev/ice/ice_flow.h
+++ b/sys/dev/ice/ice_flow.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_fw_logging.c b/sys/dev/ice/ice_fw_logging.c
index fb97df5f5797..e49a82c88982 100644
--- a/sys/dev/ice/ice_fw_logging.c
+++ b/sys/dev/ice/ice_fw_logging.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_fwlog.c b/sys/dev/ice/ice_fwlog.c
index 375b80647e1a..9c04b4ca0411 100644
--- a/sys/dev/ice/ice_fwlog.c
+++ b/sys/dev/ice/ice_fwlog.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_fwlog.h b/sys/dev/ice/ice_fwlog.h
index c8906d56a75a..4b8cc0938db5 100644
--- a/sys/dev/ice/ice_fwlog.h
+++ b/sys/dev/ice/ice_fwlog.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -72,7 +72,7 @@ struct ice_fwlog_cfg {
/* options used to configure firmware logging */
u16 options;
/* minimum number of log events sent per Admin Receive Queue event */
- u8 log_resolution;
+ u16 log_resolution;
};
void ice_fwlog_set_support_ena(struct ice_hw *hw);
diff --git a/sys/dev/ice/ice_hw_autogen.h b/sys/dev/ice/ice_hw_autogen.h
index a1e9024ba34e..691c36d81078 100644
--- a/sys/dev/ice/ice_hw_autogen.h
+++ b/sys/dev/ice/ice_hw_autogen.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_iflib.h b/sys/dev/ice/ice_iflib.h
index 07654afe4539..9e073b332dd0 100644
--- a/sys/dev/ice/ice_iflib.h
+++ b/sys/dev/ice/ice_iflib.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -285,10 +285,16 @@ struct ice_softc {
/* Ethertype filters enabled */
bool enable_tx_fc_filter;
bool enable_tx_lldp_filter;
-
+
/* Other tunable flags */
bool enable_health_events;
+ /* 5-layer scheduler topology enabled */
+ bool tx_balance_en;
+
+ /* Allow additional non-standard FEC mode */
+ bool allow_no_fec_mod_in_auto;
+
int rebuild_ticks;
/* driver state flags, only access using atomic functions */
@@ -297,6 +303,8 @@ struct ice_softc {
/* NVM link override settings */
struct ice_link_default_override_tlv ldo_tlv;
+ u16 fw_debug_dump_cluster_mask;
+
struct sx *iflib_ctx_lock;
/* Tri-state feature flags (capable/enabled) */
diff --git a/sys/dev/ice/ice_iflib_recovery_txrx.c b/sys/dev/ice/ice_iflib_recovery_txrx.c
index 91f52330684d..d83f12a94843 100644
--- a/sys/dev/ice/ice_iflib_recovery_txrx.c
+++ b/sys/dev/ice/ice_iflib_recovery_txrx.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_iflib_sysctls.h b/sys/dev/ice/ice_iflib_sysctls.h
index fc88f8e58821..7dfdbc9d5f9b 100644
--- a/sys/dev/ice/ice_iflib_sysctls.h
+++ b/sys/dev/ice/ice_iflib_sysctls.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_iflib_txrx.c b/sys/dev/ice/ice_iflib_txrx.c
index 52c4364a2430..f2ae62c77f7c 100644
--- a/sys/dev/ice/ice_iflib_txrx.c
+++ b/sys/dev/ice/ice_iflib_txrx.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,7 +55,7 @@ static int ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear);
static int ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
static void ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
static void ice_ift_rxd_refill(void *arg, if_rxd_update_t iru);
-static qidx_t ice_ift_queue_select(void *arg, struct mbuf *m);
+static qidx_t ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi);
/* Macro to help extract the NIC mode flexible Rx descriptor fields from the
* advanced 32byte Rx descriptors.
@@ -79,7 +79,7 @@ struct if_txrx ice_txrx = {
.ift_rxd_pkt_get = ice_ift_rxd_pkt_get,
.ift_rxd_refill = ice_ift_rxd_refill,
.ift_rxd_flush = ice_ift_rxd_flush,
- .ift_txq_select = ice_ift_queue_select,
+ .ift_txq_select_v2 = ice_ift_queue_select,
};
/**
@@ -284,7 +284,6 @@ static int
ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
struct ice_softc *sc = (struct ice_softc *)arg;
- if_softc_ctx_t scctx = sc->scctx;
struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[ri->iri_qsidx];
union ice_32b_rx_flex_desc *cur;
u16 status0, plen, ptype;
@@ -342,7 +341,7 @@ ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
/* Get packet type and set checksum flags */
ptype = le16toh(cur->wb.ptype_flex_flags0) &
ICE_RX_FLEX_DESC_PTYPE_M;
- if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
+ if ((iflib_get_ifp(sc->ctx)->if_capenable & IFCAP_RXCSUM) != 0)
ice_rx_checksum(rxq, &ri->iri_csum_flags,
&ri->iri_csum_data, status0, ptype);
@@ -408,9 +407,10 @@ ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused,
}
static qidx_t
-ice_ift_queue_select(void *arg, struct mbuf *m)
+ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi)
{
struct ice_softc *sc = (struct ice_softc *)arg;
+ struct ice_dcbx_cfg *local_dcbx_cfg;
struct ice_vsi *vsi = &sc->pf_vsi;
u16 tc_base_queue, tc_qcount;
u8 up, tc;
@@ -431,12 +431,21 @@ ice_ift_queue_select(void *arg, struct mbuf *m)
return (0);
}
- /* Use default TC unless overridden */
+ /* Use default TC unless overridden later */
tc = 0; /* XXX: Get default TC for traffic if >1 TC? */
- if (m->m_flags & M_VLANTAG) {
+ local_dcbx_cfg = &sc->hw.port_info->qos_cfg.local_dcbx_cfg;
+
+#if defined(INET) || defined(INET6)
+ if ((local_dcbx_cfg->pfc_mode == ICE_QOS_MODE_DSCP) &&
+ (pi->ipi_flags & (IPI_TX_IPV4 | IPI_TX_IPV6))) {
+ u8 dscp_val = pi->ipi_ip_tos >> 2;
+ tc = local_dcbx_cfg->dscp_map[dscp_val];
+ } else
+#endif /* defined(INET) || defined(INET6) */
+ if (m->m_flags & M_VLANTAG) { /* ICE_QOS_MODE_VLAN */
up = EVL_PRIOFTAG(m->m_pkthdr.ether_vtag);
- tc = sc->hw.port_info->qos_cfg.local_dcbx_cfg.etscfg.prio_table[up];
+ tc = local_dcbx_cfg->etscfg.prio_table[up];
}
tc_base_queue = vsi->tc_info[tc].qoffset;
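In DSCP mode the traffic class is keyed on the upper six bits of the IP
TOS/traffic-class byte, which is what the ipi_ip_tos >> 2 shift extracts.
A worked sketch of the lookup (values illustrative):

	/* TOS 0xb8 carries DSCP EF (46); the map picks the TC for it */
	u8 tos = 0xb8;
	u8 dscp_val = tos >> 2;	/* 0xb8 >> 2 == 46 */
	u8 tc = local_dcbx_cfg->dscp_map[dscp_val];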
diff --git a/sys/dev/ice/ice_lan_tx_rx.h b/sys/dev/ice/ice_lan_tx_rx.h
index 5cc611fef25f..b489388c68a8 100644
--- a/sys/dev/ice/ice_lan_tx_rx.h
+++ b/sys/dev/ice/ice_lan_tx_rx.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -948,10 +948,10 @@ struct ice_tx_ctx_desc {
__le64 qw1;
};
-#define ICE_TX_GSC_DESC_START 0 /* 7 BITS */
-#define ICE_TX_GSC_DESC_OFFSET 7 /* 4 BITS */
-#define ICE_TX_GSC_DESC_TYPE 11 /* 2 BITS */
-#define ICE_TX_GSC_DESC_ENA 13 /* 1 BIT */
+#define ICE_TX_GCS_DESC_START 0 /* 7 BITS */
+#define ICE_TX_GCS_DESC_OFFSET 7 /* 4 BITS */
+#define ICE_TX_GCS_DESC_TYPE 11 /* 2 BITS */
+#define ICE_TX_GCS_DESC_ENA 13 /* 1 BIT */
#define ICE_TXD_CTX_QW1_DTYPE_S 0
#define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S)
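The renamed GCS defines are bit positions within one descriptor word:
start in bits 0-6, offset in bits 7-10, type in bits 11-12, and the
enable flag in bit 13. A hypothetical packing helper built on those
shifts (not part of this patch):

	static inline u16
	ice_tx_gcs_pack(u16 start, u16 off, u16 type, u16 ena)
	{
		return (start << ICE_TX_GCS_DESC_START) |
		       (off << ICE_TX_GCS_DESC_OFFSET) |
		       (type << ICE_TX_GCS_DESC_TYPE) |
		       (ena << ICE_TX_GCS_DESC_ENA);
	}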
diff --git a/sys/dev/ice/ice_lib.c b/sys/dev/ice/ice_lib.c
index f562b3b55b63..0c047b574bf2 100644
--- a/sys/dev/ice/ice_lib.c
+++ b/sys/dev/ice/ice_lib.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -161,20 +161,29 @@ static void
ice_debug_print_mib_change_event(struct ice_softc *sc,
struct ice_rq_event_info *event);
static bool ice_check_ets_bw(u8 *table);
+static u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
static bool
ice_dcb_needs_reconfig(struct ice_softc *sc, struct ice_dcbx_cfg *old_cfg,
struct ice_dcbx_cfg *new_cfg);
static void ice_dcb_recfg(struct ice_softc *sc);
-static u8 ice_dcb_num_tc(u8 tc_map);
+static u8 ice_dcb_tc_contig(u8 tc_map);
static int ice_ets_str_to_tbl(const char *str, u8 *table, u8 limit);
static int ice_pf_vsi_cfg_tc(struct ice_softc *sc, u8 tc_map);
static void ice_sbuf_print_ets_cfg(struct sbuf *sbuf, const char *name,
struct ice_dcb_ets_cfg *ets);
static void ice_stop_pf_vsi(struct ice_softc *sc);
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt);
-static void ice_do_dcb_reconfig(struct ice_softc *sc);
+static void ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib);
static int ice_config_pfc(struct ice_softc *sc, u8 new_mode);
-static u8 ice_dcb_get_tc_map(const struct ice_dcbx_cfg *dcbcfg);
+void
+ice_add_dscp2tc_map_sysctls(struct ice_softc *sc,
+ struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *ctx_list);
+static void ice_set_default_local_mib_settings(struct ice_softc *sc);
+static bool ice_dscp_is_mapped(struct ice_dcbx_cfg *dcbcfg);
+static void ice_start_dcbx_agent(struct ice_softc *sc);
+static void ice_fw_debug_dump_print_cluster(struct ice_softc *sc,
+ struct sbuf *sbuf, u16 cluster_id);
static int ice_module_init(void);
static int ice_module_exit(void);
@@ -228,6 +237,11 @@ static int ice_sysctl_ets_min_rate(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_up2tc_map(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_pfc_config(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_query_port_ets(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_dscp2tc_map(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_pfc_mode(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_fw_debug_dump_cluster_setting(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_allow_no_fec_mod_in_auto(SYSCTL_HANDLER_ARGS);
/**
* ice_map_bar - Map PCIe BAR memory
@@ -567,7 +581,6 @@ ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
MPASS(vsi->rx_qmap != NULL);
/* TODO:
- * Handle multiple Traffic Classes
* Handle scattered queues (for VFs)
*/
if (vsi->qmap_type != ICE_RESMGR_ALLOC_CONTIGUOUS)
@@ -578,7 +591,6 @@ ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
ctx->info.q_mapping[0] = CPU_TO_LE16(vsi->rx_qmap[0]);
ctx->info.q_mapping[1] = CPU_TO_LE16(vsi->num_rx_queues);
-
/* Calculate the next power-of-2 of number of queues */
if (vsi->num_rx_queues)
pow = flsl(vsi->num_rx_queues - 1);
@@ -587,6 +599,17 @@ ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
qmap = (pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M;
ctx->info.tc_mapping[0] = CPU_TO_LE16(qmap);
+ /* Fill out default driver TC queue info for VSI */
+ vsi->tc_info[0].qoffset = 0;
+ vsi->tc_info[0].qcount_rx = vsi->num_rx_queues;
+ vsi->tc_info[0].qcount_tx = vsi->num_tx_queues;
+ for (int i = 1; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ vsi->tc_info[i].qoffset = 0;
+ vsi->tc_info[i].qcount_rx = 1;
+ vsi->tc_info[i].qcount_tx = 1;
+ }
+ vsi->tc_map = 0x1;
+
return 0;
}
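The qmap encoding stores the TC 0 queue count as a power-of-2 exponent,
so flsl(n - 1) rounds the queue count up to the next power of two. A
worked example of the arithmetic:

	/* 12 Rx queues: flsl(11) == flsl(0b1011) == 4, so the TC entry
	 * advertises 2^4 = 16 queue slots */
	u16 pow = flsl(12 - 1);
	u16 qmap = (pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M;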
@@ -1748,7 +1771,7 @@ ice_free_fltr_list(struct ice_list_head *list)
* Add a MAC address filter for a given VSI. This is a wrapper around
* ice_add_mac to simplify the interface. First, it only accepts a single
* address, so we don't have to mess around with the list setup in other
- * functions. Second, it ignores the ICE_ERR_ALREADY_EXIST error, so that
+ * functions. Second, it ignores the ICE_ERR_ALREADY_EXISTS error, so that
* callers don't need to worry about attempting to add the same filter twice.
*/
int
@@ -1955,8 +1978,8 @@ ice_process_link_event(struct ice_softc *sc,
device_t dev = sc->dev;
enum ice_status status;
- /* Sanity check that the data length matches */
- MPASS(le16toh(e->desc.datalen) == sizeof(struct ice_aqc_get_link_status_data));
+ /* Sanity check that the data length isn't too small */
+ MPASS(le16toh(e->desc.datalen) >= ICE_GET_LINK_STATUS_DATALEN_V1);
/*
* Even though the adapter gets link status information inside the
@@ -3085,7 +3108,10 @@ ice_sysctl_fec_config(SYSCTL_HANDLER_ARGS)
if (strcmp(req_fec, "auto") == 0 ||
strcmp(req_fec, ice_fec_str(ICE_FEC_AUTO)) == 0) {
- new_mode = ICE_FEC_AUTO;
+ if (sc->allow_no_fec_mod_in_auto)
+ new_mode = ICE_FEC_DIS_AUTO;
+ else
+ new_mode = ICE_FEC_AUTO;
} else if (strcmp(req_fec, "fc") == 0 ||
strcmp(req_fec, ice_fec_str(ICE_FEC_BASER)) == 0) {
new_mode = ICE_FEC_BASER;
@@ -3641,6 +3667,23 @@ ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS)
return (0);
}
+/**
+ * ice_dscp_is_mapped - Check for non-zero DSCP to TC mappings
+ * @dcbcfg: Configuration struct to check for mappings in
+ *
+ * @return true if there exists a non-zero DSCP to TC mapping
+ * inside the input DCB configuration struct.
+ */
+static bool
+ice_dscp_is_mapped(struct ice_dcbx_cfg *dcbcfg)
+{
+ for (int i = 0; i < ICE_DSCP_NUM_VAL; i++)
+ if (dcbcfg->dscp_map[i] != 0)
+ return (true);
+
+ return (false);
+}
+
#define ICE_SYSCTL_HELP_FW_LLDP_AGENT \
"\nDisplay or change FW LLDP agent state:" \
"\n\t0 - disabled" \
@@ -3660,6 +3703,7 @@ static int
ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS)
{
struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_dcbx_cfg *local_dcbx_cfg;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
enum ice_status status;
@@ -3706,6 +3750,15 @@ ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS)
if (old_state != 0 && fw_lldp_enabled == true)
return (0);
+ /* Block transition to FW LLDP if DSCP mode is enabled */
+ local_dcbx_cfg = &hw->port_info->qos_cfg.local_dcbx_cfg;
+ if ((local_dcbx_cfg->pfc_mode == ICE_QOS_MODE_DSCP) &&
+ ice_dscp_is_mapped(local_dcbx_cfg)) {
+ device_printf(dev,
+ "Cannot enable FW-LLDP agent while DSCP QoS is active.\n");
+ return (EOPNOTSUPP);
+ }
+
if (fw_lldp_enabled == false) {
status = ice_aq_stop_lldp(hw, true, true, NULL);
/* EPERM is returned if the LLDP agent is already shutdown */
@@ -3744,6 +3797,7 @@ retry_start_lldp:
return (EIO);
}
}
+ ice_start_dcbx_agent(sc);
hw->port_info->qos_cfg.is_sw_lldp = false;
}
@@ -3855,7 +3909,7 @@ ice_sysctl_ets_min_rate(SYSCTL_HANDLER_ARGS)
return (EIO);
}
- ice_do_dcb_reconfig(sc);
+ ice_do_dcb_reconfig(sc, false);
return (0);
}
@@ -3937,9 +3991,11 @@ ice_sysctl_up2tc_map(SYSCTL_HANDLER_ARGS)
return (ret);
}
- /* Prepare updated ETS TLV */
+ /* Prepare updated ETS CFG/REC TLVs */
memcpy(local_dcbx_cfg->etscfg.prio_table, new_up2tc,
sizeof(new_up2tc));
+ memcpy(local_dcbx_cfg->etsrec.prio_table, new_up2tc,
+ sizeof(new_up2tc));
status = ice_set_dcb_cfg(pi);
if (status) {
@@ -3950,7 +4006,7 @@ ice_sysctl_up2tc_map(SYSCTL_HANDLER_ARGS)
return (EIO);
}
- ice_do_dcb_reconfig(sc);
+ ice_do_dcb_reconfig(sc, false);
return (0);
}
@@ -3998,7 +4054,7 @@ ice_config_pfc(struct ice_softc *sc, u8 new_mode)
return (EIO);
}
- ice_do_dcb_reconfig(sc);
+ ice_do_dcb_reconfig(sc, false);
return (0);
}
@@ -4070,6 +4126,97 @@ ice_sysctl_pfc_config(SYSCTL_HANDLER_ARGS)
return ice_config_pfc(sc, user_pfc);
}
+#define ICE_SYSCTL_HELP_PFC_MODE \
+"\nDisplay and set the current QoS mode for the firmware" \
+"\n\t0: VLAN UP mode" \
+"\n\t1: DSCP mode"
+
+/**
+ * ice_sysctl_pfc_mode
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Gets and sets whether the port is in DSCP or VLAN PCP-based
+ * PFC mode. This is also used to set whether DSCP or VLAN
+ * PCP-based settings are configured for DCB.
+ */
+static int
+ice_sysctl_pfc_mode(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_dcbx_cfg *local_dcbx_cfg;
+ struct ice_port_info *pi;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ u8 user_pfc_mode, aq_pfc_mode;
+ int ret;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ if (req->oldptr == NULL && req->newptr == NULL) {
+ ret = SYSCTL_OUT(req, 0, sizeof(u8));
+ return (ret);
+ }
+
+ pi = hw->port_info;
+ local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
+
+ user_pfc_mode = local_dcbx_cfg->pfc_mode;
+
+ /* Read in the new mode */
+ ret = sysctl_handle_8(oidp, &user_pfc_mode, 0, req);
+ if ((ret) || (req->newptr == NULL))
+ return (ret);
+
+ /* Don't allow setting changes in FW DCB mode */
+ if (!hw->port_info->qos_cfg.is_sw_lldp)
+ return (EPERM);
+
+ /* Currently, there are only two modes */
+ switch (user_pfc_mode) {
+ case 0:
+ aq_pfc_mode = ICE_AQC_PFC_VLAN_BASED_PFC;
+ break;
+ case 1:
+ aq_pfc_mode = ICE_AQC_PFC_DSCP_BASED_PFC;
+ break;
+ default:
+ device_printf(dev,
+ "%s: Valid input range is 0-1 (input %d)\n",
+ __func__, user_pfc_mode);
+ return (EINVAL);
+ }
+
+ status = ice_aq_set_pfc_mode(hw, aq_pfc_mode, NULL);
+ if (status == ICE_ERR_NOT_SUPPORTED) {
+ device_printf(dev,
+ "%s: Failed to set PFC mode; DCB not supported\n",
+ __func__);
+ return (ENODEV);
+ }
+ if (status) {
+ device_printf(dev,
+ "%s: Failed to set PFC mode; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ /* Reset settings to default when mode is changed */
+ ice_set_default_local_mib_settings(sc);
+ /* Cache current settings and reconfigure */
+ local_dcbx_cfg->pfc_mode = user_pfc_mode;
+ ice_do_dcb_reconfig(sc, false);
+
+ return (0);
+}
+
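Once the handler is registered (see the SYSCTL_ADD_PROC call further
down), switching modes is a single userspace write; a sketch, assuming
unit 0 and the standard dev.ice.<unit> sysctl tree:

	/* Userspace sketch; needs <sys/types.h> and <sys/sysctl.h> */
	u_char mode = 1;	/* 1 selects DSCP mode per the help text */
	sysctlbyname("dev.ice.0.pfc_mode", NULL, NULL, &mode, sizeof(mode));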
/**
* ice_add_device_sysctls - add device specific dynamic sysctls
* @sc: device private structure
@@ -4141,6 +4288,18 @@ ice_add_device_sysctls(struct ice_softc *sc)
OID_AUTO, "pfc", CTLTYPE_U8 | CTLFLAG_RW,
sc, 0, ice_sysctl_pfc_config, "CU", ICE_SYSCTL_HELP_PFC_CONFIG);
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "pfc_mode", CTLTYPE_U8 | CTLFLAG_RWTUN,
+ sc, 0, ice_sysctl_pfc_mode, "CU", ICE_SYSCTL_HELP_PFC_MODE);
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "allow_no_fec_modules_in_auto",
+ CTLTYPE_U8 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+ sc, 0, ice_sysctl_allow_no_fec_mod_in_auto, "CU",
+ "Allow \"No FEC\" mode in FEC auto-negotiation");
+
+ ice_add_dscp2tc_map_sysctls(sc, ctx, ctx_list);
+
/* Differentiate software and hardware statistics, by keeping hw stats
* in their own node. This isn't in ice_add_device_tunables, because
* we won't have any CTLFLAG_TUN sysctls under this node.
@@ -5207,6 +5366,55 @@ ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi)
}
/**
+ * ice_add_dscp2tc_map_sysctls - Add sysctl tree for DSCP to TC mapping
+ * @sc: pointer to device private softc
+ * @ctx: the sysctl ctx to use
+ * @ctx_list: list of sysctl children for device (to add sysctl tree to)
+ *
+ * Add a sysctl tree for individual dscp2tc_map sysctls. Each child of this
+ * node can map 8 DSCPs to TC values; there are 8 of these in turn for a total
+ * of 64 DSCP to TC map values that the user can configure.
+ */
+void
+ice_add_dscp2tc_map_sysctls(struct ice_softc *sc,
+ struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *ctx_list)
+{
+ struct sysctl_oid_list *node_list;
+ struct sysctl_oid *node;
+ struct sbuf *namebuf, *descbuf;
+ int first_dscp_val, last_dscp_val;
+
+ node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "dscp2tc_map", CTLFLAG_RD,
+ NULL, "Map of DSCP values to DCB TCs");
+ node_list = SYSCTL_CHILDREN(node);
+
+ namebuf = sbuf_new_auto();
+ descbuf = sbuf_new_auto();
+ for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ sbuf_clear(namebuf);
+ sbuf_clear(descbuf);
+
+ first_dscp_val = i * 8;
+ last_dscp_val = first_dscp_val + 7;
+
+ sbuf_printf(namebuf, "%d-%d", first_dscp_val, last_dscp_val);
+ sbuf_printf(descbuf, "Map DSCP values %d to %d to TCs",
+ first_dscp_val, last_dscp_val);
+
+ sbuf_finish(namebuf);
+ sbuf_finish(descbuf);
+
+ SYSCTL_ADD_PROC(ctx, node_list,
+ OID_AUTO, sbuf_data(namebuf), CTLTYPE_STRING | CTLFLAG_RW,
+ sc, i, ice_sysctl_dscp2tc_map, "A", sbuf_data(descbuf));
+ }
+
+ sbuf_delete(namebuf);
+ sbuf_delete(descbuf);
+}
+
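Each child node covers a block of eight code points, so the eight nodes
span DSCP 0-63 exactly; a compile-time sketch of that assumption
(this takes ICE_MAX_TRAFFIC_CLASS as 8 and ICE_DSCP_NUM_VAL as 64,
matching the loop bounds used above):

	_Static_assert(ICE_MAX_TRAFFIC_CLASS * 8 == ICE_DSCP_NUM_VAL,
	    "eight dscp2tc_map nodes must cover all 64 DSCP values");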
+/**
* ice_add_device_tunables - Add early tunable sysctls and sysctl nodes
* @sc: device private structure
*
@@ -5584,6 +5792,39 @@ ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS)
return (0);
}
+#define ICE_SYSCTL_DEBUG_MASK_HELP \
+"\nSelect debug statements to print to kernel messages" \
+"\nFlags:" \
+"\n\t 0x1 - Function Tracing" \
+"\n\t 0x2 - Driver Initialization" \
+"\n\t 0x4 - Release" \
+"\n\t 0x8 - FW Logging" \
+"\n\t 0x10 - Link" \
+"\n\t 0x20 - PHY" \
+"\n\t 0x40 - Queue Context" \
+"\n\t 0x80 - NVM" \
+"\n\t 0x100 - LAN" \
+"\n\t 0x200 - Flow" \
+"\n\t 0x400 - DCB" \
+"\n\t 0x800 - Diagnostics" \
+"\n\t 0x1000 - Flow Director" \
+"\n\t 0x2000 - Switch" \
+"\n\t 0x4000 - Scheduler" \
+"\n\t 0x8000 - RDMA" \
+"\n\t 0x10000 - DDP Package" \
+"\n\t 0x20000 - Resources" \
+"\n\t 0x40000 - ACL" \
+"\n\t 0x80000 - PTP" \
+"\n\t 0x100000 - Admin Queue messages" \
+"\n\t 0x200000 - Admin Queue descriptors" \
+"\n\t 0x400000 - Admin Queue descriptor buffers" \
+"\n\t 0x800000 - Admin Queue commands" \
+"\n\t 0x1000000 - Parser" \
+"\n\t ..." \
+"\n\t 0x8000000 - (Reserved for user)" \
+"\n\t" \
+"\nUse \"sysctl -x\" to view flags properly."
+
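Because the mask is bitwise, categories combine by ORing their flags; for
example, a mask that traces Link, DCB and Switch activity together:

	/* Sketch: 0x10 (Link) | 0x400 (DCB) | 0x2000 (Switch) == 0x2410 */
	uint64_t debug_mask = 0x10 | 0x400 | 0x2000;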
/**
* ice_add_debug_tunables - Add tunables helpful for debugging the device driver
* @sc: device private structure
@@ -5613,7 +5854,7 @@ ice_add_debug_tunables(struct ice_softc *sc)
SYSCTL_ADD_U64(ctx, debug_list, OID_AUTO, "debug_mask",
ICE_CTLFLAG_DEBUG | CTLFLAG_RW | CTLFLAG_TUN,
&sc->hw.debug_mask, 0,
- "Debug message enable/disable mask");
+ ICE_SYSCTL_DEBUG_MASK_HELP);
/* Load the default value from the global sysctl first */
sc->enable_tx_fc_filter = ice_enable_tx_fc_filter;
@@ -5623,6 +5864,12 @@ ice_add_debug_tunables(struct ice_softc *sc)
&sc->enable_tx_fc_filter, 0,
"Drop Ethertype 0x8808 control frames originating from software on this PF");
+ sc->tx_balance_en = ice_tx_balance_en;
+ SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "tx_balance",
+ ICE_CTLFLAG_DEBUG | CTLFLAG_RWTUN,
+ &sc->tx_balance_en, 0,
+ "Enable 5-layer scheduler topology");
+
/* Load the default value from the global sysctl first */
sc->enable_tx_lldp_filter = ice_enable_tx_lldp_filter;
@@ -5768,6 +6015,300 @@ ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS)
return (0);
}
+#define ICE_SYSCTL_HELP_FW_DEBUG_DUMP_CLUSTER_SETTING \
+"\nSelect clusters to dump with \"dump\" sysctl" \
+"\nFlags:" \
+"\n\t 0x1 - Switch" \
+"\n\t 0x2 - ACL" \
+"\n\t 0x4 - Tx Scheduler" \
+"\n\t 0x8 - Profile Configuration" \
+"\n\t 0x20 - Link" \
+"\n\t 0x80 - DCB" \
+"\n\t 0x100 - L2P" \
+"\n\t" \
+"\nUse \"sysctl -x\" to view flags properly."
+
+/**
+ * ice_sysctl_fw_debug_dump_cluster_setting - Set which clusters to dump
+ * from FW when a FW debug dump occurs
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ */
+static int
+ice_sysctl_fw_debug_dump_cluster_setting(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ device_t dev = sc->dev;
+ u16 clusters;
+ int ret;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ ret = priv_check(curthread, PRIV_DRIVER);
+ if (ret)
+ return (ret);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ clusters = sc->fw_debug_dump_cluster_mask;
+
+ ret = sysctl_handle_16(oidp, &clusters, 0, req);
+ if ((ret) || (req->newptr == NULL))
+ return (ret);
+
+ if (!clusters ||
+ (clusters & ~(ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK))) {
+ device_printf(dev,
+ "%s: ERROR: Incorrect settings requested\n",
+ __func__);
+ return (EINVAL);
+ }
+
+ sc->fw_debug_dump_cluster_mask = clusters;
+
+ return (0);
+}
+
+#define ICE_FW_DUMP_AQ_COUNT_LIMIT (10000)
+
+/**
+ * ice_fw_debug_dump_print_cluster - Print formatted cluster data from FW
+ * @sc: the device softc
+ * @sbuf: initialized sbuf to print data to
+ * @cluster_id: FW cluster ID to print data from
+ *
+ * Reads debug data from the specified cluster id in the FW and prints it to
+ * the input sbuf. This function issues multiple AQ commands to the FW in
+ * order to get all of the data in the cluster.
+ *
+ * @remark Only intended to be used by the sysctl handler
+ * ice_sysctl_fw_debug_dump_do_dump
+ */
+static void
+ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 cluster_id)
+{
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ u16 data_buf_size = ICE_AQ_MAX_BUF_LEN;
+ const u8 reserved_buf[8] = {};
+ enum ice_status status;
+ int counter = 0;
+ u8 *data_buf;
+
+ /* Other setup */
+ data_buf = (u8 *)malloc(data_buf_size, M_ICE, M_NOWAIT | M_ZERO);
+ if (!data_buf)
+ return;
+
+ /* Input parameters / loop variables */
+ u16 table_id = 0;
+ u32 offset = 0;
+
+ /* Output from the Get Internal Data AQ command */
+ u16 ret_buf_size = 0;
+ u16 ret_next_table = 0;
+ u32 ret_next_index = 0;
+
+ ice_debug(hw, ICE_DBG_DIAG, "%s: dumping cluster id %d\n", __func__,
+ cluster_id);
+
+ for (;;) {
+ /* Do not trust the FW behavior to be completely correct */
+ if (counter++ >= ICE_FW_DUMP_AQ_COUNT_LIMIT) {
+ device_printf(dev,
+ "%s: Exceeded counter limit for cluster %d\n",
+ __func__, cluster_id);
+ break;
+ }
+
+ ice_debug(hw, ICE_DBG_DIAG, "---\n");
+ ice_debug(hw, ICE_DBG_DIAG,
+ "table_id 0x%04x offset 0x%08x buf_size %d\n",
+ table_id, offset, data_buf_size);
+
+ status = ice_aq_get_internal_data(hw, cluster_id, table_id,
+ offset, data_buf, data_buf_size, &ret_buf_size,
+ &ret_next_table, &ret_next_index, NULL);
+ if (status) {
+ device_printf(dev,
+ "%s: ice_aq_get_internal_data in cluster %d: err %s aq_err %s\n",
+ __func__, cluster_id, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ break;
+ }
+
+ ice_debug(hw, ICE_DBG_DIAG,
+ "ret_table_id 0x%04x ret_offset 0x%08x ret_buf_size %d\n",
+ ret_next_table, ret_next_index, ret_buf_size);
+
+ /* Print cluster id */
+ u32 print_cluster_id = (u32)cluster_id;
+ sbuf_bcat(sbuf, &print_cluster_id, sizeof(print_cluster_id));
+ /* Print table id */
+ u32 print_table_id = (u32)table_id;
+ sbuf_bcat(sbuf, &print_table_id, sizeof(print_table_id));
+ /* Print table length */
+ u32 print_table_length = (u32)ret_buf_size;
+ sbuf_bcat(sbuf, &print_table_length, sizeof(print_table_length));
+ /* Print current offset */
+ u32 print_curr_offset = offset;
+ sbuf_bcat(sbuf, &print_curr_offset, sizeof(print_curr_offset));
+ /* Print reserved bytes */
+ sbuf_bcat(sbuf, reserved_buf, sizeof(reserved_buf));
+ /* Print data */
+ sbuf_bcat(sbuf, data_buf, ret_buf_size);
+
+ /* Adjust loop variables */
+ memset(data_buf, 0, data_buf_size);
+ bool same_table_next = (table_id == ret_next_table);
+ bool last_table_next = (ret_next_table == 0xff || ret_next_table == 0xffff);
+ bool last_offset_next = (ret_next_index == 0xffffffff || ret_next_index == 0);
+
+ if ((!same_table_next && !last_offset_next) ||
+ (same_table_next && last_table_next)) {
+ device_printf(dev,
+ "%s: Unexpected conditions for same_table_next(%d) last_table_next(%d) last_offset_next(%d), ending cluster (%d)\n",
+ __func__, same_table_next, last_table_next, last_offset_next, cluster_id);
+ break;
+ }
+
+ if (!same_table_next && !last_table_next && last_offset_next) {
+ /* We've hit the end of the table */
+ table_id = ret_next_table;
+ offset = 0;
+ }
+ else if (!same_table_next && last_table_next && last_offset_next) {
+ /* We've hit the end of the cluster */
+ break;
+ }
+ else if (same_table_next && !last_table_next && last_offset_next) {
+ if (cluster_id == 0x1 && table_id < 39)
+ table_id += 1;
+ else
+ break;
+ }
+ else { /* if (same_table_next && !last_table_next && !last_offset_next) */
+ /* More data left in the table */
+ offset = ret_next_index;
+ }
+ }
+
+ free(data_buf, M_ICE);
+}
+
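The branch chain at the bottom of the loop is easier to audit as a
decision table over the three booleans (a restatement of the cases
handled above, not new behavior):

	same_table  last_table  last_offset  action
	    no          -           no       inconsistent, stop with error
	    yes        yes          -        inconsistent, stop with error
	    no          no          yes      end of table, advance table_id
	    no         yes          yes      end of cluster, done
	    yes         no          yes      cluster 0x1 quirk, bump table_id
	    yes         no          no       more data, continue at next index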
+#define ICE_SYSCTL_HELP_FW_DEBUG_DUMP_DO_DUMP \
+"\nWrite 1 to output a FW debug dump containing the clusters specified by the \"clusters\" sysctl" \
+"\nThe \"-b\" flag must be used in order to dump this data as binary data because" \
+"\nthis data is opaque and not a string."
+
+#define ICE_FW_DUMP_BASE_TEXT_SIZE (1024 * 1024)
+#define ICE_FW_DUMP_CLUST0_TEXT_SIZE (2 * 1024 * 1024)
+#define ICE_FW_DUMP_CLUST1_TEXT_SIZE (128 * 1024)
+#define ICE_FW_DUMP_CLUST2_TEXT_SIZE (2 * 1024 * 1024)
+
+/**
+ * ice_sysctl_fw_debug_dump_do_dump - Dump data from FW to sysctl output
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Sysctl handler for the debug.dump.dump sysctl. Prints out a specially-
+ * formatted dump of some debug FW data intended to be processed by a special
+ * Intel tool. Prints out the cluster data specified by the "clusters"
+ * sysctl.
+ *
+ * @remark The actual AQ calls and printing are handled by a helper
+ * function above.
+ */
+static int
+ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ device_t dev = sc->dev;
+ struct sbuf *sbuf;
+ int bit, ret;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ ret = priv_check(curthread, PRIV_DRIVER);
+ if (ret)
+ return (ret);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ /* If the user hasn't written "1" to this sysctl yet: */
+ if (!ice_test_state(&sc->state, ICE_STATE_DO_FW_DEBUG_DUMP)) {
+ /* Avoid output on the first set of reads to this sysctl in
+ * order to prevent a null byte from being written to the
+ * end result when called via sysctl(8).
+ */
+ if (req->oldptr == NULL && req->newptr == NULL) {
+ ret = SYSCTL_OUT(req, 0, 0);
+ return (ret);
+ }
+
+ char input_buf[2] = "";
+ ret = sysctl_handle_string(oidp, input_buf, sizeof(input_buf), req);
+ if ((ret) || (req->newptr == NULL))
+ return (ret);
+
+ /* If we get '1', then indicate we'll do a dump in the next
+ * sysctl read call.
+ */
+ if (input_buf[0] == '1') {
+ ice_set_state(&sc->state, ICE_STATE_DO_FW_DEBUG_DUMP);
+ return (0);
+ }
+
+ return (EINVAL);
+ }
+
+ /* --- FW debug dump state is set --- */
+
+ if (!sc->fw_debug_dump_cluster_mask) {
+ device_printf(dev,
+ "%s: Debug Dump failed because no cluster was specified.\n",
+ __func__);
+ ret = EINVAL;
+ goto out;
+ }
+
+ /* Caller just wants the upper bound for size */
+ if (req->oldptr == NULL && req->newptr == NULL) {
+ size_t est_output_len = ICE_FW_DUMP_BASE_TEXT_SIZE;
+ if (sc->fw_debug_dump_cluster_mask & 0x1)
+ est_output_len += ICE_FW_DUMP_CLUST0_TEXT_SIZE;
+ if (sc->fw_debug_dump_cluster_mask & 0x2)
+ est_output_len += ICE_FW_DUMP_CLUST1_TEXT_SIZE;
+ if (sc->fw_debug_dump_cluster_mask & 0x4)
+ est_output_len += ICE_FW_DUMP_CLUST2_TEXT_SIZE;
+
+ ret = SYSCTL_OUT(req, 0, est_output_len);
+ return (ret);
+ }
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ sbuf_clear_flags(sbuf, SBUF_INCLUDENUL);
+
+ ice_debug(&sc->hw, ICE_DBG_DIAG, "%s: Debug Dump running...\n", __func__);
+
+ for_each_set_bit(bit, &sc->fw_debug_dump_cluster_mask,
+ sizeof(sc->fw_debug_dump_cluster_mask) * 8)
+ ice_fw_debug_dump_print_cluster(sc, sbuf, bit);
+
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+out:
+ ice_clear_state(&sc->state, ICE_STATE_DO_FW_DEBUG_DUMP);
+ return (ret);
+}
+
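In practice the dump is a two-step handshake from userspace: select the
clusters, then arm and read the dump as binary. A plausible invocation,
assuming unit 0 and the debug.dump nodes registered below, is
"sysctl dev.ice.0.debug.dump.clusters=0x1" followed by
"sysctl -b dev.ice.0.debug.dump.dump=1 > fwdump.bin".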
/**
* ice_add_debug_sysctls - Add sysctls helpful for debugging the device driver
* @sc: device private structure
@@ -5779,8 +6320,8 @@ ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS)
static void
ice_add_debug_sysctls(struct ice_softc *sc)
{
- struct sysctl_oid *sw_node;
- struct sysctl_oid_list *debug_list, *sw_list;
+ struct sysctl_oid *sw_node, *dump_node;
+ struct sysctl_oid_list *debug_list, *sw_list, *dump_list;
device_t dev = sc->dev;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
@@ -5929,6 +6470,20 @@ ice_add_debug_sysctls(struct ice_softc *sc)
ice_sysctl_dump_ethertype_mac_filters, "A",
"Ethertype/MAC Filters");
+ dump_node = SYSCTL_ADD_NODE(ctx, debug_list, OID_AUTO, "dump",
+ ICE_CTLFLAG_DEBUG | CTLFLAG_RD, NULL,
+ "Internal FW Dump");
+ dump_list = SYSCTL_CHILDREN(dump_node);
+
+ SYSCTL_ADD_PROC(ctx, dump_list, OID_AUTO, "clusters",
+ ICE_CTLFLAG_DEBUG | CTLTYPE_U16 | CTLFLAG_RW, sc, 0,
+ ice_sysctl_fw_debug_dump_cluster_setting, "SU",
+ ICE_SYSCTL_HELP_FW_DEBUG_DUMP_CLUSTER_SETTING);
+
+ SYSCTL_ADD_PROC(ctx, dump_list, OID_AUTO, "dump",
+ ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
+ ice_sysctl_fw_debug_dump_do_dump, "",
+ ICE_SYSCTL_HELP_FW_DEBUG_DUMP_DO_DUMP);
}
/**
@@ -5972,21 +6527,26 @@ ice_vsi_disable_tx(struct ice_vsi *vsi)
}
ice_for_each_traffic_class(tc) {
+ struct ice_tc_info *tc_info = &vsi->tc_info[tc];
+ u16 start_idx, end_idx;
+
+		/* Stop at the first disabled TC; enabled TCs are
+		 * assigned contiguously, so no later TC is enabled
+		 * either */
+ if (!(vsi->tc_map & BIT(tc)))
+ break;
+
+ /* Fill out TX queue information for this TC */
+ start_idx = tc_info->qoffset;
+ end_idx = start_idx + tc_info->qcount_tx;
buf_idx = 0;
- for (j = 0; j < vsi->num_tx_queues; j++) {
+ for (j = start_idx; j < end_idx; j++) {
struct ice_tx_queue *txq = &vsi->tx_queues[j];
- if (txq->tc != tc)
- continue;
-
q_ids[buf_idx] = vsi->tx_qmap[j];
q_handles[buf_idx] = txq->q_handle;
q_teids[buf_idx] = txq->q_teid;
buf_idx++;
}
- /* Skip TC if no queues belong to it */
- if (buf_idx == 0)
- continue;
status = ice_dis_vsi_txq(hw->port_info, vsi->idx, tc, buf_idx,
q_handles, q_ids, q_teids, ICE_NO_RESET, 0, NULL);
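With the queue map now contiguous per TC, each pass tears down the range
[qoffset, qoffset + qcount_tx); illustratively, 16 Tx queues across three
TCs would be disabled as TC 0: queues 0-5, TC 1: queues 6-10, and
TC 2: queues 11-15, the remainder queue landing in TC 0 per this
update's queue-assignment change.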
@@ -6005,9 +6565,9 @@ ice_vsi_disable_tx(struct ice_vsi *vsi)
}
/* Clear buffers */
- memset(q_teids, 0, q_teids_size);
- memset(q_ids, 0, q_ids_size);
- memset(q_handles, 0, q_handles_size);
+ memset(q_teids, 0, q_teids_size);
+ memset(q_ids, 0, q_ids_size);
+ memset(q_handles, 0, q_handles_size);
}
/* free_q_handles: */
@@ -6463,15 +7023,15 @@ ice_config_rss(struct ice_vsi *vsi)
* @pkg_status: the status result of ice_copy_and_init_pkg
*
* Called by ice_load_pkg after an attempt to download the DDP package
- * contents to the device. Determines whether the download was successful or
- * not and logs an appropriate message for the system administrator.
+ * contents to the device, in order to log an appropriate message for
+ * the system administrator about the download status.
*
- * @post if a DDP package was previously downloaded on another port and it
- * is not compatible with this driver, pkg_status will be updated to reflect
- * this, and the driver will transition to safe mode.
+ * @post The ice_is_init_pkg_successful function is used to determine
+ * whether the download was successful and the DDP package is compatible
+ * with this driver. Otherwise, the driver will transition to Safe Mode.
*/
void
-ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status)
+ice_log_pkg_init(struct ice_softc *sc, enum ice_ddp_state pkg_status)
{
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
@@ -6485,60 +7045,37 @@ ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status)
ice_os_pkg_version_str(hw, os_pkg);
sbuf_finish(os_pkg);
- switch (*pkg_status) {
- case ICE_SUCCESS:
- /* The package download AdminQ command returned success because
- * this download succeeded or ICE_ERR_AQ_NO_WORK since there is
- * already a package loaded on the device.
- */
- if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
- hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
- hw->pkg_ver.update == hw->active_pkg_ver.update &&
- hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
- !memcmp(hw->pkg_name, hw->active_pkg_name,
- sizeof(hw->pkg_name))) {
- switch (hw->pkg_dwnld_status) {
- case ICE_AQ_RC_OK:
- device_printf(dev,
- "The DDP package was successfully loaded: %s.\n",
- sbuf_data(active_pkg));
- break;
- case ICE_AQ_RC_EEXIST:
- device_printf(dev,
- "DDP package already present on device: %s.\n",
- sbuf_data(active_pkg));
- break;
- default:
- /* We do not expect this to occur, but the
- * extra messaging is here in case something
- * changes in the ice_init_pkg flow.
- */
- device_printf(dev,
- "DDP package already present on device: %s. An unexpected error occurred, pkg_dwnld_status %s.\n",
- sbuf_data(active_pkg),
- ice_aq_str(hw->pkg_dwnld_status));
- break;
- }
- } else if (pkg_ver_compatible(&hw->active_pkg_ver) == 0) {
- device_printf(dev,
- "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package %s. The ice_ddp module has package: %s.\n",
- sbuf_data(active_pkg),
- sbuf_data(os_pkg));
- } else if (pkg_ver_compatible(&hw->active_pkg_ver) > 0) {
- device_printf(dev,
- "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
- sbuf_data(active_pkg),
- ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
- *pkg_status = ICE_ERR_NOT_SUPPORTED;
- } else {
- device_printf(dev,
- "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
- sbuf_data(active_pkg),
- ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
- *pkg_status = ICE_ERR_NOT_SUPPORTED;
- }
+ switch (pkg_status) {
+ case ICE_DDP_PKG_SUCCESS:
+ device_printf(dev,
+ "The DDP package was successfully loaded: %s.\n",
+ sbuf_data(active_pkg));
break;
- case ICE_ERR_NOT_SUPPORTED:
+ case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
+ case ICE_DDP_PKG_ALREADY_LOADED:
+ device_printf(dev,
+ "DDP package already present on device: %s.\n",
+ sbuf_data(active_pkg));
+ break;
+ case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
+ device_printf(dev,
+ "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package %s. The ice_ddp module has package: %s.\n",
+ sbuf_data(active_pkg),
+ sbuf_data(os_pkg));
+ break;
+ case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
+ device_printf(dev,
+ "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
+ sbuf_data(active_pkg),
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ break;
+ case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
+ device_printf(dev,
+ "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
+ sbuf_data(active_pkg),
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ break;
+ case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
/*
* This assumes that the active_pkg_ver will not be
* initialized if the ice_ddp package version is not
@@ -6558,9 +7095,7 @@ ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status)
ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
} else {
device_printf(dev,
- "An unknown error (%s aq_err %s) occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
- ice_status_str(*pkg_status),
- ice_aq_str(hw->pkg_dwnld_status),
+ "An unknown error occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
sbuf_data(os_pkg),
sbuf_data(active_pkg),
ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
@@ -6578,54 +7113,41 @@ ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status)
ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
} else {
device_printf(dev,
- "An unknown error (%s aq_err %s) occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
- ice_status_str(*pkg_status),
- ice_aq_str(hw->pkg_dwnld_status),
+ "An unknown error occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
sbuf_data(os_pkg),
sbuf_data(active_pkg),
ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
}
}
break;
- case ICE_ERR_CFG:
- case ICE_ERR_BUF_TOO_SHORT:
- case ICE_ERR_PARAM:
+ case ICE_DDP_PKG_INVALID_FILE:
device_printf(dev,
"The DDP package in the ice_ddp module is invalid. Entering Safe Mode\n");
break;
- case ICE_ERR_FW_DDP_MISMATCH:
+ case ICE_DDP_PKG_FW_MISMATCH:
device_printf(dev,
"The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
break;
- case ICE_ERR_AQ_ERROR:
- switch (hw->pkg_dwnld_status) {
- case ICE_AQ_RC_ENOSEC:
- case ICE_AQ_RC_EBADSIG:
- device_printf(dev,
- "The DDP package in the ice_ddp module cannot be loaded because its signature is not valid. Please use a valid ice_ddp module. Entering Safe Mode.\n");
- goto free_sbufs;
- case ICE_AQ_RC_ESVN:
- device_printf(dev,
- "The DDP package in the ice_ddp module could not be loaded because its security revision is too low. Please use an updated ice_ddp module. Entering Safe Mode.\n");
- goto free_sbufs;
- case ICE_AQ_RC_EBADMAN:
- case ICE_AQ_RC_EBADBUF:
- device_printf(dev,
- "An error occurred on the device while loading the DDP package. Entering Safe Mode.\n");
- goto free_sbufs;
- default:
- break;
- }
- /* fall-through */
+ case ICE_DDP_PKG_NO_SEC_MANIFEST:
+ case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
+ device_printf(dev,
+ "The DDP package in the ice_ddp module cannot be loaded because its signature is not valid. Please use a valid ice_ddp module. Entering Safe Mode.\n");
+ break;
+ case ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW:
+ device_printf(dev,
+ "The DDP package in the ice_ddp module could not be loaded because its security revision is too low. Please use an updated ice_ddp module. Entering Safe Mode.\n");
+ break;
+ case ICE_DDP_PKG_MANIFEST_INVALID:
+ case ICE_DDP_PKG_BUFFER_INVALID:
+ device_printf(dev,
+ "An error occurred on the device while loading the DDP package. Entering Safe Mode.\n");
+ break;
default:
device_printf(dev,
- "An unknown error (%s aq_err %s) occurred when loading the DDP package. Entering Safe Mode.\n",
- ice_status_str(*pkg_status),
- ice_aq_str(hw->pkg_dwnld_status));
+ "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
break;
}
-free_sbufs:
sbuf_delete(active_pkg);
sbuf_delete(os_pkg);
}
@@ -6643,39 +7165,71 @@ free_sbufs:
* ice_deinit_hw(). This allows the firmware reference to be immediately
* released using firmware_put.
*/
-void
+enum ice_status
ice_load_pkg_file(struct ice_softc *sc)
{
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ enum ice_ddp_state state;
const struct firmware *pkg;
+ enum ice_status status = ICE_SUCCESS;
+ u8 cached_layer_count;
+ u8 *buf_copy;
pkg = firmware_get("ice_ddp");
if (!pkg) {
- device_printf(dev, "The DDP package module (ice_ddp) failed to load or could not be found. Entering Safe Mode.\n");
+ device_printf(dev,
+ "The DDP package module (ice_ddp) failed to load or could not be found. Entering Safe Mode.\n");
if (cold)
device_printf(dev,
- "The DDP package module cannot be automatically loaded while booting. You may want to specify ice_ddp_load=\"YES\" in your loader.conf\n");
- ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap);
- ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en);
- return;
+ "The DDP package module cannot be automatically loaded while booting. You may want to specify ice_ddp_load=\"YES\" in your loader.conf\n");
+ status = ICE_ERR_CFG;
+ goto err_load_pkg;
+ }
+
+ /* Check for topology change */
+ if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_TX_BALANCE)) {
+ cached_layer_count = hw->num_tx_sched_layers;
+ buf_copy = (u8 *)malloc(pkg->datasize, M_ICE, M_NOWAIT);
+ if (buf_copy == NULL)
+ return ICE_ERR_NO_MEMORY;
+ memcpy(buf_copy, pkg->data, pkg->datasize);
+ status = ice_cfg_tx_topo(&sc->hw, buf_copy, pkg->datasize);
+ free(buf_copy, M_ICE);
+ /* Success indicates a change was made */
+ if (status == ICE_SUCCESS) {
+		/* A 9 -> 5 layer change enables Tx balancing; 5 -> 9
+		 * disables it */
+ if (cached_layer_count == 9)
+ device_printf(dev,
+ "Transmit balancing feature enabled\n");
+ else
+ device_printf(dev,
+ "Transmit balancing feature disabled\n");
+ ice_set_bit(ICE_FEATURE_TX_BALANCE, sc->feat_en);
+ return (status);
+ }
}
/* Copy and download the pkg contents */
- status = ice_copy_and_init_pkg(hw, (const u8 *)pkg->data, pkg->datasize);
+ state = ice_copy_and_init_pkg(hw, (const u8 *)pkg->data, pkg->datasize);
/* Release the firmware reference */
firmware_put(pkg, FIRMWARE_UNLOAD);
/* Check the active DDP package version and log a message */
- ice_log_pkg_init(sc, &status);
+ ice_log_pkg_init(sc, state);
/* Place the driver into safe mode */
- if (status != ICE_SUCCESS) {
- ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap);
- ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en);
- }
+ if (ice_is_init_pkg_successful(state))
+ return (ICE_ERR_ALREADY_EXISTS);
+
+err_load_pkg:
+ ice_zero_bitmap(sc->feat_cap, ICE_FEATURE_COUNT);
+ ice_zero_bitmap(sc->feat_en, ICE_FEATURE_COUNT);
+ ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap);
+ ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en);
+
+ return (status);
}
/**
@@ -7333,37 +7887,30 @@ ice_handle_mdd_event(struct ice_softc *sc)
}
/**
- * ice_init_dcb_setup - Initialize DCB settings for HW
+ * ice_start_dcbx_agent - Start DCBX agent in FW via AQ command
* @sc: the device softc
*
- * This needs to be called after the fw_lldp_agent sysctl is added, since that
- * can update the device's LLDP agent status if a tunable value is set.
+ * @pre device is DCB capable and the FW LLDP agent has started
*
- * Get and store the initial state of DCB settings on driver load. Print out
- * informational messages as well.
+ * Checks the DCBX status and, if the agent is not in a valid state,
+ * starts it via an AQ command.
*/
-void
-ice_init_dcb_setup(struct ice_softc *sc)
+static void
+ice_start_dcbx_agent(struct ice_softc *sc)
{
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
bool dcbx_agent_status;
enum ice_status status;
- /* Don't do anything if DCB isn't supported */
- if (!hw->func_caps.common_cap.dcb) {
- device_printf(dev, "%s: No DCB support\n",
- __func__);
- return;
- }
-
hw->port_info->qos_cfg.dcbx_status = ice_get_dcbx_status(hw);
+
if (hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_DONE &&
hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
/*
* Start DCBX agent, but not LLDP. The return value isn't
* checked here because a more detailed dcbx agent status is
- * retrieved and checked in ice_init_dcb() and below.
+ * retrieved and checked in ice_init_dcb() and elsewhere.
*/
status = ice_aq_start_stop_dcbx(hw, true, &dcbx_agent_status, NULL);
if (status && hw->adminq.sq_last_status != ICE_AQ_RC_EPERM)
@@ -7372,6 +7919,35 @@ ice_init_dcb_setup(struct ice_softc *sc)
ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
}
+}
+
+/**
+ * ice_init_dcb_setup - Initialize DCB settings for HW
+ * @sc: the device softc
+ *
+ * This needs to be called after the fw_lldp_agent sysctl is added, since that
+ * can update the device's LLDP agent status if a tunable value is set.
+ *
+ * Get and store the initial state of DCB settings on driver load. Print out
+ * informational messages as well.
+ */
+void
+ice_init_dcb_setup(struct ice_softc *sc)
+{
+ struct ice_dcbx_cfg *local_dcbx_cfg;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ u8 pfcmode_ret;
+
+ /* Don't do anything if DCB isn't supported */
+ if (!ice_is_bit_set(sc->feat_cap, ICE_FEATURE_DCB)) {
+ device_printf(dev, "%s: No DCB support\n", __func__);
+ return;
+ }
+
+ /* Starts DCBX agent if it needs starting */
+ ice_start_dcbx_agent(sc);
/* This sets hw->port_info->qos_cfg.is_sw_lldp */
status = ice_init_dcb(hw, true);
@@ -7410,6 +7986,31 @@ ice_init_dcb_setup(struct ice_softc *sc)
ice_add_rx_lldp_filter(sc);
device_printf(dev, "Firmware LLDP agent disabled\n");
}
+
+ /* Query and cache PFC mode */
+ status = ice_aq_query_pfc_mode(hw, &pfcmode_ret, NULL);
+ if (status) {
+ device_printf(dev, "PFC mode query failed, err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ }
+ local_dcbx_cfg = &hw->port_info->qos_cfg.local_dcbx_cfg;
+ switch (pfcmode_ret) {
+ case ICE_AQC_PFC_VLAN_BASED_PFC:
+ local_dcbx_cfg->pfc_mode = ICE_QOS_MODE_VLAN;
+ break;
+ case ICE_AQC_PFC_DSCP_BASED_PFC:
+ local_dcbx_cfg->pfc_mode = ICE_QOS_MODE_DSCP;
+ break;
+ default:
+ /* DCB is disabled, but we shouldn't get here */
+ break;
+ }
+
+ /* Set default SW MIB for init */
+ ice_set_default_local_mib_settings(sc);
+
+ ice_set_bit(ICE_FEATURE_DCB, sc->feat_en);
}
/**
@@ -7419,7 +8020,7 @@ ice_init_dcb_setup(struct ice_softc *sc)
* Scans a TC mapping table inside dcbcfg to find traffic classes
* enabled and @returns a bitmask of enabled TCs
*/
-static u8
+u8
ice_dcb_get_tc_map(const struct ice_dcbx_cfg *dcbcfg)
{
u8 tc_map = 0;
@@ -7434,6 +8035,10 @@ ice_dcb_get_tc_map(const struct ice_dcbx_cfg *dcbcfg)
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++)
tc_map |= BIT(dcbcfg->etscfg.prio_table[i]);
break;
+ case ICE_QOS_MODE_DSCP:
+ for (i = 0; i < ICE_DSCP_NUM_VAL; i++)
+ tc_map |= BIT(dcbcfg->dscp_map[i]);
+ break;
default:
/* Invalid Mode */
tc_map = ICE_DFLT_TRAFFIC_CLASS;
@@ -7444,32 +8049,22 @@ ice_dcb_get_tc_map(const struct ice_dcbx_cfg *dcbcfg)
}
/**
- * ice_dcb_num_tc - Count the number of TCs in a bitmap
- * @tc_map: bitmap of enabled traffic classes
+ * ice_dcb_get_num_tc - Get the number of TCs from DCBX config
+ * @dcbcfg: config to retrieve number of TCs from
*
- * @return the number of traffic classes in
- * an 8-bit TC bitmap, or 0 if they are noncontiguous
+ * @return number of contiguous TCs found in dcbcfg's ETS Configuration
+ * Priority Assignment Table, a value from 1 to 8. If there are
+ * non-contiguous TCs used (e.g. assigning 1 and 3 without using 2),
+ * then returns 0.
*/
static u8
-ice_dcb_num_tc(u8 tc_map)
+ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
{
- bool tc_unused = false;
- u8 ret = 0;
- int i = 0;
+ u8 tc_map;
- ice_for_each_traffic_class(i) {
- if (tc_map & BIT(i)) {
- if (!tc_unused) {
- ret++;
- } else {
- /* Non-contiguous TCs detected */
- return (0);
- }
- } else
- tc_unused = true;
- }
+ tc_map = ice_dcb_get_tc_map(dcbcfg);
- return (ret);
+ return (ice_dcb_tc_contig(tc_map));
}
/**
@@ -7541,6 +8136,13 @@ ice_dcb_needs_reconfig(struct ice_softc *sc, struct ice_dcbx_cfg *old_cfg,
struct ice_hw *hw = &sc->hw;
bool needs_reconfig = false;
+ /* No change detected in DCBX config */
+ if (!memcmp(old_cfg, new_cfg, sizeof(*old_cfg))) {
+ ice_debug(hw, ICE_DBG_DCB,
+ "No change detected in local DCBX configuration\n");
+ return (false);
+ }
+
/* Check if ETS config has changed */
if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg,
sizeof(new_cfg->etscfg))) {
@@ -7555,21 +8157,29 @@ ice_dcb_needs_reconfig(struct ice_softc *sc, struct ice_dcbx_cfg *old_cfg,
/* These are just informational */
if (memcmp(&new_cfg->etscfg.tcbwtable,
&old_cfg->etscfg.tcbwtable,
- sizeof(new_cfg->etscfg.tcbwtable)))
+ sizeof(new_cfg->etscfg.tcbwtable))) {
ice_debug(hw, ICE_DBG_DCB, "ETS TCBW table changed\n");
+ needs_reconfig = true;
+ }
if (memcmp(&new_cfg->etscfg.tsatable,
&old_cfg->etscfg.tsatable,
- sizeof(new_cfg->etscfg.tsatable)))
+ sizeof(new_cfg->etscfg.tsatable))) {
ice_debug(hw, ICE_DBG_DCB, "ETS TSA table changed\n");
+ needs_reconfig = true;
+ }
}
/* Check if PFC config has changed */
if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
- needs_reconfig = true;
ice_debug(hw, ICE_DBG_DCB, "PFC config changed\n");
+ needs_reconfig = true;
}
+ /* Check if APP table has changed */
+ if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app)))
+ ice_debug(hw, ICE_DBG_DCB, "APP Table changed\n");
+
ice_debug(hw, ICE_DBG_DCB, "%s result: %d\n", __func__, needs_reconfig);
return (needs_reconfig);
@@ -7604,8 +8214,9 @@ ice_stop_pf_vsi(struct ice_softc *sc)
static void
ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
+ u16 qcounts[ICE_MAX_TRAFFIC_CLASS] = {};
u16 offset = 0, qmap = 0, pow = 0;
- u16 num_txq_per_tc, num_rxq_per_tc, qcount_rx;
+ u16 num_q_per_tc, qcount_rx, rem_queues;
int i, j, k;
if (vsi->num_tcs == 0) {
@@ -7615,15 +8226,20 @@ ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
}
qcount_rx = vsi->num_rx_queues;
- num_rxq_per_tc = min(qcount_rx / vsi->num_tcs, ICE_MAX_RXQS_PER_TC);
- if (!num_rxq_per_tc)
- num_rxq_per_tc = 1;
+ num_q_per_tc = min(qcount_rx / vsi->num_tcs, ICE_MAX_RXQS_PER_TC);
- /* Have TX queue count match RX queue count */
- num_txq_per_tc = num_rxq_per_tc;
+ if (!num_q_per_tc)
+ num_q_per_tc = 1;
- /* find the (rounded up) power-of-2 of qcount */
- pow = flsl(num_rxq_per_tc - 1);
+ /* Set initial values for # of queues to use for each active TC */
+ ice_for_each_traffic_class(i)
+ if (i < vsi->num_tcs)
+ qcounts[i] = num_q_per_tc;
+
+ /* If any queues are unassigned, add them to TC 0 */
+ rem_queues = qcount_rx % vsi->num_tcs;
+ if (rem_queues > 0)
+ qcounts[0] += rem_queues;
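/*
 * Worked example of the split above (illustrative values): with
 * qcount_rx = 16 Rx queues and vsi->num_tcs = 3, num_q_per_tc =
 * min(16 / 3, ICE_MAX_RXQS_PER_TC) = 5 and the remainder
 * 16 % 3 = 1 is added to TC 0, giving qcounts = { 6, 5, 5 }.
 */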
/* TC mapping is a function of the number of Rx queues assigned to the
* VSI for each traffic class and the offset of these queues.
@@ -7649,8 +8265,11 @@ ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
/* TC is enabled */
vsi->tc_info[i].qoffset = offset;
- vsi->tc_info[i].qcount_rx = num_rxq_per_tc;
- vsi->tc_info[i].qcount_tx = num_txq_per_tc;
+ vsi->tc_info[i].qcount_rx = qcounts[i];
+ vsi->tc_info[i].qcount_tx = qcounts[i];
+
+ /* find the (rounded up) log-2 of queue count for current TC */
+ pow = fls(qcounts[i] - 1);
qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
ICE_AQ_VSI_TC_Q_OFFSET_M) |
@@ -7659,14 +8278,14 @@ ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
ctxt->info.tc_mapping[i] = CPU_TO_LE16(qmap);
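/*
 * Sketch of the resulting encoding (assumed values): for offset = 6 and
 * qcounts[i] = 5, pow = fls(4) = 3, so this TC owns a power-of-2 region
 * of 2^3 = 8 queue slots starting at queue 6, of which 5 are active;
 * qmap packs the offset and pow into their respective bit fields.
 */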
/* Store traffic class and handle data in queue structures */
- for (j = offset, k = 0; j < offset + num_txq_per_tc; j++, k++) {
+ for (j = offset, k = 0; j < offset + qcounts[i]; j++, k++) {
vsi->tx_queues[j].q_handle = k;
vsi->tx_queues[j].tc = i;
- }
- for (j = offset; j < offset + num_rxq_per_tc; j++)
+
vsi->rx_queues[j].tc = i;
+ }
- offset += num_rxq_per_tc;
+ offset += qcounts[i];
}
/* Rx queue mapping */
@@ -7729,6 +8348,13 @@ ice_pf_vsi_cfg_tc(struct ice_softc *sc, u8 tc_map)
for (i = 0; i < num_tcs; i++)
max_txqs[i] = vsi->tc_info[i].qcount_tx;
+ if (hw->debug_mask & ICE_DBG_DCB) {
+ device_printf(dev, "%s: max_txqs:", __func__);
+ ice_for_each_traffic_class(i)
+ printf(" %d", max_txqs[i]);
+ printf("\n");
+ }
+
/* Update LAN Tx queue info in firmware */
status = ice_cfg_vsi_lan(hw->port_info, vsi->idx, vsi->tc_map,
max_txqs);
@@ -7746,6 +8372,35 @@ ice_pf_vsi_cfg_tc(struct ice_softc *sc, u8 tc_map)
}
/**
+ * ice_dcb_tc_contig - Count contiguous TCs in a bitmap
+ * @tc_map: bitmap of enabled traffic classes
+ *
+ * @return the number of contiguous traffic classes set in an 8-bit TC
+ * bitmap, or 0 if a gap is found in the bitmap.
+ */
+static u8
+ice_dcb_tc_contig(u8 tc_map)
+{
+ bool tc_unused = false;
+ u8 ret = 0;
+
+ /* Scan bitmask for contiguous TCs starting with TC0 */
+ for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ if (tc_map & BIT(i)) {
+ if (!tc_unused) {
+ ret++;
+ } else {
+ /* Non-contiguous TCs detected */
+ return (0);
+ }
+ } else
+ tc_unused = true;
+ }
+
+ return (ret);
+}
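/*
 * Illustrative results (not driver code): contiguous maps count up,
 * while any gap collapses the result to 0.
 *
 *   ice_dcb_tc_contig(0x01) == 1  - TC0 only
 *   ice_dcb_tc_contig(0x07) == 3  - TC0..TC2
 *   ice_dcb_tc_contig(0x05) == 0  - TC0 and TC2, gap at TC1
 */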
+
+/**
* ice_dcb_recfg - Reconfigure VSI with new DCB settings
* @sc: the device private softc
*
@@ -7768,7 +8423,7 @@ ice_dcb_recfg(struct ice_softc *sc)
* the default TC instead. There's no support for
* non-contiguous TCs being used.
*/
- if (ice_dcb_num_tc(tc_map) == 0) {
+ if (ice_dcb_tc_contig(tc_map) == 0) {
tc_map = ICE_DFLT_TRAFFIC_CLASS;
ice_set_default_local_lldp_mib(sc);
}
@@ -7783,8 +8438,57 @@ ice_dcb_recfg(struct ice_softc *sc)
}
/**
+ * ice_set_default_local_mib_settings - Set Local LLDP MIB to default settings
+ * @sc: device softc structure
+ *
+ * Overwrites the driver's SW local LLDP MIB with default settings. This
+ * ensures the driver has a valid MIB when it next uses the Set Local LLDP MIB
+ * admin queue command.
+ */
+static void
+ice_set_default_local_mib_settings(struct ice_softc *sc)
+{
+ struct ice_dcbx_cfg *dcbcfg;
+ struct ice_hw *hw = &sc->hw;
+ struct ice_port_info *pi;
+ u8 maxtcs, maxtcs_ets, old_pfc_mode;
+
+ pi = hw->port_info;
+
+ dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
+
+ maxtcs = hw->func_caps.common_cap.maxtc;
+ /* This value is only 3 bits wide; a count of 8 TCs maps to 0 */
+ maxtcs_ets = maxtcs & ICE_IEEE_ETS_MAXTC_M;
+
+ /* VLAN vs DSCP mode needs to be preserved */
+ old_pfc_mode = dcbcfg->pfc_mode;
+
+ /**
+ * Setup the default settings used by the driver for the Set Local
+ * LLDP MIB Admin Queue command (0x0A08). (1TC w/ 100% BW, ETS, no
+ * PFC, TSA=2).
+ */
+ memset(dcbcfg, 0, sizeof(*dcbcfg));
+
+ dcbcfg->etscfg.willing = 1;
+ dcbcfg->etscfg.tcbwtable[0] = 100;
+ dcbcfg->etscfg.maxtcs = maxtcs_ets;
+ dcbcfg->etscfg.tsatable[0] = 2;
+
+ dcbcfg->etsrec = dcbcfg->etscfg;
+ dcbcfg->etsrec.willing = 0;
+
+ dcbcfg->pfc.willing = 1;
+ dcbcfg->pfc.pfccap = maxtcs;
+
+ dcbcfg->pfc_mode = old_pfc_mode;
+}
+
+/**
* ice_do_dcb_reconfig - notify RDMA and reconfigure PF LAN VSI
* @sc: the device private softc
+ * @pending_mib: FW has a pending MIB change to execute
*
* @pre Determined that the DCB configuration requires a change
*
@@ -7792,7 +8496,7 @@ ice_dcb_recfg(struct ice_softc *sc)
* found in the hw struct's port_info's local DCBX configuration.
*/
static void
-ice_do_dcb_reconfig(struct ice_softc *sc)
+ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib)
{
struct ice_aqc_port_ets_elem port_ets = { 0 };
struct ice_dcbx_cfg *local_dcbx_cfg;
@@ -7800,16 +8504,31 @@ ice_do_dcb_reconfig(struct ice_softc *sc)
struct ice_port_info *pi;
device_t dev = sc->dev;
enum ice_status status;
- u8 tc_map;
pi = sc->hw.port_info;
local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
ice_rdma_notify_dcb_qos_change(sc);
+ /* If there's a pending MIB, tell the FW to execute the MIB change
+ * now.
+ */
+ if (pending_mib) {
+ status = ice_lldp_execute_pending_mib(hw);
+ if ((status == ICE_ERR_AQ_ERROR) &&
+ (hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)) {
+ device_printf(dev,
+ "Execute Pending LLDP MIB AQ call failed, no pending MIB\n");
+ } else if (status) {
+ device_printf(dev,
+ "Execute Pending LLDP MIB AQ call failed, err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ /* This won't break traffic, but QoS will not work as expected */
+ }
+ }
/* Set state when there's more than one TC */
- tc_map = ice_dcb_get_tc_map(local_dcbx_cfg);
- if (ice_dcb_num_tc(tc_map) > 1) {
+ if (ice_dcb_get_num_tc(local_dcbx_cfg) > 1) {
device_printf(dev, "Multiple traffic classes enabled\n");
ice_set_state(&sc->state, ICE_STATE_MULTIPLE_TCS);
} else {
@@ -7857,7 +8576,7 @@ ice_handle_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *even
struct ice_port_info *pi;
device_t dev = sc->dev;
struct ice_hw *hw = &sc->hw;
- bool needs_reconfig;
+ bool needs_reconfig, mib_is_pending;
enum ice_status status;
u8 mib_type, bridge_type;
@@ -7871,6 +8590,8 @@ ice_handle_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *even
ICE_AQ_LLDP_MIB_TYPE_S;
bridge_type = (params->type & ICE_AQ_LLDP_BRID_TYPE_M) >>
ICE_AQ_LLDP_BRID_TYPE_S;
+ mib_is_pending = (params->state & ICE_AQ_LLDP_MIB_CHANGE_STATE_M) >>
+ ICE_AQ_LLDP_MIB_CHANGE_STATE_S;
/* Ignore if event is not for Nearest Bridge */
if (bridge_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
@@ -7897,32 +8618,32 @@ ice_handle_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *even
tmp_dcbx_cfg = *local_dcbx_cfg;
memset(local_dcbx_cfg, 0, sizeof(*local_dcbx_cfg));
- /* Get updated DCBX data from firmware */
- status = ice_get_dcb_cfg(pi);
- if (status) {
- device_printf(dev,
- "%s: Failed to get Local DCB config; status %s, aq_err %s\n",
- __func__, ice_status_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return;
- }
-
- /* No change detected in DCBX config */
- if (!memcmp(&tmp_dcbx_cfg, local_dcbx_cfg,
- sizeof(tmp_dcbx_cfg))) {
- ice_debug(hw, ICE_DBG_DCB, "No change detected in local DCBX configuration\n");
- return;
+ /* Update the current local_dcbx_cfg with new data */
+ if (mib_is_pending) {
+ ice_get_dcb_cfg_from_mib_change(pi, event);
+ } else {
+ /* Get updated DCBX data from firmware */
+ status = ice_get_dcb_cfg(pi);
+ if (status) {
+ device_printf(dev,
+ "%s: Failed to get Local DCB config; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return;
+ }
}
/* Check to see if DCB needs reconfiguring */
needs_reconfig = ice_dcb_needs_reconfig(sc, &tmp_dcbx_cfg,
local_dcbx_cfg);
- if (!needs_reconfig)
+ if (!needs_reconfig && !mib_is_pending)
return;
- /* Reconfigure */
- ice_do_dcb_reconfig(sc);
+ /* Reconfigure -- this will also notify FW that configuration is done,
+ * if the FW MIB change is only pending instead of executed.
+ */
+ ice_do_dcb_reconfig(sc, mib_is_pending);
}
/**
@@ -8745,6 +9466,12 @@ ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
if (err)
return (err);
+ if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
+ device_printf(dev, "%s: Driver must rebuild data structures after a reset. Operation aborted.\n",
+ __func__);
+ return (EBUSY);
+ }
+
if (ifd_len < sizeof(struct ice_nvm_access_cmd)) {
device_printf(dev, "%s: ifdrv length is too small. Got %zu, but expected %zu\n",
__func__, ifd_len, sizeof(struct ice_nvm_access_cmd));
@@ -9155,7 +9882,7 @@ ice_init_health_events(struct ice_softc *sc)
u8 health_mask;
if ((!ice_is_bit_set(sc->feat_cap, ICE_FEATURE_HEALTH_STATUS)) ||
- (!sc->enable_health_events))
+ (!sc->enable_health_events))
return;
health_mask = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK |
@@ -9349,43 +10076,34 @@ ice_handle_health_status_event(struct ice_softc *sc,
}
/**
- * ice_set_default_local_lldp_mib - Set Local LLDP MIB to default settings
+ * ice_set_default_local_lldp_mib - Possibly apply local LLDP MIB to FW
* @sc: device softc structure
*
- * This function needs to be called after link up; it makes sure the FW
- * has certain PFC/DCB settings. This is intended to workaround a FW behavior
- * where these settings seem to be cleared on link up.
+ * This function needs to be called after link up; it makes sure the FW has
+ * certain PFC/DCB settings. In certain configurations this will re-apply a
+ * default local LLDP MIB configuration; this is intended to work around a
+ * FW behavior where these settings seem to be cleared on link up.
*/
void
ice_set_default_local_lldp_mib(struct ice_softc *sc)
{
- struct ice_dcbx_cfg *dcbcfg;
struct ice_hw *hw = &sc->hw;
struct ice_port_info *pi;
device_t dev = sc->dev;
enum ice_status status;
- u8 maxtcs, maxtcs_ets;
- pi = hw->port_info;
-
- dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
+ /* The Set Local MIB AQ command can disrupt flow control settings
+ * on devices without DCB support.
+ */
+ if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_DCB))
+ return;
- maxtcs = hw->func_caps.common_cap.maxtc;
- /* This value is only 3 bits; 8 TCs maps to 0 */
- maxtcs_ets = maxtcs & ICE_IEEE_ETS_MAXTC_M;
+ pi = hw->port_info;
- /**
- * Setup the default settings used by the driver for the Set Local
- * LLDP MIB Admin Queue command (0x0A08). (1TC w/ 100% BW, ETS, no
- * PFC).
- */
- memset(dcbcfg, 0, sizeof(*dcbcfg));
- dcbcfg->etscfg.willing = 1;
- dcbcfg->etscfg.tcbwtable[0] = 100;
- dcbcfg->etscfg.maxtcs = maxtcs_ets;
- dcbcfg->etsrec = dcbcfg->etscfg;
- dcbcfg->pfc.willing = 1;
- dcbcfg->pfc.pfccap = maxtcs;
+ /* Don't overwrite a custom SW configuration */
+ if (!pi->qos_cfg.is_sw_lldp &&
+ !ice_test_state(&sc->state, ICE_STATE_MULTIPLE_TCS))
+ ice_set_default_local_mib_settings(sc);
status = ice_set_dcb_cfg(pi);
@@ -9488,6 +10206,10 @@ ice_sysctl_dump_dcbx_cfg(SYSCTL_HANDLER_ARGS)
dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE;
else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
dcbcfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
+ else
+ device_printf(dev, "Get CEE DCB Cfg AQ cmd err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
maxtcs = hw->func_caps.common_cap.maxtc;
dcbx_status = ice_get_dcbx_status(hw);
@@ -9518,6 +10240,14 @@ ice_sysctl_dump_dcbx_cfg(SYSCTL_HANDLER_ARGS)
sbuf_printf(sbuf, "pfc.pfcena: 0x%0x\n", dcbcfg->pfc.pfcena);
if (arg2 == ICE_AQ_LLDP_MIB_LOCAL) {
+ sbuf_printf(sbuf, "dscp_map:\n");
+ for (int i = 0; i < 8; i++) {
+ for (int j = 0; j < 8; j++)
+ sbuf_printf(sbuf, " %d",
+ dcbcfg->dscp_map[i * 8 + j]);
+ sbuf_printf(sbuf, "\n");
+ }
+
sbuf_printf(sbuf, "\nLocal registers:\n");
sbuf_printf(sbuf, "PRTDCB_GENC.NUMTC: %d\n",
(rd32(hw, PRTDCB_GENC) & PRTDCB_GENC_NUMTC_M)
@@ -9744,3 +10474,290 @@ ice_sysctl_query_port_ets(SYSCTL_HANDLER_ARGS)
return (0);
}
+
+/**
+ * ice_sysctl_dscp2tc_map - Map DSCP to hardware TCs
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: which eight DSCP to UP mappings to configure (0 - 7)
+ * @req: sysctl request pointer
+ *
+ * Gets or sets the current DSCP-to-UP table cached by the driver. Since
+ * there are 64 possible DSCP values to configure, this sysctl only
+ * configures that space in chunks of eight values at a time.
+ *
+ * This sysctl is only relevant in DSCP mode, and will only function in SW DCB
+ * mode.
+ */
+static int
+ice_sysctl_dscp2tc_map(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_dcbx_cfg *local_dcbx_cfg;
+ struct ice_port_info *pi;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ struct sbuf *sbuf;
+ int ret;
+
+ /* Buffer for DSCP mapping values input by the user */
+ char dscp_user_buf[128] = "";
+ u8 new_dscp_table_seg[ICE_MAX_TRAFFIC_CLASS] = {};
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ if (req->oldptr == NULL && req->newptr == NULL) {
+ ret = SYSCTL_OUT(req, 0, 128);
+ return (ret);
+ }
+
+ pi = hw->port_info;
+ local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
+
+ sbuf = sbuf_new(NULL, dscp_user_buf, 128, SBUF_FIXEDLEN | SBUF_INCLUDENUL);
+
+ /* Format DSCP-to-UP data for output */
+ for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ sbuf_printf(sbuf, "%d", local_dcbx_cfg->dscp_map[arg2 * 8 + i]);
+ if (i != ICE_MAX_TRAFFIC_CLASS - 1)
+ sbuf_printf(sbuf, ",");
+ }
+
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ /* Read in the new DSCP mapping values */
+ ret = sysctl_handle_string(oidp, dscp_user_buf, sizeof(dscp_user_buf), req);
+ if ((ret) || (req->newptr == NULL))
+ return (ret);
+
+ /* Don't allow setting changes in FW DCB mode */
+ if (!hw->port_info->qos_cfg.is_sw_lldp) {
+ device_printf(dev, "%s: DSCP mapping is not allowed in FW DCBX mode\n",
+ __func__);
+ return (EINVAL);
+ }
+
+ /* Convert the eight values in the string to a table; the parsing is
+ * the same as for ETS settings, so the ETS helper is re-used here.
+ */
+ ret = ice_ets_str_to_tbl(dscp_user_buf, new_dscp_table_seg, 8);
+ if (ret) {
+ device_printf(dev, "%s: Could not parse input DSCP2TC table: %s\n",
+ __func__, dscp_user_buf);
+ return (ret);
+ }
+
+ memcpy(&local_dcbx_cfg->dscp_map[arg2 * 8], new_dscp_table_seg,
+ sizeof(new_dscp_table_seg));
+
+ local_dcbx_cfg->app_mode = ICE_DCBX_APPS_NON_WILLING;
+
+ status = ice_set_dcb_cfg(pi);
+ if (status) {
+ device_printf(dev,
+ "%s: Failed to set DCB config; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ ice_do_dcb_reconfig(sc, false);
+
+ return (0);
+}
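/*
 * Example usage (a sketch; the exact sysctl node name is assumed and
 * should be checked against what the driver registers): remap DSCP
 * values 8-15 onto TC 1 by writing the chunk selected by arg2 == 1:
 *
 *   # sysctl dev.ice.0.dscp2tc_map.8-15="1,1,1,1,1,1,1,1"
 */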
+
+/**
+ * ice_handle_debug_dump_ioctl - Handle a debug dump ioctl request
+ * @sc: the device private softc
+ * @ifd: ifdrv ioctl request pointer
+ */
+int
+ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
+{
+ size_t ifd_len = ifd->ifd_len;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ struct ice_debug_dump_cmd *ddc;
+ enum ice_status status;
+ int err = 0;
+
+ /* Returned arguments from the Admin Queue */
+ u16 ret_buf_size = 0;
+ u16 ret_next_table = 0;
+ u32 ret_next_index = 0;
+
+ /*
+ * ifioctl forwards SIOCxDRVSPEC to iflib without performing
+ * a privilege check. In turn, iflib forwards the ioctl to the driver
+ * without performing a privilege check. Perform one here to ensure
+ * that non-privileged threads cannot access this interface.
+ */
+ err = priv_check(curthread, PRIV_DRIVER);
+ if (err)
+ return (err);
+
+ if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
+ device_printf(dev,
+ "%s: Driver must rebuild data structures after a reset. Operation aborted.\n",
+ __func__);
+ return (EBUSY);
+ }
+
+ if (ifd_len < sizeof(*ddc)) {
+ device_printf(dev,
+ "%s: ifdrv length is too small. Got %zu, but expected %zu\n",
+ __func__, ifd_len, sizeof(*ddc));
+ return (EINVAL);
+ }
+
+ if (ifd->ifd_data == NULL) {
+ device_printf(dev, "%s: ifd data buffer not present.\n",
+ __func__);
+ return (EINVAL);
+ }
+
+ ddc = (struct ice_debug_dump_cmd *)malloc(ifd_len, M_ICE, M_ZERO | M_NOWAIT);
+ if (!ddc)
+ return (ENOMEM);
+
+ /* Copy the debug dump command and data in from user space */
+ /* coverity[tainted_data_argument] */
+ err = copyin(ifd->ifd_data, ddc, ifd_len);
+ if (err) {
+ device_printf(dev, "%s: Copying request from user space failed, err %s\n",
+ __func__, ice_err_str(err));
+ goto out;
+ }
+
+ /* The data_size arg must be at least 1 for the AQ cmd to work */
+ if (ddc->data_size == 0) {
+ device_printf(dev,
+ "%s: data_size must be greater than 0\n", __func__);
+ err = EINVAL;
+ goto out;
+ }
+ /* ...and it can't be too long */
+ if (ddc->data_size > (ifd_len - sizeof(*ddc))) {
+ device_printf(dev,
+ "%s: data_size (%d) is larger than ifd_len space (%zu)?\n", __func__,
+ ddc->data_size, ifd_len - sizeof(*ddc));
+ err = EINVAL;
+ goto out;
+ }
+
+ /* Make sure any possible data buffer space is zeroed */
+ memset(ddc->data, 0, ifd_len - sizeof(*ddc));
+
+ status = ice_aq_get_internal_data(hw, ddc->cluster_id, ddc->table_id, ddc->offset,
+ (u8 *)ddc->data, ddc->data_size, &ret_buf_size, &ret_next_table, &ret_next_index, NULL);
+ ice_debug(hw, ICE_DBG_DIAG, "%s: ret_buf_size %d, ret_next_table %d, ret_next_index %d\n",
+ __func__, ret_buf_size, ret_next_table, ret_next_index);
+ if (status) {
+ device_printf(dev,
+ "%s: Get Internal Data AQ command failed, err %s aq_err %s\n",
+ __func__,
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ goto aq_error;
+ }
+
+ ddc->table_id = ret_next_table;
+ ddc->offset = ret_next_index;
+ ddc->data_size = ret_buf_size;
+
+ /* Copy the possibly modified contents of the handled request out */
+ err = copyout(ddc, ifd->ifd_data, ifd->ifd_len);
+ if (err) {
+ device_printf(dev, "%s: Copying response back to user space failed, err %s\n",
+ __func__, ice_err_str(err));
+ goto out;
+ }
+
+aq_error:
+ /* Convert private status to an error code for proper ioctl response */
+ switch (status) {
+ case ICE_SUCCESS:
+ err = (0);
+ break;
+ case ICE_ERR_NO_MEMORY:
+ err = (ENOMEM);
+ break;
+ case ICE_ERR_OUT_OF_RANGE:
+ err = (ENOTTY);
+ break;
+ case ICE_ERR_AQ_ERROR:
+ err = (EIO);
+ break;
+ case ICE_ERR_PARAM:
+ default:
+ err = (EINVAL);
+ break;
+ }
+
+out:
+ free(ddc, M_ICE);
+ return (err);
+}
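/*
 * A hedged userspace sketch (not shipped with the driver) of exercising
 * this handler through the private SIOCGDRVSPEC ioctl; whether the get
 * or set variant applies depends on the driver's ioctl dispatch, and it
 * assumes the struct ice_debug_dump_cmd layout and the ICE_DEBUG_DUMP
 * command number from ice_lib.h are mirrored in the tool. The cluster
 * and table IDs below are placeholders only.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <stdlib.h>
#include <string.h>

static int
request_debug_dump(int sock, const char *ifname)
{
	size_t len = sizeof(struct ice_debug_dump_cmd) + 4096;
	struct ice_debug_dump_cmd *ddc;
	struct ifdrv ifd;
	int err;

	ddc = calloc(1, len);
	if (ddc == NULL)
		return (-1);

	ddc->cluster_id = 0x1;	/* placeholder: must be a valid cluster */
	ddc->table_id = 0;	/* start at the first table */
	ddc->offset = 0;
	ddc->data_size = 4096;	/* room for returned table data */

	memset(&ifd, 0, sizeof(ifd));
	strlcpy(ifd.ifd_name, ifname, sizeof(ifd.ifd_name));
	ifd.ifd_cmd = ICE_DEBUG_DUMP;
	ifd.ifd_len = len;
	ifd.ifd_data = ddc;

	/*
	 * On success the kernel copies the dump into ddc->data and updates
	 * ddc->table_id/ddc->offset with the next table/index to request.
	 */
	err = ioctl(sock, SIOCGDRVSPEC, &ifd);
	free(ddc);
	return (err);
}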
+
+/**
+ * ice_sysctl_allow_no_fec_mod_in_auto - Change Auto FEC behavior
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Allows the user to let "No FEC" mode be used in "Auto" FEC mode
+ * during FEC negotiation. This is only supported on newer firmware
+ * versions.
+ */
+static int
+ice_sysctl_allow_no_fec_mod_in_auto(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ u8 user_flag;
+ int ret;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ ret = priv_check(curthread, PRIV_DRIVER);
+ if (ret)
+ return (ret);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ user_flag = (u8)sc->allow_no_fec_mod_in_auto;
+
+ ret = sysctl_handle_bool(oidp, &user_flag, 0, req);
+ if ((ret) || (req->newptr == NULL))
+ return (ret);
+
+ if (!ice_fw_supports_fec_dis_auto(hw)) {
+ log(LOG_INFO,
+ "%s: Enabling or disabling of auto configuration of modules that don't support FEC is unsupported by the current firmware\n",
+ device_get_nameunit(dev));
+ return (ENODEV);
+ }
+
+ if (user_flag == (bool)sc->allow_no_fec_mod_in_auto)
+ return (0);
+
+ sc->allow_no_fec_mod_in_auto = (u8)user_flag;
+
+ if (sc->allow_no_fec_mod_in_auto)
+ log(LOG_INFO, "%s: Enabled auto configuration of No FEC modules\n",
+ device_get_nameunit(dev));
+ else
+ log(LOG_INFO,
+ "%s: Auto configuration of No FEC modules reset to NVM defaults\n",
+ device_get_nameunit(dev));
+
+ return (0);
+}
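/*
 * Example usage (a sketch; the tunable name is assumed and should be
 * checked against the sysctl this handler is registered under):
 *
 *   # sysctl dev.ice.0.allow_no_fec_modules_in_auto=1
 */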
+
diff --git a/sys/dev/ice/ice_lib.h b/sys/dev/ice/ice_lib.h
index 948f9858d43d..4d875b062a9f 100644
--- a/sys/dev/ice/ice_lib.h
+++ b/sys/dev/ice/ice_lib.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -119,6 +119,9 @@ extern bool ice_enable_tx_lldp_filter;
/* global sysctl indicating whether FW health status events should be enabled */
extern bool ice_enable_health_events;
+/* global sysctl indicating whether to enable 5-layer scheduler topology */
+extern bool ice_tx_balance_en;
+
/**
* @struct ice_bar_info
* @brief PCI BAR mapping information
@@ -203,6 +206,16 @@ struct ice_bar_info {
#define ICE_NVM_ACCESS \
(((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5)
+/**
+ * ICE_DEBUG_DUMP
+ * @brief Private ioctl command number for retrieving debug dump data
+ *
+ * The ioctl command number used by a userspace tool for accessing the driver for
+ * getting debug dump data from the firmware.
+ */
+#define ICE_DEBUG_DUMP \
+ (((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 6)
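/*
 * Worked expansion of the constant above: the ASCII codes of 'E', '1',
 * 'K' and 'G' are shifted together four bits at a time, with the low
 * nibble distinguishing the private commands: 5 for ICE_NVM_ACCESS and
 * 6 for ICE_DEBUG_DUMP, so the two share the same driver-unique prefix.
 */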
+
#define ICE_AQ_LEN 1023
#define ICE_MBXQ_LEN 512
#define ICE_SBQ_LEN 512
@@ -329,6 +342,7 @@ enum ice_rx_dtype {
#define ICE_FEC_STRING_RS "RS-FEC"
#define ICE_FEC_STRING_BASER "FC-FEC/BASE-R"
#define ICE_FEC_STRING_NONE "None"
+#define ICE_FEC_STRING_DIS_AUTO "Auto (w/ No-FEC)"
/* Strings used for displaying Flow Control mode
*
@@ -364,6 +378,12 @@ enum ice_rx_dtype {
ICE_PROMISC_MCAST_TX | \
ICE_PROMISC_MCAST_RX)
+/*
+ * Only certain cluster IDs are valid for the FW debug dump functionality,
+ * so define a mask of those here.
+ */
+#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK 0x1af
+
struct ice_softc;
/**
@@ -549,6 +569,20 @@ struct ice_vsi {
};
/**
+ * @struct ice_debug_dump_cmd
+ * @brief arguments/return value for debug dump ioctl
+ */
+struct ice_debug_dump_cmd {
+ u32 offset; /* offset to read/write from table, in bytes */
+ u16 cluster_id;
+ u16 table_id;
+ u16 data_size; /* size of data field, in bytes */
+ u16 reserved1;
+ u32 reserved2;
+ u8 data[];
+};
+
+/**
* @enum ice_state
* @brief Driver state flags
*
@@ -574,6 +608,7 @@ enum ice_state {
ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING,
ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER,
ICE_STATE_MULTIPLE_TCS,
+ ICE_STATE_DO_FW_DEBUG_DUMP,
/* This entry must be last */
ICE_STATE_LAST,
};
@@ -832,8 +867,8 @@ void ice_add_txq_sysctls(struct ice_tx_queue *txq);
void ice_add_rxq_sysctls(struct ice_rx_queue *rxq);
int ice_config_rss(struct ice_vsi *vsi);
void ice_clean_all_vsi_rss_cfg(struct ice_softc *sc);
-void ice_load_pkg_file(struct ice_softc *sc);
-void ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status);
+enum ice_status ice_load_pkg_file(struct ice_softc *sc);
+void ice_log_pkg_init(struct ice_softc *sc, enum ice_ddp_state pkg_status);
uint64_t ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter);
void ice_save_pci_info(struct ice_hw *hw, device_t dev);
int ice_replay_all_vsi_cfg(struct ice_softc *sc);
@@ -865,5 +900,7 @@ void ice_free_intr_tracking(struct ice_softc *sc);
void ice_set_default_local_lldp_mib(struct ice_softc *sc);
void ice_init_health_events(struct ice_softc *sc);
void ice_cfg_pba_num(struct ice_softc *sc);
+int ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd);
+u8 ice_dcb_get_tc_map(const struct ice_dcbx_cfg *dcbcfg);
#endif /* _ICE_LIB_H_ */
diff --git a/sys/dev/ice/ice_nvm.c b/sys/dev/ice/ice_nvm.c
index d4ec3cbb1bcd..b324e92f180f 100644
--- a/sys/dev/ice/ice_nvm.c
+++ b/sys/dev/ice/ice_nvm.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -406,7 +406,7 @@ ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
status = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
/* Report the number of words successfully read */
- *words = bytes / 2;
+ *words = (u16)(bytes / 2);
/* Byte swap the words up to the amount we actually read */
for (i = 0; i < *words; i++)
@@ -983,7 +983,6 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_orom_civd_info *civd)
{
struct ice_orom_civd_info tmp;
- enum ice_status status;
u32 offset;
/* The CIVD section is located in the Option ROM aligned to 512 bytes.
@@ -992,6 +991,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
* equal 0.
*/
for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) {
+ enum ice_status status;
u8 sum = 0, i;
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR,
@@ -1726,22 +1726,41 @@ enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw)
/**
* ice_nvm_write_activate
* @hw: pointer to the HW struct
- * @cmd_flags: NVM activate admin command bits (banks to be validated)
+ * @cmd_flags: flags for write activate command
+ * @response_flags: response indicators from firmware
*
* Update the control word with the required banks' validity bits
* and dumps the Shadow RAM to flash (0x0707)
+ *
+ * cmd_flags controls which banks to activate, the preservation level to use
+ * when activating the NVM bank, and whether an EMP reset is required for
+ * activation.
+ *
+ * Note that the 16-bit cmd_flags value is split between two separate
+ * one-byte flag fields in the descriptor.
+ *
+ * On successful return of the firmware command, the response_flags variable
+ * is updated with the flags reported by firmware indicating certain status,
+ * such as whether EMP reset is enabled.
*/
-enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
+enum ice_status
+ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{
struct ice_aqc_nvm *cmd;
struct ice_aq_desc desc;
+ enum ice_status status;
cmd = &desc.params.nvm;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
- cmd->cmd_flags = cmd_flags;
+ cmd->cmd_flags = ICE_LO_BYTE(cmd_flags);
+ cmd->offset_high = ICE_HI_BYTE(cmd_flags);
- return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status && response_flags)
+ *response_flags = cmd->cmd_flags;
+
+ return status;
}
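/*
 * Sketch of the split above with an assumed flags value: for
 * cmd_flags = 0x0123, ICE_LO_BYTE() places 0x23 in cmd->cmd_flags and
 * ICE_HI_BYTE() places 0x01 in cmd->offset_high; firmware reassembles
 * the full 16-bit activate flags from the two fields.
 */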
/**
@@ -1847,12 +1866,12 @@ ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
/* Update flash data */
status = ice_aq_update_nvm(hw, ICE_AQC_NVM_MINSREV_MOD_ID, 0, sizeof(data), &data,
- true, ICE_AQC_NVM_SPECIAL_UPDATE, NULL);
+ false, ICE_AQC_NVM_SPECIAL_UPDATE, NULL);
if (status)
goto exit_release_res;
/* Dump the Shadow RAM to the flash */
- status = ice_nvm_write_activate(hw, 0);
+ status = ice_nvm_write_activate(hw, 0, NULL);
exit_release_res:
ice_release_nvm(hw);
diff --git a/sys/dev/ice/ice_nvm.h b/sys/dev/ice/ice_nvm.h
index b53e7d9fe91f..95957c8dd303 100644
--- a/sys/dev/ice/ice_nvm.h
+++ b/sys/dev/ice/ice_nvm.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -155,5 +155,6 @@ enum ice_status ice_update_sr_checksum(struct ice_hw *hw);
enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum);
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw);
-enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags);
+enum ice_status
+ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags);
#endif /* _ICE_NVM_H_ */
diff --git a/sys/dev/ice/ice_opts.h b/sys/dev/ice/ice_opts.h
index ef163db402de..e381d041d598 100644
--- a/sys/dev/ice/ice_opts.h
+++ b/sys/dev/ice/ice_opts.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_osdep.c b/sys/dev/ice/ice_osdep.c
index ef1179614ad0..d294f7491d56 100644
--- a/sys/dev/ice/ice_osdep.c
+++ b/sys/dev/ice/ice_osdep.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_osdep.h b/sys/dev/ice/ice_osdep.h
index f8d33b72ceb6..5336044b85bd 100644
--- a/sys/dev/ice/ice_osdep.h
+++ b/sys/dev/ice/ice_osdep.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_protocol_type.h b/sys/dev/ice/ice_protocol_type.h
index f20efa766113..145861ba7254 100644
--- a/sys/dev/ice/ice_protocol_type.h
+++ b/sys/dev/ice/ice_protocol_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -59,6 +59,7 @@ enum ice_protocol_type {
ICE_MAC_OFOS = 0,
ICE_MAC_IL,
ICE_ETYPE_OL,
+ ICE_ETYPE_IL,
ICE_VLAN_OFOS,
ICE_IPV4_OFOS,
ICE_IPV4_IL,
@@ -73,6 +74,8 @@ enum ice_protocol_type {
ICE_VXLAN_GPE,
ICE_NVGRE,
ICE_GTP,
+ ICE_GTP_NO_PAY,
+ ICE_PPPOE,
ICE_PROTOCOL_LAST
};
@@ -104,6 +107,8 @@ enum ice_sw_tunnel_type {
ICE_SW_TUN_GTP_IPV4_UDP,
ICE_SW_TUN_GTP_IPV6_TCP,
ICE_SW_TUN_GTP_IPV6_UDP,
+ ICE_SW_TUN_GTPU,
+ ICE_SW_TUN_GTPC,
ICE_SW_TUN_IPV4_GTPU_IPV4,
ICE_SW_TUN_IPV4_GTPU_IPV6,
ICE_SW_TUN_IPV6_GTPU_IPV4,
@@ -141,6 +146,7 @@ enum ice_prot_id {
ICE_PROT_IPV6_OF_OR_S = 40,
ICE_PROT_IPV6_IL = 41,
ICE_PROT_IPV6_IL_IL = 42,
+ ICE_PROT_IPV6_NEXT_PROTO = 43,
ICE_PROT_IPV6_FRAG = 47,
ICE_PROT_TCP_IL = 49,
ICE_PROT_UDP_OF = 52,
@@ -165,9 +171,11 @@ enum ice_prot_id {
#define ICE_VNI_OFFSET 12 /* offset of VNI from ICE_PROT_UDP_OF */
+#define ICE_NAN_OFFSET 511
#define ICE_MAC_OFOS_HW 1
#define ICE_MAC_IL_HW 4
#define ICE_ETYPE_OL_HW 9
+#define ICE_ETYPE_IL_HW 10
#define ICE_VLAN_OF_HW 16
#define ICE_VLAN_OL_HW 17
#define ICE_IPV4_OFOS_HW 32
@@ -184,12 +192,15 @@ enum ice_prot_id {
*/
#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
#define ICE_GRE_OF_HW 64 /* NVGRE */
+#define ICE_PPPOE_HW 103
#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */
#define ICE_MDID_SIZE 2
-#define ICE_TUN_FLAG_MDID 21
-#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
+#define ICE_TUN_FLAG_MDID 20
+#define ICE_TUN_FLAG_MDID_OFF(word) \
+ (ICE_MDID_SIZE * (ICE_TUN_FLAG_MDID + (word)))
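/*
 * Worked example of the parameterized form: with ICE_MDID_SIZE = 2 and
 * ICE_TUN_FLAG_MDID = 20, ICE_TUN_FLAG_MDID_OFF(0) = 40 and
 * ICE_TUN_FLAG_MDID_OFF(1) = 42, the byte offsets of consecutive flag
 * words in the metadata region.
 */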
#define ICE_TUN_FLAG_MASK 0xFF
+#define ICE_DIR_FLAG_MASK 0x10
#define ICE_TUN_FLAG_VLAN_MASK 0x01
#define ICE_TUN_FLAG_FV_IND 2
@@ -287,6 +298,13 @@ struct ice_udp_gtp_hdr {
u8 qfi;
u8 rsvrd;
};
+struct ice_pppoe_hdr {
+ u8 rsrvd_ver_type;
+ u8 rsrvd_code;
+ __be16 session_id;
+ __be16 length;
+ __be16 ppp_prot_id; /* control and data only */
+};
struct ice_nvgre {
__be16 flags;
@@ -305,6 +323,7 @@ union ice_prot_hdr {
struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre nvgre_hdr;
struct ice_udp_gtp_hdr gtp_hdr;
+ struct ice_pppoe_hdr pppoe_hdr;
};
/* This is mapping table entry that maps every word within a given protocol
diff --git a/sys/dev/ice/ice_rdma.c b/sys/dev/ice/ice_rdma.c
index 5d89deed0f90..7307eef78aab 100644
--- a/sys/dev/ice/ice_rdma.c
+++ b/sys/dev/ice/ice_rdma.c
@@ -241,9 +241,7 @@ ice_rdma_qset_register_request(struct ice_rdma_peer *peer, struct ice_rdma_qset_
switch(res->res_type) {
case ICE_RDMA_QSET_ALLOC:
dcbx_cfg = &hw->port_info->qos_cfg.local_dcbx_cfg;
- for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
- ena_tc |= BIT(dcbx_cfg->etscfg.prio_table[i]);
- }
+ ena_tc = ice_dcb_get_tc_map(dcbx_cfg);
ice_debug(hw, ICE_DBG_RDMA, "%s:%d ena_tc=%x\n", __func__, __LINE__, ena_tc);
status = ice_cfg_vsi_rdma(hw->port_info, vsi->idx, ena_tc,
@@ -401,6 +399,10 @@ ice_rdma_cp_qos_info(struct ice_hw *hw, struct ice_dcbx_cfg *dcbx_cfg,
qos_info->apps[j].prot_id = dcbx_cfg->app[j].prot_id;
qos_info->apps[j].selector = dcbx_cfg->app[j].selector;
}
+
+ /* Gather DSCP-to-TC mapping and QoS/PFC mode */
+ memcpy(qos_info->dscp_map, dcbx_cfg->dscp_map, sizeof(qos_info->dscp_map));
+ qos_info->pfc_mode = dcbx_cfg->pfc_mode;
}
/**
@@ -481,6 +483,7 @@ int
ice_rdma_register(struct ice_rdma_info *info)
{
struct ice_rdma_entry *entry;
+ struct ice_softc *sc;
int err = 0;
sx_xlock(&ice_rdma.mtx);
@@ -513,6 +516,12 @@ ice_rdma_register(struct ice_rdma_info *info)
*/
LIST_FOREACH(entry, &ice_rdma.peers, node) {
kobj_init((kobj_t)&entry->peer, ice_rdma.peer_class);
+ /* Gather DCB/QOS info into peer */
+ sc = __containerof(entry, struct ice_softc, rdma_entry);
+ memset(&entry->peer.initial_qos_info, 0, sizeof(entry->peer.initial_qos_info));
+ ice_rdma_cp_qos_info(&sc->hw, &sc->hw.port_info->qos_cfg.local_dcbx_cfg,
+ &entry->peer.initial_qos_info);
+
IRDMA_PROBE(&entry->peer);
if (entry->initiated)
IRDMA_OPEN(&entry->peer);
diff --git a/sys/dev/ice/ice_rdma.h b/sys/dev/ice/ice_rdma.h
index 5d3c33dd4e66..6dad4a7fdcff 100644
--- a/sys/dev/ice/ice_rdma.h
+++ b/sys/dev/ice/ice_rdma.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_resmgr.c b/sys/dev/ice/ice_resmgr.c
index 88c4575785fe..a9f79ddf1f8f 100644
--- a/sys/dev/ice/ice_resmgr.c
+++ b/sys/dev/ice/ice_resmgr.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_resmgr.h b/sys/dev/ice/ice_resmgr.h
index 7b2a240864db..591dbe17245c 100644
--- a/sys/dev/ice/ice_resmgr.h
+++ b/sys/dev/ice/ice_resmgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_rss.h b/sys/dev/ice/ice_rss.h
index c8af5311f039..2c0299a1e11d 100644
--- a/sys/dev/ice/ice_rss.h
+++ b/sys/dev/ice/ice_rss.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_sbq_cmd.h b/sys/dev/ice/ice_sbq_cmd.h
index 1004ad9331fa..9b2cca14c92b 100644
--- a/sys/dev/ice/ice_sbq_cmd.h
+++ b/sys/dev/ice/ice_sbq_cmd.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_sched.c b/sys/dev/ice/ice_sched.c
index 5784db89c443..306761ff2cfc 100644
--- a/sys/dev/ice/ice_sched.c
+++ b/sys/dev/ice/ice_sched.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -468,7 +468,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
*
* Move scheduling elements (0x0408)
*/
-static enum ice_status
+enum ice_status
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd)
@@ -910,6 +910,33 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
}
/**
+ * ice_aq_cfg_node_attr - configure nodes' per-cone flattening attributes
+ * @hw: pointer to the HW struct
+ * @num_nodes: the number of nodes whose attributes to configure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @cd: pointer to command details structure or NULL
+ *
+ * Configure Node Attributes (0x0417)
+ */
+enum ice_status
+ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes,
+ struct ice_aqc_node_attr_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_node_attr *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.node_attr;
+ ice_fill_dflt_direct_cmd_desc(&desc,
+ ice_aqc_opc_cfg_node_attr);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ cmd->num_entries = CPU_TO_LE16(num_nodes);
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
* ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping
* @hw: pointer to the HW struct
* @num_l2_nodes: the number of L2 nodes whose CGDs to configure
@@ -1173,12 +1200,11 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
* 5 or less sw_entry_point_layer
*/
/* calculate the VSI layer based on number of layers. */
- if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
- u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
-
- if (layer > hw->sw_entry_point_layer)
- return layer;
- }
+ if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
+ return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
+ else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS)
+ /* qgroup and VSI layers are same */
+ return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
return hw->sw_entry_point_layer;
}
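/*
 * Sketch of the arithmetic, using the layer offsets from ice_sched.h:
 * in the 9-layer topology the VSI layer is 9 - ICE_VSI_LAYER_OFFSET (4)
 * = 5; in the flattened 5-layer topology queues attach directly to the
 * VSI, so the VSI layer equals the qgroup layer at
 * 5 - ICE_QGRP_LAYER_OFFSET (2) = 3.
 */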
@@ -1195,12 +1221,8 @@ static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
* 7 or less sw_entry_point_layer
*/
/* calculate the aggregator layer based on number of layers. */
- if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
- u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
-
- if (layer > hw->sw_entry_point_layer)
- return layer;
- }
+ if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
+ return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
return hw->sw_entry_point_layer;
}
@@ -1417,9 +1439,10 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
if (status)
goto sched_query_out;
- hw->num_tx_sched_layers = LE16_TO_CPU(buf->sched_props.logical_levels);
+ hw->num_tx_sched_layers =
+ (u8)LE16_TO_CPU(buf->sched_props.logical_levels);
hw->num_tx_sched_phys_layers =
- LE16_TO_CPU(buf->sched_props.phys_levels);
+ (u8)LE16_TO_CPU(buf->sched_props.phys_levels);
hw->flattened_layers = buf->sched_props.flattening_bitmap;
hw->max_cgds = buf->sched_props.max_pf_cgds;
@@ -1585,10 +1608,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
{
struct ice_sched_node *vsi_node, *qgrp_node;
struct ice_vsi_ctx *vsi_ctx;
+ u8 qgrp_layer, vsi_layer;
u16 max_children;
- u8 qgrp_layer;
qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
+ vsi_layer = ice_sched_get_vsi_layer(pi->hw);
max_children = pi->hw->max_children[qgrp_layer];
vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
@@ -1599,6 +1623,12 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
if (!vsi_node)
return NULL;
+ /* If the queue group and vsi layer are same then queues
+ * are all attached directly to VSI
+ */
+ if (qgrp_layer == vsi_layer)
+ return vsi_node;
+
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
while (qgrp_node) {
@@ -1748,7 +1778,6 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
{
struct ice_sched_node *parent, *node;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
u32 first_node_teid;
u16 num_added = 0;
u8 i, qgl, vsil;
@@ -1757,6 +1786,8 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
vsil = ice_sched_get_vsi_layer(hw);
parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
+ enum ice_status status;
+
if (!parent)
return ICE_ERR_CFG;
@@ -1850,7 +1881,6 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
struct ice_sched_node *parent = tc_node;
- enum ice_status status;
u32 first_node_teid;
u16 num_added = 0;
u8 i, vsil;
@@ -1860,6 +1890,8 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
vsil = ice_sched_get_vsi_layer(pi->hw);
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
+ enum ice_status status;
+
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
i, num_nodes[i],
&first_node_teid,
@@ -3928,7 +3960,7 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
u16 wakeup = 0;
/* Get the wakeup integer value */
- bytes_per_sec = DIV_S64(bw * 1000, BITS_PER_BYTE);
+ bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE);
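/*
 * Why the (s64) cast matters (worked example with an assumed rate): bw
 * is in Kbps, so a 100 Gbps profile gives bw = 100,000,000 and
 * bw * 1000 = 10^11, which overflows 32-bit arithmetic; widening before
 * the multiply keeps the product comfortably within s64.
 */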
wakeup_int = DIV_S64(hw->psm_clk_freq, bytes_per_sec);
if (wakeup_int > 63) {
wakeup = (u16)((1 << 15) | wakeup_int);
@@ -3937,7 +3969,7 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
* Convert Integer value to a constant multiplier
*/
wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
- wakeup_a = DIV_S64(ICE_RL_PROF_MULTIPLIER *
+ wakeup_a = DIV_S64((s64)ICE_RL_PROF_MULTIPLIER *
hw->psm_clk_freq, bytes_per_sec);
/* Get Fraction value */
@@ -3980,13 +4012,13 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
return status;
/* Bytes per second from Kbps */
- bytes_per_sec = DIV_S64(bw * 1000, BITS_PER_BYTE);
+ bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE);
/* encode is 6 bits but really useful are 5 bits */
for (i = 0; i < 64; i++) {
u64 pow_result = BIT_ULL(i);
- ts_rate = DIV_S64(hw->psm_clk_freq,
+ ts_rate = DIV_S64((s64)hw->psm_clk_freq,
pow_result * ICE_RL_PROF_TS_MULTIPLIER);
if (ts_rate <= 0)
continue;
@@ -4045,7 +4077,7 @@ ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
enum ice_status status;
u8 profile_type;
- if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ if (!hw || layer_num >= hw->num_tx_sched_layers)
return NULL;
switch (rl_type) {
case ICE_MIN_BW:
@@ -4061,8 +4093,6 @@ ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
return NULL;
}
- if (!hw)
- return NULL;
LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
ice_aqc_rl_profile_info, list_entry)
if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
@@ -4264,7 +4294,7 @@ ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
struct ice_aqc_rl_profile_info *rl_prof_elem;
enum ice_status status = ICE_SUCCESS;
- if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ if (!hw || layer_num >= hw->num_tx_sched_layers)
return ICE_ERR_PARAM;
/* Check the existing list for RL profile */
LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
@@ -4844,7 +4874,6 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
enum ice_agg_type agg_type, u8 tc)
{
struct ice_sched_node *node = NULL;
- struct ice_sched_node *child_node;
switch (agg_type) {
case ICE_AGG_TYPE_VSI: {
@@ -4872,16 +4901,19 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
case ICE_AGG_TYPE_Q:
/* The current implementation allows single queue to modify */
- node = ice_sched_get_node(pi, id);
+ node = ice_sched_find_node_by_teid(pi->root, id);
break;
- case ICE_AGG_TYPE_QG:
+ case ICE_AGG_TYPE_QG: {
+ struct ice_sched_node *child_node;
+
/* The current implementation allows single qg to modify */
- child_node = ice_sched_get_node(pi, id);
+ child_node = ice_sched_find_node_by_teid(pi->root, id);
if (!child_node)
break;
node = child_node->parent;
break;
+ }
default:
break;
diff --git a/sys/dev/ice/ice_sched.h b/sys/dev/ice/ice_sched.h
index 7b37ca828c88..1ccfa11db2af 100644
--- a/sys/dev/ice/ice_sched.h
+++ b/sys/dev/ice/ice_sched.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,9 @@
#include "ice_common.h"
+#define ICE_SCHED_5_LAYERS 5
+#define ICE_SCHED_9_LAYERS 9
+
#define ICE_QGRP_LAYER_OFFSET 2
#define ICE_VSI_LAYER_OFFSET 4
#define ICE_AGG_LAYER_OFFSET 6
@@ -106,10 +109,18 @@ ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
+ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes,
+ struct ice_aqc_node_attr_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+enum ice_status
ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_nodes,
struct ice_aqc_cfg_l2_node_cgd_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
+ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
+ struct ice_aqc_move_elem *buf, u16 buf_size,
+ u16 *grps_movd, struct ice_sq_cd *cd);
+enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
diff --git a/sys/dev/ice/ice_status.h b/sys/dev/ice/ice_status.h
index 5acc18df8a60..37e239b759ff 100644
--- a/sys/dev/ice/ice_status.h
+++ b/sys/dev/ice/ice_status.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/ice_strings.c b/sys/dev/ice/ice_strings.c
index 9a527a6dd8c4..d60bffe24e90 100644
--- a/sys/dev/ice/ice_strings.c
+++ b/sys/dev/ice/ice_strings.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -624,6 +624,8 @@ ice_fec_str(enum ice_fec_mode mode)
return ICE_FEC_STRING_BASER;
case ICE_FEC_NONE:
return ICE_FEC_STRING_NONE;
+ case ICE_FEC_DIS_AUTO:
+ return ICE_FEC_STRING_DIS_AUTO;
}
/* The compiler generates errors on unhandled enum values if we omit
@@ -762,6 +764,8 @@ ice_fwd_act_str(enum ice_sw_fwd_act_type action)
return "FWD_TO_QGRP";
case ICE_DROP_PACKET:
return "DROP_PACKET";
+ case ICE_LG_ACTION:
+ return "LG_ACTION";
case ICE_INVAL_ACT:
return "INVAL_ACT";
}
@@ -1037,6 +1041,8 @@ ice_state_to_str(enum ice_state state)
return "LLDP_RX_FLTR_FROM_DRIVER";
case ICE_STATE_MULTIPLE_TCS:
return "MULTIPLE_TCS";
+ case ICE_STATE_DO_FW_DEBUG_DUMP:
+ return "DO_FW_DEBUG_DUMP";
case ICE_STATE_LAST:
return NULL;
}
diff --git a/sys/dev/ice/ice_switch.c b/sys/dev/ice/ice_switch.c
index 2c2f0e8de6a9..f41c2a4a8099 100644
--- a/sys/dev/ice/ice_switch.c
+++ b/sys/dev/ice/ice_switch.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -30,6 +30,7 @@
*/
/*$FreeBSD$*/
+#include "ice_common.h"
#include "ice_switch.h"
#include "ice_flex_type.h"
#include "ice_flow.h"
@@ -39,6 +40,7 @@
#define ICE_ETH_VLAN_TCI_OFFSET 14
#define ICE_MAX_VLAN_ID 0xFFF
#define ICE_IPV6_ETHER_ID 0x86DD
+#define ICE_PPP_IPV6_PROTO_ID 0x0057
#define ICE_ETH_P_8021Q 0x8100
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
@@ -60,6 +62,9 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
+static bool
+ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle);
+
/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
* @hw: pointer to the HW struct
@@ -819,6 +824,8 @@ ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
else /* remove VSI from mirror rule */
mr_list[i] = CPU_TO_LE16(id);
}
+
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
}
cmd = &desc.params.add_update_rule;
@@ -902,6 +909,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ lkup_type == ICE_SW_LKUP_DFLT ||
lkup_type == ICE_SW_LKUP_LAST) {
sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
@@ -1002,7 +1010,7 @@ ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
*
* Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
*/
-static enum ice_status
+enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
@@ -1047,8 +1055,6 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
pi->sw_id = swid;
pi->pf_vf_num = pf_vf_num;
pi->is_vf = is_vf;
- pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
- pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
break;
default:
ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
@@ -1517,7 +1523,7 @@ ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
ice_aqc_opc_update_sw_rules, NULL);
if (!status) {
m_ent->lg_act_idx = l_id;
- m_ent->counter_index = counter_id;
+ m_ent->counter_index = (u8)counter_id;
}
ice_free(hw, lg_act);
@@ -1588,6 +1594,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ lkup_type == ICE_SW_LKUP_DFLT ||
lkup_type == ICE_SW_LKUP_LAST)
rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
ICE_AQC_SW_RULES_T_VSI_LIST_SET;
@@ -1748,11 +1755,12 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
*/
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
- struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *fm_entry;
enum ice_status status = ICE_SUCCESS;
struct LIST_HEAD_TYPE *rule_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
+ struct ice_switch_info *sw;
+ sw = hw->switch_info;
rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
@@ -1811,7 +1819,6 @@ ice_add_update_vsi_list(struct ice_hw *hw,
{
enum ice_status status = ICE_SUCCESS;
u16 vsi_list_id = 0;
-
if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
return ICE_ERR_NOT_IMPL;
@@ -1936,7 +1943,7 @@ ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
* handle element. This can be extended further to search VSI list with more
* than 1 vsi_count. Returns pointer to VSI list entry if found.
*/
-static struct ice_vsi_list_map_info *
+struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
u16 *vsi_list_id)
{
@@ -2352,7 +2359,8 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
- m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
+ if (m_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI)
+ m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* update the src in case it is VSI num */
if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
return ICE_ERR_PARAM;
@@ -2781,6 +2789,83 @@ ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
}
/**
+ * ice_get_lg_act_aqc_res_type - get resource type for a large action
+ * @res_type: resource type to be filled in case of function success
+ * @num_acts: number of actions to hold with a large action entry
+ *
+ * Get resource type for a large action depending on the number
+ * of single actions that it contains.
+ */
+static enum ice_status
+ice_get_lg_act_aqc_res_type(u16 *res_type, int num_acts)
+{
+ if (!res_type)
+ return ICE_ERR_BAD_PTR;
+
+ /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
+ * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
+ * If num_acts is greater than 2, then use
+ * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
+ * The num_acts cannot be equal to 0 or greater than 4.
+ */
+ switch (num_acts) {
+ case 1:
+ *res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_1;
+ break;
+ case 2:
+ *res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_2;
+ break;
+ case 3:
+ case 4:
+ *res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_4;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_alloc_res_lg_act - add large action resource
+ * @hw: pointer to the hardware structure
+ * @l_id: large action ID to fill it in
+ * @num_acts: number of actions to hold with a large action entry
+ */
+static enum ice_status
+ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
+{
+ struct ice_aqc_alloc_free_res_elem *sw_buf;
+ enum ice_status status;
+ u16 buf_len, res_type;
+
+ if (!l_id)
+ return ICE_ERR_BAD_PTR;
+
+ status = ice_get_lg_act_aqc_res_type(&res_type, num_acts);
+ if (status)
+ return status;
+
+ /* Allocate resource for large action */
+ buf_len = ice_struct_size(sw_buf, elem, 1);
+ sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
+ if (!sw_buf)
+ return ICE_ERR_NO_MEMORY;
+
+ sw_buf->res_type = CPU_TO_LE16(res_type);
+ sw_buf->num_elems = CPU_TO_LE16(1);
+
+ status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+ if (!status)
+ *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
+
+ ice_free(hw, sw_buf);
+
+ return status;
+}
+
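A hedged usage sketch (the caller shape is assumed, not shown in this patch): allocate a large-action entry sized for two single actions and keep the firmware-assigned ID for later rule programming.

	u16 l_id;
	enum ice_status status;

	status = ice_alloc_res_lg_act(hw, &l_id, 2);
	if (status)
		return status;
	/* l_id now names an ICE_AQC_RES_TYPE_WIDE_TABLE_2 entry owned by FW */
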
+/**
* ice_rem_sw_rule_info
* @hw: pointer to the hardware structure
* @rule_head: pointer to the switch list structure that we want to delete
@@ -2832,24 +2917,19 @@ enum ice_status
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
u8 direction)
{
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_fltr_list_entry f_list_entry;
+ struct ice_sw_recipe *recp_list;
struct ice_fltr_info f_info;
struct ice_hw *hw = pi->hw;
- enum ice_adminq_opc opcode;
enum ice_status status;
- u16 s_rule_size;
+ u8 lport = pi->lport;
u16 hw_vsi_id;
+ recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
- hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
- s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
-
- s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
@@ -2857,54 +2937,63 @@ ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
f_info.flag = direction;
f_info.fltr_act = ICE_FWD_TO_VSI;
f_info.fwd_id.hw_vsi_id = hw_vsi_id;
+ f_info.vsi_handle = vsi_handle;
if (f_info.flag & ICE_FLTR_RX) {
f_info.src = pi->lport;
f_info.src_id = ICE_SRC_ID_LPORT;
- if (!set)
- f_info.fltr_rule_id =
- pi->dflt_rx_vsi_rule_id;
} else if (f_info.flag & ICE_FLTR_TX) {
f_info.src_id = ICE_SRC_ID_VSI;
f_info.src = hw_vsi_id;
- if (!set)
- f_info.fltr_rule_id =
- pi->dflt_tx_vsi_rule_id;
}
+ f_list_entry.fltr_info = f_info;
if (set)
- opcode = ice_aqc_opc_add_sw_rules;
+ status = ice_add_rule_internal(hw, recp_list, lport,
+ &f_list_entry);
else
- opcode = ice_aqc_opc_remove_sw_rules;
-
- ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
-
- status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
- if (status || !(f_info.flag & ICE_FLTR_TX_RX))
- goto out;
- if (set) {
- u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
-
- if (f_info.flag & ICE_FLTR_TX) {
- pi->dflt_tx_vsi_num = hw_vsi_id;
- pi->dflt_tx_vsi_rule_id = index;
- } else if (f_info.flag & ICE_FLTR_RX) {
- pi->dflt_rx_vsi_num = hw_vsi_id;
- pi->dflt_rx_vsi_rule_id = index;
- }
- } else {
- if (f_info.flag & ICE_FLTR_TX) {
- pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
- pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
- } else if (f_info.flag & ICE_FLTR_RX) {
- pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
- pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
+ status = ice_remove_rule_internal(hw, recp_list,
+ &f_list_entry);
+
+ return status;
+}
+
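With this rework, setting and clearing the default VSI reuses the same recipe-list bookkeeping as other filters. An illustrative call sequence (values hypothetical):

	/* Make vsi_handle the default Rx destination on this port */
	status = ice_cfg_dflt_vsi(pi, vsi_handle, true, ICE_FLTR_RX);

	/* Later, remove it through the same recipe-list path */
	status = ice_cfg_dflt_vsi(pi, vsi_handle, false, ICE_FLTR_RX);
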
+/**
+ * ice_check_if_dflt_vsi - check if VSI is default VSI
+ * @pi: pointer to the port_info structure
+ * @vsi_handle: VSI handle to check for in the filter list
+ * @rule_exists: set true if any VSIs are in the rule list
+ *
+ * Checks whether the VSI is in a default VSI list, and also indicates
+ * whether the default VSI list contains any rules
+ */
+bool ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
+ bool *rule_exists)
+{
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ struct LIST_HEAD_TYPE *rule_head;
+ struct ice_sw_recipe *recp_list;
+ struct ice_lock *rule_lock;
+ bool ret = false;
+ recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
+ rule_lock = &recp_list->filt_rule_lock;
+ rule_head = &recp_list->filt_rules;
+
+ ice_acquire_lock(rule_lock);
+
+ if (rule_exists && !LIST_EMPTY(rule_head))
+ *rule_exists = true;
+
+ LIST_FOR_EACH_ENTRY(fm_entry, rule_head,
+ ice_fltr_mgmt_list_entry, list_entry) {
+ if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
+ ret = true;
+ break;
}
}
-out:
- ice_free(hw, s_rule);
- return status;
+ ice_release_lock(rule_lock);
+ return ret;
}
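A usage sketch (hypothetical caller): the function only ever sets *rule_exists to true, so the caller should initialize it to false.

	bool rule_exists = false;	/* only set true by the call */

	if (ice_check_if_dflt_vsi(pi, vsi_handle, &rule_exists)) {
		/* vsi_handle already holds a default-VSI rule */
	} else if (rule_exists) {
		/* a different VSI owns the default rule */
	}
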
/**
@@ -3546,6 +3635,13 @@ _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
list_entry) {
+ /* Avoid enabling or disabling vlan zero twice when in double
+ * vlan mode
+ */
+ if (ice_is_dvm_ena(hw) &&
+ list_itr->fltr_info.l_data.vlan.tpid == 0)
+ continue;
+
vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
if (rm_vlan_promisc)
status = _ice_clear_vsi_promisc(hw, vsi_handle,
@@ -3555,7 +3651,7 @@ _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
status = _ice_set_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id,
lport, sw);
- if (status)
+ if (status && status != ICE_ERR_ALREADY_EXISTS)
break;
}
@@ -3624,7 +3720,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
break;
case ICE_SW_LKUP_PROMISC:
case ICE_SW_LKUP_PROMISC_VLAN:
- ice_remove_promisc(hw, lkup, &remove_list_head);
+ ice_remove_promisc(hw, (u8)lkup, &remove_list_head);
break;
case ICE_SW_LKUP_MAC_VLAN:
ice_debug(hw, ICE_DBG_SW, "MAC VLAN look up is not supported yet\n");
@@ -3788,53 +3884,6 @@ enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
}
/**
- * ice_alloc_res_lg_act - add large action resource
- * @hw: pointer to the hardware structure
- * @l_id: large action ID to fill it in
- * @num_acts: number of actions to hold with a large action entry
- */
-static enum ice_status
-ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
-{
- struct ice_aqc_alloc_free_res_elem *sw_buf;
- enum ice_status status;
- u16 buf_len;
-
- if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
- return ICE_ERR_PARAM;
-
- /* Allocate resource for large action */
- buf_len = ice_struct_size(sw_buf, elem, 1);
- sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
- if (!sw_buf)
- return ICE_ERR_NO_MEMORY;
-
- sw_buf->num_elems = CPU_TO_LE16(1);
-
- /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
- * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
- * If num_acts is greater than 2, then use
- * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
- * The num_acts cannot exceed 4. This was ensured at the
- * beginning of the function.
- */
- if (num_acts == 1)
- sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
- else if (num_acts == 2)
- sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
- else
- sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
-
- status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
- ice_aqc_opc_alloc_res, NULL);
- if (!status)
- *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
-
- ice_free(hw, sw_buf);
- return status;
-}
-
-/**
* ice_add_mac_with_sw_marker - add filter with sw marker
* @hw: pointer to the hardware structure
* @f_info: filter info structure containing the MAC filter information
@@ -4201,10 +4250,12 @@ enum ice_status
ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
u16 vsi_handle)
{
- struct ice_switch_info *sw = hw->switch_info;
+ struct ice_switch_info *sw;
enum ice_status status = ICE_SUCCESS;
u8 i;
+ sw = hw->switch_info;
+
/* Update the recipes that were created */
for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
struct LIST_HEAD_TYPE *head;
diff --git a/sys/dev/ice/ice_switch.h b/sys/dev/ice/ice_switch.h
index b1f1b312b136..3ee5b467ae32 100644
--- a/sys/dev/ice/ice_switch.h
+++ b/sys/dev/ice/ice_switch.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,7 +33,7 @@
#ifndef _ICE_SWITCH_H_
#define _ICE_SWITCH_H_
-#include "ice_common.h"
+#include "ice_type.h"
#include "ice_protocol_type.h"
#define ICE_SW_CFG_MAX_BUF_LEN 2048
@@ -43,6 +43,14 @@
#define ICE_FLTR_TX BIT(1)
#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
+#define ICE_PROFID_IPV4_GTPC_TEID 41
+#define ICE_PROFID_IPV4_GTPC_NO_TEID 42
+#define ICE_PROFID_IPV4_GTPU_TEID 43
+#define ICE_PROFID_IPV6_GTPC_TEID 44
+#define ICE_PROFID_IPV6_GTPC_NO_TEID 45
+#define ICE_PROFID_IPV6_GTPU_TEID 46
+#define ICE_PROFID_IPV6_GTPU_IPV6_TCP 70
+
#define DUMMY_ETH_HDR_LEN 16
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
@@ -232,6 +240,7 @@ struct ice_adv_rule_info {
u32 priority;
u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
u16 fltr_rule_id;
+ u16 lg_id;
struct ice_adv_rule_flags_info flags_info;
};
@@ -382,6 +391,42 @@ enum ice_promisc_flags {
ICE_PROMISC_VLAN_TX = 0x80,
};
+struct ice_dummy_pkt_offsets {
+ enum ice_protocol_type type;
+ u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
+};
+
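The offsets tables consumed by the prototypes below are sentinel-terminated lists mapping each protocol layer to its byte offset in the dummy packet. A sketch of their shape (entries hypothetical; enum values assumed from ice_protocol_type.h):

	static const struct ice_dummy_pkt_offsets dummy_udp_offsets[] = {
		{ ICE_MAC_OFOS,      0 },	/* outer Ethernet at byte 0 */
		{ ICE_IPV4_OFOS,     14 },	/* outer IPv4 */
		{ ICE_UDP_ILOS,      34 },	/* UDP */
		{ ICE_PROTOCOL_LAST, 0 },	/* terminator */
	};
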
+void
+ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ enum ice_sw_tunnel_type tun_type, const u8 **pkt,
+ u16 *pkt_len,
+ const struct ice_dummy_pkt_offsets **offsets);
+
+enum ice_status
+ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ struct ice_aqc_sw_rules_elem *s_rule,
+ const u8 *dummy_pkt, u16 pkt_len,
+ const struct ice_dummy_pkt_offsets *offsets);
+
+enum ice_status
+ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid);
+
+struct ice_adv_fltr_mgmt_list_entry *
+ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, u16 recp_id,
+ struct ice_adv_rule_info *rinfo);
+
+enum ice_status
+ice_adv_add_update_vsi_list(struct ice_hw *hw,
+ struct ice_adv_fltr_mgmt_list_entry *m_entry,
+ struct ice_adv_rule_info *cur_fltr,
+ struct ice_adv_rule_info *new_fltr);
+
+struct ice_vsi_list_map_info *
+ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
+ u16 *vsi_list_id);
+
/* VSI related commands */
enum ice_status
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
@@ -468,6 +513,8 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
u8 direction);
+bool ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
+ bool *rule_exists);
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
@@ -498,4 +545,7 @@ ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
u16 vsi_handle);
void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
+enum ice_status
+ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
+ u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd);
#endif /* _ICE_SWITCH_H_ */
diff --git a/sys/dev/ice/ice_type.h b/sys/dev/ice/ice_type.h
index c7a25b026130..bd36fc388130 100644
--- a/sys/dev/ice/ice_type.h
+++ b/sys/dev/ice/ice_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,48 +33,15 @@
#ifndef _ICE_TYPE_H_
#define _ICE_TYPE_H_
-#define ETH_ALEN 6
-
-#define ETH_HEADER_LEN 14
-
-#define BIT(a) (1UL << (a))
-#ifndef BIT_ULL
-#define BIT_ULL(a) (1ULL << (a))
-#endif /* BIT_ULL */
-
-#define BITS_PER_BYTE 8
-
-#define _FORCE_
-
-#define ICE_BYTES_PER_WORD 2
-#define ICE_BYTES_PER_DWORD 4
-#define ICE_MAX_TRAFFIC_CLASS 8
-
-#ifndef MIN_T
-#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b))
-#endif
-
-#define IS_ASCII(_ch) ((_ch) < 0x80)
-
-#define STRUCT_HACK_VAR_LEN
-/**
- * ice_struct_size - size of struct with C99 flexible array member
- * @ptr: pointer to structure
- * @field: flexible array member (last member of the structure)
- * @num: number of elements of that flexible array member
- */
-#define ice_struct_size(ptr, field, num) \
- (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
-
-#define FLEX_ARRAY_SIZE(_ptr, _mem, cnt) ((cnt) * sizeof(_ptr->_mem[0]))
-
+#include "ice_defs.h"
#include "ice_status.h"
#include "ice_hw_autogen.h"
#include "ice_devids.h"
#include "ice_osdep.h"
#include "ice_bitops.h" /* Must come before ice_controlq.h */
-#include "ice_controlq.h"
#include "ice_lan_tx_rx.h"
+#include "ice_ddp_common.h"
+#include "ice_controlq.h"
#include "ice_flex_type.h"
#include "ice_protocol_type.h"
#include "ice_vlan_mode.h"
@@ -135,6 +102,8 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
#define ICE_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
#define ICE_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
#define ICE_LO_WORD(x) ((u16)((x) & 0xFFFF))
+#define ICE_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
+#define ICE_LO_BYTE(x) ((u8)((x) & 0xFF))
/* debug masks - set these bits in hw->debug_mask to control output */
#define ICE_DBG_TRACE BIT_ULL(0) /* for function-trace only */
@@ -203,11 +172,6 @@ enum ice_aq_res_ids {
#define ICE_CHANGE_LOCK_TIMEOUT 1000
#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000
-enum ice_aq_res_access_type {
- ICE_RES_READ = 1,
- ICE_RES_WRITE
-};
-
struct ice_driver_ver {
u8 major_ver;
u8 minor_ver;
@@ -236,7 +200,8 @@ enum ice_fec_mode {
ICE_FEC_NONE = 0,
ICE_FEC_RS,
ICE_FEC_BASER,
- ICE_FEC_AUTO
+ ICE_FEC_AUTO,
+ ICE_FEC_DIS_AUTO
};
struct ice_phy_cache_mode_data {
@@ -261,6 +226,7 @@ enum ice_mac_type {
ICE_MAC_VF,
ICE_MAC_E810,
ICE_MAC_GENERIC,
+ ICE_MAC_GENERIC_3K,
};
/* Media Types */
@@ -338,6 +304,15 @@ struct ice_phy_info {
#define ICE_MAX_NUM_MIRROR_RULES 64
+#define ICE_L2TPV2_FLAGS_CTRL 0x8000
+#define ICE_L2TPV2_FLAGS_LEN 0x4000
+#define ICE_L2TPV2_FLAGS_SEQ 0x0800
+#define ICE_L2TPV2_FLAGS_OFF 0x0200
+#define ICE_L2TPV2_FLAGS_VER 0x0002
+
+#define ICE_L2TPV2_PKT_LENGTH 6
+#define ICE_PPP_PKT_LENGTH 4
+
/* Common HW capabilities for SW use */
struct ice_hw_common_caps {
/* Write CSR protection */
@@ -406,6 +381,7 @@ struct ice_hw_common_caps {
u8 iscsi;
u8 mgmt_cem;
u8 iwarp;
+ u8 roce_lag;
/* WoL and APM support */
#define ICE_WOL_SUPPORT_M BIT(0)
@@ -437,6 +413,17 @@ struct ice_hw_common_caps {
#define ICE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
bool ext_topo_dev_img_prog_en[ICE_EXT_TOPO_DEV_IMG_COUNT];
#define ICE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
+ bool tx_sched_topo_comp_mode_en;
+ bool dyn_flattening_en;
+};
+
+#define ICE_NAC_TOPO_PRIMARY_M BIT(0)
+#define ICE_NAC_TOPO_DUAL_M BIT(1)
+#define ICE_NAC_TOPO_ID_M MAKEMASK(0xf, 0)
+
+struct ice_nac_topology {
+ u32 mode;
+ u8 id;
};
/* Function specific capabilities */
@@ -453,6 +440,7 @@ struct ice_hw_dev_caps {
u32 num_vfs_exposed; /* Total number of VFs exposed */
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
u32 num_funcs;
+ struct ice_nac_topology nac_topo;
};
/* Information about MAC such as address, etc... */
@@ -862,10 +850,6 @@ struct ice_port_info {
#define ICE_SCHED_PORT_STATE_READY 0x1
u8 lport;
#define ICE_LPORT_MASK 0xff
- u16 dflt_tx_vsi_rule_id;
- u16 dflt_tx_vsi_num;
- u16 dflt_rx_vsi_rule_id;
- u16 dflt_rx_vsi_num;
struct ice_fc_info fc;
struct ice_mac_info mac;
struct ice_phy_info phy;
@@ -887,7 +871,6 @@ struct ice_switch_info {
ice_declare_bitmap(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
-
/* Enum defining the different states of the mailbox snapshot in the
* PF-VF mailbox overflow detection algorithm. The snapshot can be in
* states:
@@ -962,6 +945,13 @@ struct ice_mbx_data {
u16 async_watermark_val;
};
+/* PHY configuration */
+enum ice_phy_cfg {
+ ICE_PHY_E810 = 1,
+ ICE_PHY_E822,
+ ICE_PHY_ETH56G,
+};
+
/* Port hardware description */
struct ice_hw {
u8 *hw_addr;
@@ -985,6 +975,7 @@ struct ice_hw {
u8 revision_id;
u8 pf_id; /* device profile info */
+ enum ice_phy_cfg phy_cfg;
u16 max_burst_size; /* driver sets this value */
@@ -1046,23 +1037,23 @@ struct ice_hw {
/* true if VSIs can share unicast MAC addr */
u8 umac_shared;
-#define ICE_PHY_PER_NAC 1
-#define ICE_MAX_QUAD 2
-#define ICE_NUM_QUAD_TYPE 2
-#define ICE_PORTS_PER_QUAD 4
-#define ICE_PHY_0_LAST_QUAD 1
-#define ICE_PORTS_PER_PHY 8
-#define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY
+#define ICE_PHY_PER_NAC_E822 1
+#define ICE_MAX_QUAD 2
+#define ICE_QUADS_PER_PHY_E822 2
+#define ICE_PORTS_PER_PHY_E822 8
+#define ICE_PORTS_PER_QUAD 4
+#define ICE_PORTS_PER_PHY_E810 4
+#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
u32 pkg_seg_id;
+ u32 pkg_sign_type;
u32 active_track_id;
+ u8 pkg_has_signing_seg:1;
u8 active_pkg_name[ICE_PKG_NAME_SIZE];
u8 active_pkg_in_nvm;
- enum ice_aq_err pkg_dwnld_status;
-
/* Driver's package ver - (from the Ice Metadata section) */
struct ice_pkg_ver pkg_ver;
u8 pkg_name[ICE_PKG_NAME_SIZE];
@@ -1173,6 +1164,7 @@ enum ice_sw_fwd_act_type {
ICE_FWD_TO_Q,
ICE_FWD_TO_QGRP,
ICE_DROP_PACKET,
+ ICE_LG_ACTION,
ICE_INVAL_ACT
};
@@ -1344,6 +1336,12 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
+/* FW version for FEC disable in Auto FEC mode */
+#define ICE_FW_FEC_DIS_AUTO_BRANCH 1
+#define ICE_FW_FEC_DIS_AUTO_MAJ 7
+#define ICE_FW_FEC_DIS_AUTO_MIN 0
+#define ICE_FW_FEC_DIS_AUTO_PATCH 5
+
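A hedged sketch of the version gate these constants imply (helper name hypothetical; the fw_branch/fw_maj_ver/fw_min_ver/fw_patch fields of struct ice_hw are assumed):

	static bool ice_fw_fec_dis_auto_ok(struct ice_hw *hw)
	{
		if (hw->fw_branch != ICE_FW_FEC_DIS_AUTO_BRANCH)
			return false;
		if (hw->fw_maj_ver != ICE_FW_FEC_DIS_AUTO_MAJ)
			return hw->fw_maj_ver > ICE_FW_FEC_DIS_AUTO_MAJ;
		if (hw->fw_min_ver != ICE_FW_FEC_DIS_AUTO_MIN)
			return hw->fw_min_ver > ICE_FW_FEC_DIS_AUTO_MIN;
		return hw->fw_patch >= ICE_FW_FEC_DIS_AUTO_PATCH;
	}
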
/* AQ API version for FW health reports */
#define ICE_FW_API_HEALTH_REPORT_MAJ 1
#define ICE_FW_API_HEALTH_REPORT_MIN 7
diff --git a/sys/dev/ice/ice_vlan_mode.c b/sys/dev/ice/ice_vlan_mode.c
index bfddd75b6ff2..a5665486a3bc 100644
--- a/sys/dev/ice/ice_vlan_mode.c
+++ b/sys/dev/ice/ice_vlan_mode.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -32,6 +32,7 @@
#include "ice_common.h"
+#include "ice_ddp_common.h"
/**
* ice_pkg_get_supported_vlan_mode - chk if DDP supports Double VLAN mode (DVM)
* @hw: pointer to the HW struct
diff --git a/sys/dev/ice/ice_vlan_mode.h b/sys/dev/ice/ice_vlan_mode.h
index 1e52214c9028..3d414bf0bea1 100644
--- a/sys/dev/ice/ice_vlan_mode.h
+++ b/sys/dev/ice/ice_vlan_mode.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/if_ice_iflib.c b/sys/dev/ice/if_ice_iflib.c
index 89dbedee6d8e..c470251692c4 100644
--- a/sys/dev/ice/if_ice_iflib.c
+++ b/sys/dev/ice/if_ice_iflib.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -480,6 +480,8 @@ ice_if_attach_pre(if_ctx_t ctx)
/* Setup ControlQ lengths */
ice_set_ctrlq_len(hw);
+reinit_hw:
+
fw_mode = ice_get_fw_mode(hw);
if (fw_mode == ICE_FW_MODE_REC) {
device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
@@ -514,12 +516,22 @@ ice_if_attach_pre(if_ctx_t ctx)
goto free_pci_mapping;
}
+ ice_init_device_features(sc);
+
/* Notify firmware of the device driver version */
err = ice_send_version(sc);
if (err)
goto deinit_hw;
- ice_load_pkg_file(sc);
+ /*
+ * A return of ICE_SUCCESS indicates a change was made that requires
+ * reinitializing the hardware
+ */
+ err = ice_load_pkg_file(sc);
+ if (err == ICE_SUCCESS) {
+ ice_deinit_hw(hw);
+ goto reinit_hw;
+ }
err = ice_init_link_events(sc);
if (err) {
@@ -528,9 +540,19 @@ ice_if_attach_pre(if_ctx_t ctx)
goto deinit_hw;
}
- ice_print_nvm_version(sc);
+ /* Initialize VLAN mode in FW; if dual VLAN mode is supported by the package
+ * and firmware, this will force them to use single VLAN mode.
+ */
+ status = ice_set_vlan_mode(hw);
+ if (status) {
+ err = EIO;
+ device_printf(dev, "Unable to initialize VLAN mode, err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ goto deinit_hw;
+ }
- ice_init_device_features(sc);
+ ice_print_nvm_version(sc);
/* Setup the MAC address */
iflib_set_mac(ctx, hw->port_info->mac.lan_addr);
@@ -978,7 +1000,7 @@ ice_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
/* Allocate queue structure memory */
if (!(vsi->tx_queues =
- (struct ice_tx_queue *) malloc(sizeof(struct ice_tx_queue) * ntxqsets, M_ICE, M_WAITOK | M_ZERO))) {
+ (struct ice_tx_queue *) malloc(sizeof(struct ice_tx_queue) * ntxqsets, M_ICE, M_NOWAIT | M_ZERO))) {
device_printf(sc->dev, "Unable to allocate Tx queue memory\n");
return (ENOMEM);
}
@@ -986,7 +1008,7 @@ ice_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
/* Allocate report status arrays */
for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
if (!(txq->tx_rsq =
- (uint16_t *) malloc(sizeof(uint16_t) * sc->scctx->isc_ntxd[0], M_ICE, M_WAITOK))) {
+ (uint16_t *) malloc(sizeof(uint16_t) * sc->scctx->isc_ntxd[0], M_ICE, M_NOWAIT))) {
device_printf(sc->dev, "Unable to allocate tx_rsq memory\n");
err = ENOMEM;
goto free_tx_queues;
@@ -1070,7 +1092,7 @@ ice_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
/* Allocate queue structure memory */
if (!(vsi->rx_queues =
- (struct ice_rx_queue *) malloc(sizeof(struct ice_rx_queue) * nrxqsets, M_ICE, M_WAITOK | M_ZERO))) {
+ (struct ice_rx_queue *) malloc(sizeof(struct ice_rx_queue) * nrxqsets, M_ICE, M_NOWAIT | M_ZERO))) {
device_printf(sc->dev, "Unable to allocate Rx queue memory\n");
return (ENOMEM);
}
@@ -2303,7 +2325,7 @@ ice_prepare_for_reset(struct ice_softc *sc)
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, false);
}
/**
@@ -2410,6 +2432,7 @@ ice_rebuild(struct ice_softc *sc)
{
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
+ enum ice_ddp_state pkg_state;
enum ice_status status;
int err;
@@ -2504,10 +2527,9 @@ ice_rebuild(struct ice_softc *sc)
/* If we previously loaded the package, it needs to be reloaded now */
if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE)) {
- status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
- if (status) {
- ice_log_pkg_init(sc, &status);
-
+ pkg_state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
+ if (!ice_is_init_pkg_successful(pkg_state)) {
+ ice_log_pkg_init(sc, pkg_state);
ice_transition_safe_mode(sc);
}
}
@@ -2583,7 +2605,8 @@ err_release_queue_allocations:
err_sched_cleanup:
ice_sched_cleanup_all(hw);
err_shutdown_ctrlq:
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, false);
+ ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
device_printf(dev, "Driver rebuild failed, please reload the device driver\n");
}
@@ -2695,13 +2718,6 @@ ice_handle_pf_reset_request(struct ice_softc *sc)
static void
ice_init_device_features(struct ice_softc *sc)
{
- /*
- * A failed pkg file download triggers safe mode, disabling advanced
- * device feature support
- */
- if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE))
- return;
-
/* Set capabilities that all devices support */
ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
ice_set_bit(ICE_FEATURE_RSS, sc->feat_cap);
@@ -2712,12 +2728,16 @@ ice_init_device_features(struct ice_softc *sc)
ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_cap);
ice_set_bit(ICE_FEATURE_HAS_PBA, sc->feat_cap);
+ ice_set_bit(ICE_FEATURE_DCB, sc->feat_cap);
+ ice_set_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap);
/* Disable features due to hardware limitations... */
if (!sc->hw.func_caps.common_cap.rss_table_size)
ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
if (!sc->hw.func_caps.common_cap.iwarp || !ice_enable_irdma)
ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
+ if (!sc->hw.func_caps.common_cap.dcb)
+ ice_clear_bit(ICE_FEATURE_DCB, sc->feat_cap);
/* Disable features due to firmware limitations... */
if (!ice_is_fw_health_report_supported(&sc->hw))
ice_clear_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
@@ -2736,6 +2756,10 @@ ice_init_device_features(struct ice_softc *sc)
/* RSS is always enabled for iflib */
if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_RSS))
ice_set_bit(ICE_FEATURE_RSS, sc->feat_en);
+
+ /* Disable features based on sysctl settings */
+ if (!ice_tx_balance_en)
+ ice_clear_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap);
}
/**
@@ -2999,6 +3023,8 @@ ice_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
switch (ifd->ifd_cmd) {
case ICE_NVM_ACCESS:
return ice_handle_nvm_access_ioctl(sc, ifd);
+ case ICE_DEBUG_DUMP:
+ return ice_handle_debug_dump_ioctl(sc, ifd);
default:
return EINVAL;
}
diff --git a/sys/dev/ice/irdma_di_if.m b/sys/dev/ice/irdma_di_if.m
index 929e1db5343e..6260278732aa 100644
--- a/sys/dev/ice/irdma_di_if.m
+++ b/sys/dev/ice/irdma_di_if.m
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright (c) 2021, Intel Corporation
+# Copyright (c) 2022, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/irdma_if.m b/sys/dev/ice/irdma_if.m
index 84651b7cecc0..afcbc1d027fb 100644
--- a/sys/dev/ice/irdma_if.m
+++ b/sys/dev/ice/irdma_if.m
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright (c) 2021, Intel Corporation
+# Copyright (c) 2022, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ice/virtchnl.h b/sys/dev/ice/virtchnl.h
index 0dd7fff09677..bd75c11d0849 100644
--- a/sys/dev/ice/virtchnl.h
+++ b/sys/dev/ice/virtchnl.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -196,10 +196,12 @@ enum virtchnl_ops {
/* opcodes 60 through 65 are reserved */
VIRTCHNL_OP_GET_QOS_CAPS = 66,
VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP = 67,
- /* opcode 68, 69 are reserved */
+ /* opcodes 68 through 70 are reserved */
VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
+ VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
+ VIRTCHNL_OP_CONFIG_QUANTA = 113,
VIRTCHNL_OP_MAX,
};
@@ -274,12 +276,6 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
return "VIRTCHNL_OP_DEL_FDIR_FILTER";
case VIRTCHNL_OP_GET_MAX_RSS_QREGION:
return "VIRTCHNL_OP_GET_MAX_RSS_QREGION";
- case VIRTCHNL_OP_ENABLE_QUEUES_V2:
- return "VIRTCHNL_OP_ENABLE_QUEUES_V2";
- case VIRTCHNL_OP_DISABLE_QUEUES_V2:
- return "VIRTCHNL_OP_DISABLE_QUEUES_V2";
- case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
- return "VIRTCHNL_OP_MAP_QUEUE_VECTOR";
case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
return "VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS";
case VIRTCHNL_OP_ADD_VLAN_V2:
@@ -298,6 +294,12 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
return "VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2";
case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
return "VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2";
+ case VIRTCHNL_OP_ENABLE_QUEUES_V2:
+ return "VIRTCHNL_OP_ENABLE_QUEUES_V2";
+ case VIRTCHNL_OP_DISABLE_QUEUES_V2:
+ return "VIRTCHNL_OP_DISABLE_QUEUES_V2";
+ case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
+ return "VIRTCHNL_OP_MAP_QUEUE_VECTOR";
case VIRTCHNL_OP_MAX:
return "VIRTCHNL_OP_MAX";
default:
@@ -492,21 +494,14 @@ VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
/* RX descriptor IDs (range from 0 to 63) */
enum virtchnl_rx_desc_ids {
VIRTCHNL_RXDID_0_16B_BASE = 0,
- /* 32B_BASE and FLEX_SPLITQ share desc ids as default descriptors
- * because they can be differentiated based on queue model; e.g. single
- * queue model can only use 32B_BASE and split queue model can only use
- * FLEX_SPLITQ. Having these as 1 allows them to be used as default
- * descriptors without negotiation.
- */
VIRTCHNL_RXDID_1_32B_BASE = 1,
- VIRTCHNL_RXDID_1_FLEX_SPLITQ = 1,
VIRTCHNL_RXDID_2_FLEX_SQ_NIC = 2,
VIRTCHNL_RXDID_3_FLEX_SQ_SW = 3,
VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB = 4,
VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL = 5,
VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2 = 6,
VIRTCHNL_RXDID_7_HW_RSVD = 7,
- /* 9 through 15 are reserved */
+ /* 8 through 15 are reserved */
VIRTCHNL_RXDID_16_COMMS_GENERIC = 16,
VIRTCHNL_RXDID_17_COMMS_AUX_VLAN = 17,
VIRTCHNL_RXDID_18_COMMS_AUX_IPV4 = 18,
@@ -520,7 +515,6 @@ enum virtchnl_rx_desc_ids {
enum virtchnl_rx_desc_id_bitmasks {
VIRTCHNL_RXDID_0_16B_BASE_M = BIT(VIRTCHNL_RXDID_0_16B_BASE),
VIRTCHNL_RXDID_1_32B_BASE_M = BIT(VIRTCHNL_RXDID_1_32B_BASE),
- VIRTCHNL_RXDID_1_FLEX_SPLITQ_M = BIT(VIRTCHNL_RXDID_1_FLEX_SPLITQ),
VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M = BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC),
VIRTCHNL_RXDID_3_FLEX_SQ_SW_M = BIT(VIRTCHNL_RXDID_3_FLEX_SQ_SW),
VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M = BIT(VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB),
@@ -1211,6 +1205,46 @@ struct virtchnl_rss_lut {
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
+/* enum virtchnl_hash_filter
+ *
+ * Bits defining the hash filters in the hena field of the virtchnl_rss_hena
+ * structure. Each bit indicates a specific hash filter for RSS.
+ *
+ * Note that not all bits are supported on all hardware. The VF should use
+ * VIRTCHNL_OP_GET_RSS_HENA_CAPS to determine which bits the PF is capable of
+ * before using VIRTCHNL_OP_SET_RSS_HENA to enable specific filters.
+ */
+enum virtchnl_hash_filter {
+ /* Bits 0 through 28 are reserved for future use */
+ /* Bits 29, 30, and 32 are not supported on XL710 and X710 */
+ VIRTCHNL_HASH_FILTER_UNICAST_IPV4_UDP = 29,
+ VIRTCHNL_HASH_FILTER_MULTICAST_IPV4_UDP = 30,
+ VIRTCHNL_HASH_FILTER_IPV4_UDP = 31,
+ VIRTCHNL_HASH_FILTER_IPV4_TCP_SYN_NO_ACK = 32,
+ VIRTCHNL_HASH_FILTER_IPV4_TCP = 33,
+ VIRTCHNL_HASH_FILTER_IPV4_SCTP = 34,
+ VIRTCHNL_HASH_FILTER_IPV4_OTHER = 35,
+ VIRTCHNL_HASH_FILTER_FRAG_IPV4 = 36,
+ /* Bits 37 and 38 are reserved for future use */
+ /* Bits 39, 40, and 42 are not supported on XL710 and X710 */
+ VIRTCHNL_HASH_FILTER_UNICAST_IPV6_UDP = 39,
+ VIRTCHNL_HASH_FILTER_MULTICAST_IPV6_UDP = 40,
+ VIRTCHNL_HASH_FILTER_IPV6_UDP = 41,
+ VIRTCHNL_HASH_FILTER_IPV6_TCP_SYN_NO_ACK = 42,
+ VIRTCHNL_HASH_FILTER_IPV6_TCP = 43,
+ VIRTCHNL_HASH_FILTER_IPV6_SCTP = 44,
+ VIRTCHNL_HASH_FILTER_IPV6_OTHER = 45,
+ VIRTCHNL_HASH_FILTER_FRAG_IPV6 = 46,
+ /* Bit 47 is reserved for future use */
+ VIRTCHNL_HASH_FILTER_FCOE_OX = 48,
+ VIRTCHNL_HASH_FILTER_FCOE_RX = 49,
+ VIRTCHNL_HASH_FILTER_FCOE_OTHER = 50,
+ /* Bits 51 through 62 are reserved for future use */
+ VIRTCHNL_HASH_FILTER_L2_PAYLOAD = 63,
+};
+
+#define VIRTCHNL_HASH_FILTER_INVALID (0)
+
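Since these are bit positions in a 64-bit hena field, a VF composes the mask with BIT_ULL; a minimal sketch:

	u64 hena = VIRTCHNL_HASH_FILTER_INVALID;

	hena |= BIT_ULL(VIRTCHNL_HASH_FILTER_IPV4_TCP);	/* bit 33 */
	hena |= BIT_ULL(VIRTCHNL_HASH_FILTER_IPV6_TCP);	/* bit 43 */
	/* check VIRTCHNL_OP_GET_RSS_HENA_CAPS first, then send the
	 * value with VIRTCHNL_OP_SET_RSS_HENA
	 */
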
/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
* VIRTCHNL_OP_SET_RSS_HENA
* VF sends these messages to get and set the hash filter enable bits for RSS.
@@ -1219,6 +1253,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
* traffic types that are hashed by the hardware.
*/
struct virtchnl_rss_hena {
+ /* see enum virtchnl_hash_filter */
u64 hena;
};
@@ -1378,13 +1413,6 @@ struct virtchnl_pf_event {
u8 link_status;
u8 pad[3];
} link_event_adv;
- struct {
- /* link_speed provided in Mbps */
- u32 link_speed;
- u16 vport_id;
- u8 link_status;
- u8 pad;
- } link_event_adv_vport;
} event_data;
s32 severity;
@@ -1410,6 +1438,7 @@ enum virtchnl_vfr_states {
};
#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
+#define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024
#define PROTO_HDR_SHIFT 5
#define PROTO_HDR_FIELD_START(proto_hdr_type) \
(proto_hdr_type << PROTO_HDR_SHIFT)
@@ -1581,6 +1610,10 @@ enum virtchnl_proto_hdr_field {
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN),
VIRTCHNL_PROTO_HDR_GTPU_UP_QFI =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP),
+ /* L2TPv2 */
+ VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV2),
+ VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID,
};
struct virtchnl_proto_hdr {
@@ -1601,13 +1634,26 @@ struct virtchnl_proto_hdrs {
u8 tunnel_level;
/**
* specify where protocol header start from.
+ * must be 0 when sending a raw packet request.
* 0 - from the outer layer
* 1 - from the first inner layer
* 2 - from the second inner layer
* ....
- **/
- int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */
- struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+ */
+ int count;
+ /**
+ * number of proto layers; must be < VIRTCHNL_MAX_NUM_PROTO_HDRS,
+ * and must be 0 for a raw packet request.
+ */
+ union {
+ struct virtchnl_proto_hdr
+ proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+ struct {
+ u16 pkt_len;
+ u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+ u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+ } raw;
+ };
};
VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
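A hedged sketch of a raw-packet request using the new union (pkt_bytes and pkt_len are hypothetical inputs):

	struct virtchnl_proto_hdrs hdrs = { 0 };

	hdrs.tunnel_level = 0;			/* required for raw requests */
	hdrs.count = 0;				/* selects the raw member */
	hdrs.raw.pkt_len = pkt_len;
	memcpy(hdrs.raw.spec, pkt_bytes, pkt_len);	/* bytes to match */
	memset(hdrs.raw.mask, 0xff, pkt_len);		/* exact match */
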
@@ -1796,18 +1842,28 @@ struct virtchnl_queue_tc_mapping {
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping);
-/* TX and RX queue types are valid in legacy as well as split queue models.
- * With Split Queue model, 2 additional types are introduced - TX_COMPLETION
- * and RX_BUFFER. In split queue model, RX corresponds to the queue where HW
- * posts completions.
- */
+/* VIRTCHNL_OP_CONFIG_QUEUE_BW */
+struct virtchnl_queue_bw {
+ u16 queue_id;
+ u8 tc;
+ u8 pad;
+ struct virtchnl_shaper_bw shaper;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);
+
+struct virtchnl_queues_bw_cfg {
+ u16 vsi_id;
+ u16 num_queues;
+ struct virtchnl_queue_bw cfg[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queues_bw_cfg);
+
+/* queue types */
enum virtchnl_queue_type {
VIRTCHNL_QUEUE_TYPE_TX = 0,
VIRTCHNL_QUEUE_TYPE_RX = 1,
- VIRTCHNL_QUEUE_TYPE_TX_COMPLETION = 2,
- VIRTCHNL_QUEUE_TYPE_RX_BUFFER = 3,
- VIRTCHNL_QUEUE_TYPE_CONFIG_TX = 4,
- VIRTCHNL_QUEUE_TYPE_CONFIG_RX = 5
};
/* structure to specify a chunk of contiguous queues */
@@ -1831,19 +1887,13 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_chunks);
/* VIRTCHNL_OP_ENABLE_QUEUES_V2
* VIRTCHNL_OP_DISABLE_QUEUES_V2
- * VIRTCHNL_OP_DEL_QUEUES
- *
- * If VIRTCHNL version was negotiated in VIRTCHNL_OP_VERSION as 2.0
- * then all of these ops are available.
*
- * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
- * then VIRTCHNL_OP_ENABLE_QUEUES_V2 and VIRTCHNL_OP_DISABLE_QUEUES_V2 are
- * available.
+ * These opcodes can be used if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in
+ * VIRTCHNL_OP_GET_VF_RESOURCES
*
- * PF sends these messages to enable, disable or delete queues specified in
- * chunks. PF sends virtchnl_del_ena_dis_queues struct to specify the queues
- * to be enabled/disabled/deleted. Also applicable to single queue RX or
- * TX. CP performs requested action and returns status.
+ * VF sends the virtchnl_del_ena_dis_queues struct to specify the queues to be
+ * enabled/disabled in chunks. Also applicable to single queue RX or
+ * TX. PF performs requested action and returns status.
*/
struct virtchnl_del_ena_dis_queues {
u16 vport_id;
@@ -1877,13 +1927,13 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector);
/* VIRTCHNL_OP_MAP_QUEUE_VECTOR
*
- * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
- * then only VIRTCHNL_OP_MAP_QUEUE_VECTOR is available.
+ * This opcode can be used only if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated
+ * in VIRTCHNL_OP_GET_VF_RESOURCES
*
- * PF sends this message to map or unmap queues to vectors and ITR index
- * registers. External data buffer contains virtchnl_queue_vector_maps structure
+ * VF sends this message to map queues to vectors and ITR index registers.
+ * External data buffer contains virtchnl_queue_vector_maps structure
* that contains num_qv_maps of virtchnl_queue_vector structures.
- * CP maps the requested queue vector maps after validating the queue and vector
+ * PF maps the requested queue vector maps after validating the queue and vector
* ids and returns a status code.
*/
struct virtchnl_queue_vector_maps {
@@ -1895,6 +1945,13 @@ struct virtchnl_queue_vector_maps {
VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps);
+struct virtchnl_quanta_cfg {
+ u16 quanta_size;
+ struct virtchnl_queue_chunk queue_select;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
+
/* Since VF messages are limited by u16 size, precalculate the maximum possible
* values of nested elements in virtchnl structures that virtual channel can
* possibly handle in a single message.
@@ -2130,6 +2187,31 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
sizeof(q_tc->tc[0]);
}
break;
+ case VIRTCHNL_OP_CONFIG_QUEUE_BW:
+ valid_len = sizeof(struct virtchnl_queues_bw_cfg);
+ if (msglen >= valid_len) {
+ struct virtchnl_queues_bw_cfg *q_bw =
+ (struct virtchnl_queues_bw_cfg *)msg;
+ if (q_bw->num_queues == 0) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += (q_bw->num_queues - 1) *
+ sizeof(q_bw->cfg[0]);
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_QUANTA:
+ valid_len = sizeof(struct virtchnl_quanta_cfg);
+ if (msglen >= valid_len) {
+ struct virtchnl_quanta_cfg *q_quanta =
+ (struct virtchnl_quanta_cfg *)msg;
+ if (q_quanta->quanta_size == 0 ||
+ q_quanta->queue_select.num_queues == 0) {
+ err_msg_format = true;
+ break;
+ }
+ }
+ break;
case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
break;
case VIRTCHNL_OP_ADD_VLAN_V2:
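The two new validation cases above size variable-length messages from their one-element base structs. A worked example for VIRTCHNL_OP_CONFIG_QUEUE_BW, using the struct sizes asserted by VIRTCHNL_CHECK_STRUCT_LEN:

	/* sizeof(struct virtchnl_queues_bw_cfg) already includes cfg[0],
	 * so a message for num_queues == 4 needs three extra entries:
	 * valid_len = 16 + (4 - 1) * sizeof(struct virtchnl_queue_bw)
	 *           = 16 + 3 * 12 = 52 bytes
	 */
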
diff --git a/sys/dev/ice/virtchnl_inline_ipsec.h b/sys/dev/ice/virtchnl_inline_ipsec.h
index a8b1c7d12d97..c02b383ce040 100644
--- a/sys/dev/ice/virtchnl_inline_ipsec.h
+++ b/sys/dev/ice/virtchnl_inline_ipsec.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -478,6 +478,15 @@ struct virtchnl_ipsec_sp_cfg {
/* Set TC (congestion domain) if true. For future use. */
u8 set_tc;
+
+ /* 0 for NAT-T unsupported, 1 for NAT-T supported */
+ u8 is_udp;
+
+ /* reserved */
+ u8 reserved;
+
+ /* NAT-T UDP port number. Only valid when NAT-T is supported */
+ u16 udp_port;
};
#pragma pack(1)
diff --git a/sys/dev/ice/virtchnl_lan_desc.h b/sys/dev/ice/virtchnl_lan_desc.h
index 2dd4106de721..e69de29bb2d1 100644
--- a/sys/dev/ice/virtchnl_lan_desc.h
+++ b/sys/dev/ice/virtchnl_lan_desc.h
@@ -1,550 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-/*$FreeBSD$*/
-
-#ifndef _VIRTCHNL_LAN_DESC_H_
-#define _VIRTCHNL_LAN_DESC_H_
-
-/* Rx */
-/* For splitq virtchnl_rx_flex_desc_adv desc members */
-#define VIRTCHNL_RX_FLEX_DESC_ADV_RXDID_S 0
-#define VIRTCHNL_RX_FLEX_DESC_ADV_RXDID_M \
- MAKEMASK(0xFUL, VIRTCHNL_RX_FLEX_DESC_ADV_RXDID_S)
-#define VIRTCHNL_RX_FLEX_DESC_ADV_PTYPE_S 0
-#define VIRTCHNL_RX_FLEX_DESC_ADV_PTYPE_M \
- MAKEMASK(0x3FFUL, VIRTCHNL_RX_FLEX_DESC_ADV_PTYPE_S)
-#define VIRTCHNL_RX_FLEX_DESC_ADV_UMBCAST_S 10
-#define VIRTCHNL_RX_FLEX_DESC_ADV_UMBCAST_M \
- MAKEMASK(0x3UL, VIRTCHNL_RX_FLEX_DESC_ADV_UMBCAST_S)
-#define VIRTCHNL_RX_FLEX_DESC_ADV_FF0_S 12
-#define VIRTCHNL_RX_FLEX_DESC_ADV_FF0_M \
- MAKEMASK(0xFUL, VIRTCHNL_RX_FLEX_DESC_ADV_FF0_S)
-#define VIRTCHNL_RX_FLEX_DESC_ADV_LEN_PBUF_S 0
-#define VIRTCHNL_RX_FLEX_DESC_ADV_LEN_PBUF_M \
- MAKEMASK(0x3FFFUL, VIRTCHNL_RX_FLEX_DESC_ADV_LEN_PBUF_S)
-#define VIRTCHNL_RX_FLEX_DESC_ADV_GEN_S 14
-#define VIRTCHNL_RX_FLEX_DESC_ADV_GEN_M \
- BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_GEN_S)
-#define VIRTCHNL_RX_FLEX_DESC_ADV_BUFQ_ID_S 15
-#define VIRTCHNL_RX_FLEX_DESC_ADV_BUFQ_ID_M \
- BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_BUFQ_ID_S)
-#define VIRTCHNL_RX_FLEX_DESC_ADV_LEN_HDR_S 0
-#define VIRTCHNL_RX_FLEX_DESC_ADV_LEN_HDR_M \
- MAKEMASK(0x3FFUL, VIRTCHNL_RX_FLEX_DESC_ADV_LEN_HDR_S)
-#define VIRTCHNL_RX_FLEX_DESC_ADV_RSC_S 10
-#define VIRTCHNL_RX_FLEX_DESC_ADV_RSC_M \
- BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_RSC_S)
-#define VIRTCHNL_RX_FLEX_DESC_ADV_SPH_S 11
-#define VIRTCHNL_RX_FLEX_DESC_ADV_SPH_M \
- BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_SPH_S)
-#define VIRTCHNL_RX_FLEX_DESC_ADV_MISS_S 12
-#define VIRTCHNL_RX_FLEX_DESC_ADV_MISS_M \
- BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_MISS_S)
-#define VIRTCHNL_RX_FLEX_DESC_ADV_FF1_S 13
-#define VIRTCHNL_RX_FLEX_DESC_ADV_FF1_M \
- MAKEMASK(0x7UL, VIRTCHNL_RX_FLEX_DESC_ADV_FF1_M)
-
-enum virtchnl_rx_flex_desc_adv_status_error_0_qw1_bits {
- /* Note: These are predefined bit offsets */
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_DD_S = 0,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_EOF_S,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_HBO_S,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S,
-};
-
-enum virtchnl_rx_flex_desc_adv_status_error_0_qw0_bits {
- /* Note: These are predefined bit offsets */
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_LPBK_S = 0,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_S,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_RXE_S,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_CRCP_S,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_L2TAG1P_S,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XTRMD0_VALID_S,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XTRMD1_VALID_S,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_LAST /* this entry must be last!!! */
-};
-
-enum virtchnl_rx_flex_desc_adv_status_error_1_bits {
- /* Note: These are predefined bit offsets */
- /* 2 bits */
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_RSVD_S = 0,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_ATRAEFAIL_S = 2,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_L2TAG2P_S = 3,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_XTRMD2_VALID_S = 4,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_XTRMD3_VALID_S = 5,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_XTRMD4_VALID_S = 6,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_XTRMD5_VALID_S = 7,
- VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_LAST /* this entry must be last!!! */
-};
-
-/* for singleq (flex) virtchnl_rx_flex_desc fields */
-/* for virtchnl_rx_flex_desc.ptype_flex_flags0 member */
-#define VIRTCHNL_RX_FLEX_DESC_PTYPE_S 0
-#define VIRTCHNL_RX_FLEX_DESC_PTYPE_M \
- MAKEMASK(0x3FFUL, VIRTCHNL_RX_FLEX_DESC_PTYPE_S) /* 10-bits */
-
-/* for virtchnl_rx_flex_desc.pkt_length member */
-#define VIRTCHNL_RX_FLEX_DESC_PKT_LEN_S 0
-#define VIRTCHNL_RX_FLEX_DESC_PKT_LEN_M \
- MAKEMASK(0x3FFFUL, VIRTCHNL_RX_FLEX_DESC_PKT_LEN_S) /* 14-bits */
-
-enum virtchnl_rx_flex_desc_status_error_0_bits {
- /* Note: These are predefined bit offsets */
- VIRTCHNL_RX_FLEX_DESC_STATUS0_DD_S = 0,
- VIRTCHNL_RX_FLEX_DESC_STATUS0_EOF_S,
- VIRTCHNL_RX_FLEX_DESC_STATUS0_HBO_S,
- VIRTCHNL_RX_FLEX_DESC_STATUS0_L3L4P_S,
- VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
- VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
- VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
- VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
- VIRTCHNL_RX_FLEX_DESC_STATUS0_LPBK_S,
- VIRTCHNL_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
- VIRTCHNL_RX_FLEX_DESC_STATUS0_RXE_S,
- VIRTCHNL_RX_FLEX_DESC_STATUS0_CRCP_S,
- VIRTCHNL_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
- VIRTCHNL_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
- VIRTCHNL_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
- VIRTCHNL_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
- VIRTCHNL_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
-};
-
-enum virtchnl_rx_flex_desc_status_error_1_bits {
- /* Note: These are predefined bit offsets */
- VIRTCHNL_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
- VIRTCHNL_RX_FLEX_DESC_STATUS1_NAT_S = 4,
- VIRTCHNL_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
- /* [10:6] reserved */
- VIRTCHNL_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
- VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
- VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13,
- VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14,
- VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15,
- VIRTCHNL_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
-};
-
-/* For singleq (non flex) virtchnl_singleq_base_rx_desc legacy desc members */
-#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_SPH_S 63
-#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_SPH_M \
- BIT_ULL(VIRTCHNL_RX_BASE_DESC_QW1_LEN_SPH_S)
-#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_HBUF_S 52
-#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_HBUF_M \
- MAKEMASK(0x7FFULL, VIRTCHNL_RX_BASE_DESC_QW1_LEN_HBUF_S)
-#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_PBUF_S 38
-#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_PBUF_M \
- MAKEMASK(0x3FFFULL, VIRTCHNL_RX_BASE_DESC_QW1_LEN_PBUF_S)
-#define VIRTCHNL_RX_BASE_DESC_QW1_PTYPE_S 30
-#define VIRTCHNL_RX_BASE_DESC_QW1_PTYPE_M \
- MAKEMASK(0xFFULL, VIRTCHNL_RX_BASE_DESC_QW1_PTYPE_S)
-#define VIRTCHNL_RX_BASE_DESC_QW1_ERROR_S 19
-#define VIRTCHNL_RX_BASE_DESC_QW1_ERROR_M \
- MAKEMASK(0xFFUL, VIRTCHNL_RX_BASE_DESC_QW1_ERROR_S)
-#define VIRTCHNL_RX_BASE_DESC_QW1_STATUS_S 0
-#define VIRTCHNL_RX_BASE_DESC_QW1_STATUS_M \
- MAKEMASK(0x7FFFFUL, VIRTCHNL_RX_BASE_DESC_QW1_STATUS_S)
-
-enum virtchnl_rx_base_desc_status_bits {
- /* Note: These are predefined bit offsets */
- VIRTCHNL_RX_BASE_DESC_STATUS_DD_S = 0,
- VIRTCHNL_RX_BASE_DESC_STATUS_EOF_S = 1,
- VIRTCHNL_RX_BASE_DESC_STATUS_L2TAG1P_S = 2,
- VIRTCHNL_RX_BASE_DESC_STATUS_L3L4P_S = 3,
- VIRTCHNL_RX_BASE_DESC_STATUS_CRCP_S = 4,
- VIRTCHNL_RX_BASE_DESC_STATUS_RSVD_S = 5, /* 3 BITS */
- VIRTCHNL_RX_BASE_DESC_STATUS_EXT_UDP_0_S = 8,
- VIRTCHNL_RX_BASE_DESC_STATUS_UMBCAST_S = 9, /* 2 BITS */
- VIRTCHNL_RX_BASE_DESC_STATUS_FLM_S = 11,
- VIRTCHNL_RX_BASE_DESC_STATUS_FLTSTAT_S = 12, /* 2 BITS */
- VIRTCHNL_RX_BASE_DESC_STATUS_LPBK_S = 14,
- VIRTCHNL_RX_BASE_DESC_STATUS_IPV6EXADD_S = 15,
- VIRTCHNL_RX_BASE_DESC_STATUS_RSVD1_S = 16, /* 2 BITS */
- VIRTCHNL_RX_BASE_DESC_STATUS_INT_UDP_0_S = 18,
- VIRTCHNL_RX_BASE_DESC_STATUS_LAST /* this entry must be last!!! */
-};
-
-enum virtchnl_rx_base_desc_ext_status_bits {
- /* Note: These are predefined bit offsets */
- VIRTCHNL_RX_BASE_DESC_EXT_STATUS_L2TAG2P_S = 0
-};
-
-enum virtchnl_rx_base_desc_error_bits {
- /* Note: These are predefined bit offsets */
- VIRTCHNL_RX_BASE_DESC_ERROR_RXE_S = 0,
- VIRTCHNL_RX_BASE_DESC_ERROR_ATRAEFAIL_S = 1,
- VIRTCHNL_RX_BASE_DESC_ERROR_HBO_S = 2,
- VIRTCHNL_RX_BASE_DESC_ERROR_L3L4E_S = 3, /* 3 BITS */
- VIRTCHNL_RX_BASE_DESC_ERROR_IPE_S = 3,
- VIRTCHNL_RX_BASE_DESC_ERROR_L4E_S = 4,
- VIRTCHNL_RX_BASE_DESC_ERROR_EIPE_S = 5,
- VIRTCHNL_RX_BASE_DESC_ERROR_OVERSIZE_S = 6,
- VIRTCHNL_RX_BASE_DESC_ERROR_PPRS_S = 7
-};
-
-enum virtchnl_rx_base_desc_fltstat_values {
- VIRTCHNL_RX_BASE_DESC_FLTSTAT_NO_DATA = 0,
- VIRTCHNL_RX_BASE_DESC_FLTSTAT_FD_ID = 1,
- VIRTCHNL_RX_BASE_DESC_FLTSTAT_RSV = 2,
- VIRTCHNL_RX_BASE_DESC_FLTSTAT_RSS_HASH = 3,
-};
-
-/* Receive Descriptors */
-/* splitq buf
- | 16| 0|
- ----------------------------------------------------------------
- | RSV | Buffer ID |
- ----------------------------------------------------------------
- | Rx packet buffer adresss |
- ----------------------------------------------------------------
- | Rx header buffer adresss |
- ----------------------------------------------------------------
- | RSV |
- ----------------------------------------------------------------
- | 0|
- */
-struct virtchnl_splitq_rx_buf_desc {
- struct {
- __le16 buf_id; /* Buffer Identifier */
- __le16 rsvd0;
- __le32 rsvd1;
- } qword0;
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- __le64 rsvd2;
-}; /* read used with buffer queues*/
-
-/* singleq buf
- | 0|
- ----------------------------------------------------------------
- | Rx packet buffer adresss |
- ----------------------------------------------------------------
- | Rx header buffer adresss |
- ----------------------------------------------------------------
- | RSV |
- ----------------------------------------------------------------
- | RSV |
- ----------------------------------------------------------------
- | 0|
- */
-struct virtchnl_singleq_rx_buf_desc {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- __le64 rsvd1;
- __le64 rsvd2;
-}; /* read used with buffer queues*/
-
-union virtchnl_rx_buf_desc {
- struct virtchnl_singleq_rx_buf_desc read;
- struct virtchnl_splitq_rx_buf_desc split_rd;
-};
-
-/* (0x00) singleq wb(compl) */
-struct virtchnl_singleq_base_rx_desc {
- struct {
- struct {
- __le16 mirroring_status;
- __le16 l2tag1;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- __le32 fd_id; /* Flow Director filter id */
- } hi_dword;
- } qword0;
- struct {
- /* status/error/PTYPE/length */
- __le64 status_error_ptype_len;
- } qword1;
- struct {
- __le16 ext_status; /* extended status */
- __le16 rsvd;
- __le16 l2tag2_1;
- __le16 l2tag2_2;
- } qword2;
- struct {
- __le32 reserved;
- __le32 fd_id;
- } qword3;
-}; /* writeback */
-
-/* (0x01) singleq flex compl */
-struct virtchnl_rx_flex_desc {
- /* Qword 0 */
- u8 rxdid; /* descriptor builder profile id */
- u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
- __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
- __le16 pkt_len; /* [15:14] are reserved */
- __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
- /* sph=[11:11] */
- /* ff1/ext=[15:12] */
-
- /* Qword 1 */
- __le16 status_error0;
- __le16 l2tag1;
- __le16 flex_meta0;
- __le16 flex_meta1;
-
- /* Qword 2 */
- __le16 status_error1;
- u8 flex_flags2;
- u8 time_stamp_low;
- __le16 l2tag2_1st;
- __le16 l2tag2_2nd;
-
- /* Qword 3 */
- __le16 flex_meta2;
- __le16 flex_meta3;
- union {
- struct {
- __le16 flex_meta4;
- __le16 flex_meta5;
- } flex;
- __le32 ts_high;
- } flex_ts;
-};
-
-/* (0x02) */
-struct virtchnl_rx_flex_desc_nic {
- /* Qword 0 */
- u8 rxdid;
- u8 mir_id_umb_cast;
- __le16 ptype_flex_flags0;
- __le16 pkt_len;
- __le16 hdr_len_sph_flex_flags1;
-
- /* Qword 1 */
- __le16 status_error0;
- __le16 l2tag1;
- __le32 rss_hash;
-
- /* Qword 2 */
- __le16 status_error1;
- u8 flexi_flags2;
- u8 ts_low;
- __le16 l2tag2_1st;
- __le16 l2tag2_2nd;
-
- /* Qword 3 */
- __le32 flow_id;
- union {
- struct {
- __le16 rsvd;
- __le16 flow_id_ipv6;
- } flex;
- __le32 ts_high;
- } flex_ts;
-};
-
-/* Rx Flex Descriptor Switch Profile
- * RxDID Profile Id 3
- * Flex-field 0: Source Vsi
- */
-struct virtchnl_rx_flex_desc_sw {
- /* Qword 0 */
- u8 rxdid;
- u8 mir_id_umb_cast;
- __le16 ptype_flex_flags0;
- __le16 pkt_len;
- __le16 hdr_len_sph_flex_flags1;
-
- /* Qword 1 */
- __le16 status_error0;
- __le16 l2tag1;
- __le16 src_vsi; /* [10:15] are reserved */
- __le16 flex_md1_rsvd;
-
- /* Qword 2 */
- __le16 status_error1;
- u8 flex_flags2;
- u8 ts_low;
- __le16 l2tag2_1st;
- __le16 l2tag2_2nd;
-
- /* Qword 3 */
- __le32 rsvd; /* flex words 2-3 are reserved */
- __le32 ts_high;
-};
-
-/* Rx Flex Descriptor NIC Profile
- * RxDID Profile Id 6
- * Flex-field 0: RSS hash lower 16-bits
- * Flex-field 1: RSS hash upper 16-bits
- * Flex-field 2: Flow Id lower 16-bits
- * Flex-field 3: Source Vsi
- * Flex-field 4: reserved, Vlan id taken from L2Tag
- */
-struct virtchnl_rx_flex_desc_nic_2 {
- /* Qword 0 */
- u8 rxdid;
- u8 mir_id_umb_cast;
- __le16 ptype_flex_flags0;
- __le16 pkt_len;
- __le16 hdr_len_sph_flex_flags1;
-
- /* Qword 1 */
- __le16 status_error0;
- __le16 l2tag1;
- __le32 rss_hash;
-
- /* Qword 2 */
- __le16 status_error1;
- u8 flexi_flags2;
- u8 ts_low;
- __le16 l2tag2_1st;
- __le16 l2tag2_2nd;
-
- /* Qword 3 */
- __le16 flow_id;
- __le16 src_vsi;
- union {
- struct {
- __le16 rsvd;
- __le16 flow_id_ipv6;
- } flex;
- __le32 ts_high;
- } flex_ts;
-};
-
-/* Rx Flex Descriptor Advanced (Split Queue Model)
- * RxDID Profile Id 7
- */
-struct virtchnl_rx_flex_desc_adv {
- /* Qword 0 */
- u8 rxdid_ucast; /* profile_id=[3:0] */
- /* rsvd=[5:4] */
- /* ucast=[7:6] */
- u8 status_err0_qw0;
- __le16 ptype_err_fflags0; /* ptype=[9:0] */
- /* ip_hdr_err=[10:10] */
- /* udp_len_err=[11:11] */
- /* ff0=[15:12] */
- __le16 pktlen_gen_bufq_id; /* plen=[13:0] */
- /* gen=[14:14] only in splitq */
- /* bufq_id=[15:15] only in splitq */
- __le16 hdrlen_flags; /* header=[9:0] */
- /* rsc=[10:10] only in splitq */
- /* sph=[11:11] only in splitq */
- /* ext_udp_0=[12:12] */
- /* int_udp_0=[13:13] */
- /* trunc_mirr=[14:14] */
- /* miss_prepend=[15:15] */
- /* Qword 1 */
- u8 status_err0_qw1;
- u8 status_err1;
- u8 fflags1;
- u8 ts_low;
- __le16 fmd0;
- __le16 fmd1;
- /* Qword 2 */
- __le16 fmd2;
- u8 fflags2;
- u8 hash3;
- __le16 fmd3;
- __le16 fmd4;
- /* Qword 3 */
- __le16 fmd5;
- __le16 fmd6;
- __le16 fmd7_0;
- __le16 fmd7_1;
-}; /* writeback */
-
-/* Rx Flex Descriptor Advanced (Split Queue Model) NIC Profile
- * RxDID Profile Id 8
- * Flex-field 0: BufferID
- * Flex-field 1: Raw checksum/L2TAG1/RSC Seg Len (determined by HW)
- * Flex-field 2: Hash[15:0]
- * Flex-flags 2: Hash[23:16]
- * Flex-field 3: L2TAG2
- * Flex-field 5: L2TAG1
- * Flex-field 7: Timestamp (upper 32 bits)
- */
-struct virtchnl_rx_flex_desc_adv_nic_3 {
- /* Qword 0 */
- u8 rxdid_ucast; /* profile_id=[3:0] */
- /* rsvd=[5:4] */
- /* ucast=[7:6] */
- u8 status_err0_qw0;
- __le16 ptype_err_fflags0; /* ptype=[9:0] */
- /* ip_hdr_err=[10:10] */
- /* udp_len_err=[11:11] */
- /* ff0=[15:12] */
- __le16 pktlen_gen_bufq_id; /* plen=[13:0] */
- /* gen=[14:14] only in splitq */
- /* bufq_id=[15:15] only in splitq */
- __le16 hdrlen_flags; /* header=[9:0] */
- /* rsc=[10:10] only in splitq */
- /* sph=[11:11] only in splitq */
- /* ext_udp_0=[12:12] */
- /* int_udp_0=[13:13] */
- /* trunc_mirr=[14:14] */
- /* miss_prepend=[15:15] */
- /* Qword 1 */
- u8 status_err0_qw1;
- u8 status_err1;
- u8 fflags1;
- u8 ts_low;
- __le16 buf_id; /* only in splitq */
- union {
- __le16 raw_cs;
- __le16 l2tag1;
- __le16 rscseglen;
- } misc;
- /* Qword 2 */
- __le16 hash1;
- union {
- u8 fflags2;
- u8 mirrorid;
- u8 hash2;
- } ff2_mirrid_hash2;
- u8 hash3;
- __le16 l2tag2;
- __le16 fmd4;
- /* Qword 3 */
- __le16 l2tag1;
- __le16 fmd6;
- __le32 ts_high;
-}; /* writeback */
-
-union virtchnl_rx_desc {
- struct virtchnl_singleq_rx_buf_desc read;
- struct virtchnl_singleq_base_rx_desc base_wb;
- struct virtchnl_rx_flex_desc flex_wb;
- struct virtchnl_rx_flex_desc_nic flex_nic_wb;
- struct virtchnl_rx_flex_desc_sw flex_sw_wb;
- struct virtchnl_rx_flex_desc_nic_2 flex_nic_2_wb;
- struct virtchnl_rx_flex_desc_adv flex_adv_wb;
- struct virtchnl_rx_flex_desc_adv_nic_3 flex_adv_nic_3_wb;
-};
-
-#endif /* _VIRTCHNL_LAN_DESC_H_ */
diff --git a/sys/modules/ice/Makefile b/sys/modules/ice/Makefile
index 25774585c180..e4573c4122d8 100644
--- a/sys/modules/ice/Makefile
+++ b/sys/modules/ice/Makefile
@@ -14,7 +14,7 @@ SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_iflib.h
# Core source
SRCS += ice_lib.c ice_osdep.c ice_resmgr.c ice_strings.c
SRCS += ice_iflib_recovery_txrx.c ice_iflib_txrx.c if_ice_iflib.c
-SRCS += ice_fw_logging.c
+SRCS += ice_fw_logging.c ice_ddp_common.c
# RDMA Client interface
# TODO: Is this the right way to compile this?