author    Eric Joyner <erj@FreeBSD.org>    2024-02-13 06:26:26 +0000
committer Eric Joyner <erj@FreeBSD.org>    2024-04-24 22:14:48 +0000
commit    f6de0a7c94e9c9e71ef0532e8c06b565abdb7113
tree      cdc8d30d1119313859255ca1d071885b6caa24a1
parent    988dd5a1f3613ca0ea6ed2cdb3dbf8aa9e640048
ice(4): Update to 1.39.13-k

- Add mirror interface functionality
- Remove unused virtchnl headers

Signed-off-by:	Eric Joyner <erj@FreeBSD.org>
Sponsored by:	Intel Corporation
Tested by:	jeffrey.e.pieper@intel.com
Differential Revision:	https://reviews.freebsd.org/D44004

(cherry picked from commit 9e54973fc33aa44b77d1c851cb36fcd82dc44cda)
 sys/dev/ice/ice_adminq_cmd.h        |   21
 sys/dev/ice/ice_bitops.h            |    4
 sys/dev/ice/ice_common.c            |   43
 sys/dev/ice/ice_common.h            |    8
 sys/dev/ice/ice_controlq.c          |   10
 sys/dev/ice/ice_controlq.h          |    3
 sys/dev/ice/ice_drv_info.h          |   10
 sys/dev/ice/ice_features.h          |    1
 sys/dev/ice/ice_flex_pipe.c         |    1
 sys/dev/ice/ice_flow.c              |   71
 sys/dev/ice/ice_flow.h              |    8
 sys/dev/ice/ice_hw_autogen.h        |    1
 sys/dev/ice/ice_iflib.h             |   32
 sys/dev/ice/ice_iflib_txrx.c        |  352
 sys/dev/ice/ice_lib.c               |  360
 sys/dev/ice/ice_lib.h               |   30
 sys/dev/ice/ice_nvm.c               |   33
 sys/dev/ice/ice_nvm.h               |    1
 sys/dev/ice/ice_protocol_type.h     |    1
 sys/dev/ice/ice_sched.c             |    7
 sys/dev/ice/ice_strings.c           |    6
 sys/dev/ice/ice_switch.c            |   31
 sys/dev/ice/ice_switch.h            |    9
 sys/dev/ice/ice_type.h              |   15
 sys/dev/ice/if_ice_iflib.c          | 1266
 sys/dev/ice/virtchnl_inline_ipsec.h |  594
 sys/dev/ice/virtchnl_lan_desc.h     |    0
27 files changed, 2134 insertions, 784 deletions
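
The headline feature is the mirror interface: written to and then read back, a new sysctl makes the driver attach a second ifnet, backed by its own VSI, that receives mirrored copies of the PF VSI's ingress and egress traffic. A minimal userland sketch of that two-step handler follows, with the caveat that only the leaf name appears in this diff, so the dev.ice.<unit> OID prefix is an assumption:

/* Sketch: create the ice(4) mirror interface from userland.  The
 * dev.ice.<unit> OID prefix is assumed; the diff below only shows the
 * "create_mirror_interface" leaf being registered.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <string.h>

static int
ice_create_mirror(int unit)
{
	char oid[64], out[128];
	size_t outlen = sizeof(out);

	snprintf(oid, sizeof(oid), "dev.ice.%d.create_mirror_interface",
	    unit);

	/* Write "1" to arm the handler... */
	if (sysctlbyname(oid, NULL, NULL, "1", strlen("1") + 1) != 0)
		return (-1);
	/* ...then read back to trigger creation; the handler reports
	 * "Interface attached" on success. */
	if (sysctlbyname(oid, out, &outlen, NULL, 0) != 0)
		return (-1);
	printf("%s\n", out);
	return (0);
}

Tearing the interface down works the same way against the companion destroy_mirror_interface sysctl.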
diff --git a/sys/dev/ice/ice_adminq_cmd.h b/sys/dev/ice/ice_adminq_cmd.h
index 31e9583494fc..70b56144faf2 100644
--- a/sys/dev/ice/ice_adminq_cmd.h
+++ b/sys/dev/ice/ice_adminq_cmd.h
@@ -152,6 +152,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_WOL_PROXY 0x0008
#define ICE_AQC_CAPS_SRIOV 0x0012
#define ICE_AQC_CAPS_VF 0x0013
+#define ICE_AQC_CAPS_VMDQ 0x0014
#define ICE_AQC_CAPS_802_1QBG 0x0015
#define ICE_AQC_CAPS_802_1BR 0x0016
#define ICE_AQC_CAPS_VSI 0x0017
@@ -184,6 +185,8 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_DYN_FLATTENING 0x008A
#define ICE_AQC_CAPS_OROM_RECOVERY_UPDATE 0x0090
#define ICE_AQC_CAPS_ROCEV2_LAG 0x0092
+#define ICE_AQC_BIT_ROCEV2_LAG 0x01
+#define ICE_AQC_BIT_SRIOV_LAG 0x02
u8 major_ver;
u8 minor_ver;
@@ -358,6 +361,8 @@ struct ice_aqc_set_port_params {
#define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7)
#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12)
#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13)
+#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED BIT(14)
+#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL BIT(15)
#define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00
@@ -2198,6 +2203,14 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_MINSREV_MOD_ID 0x130
#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B
+#define ICE_AQC_NVM_CMPO_MOD_ID 0x153
+
+/* Cage Max Power override NVM module */
+struct ice_aqc_nvm_cmpo {
+ __le16 length;
+#define ICE_AQC_NVM_CMPO_ENABLE BIT(8)
+ __le16 cages_cfg[8];
+};
/* Used for reading and writing MinSRev using 0x0701 and 0x0703. Note that the
* type field is excluded from the section when reading and writing from
@@ -2509,11 +2522,13 @@ enum ice_lut_type {
ICE_LUT_VSI = 0,
ICE_LUT_PF = 1,
ICE_LUT_GLOBAL = 2,
- ICE_LUT_TYPE_MASK = 3
+ ICE_LUT_TYPE_MASK = 3,
+ ICE_LUT_PF_SMALL = 5, /* yields ICE_LUT_PF when &= ICE_LUT_TYPE_MASK */
};
enum ice_lut_size {
ICE_LUT_VSI_SIZE = 64,
+ ICE_LUT_PF_SMALL_SIZE = 128,
ICE_LUT_GLOBAL_SIZE = 512,
ICE_LUT_PF_SIZE = 2048,
};
@@ -2796,7 +2811,7 @@ struct ice_aqc_event_lan_overflow {
/* Debug Dump Internal Data (indirect 0xFF08) */
struct ice_aqc_debug_dump_internals {
- u8 cluster_id;
+ __le16 cluster_id; /* Expresses next cluster ID in response */
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW 0
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED 2
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES 3
@@ -2809,7 +2824,7 @@ struct ice_aqc_debug_dump_internals {
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P 8
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG 9
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE 21
- u8 reserved;
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_MNG_TRANSACTIONS 22
__le16 table_id; /* Used only for non-memory clusters */
__le32 idx; /* In table entries for tables, in bytes for memory */
__le32 addr_high;
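
One detail worth calling out: ICE_LUT_PF_SMALL is deliberately chosen so that masking with ICE_LUT_TYPE_MASK folds it back to ICE_LUT_PF, letting the driver select the 128-entry LUT by size while the admin queue still sees an ordinary PF LUT type; the updated __ice_aq_get_set_rss_lut() in ice_common.c relies on exactly this. A standalone illustration of the invariant, using only the enum values above:

#include <assert.h>

/* ICE_LUT_PF_SMALL (5) is 0b101 and ICE_LUT_TYPE_MASK (3) is 0b011, so
 * the masked value is 0b001 == ICE_LUT_PF.
 */
enum ice_lut_type {
	ICE_LUT_VSI = 0,
	ICE_LUT_PF = 1,
	ICE_LUT_GLOBAL = 2,
	ICE_LUT_TYPE_MASK = 3,
	ICE_LUT_PF_SMALL = 5,
};

int
main(void)
{
	assert((ICE_LUT_PF_SMALL & ICE_LUT_TYPE_MASK) == ICE_LUT_PF);
	return (0);
}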
diff --git a/sys/dev/ice/ice_bitops.h b/sys/dev/ice/ice_bitops.h
index c7bf3384675e..499ee41228c3 100644
--- a/sys/dev/ice/ice_bitops.h
+++ b/sys/dev/ice/ice_bitops.h
@@ -402,7 +402,7 @@ static inline bool ice_is_any_bit_set(ice_bitmap_t *bitmap, u16 size)
}
/**
- * ice_cp_bitmap - copy bitmaps.
+ * ice_cp_bitmap - copy bitmaps
* @dst: bitmap destination
* @src: bitmap to copy from
* @size: Size of the bitmaps in bits
@@ -460,7 +460,7 @@ ice_bitmap_hweight(ice_bitmap_t *bm, u16 size)
}
/**
- * ice_cmp_bitmap - compares two bitmaps.
+ * ice_cmp_bitmap - compares two bitmaps
* @bmp1: the bitmap to compare
* @bmp2: the bitmap to compare with bmp1
* @size: Size of the bitmaps in bits
diff --git a/sys/dev/ice/ice_common.c b/sys/dev/ice/ice_common.c
index 73249cb7add4..ef487bcfd0f4 100644
--- a/sys/dev/ice/ice_common.c
+++ b/sys/dev/ice/ice_common.c
@@ -2319,6 +2319,10 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
caps->sr_iov_1_1);
break;
+ case ICE_AQC_CAPS_VMDQ:
+ caps->vmdq = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT, "%s: vmdq = %d\n", prefix, caps->vmdq);
+ break;
case ICE_AQC_CAPS_802_1QBG:
caps->evb_802_1_qbg = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number);
@@ -2404,7 +2408,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %d\n", prefix, caps->iwarp);
break;
case ICE_AQC_CAPS_ROCEV2_LAG:
- caps->roce_lag = (number == 1);
+ caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %d\n",
prefix, caps->roce_lag);
break;
@@ -2726,6 +2730,10 @@ ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
dev_p->nac_topo.mode = LE32_TO_CPU(cap->number);
dev_p->nac_topo.id = LE32_TO_CPU(cap->phys_id) & ICE_NAC_TOPO_ID_M;
+ ice_info(hw, "PF is configured in %s mode with IP instance ID %d\n",
+ (dev_p->nac_topo.mode == 0) ? "primary" : "secondary",
+ dev_p->nac_topo.id);
+
ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
!!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
@@ -3060,7 +3068,7 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
}
/**
- * ice_aq_set_port_params - set physical port parameters.
+ * ice_aq_set_port_params - set physical port parameters
* @pi: pointer to the port info struct
* @bad_frame_vsi: defines the VSI to which bad frames are forwarded
* @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI
@@ -4058,6 +4066,8 @@ static u16 ice_lut_type_to_size(u16 lut_type)
return ICE_LUT_GLOBAL_SIZE;
case ICE_LUT_PF:
return ICE_LUT_PF_SIZE;
+ case ICE_LUT_PF_SMALL:
+ return ICE_LUT_PF_SMALL_SIZE;
default:
return 0;
}
@@ -4089,6 +4099,8 @@ int ice_lut_size_to_type(int lut_size)
return ICE_LUT_GLOBAL;
case ICE_LUT_PF_SIZE:
return ICE_LUT_PF;
+ case ICE_LUT_PF_SMALL_SIZE:
+ return ICE_LUT_PF_SMALL;
default:
return -1;
}
@@ -4116,8 +4128,8 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
vsi_handle = params->vsi_handle;
lut = params->lut;
- lut_type = params->lut_type;
- lut_size = ice_lut_type_to_size(lut_type);
+ lut_size = ice_lut_type_to_size(params->lut_type);
+ lut_type = params->lut_type & ICE_LUT_TYPE_MASK;
cmd_resp = &desc.params.get_set_rss_lut;
if (lut_type == ICE_LUT_GLOBAL)
glob_lut_idx = params->global_lut_id;
@@ -4773,6 +4785,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
* @buf: dump buffer
* @buf_size: dump buffer size
* @ret_buf_size: return buffer size (returned by FW)
+ * @ret_next_cluster: next cluster to read (returned by FW)
* @ret_next_table: next block to read (returned by FW)
* @ret_next_index: next index to read (returned by FW)
* @cd: pointer to command details structure
@@ -4780,10 +4793,10 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
* Get internal FW/HW data (0xFF08) for debug purposes.
*/
enum ice_status
-ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
+ice_aq_get_internal_data(struct ice_hw *hw, u16 cluster_id, u16 table_id,
u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
- u16 *ret_next_table, u32 *ret_next_index,
- struct ice_sq_cd *cd)
+ u16 *ret_next_cluster, u16 *ret_next_table,
+ u32 *ret_next_index, struct ice_sq_cd *cd)
{
struct ice_aqc_debug_dump_internals *cmd;
struct ice_aq_desc desc;
@@ -4796,7 +4809,7 @@ ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_debug_dump_internals);
- cmd->cluster_id = cluster_id;
+ cmd->cluster_id = CPU_TO_LE16(cluster_id);
cmd->table_id = CPU_TO_LE16(table_id);
cmd->idx = CPU_TO_LE32(start);
@@ -4805,6 +4818,8 @@ ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
if (!status) {
if (ret_buf_size)
*ret_buf_size = LE16_TO_CPU(desc.datalen);
+ if (ret_next_cluster)
+ *ret_next_cluster = LE16_TO_CPU(cmd->cluster_id);
if (ret_next_table)
*ret_next_table = LE16_TO_CPU(cmd->table_id);
if (ret_next_index)
@@ -6051,7 +6066,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
*/
enum ice_status
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
- u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd)
{
struct ice_aq_desc desc = { 0 };
@@ -6183,8 +6198,6 @@ static bool ice_is_fw_min_ver(struct ice_hw *hw, u8 branch, u8 maj, u8 min,
if (hw->fw_min_ver == min && hw->fw_patch >= patch)
return true;
}
- } else if (hw->fw_branch > branch) {
- return true;
}
return false;
@@ -6591,10 +6604,14 @@ u32 ice_get_link_speed(u16 index)
*/
bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw)
{
- return ice_is_fw_min_ver(hw, ICE_FW_FEC_DIS_AUTO_BRANCH,
+ return ice_is_fw_min_ver(hw, ICE_FW_VER_BRANCH_E810,
ICE_FW_FEC_DIS_AUTO_MAJ,
ICE_FW_FEC_DIS_AUTO_MIN,
- ICE_FW_FEC_DIS_AUTO_PATCH);
+ ICE_FW_FEC_DIS_AUTO_PATCH) ||
+ ice_is_fw_min_ver(hw, ICE_FW_VER_BRANCH_E82X,
+ ICE_FW_FEC_DIS_AUTO_MAJ_E82X,
+ ICE_FW_FEC_DIS_AUTO_MIN_E82X,
+ ICE_FW_FEC_DIS_AUTO_PATCH_E82X);
}
/**
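
The widened cluster ID and new ret_next_cluster out-parameter are what allow a caller to walk every debug-dump cluster without a hard-coded list; the firmware itself names the next cluster to fetch. A sketch of a single Get Internal Data (0xFF08) request with the updated signature, assuming hw, cluster_id, table_id, and offset are in scope as they are in the real callers:

/* One 0xFF08 request; the new full-dump loop in ice_lib.c feeds
 * ret_next_cluster back in as cluster_id until the firmware reports
 * no further clusters.
 */
enum ice_status status;
u16 ret_buf_size = 0, ret_next_cluster = 0, ret_next_table = 0;
u32 ret_next_index = 0;
u8 buf[4096];

status = ice_aq_get_internal_data(hw, cluster_id, table_id, offset,
    buf, sizeof(buf), &ret_buf_size, &ret_next_cluster,
    &ret_next_table, &ret_next_index, NULL);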
diff --git a/sys/dev/ice/ice_common.h b/sys/dev/ice/ice_common.h
index 024f6f4cdb01..3abfba874b9c 100644
--- a/sys/dev/ice/ice_common.h
+++ b/sys/dev/ice/ice_common.h
@@ -88,10 +88,10 @@ enum ice_status ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
enum ice_status
-ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
+ice_aq_get_internal_data(struct ice_hw *hw, u16 cluster_id, u16 table_id,
u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
- u16 *ret_next_table, u32 *ret_next_index,
- struct ice_sq_cd *cd);
+ u16 *ret_next_cluster, u16 *ret_next_table,
+ u32 *ret_next_index, struct ice_sq_cd *cd);
enum ice_status ice_set_mac_type(struct ice_hw *hw);
@@ -352,7 +352,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
- u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
diff --git a/sys/dev/ice/ice_controlq.c b/sys/dev/ice/ice_controlq.c
index 09816d4a812a..8aa2a7f765a2 100644
--- a/sys/dev/ice/ice_controlq.c
+++ b/sys/dev/ice/ice_controlq.c
@@ -482,7 +482,7 @@ shutdown_sq_out:
}
/**
- * ice_aq_ver_check - Check the reported AQ API version.
+ * ice_aq_ver_check - Check the reported AQ API version
* @hw: pointer to the hardware structure
*
* Checks if the driver should load on a given AQ API version.
@@ -1037,12 +1037,18 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (cq->sq.next_to_use == cq->sq.count)
cq->sq.next_to_use = 0;
wr32(hw, cq->sq.tail, cq->sq.next_to_use);
+ ice_flush(hw);
+
+ /* Wait a short time before initial ice_sq_done() check, to allow
+ * hardware time for completion.
+ */
+ ice_usec_delay(5, false);
do {
if (ice_sq_done(hw, cq))
break;
- ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
+ ice_usec_delay(10, false);
total_delay++;
} while (total_delay < cq->sq_cmd_timeout);
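
The polling budget is unchanged by the constants that move in ice_controlq.h below: the removed ICE_CTL_Q_SQ_CMD_USEC loop waited 100 us per iteration for 10000 iterations, while the new loop waits 10 us for 100000 iterations, so a send is still bounded at about one second. What changes is granularity; completions are noticed up to 10x sooner, and the 5 us head start avoids burning the first iterations before hardware could plausibly be done. The arithmetic, as a compile-time check:

/* Both settings bound an admin queue send at the same ~1 s; the new
 * values simply poll ten times more often.
 */
#define OLD_SQ_CMD_BUDGET_US	(10000 * 100)	/* old count * old delay */
#define NEW_SQ_CMD_BUDGET_US	(100000 * 10)	/* new count * new delay */
_Static_assert(OLD_SQ_CMD_BUDGET_US == NEW_SQ_CMD_BUDGET_US,
    "1 second budget preserved");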
diff --git a/sys/dev/ice/ice_controlq.h b/sys/dev/ice/ice_controlq.h
index 047dd1beaf60..d48d53a37161 100644
--- a/sys/dev/ice/ice_controlq.h
+++ b/sys/dev/ice/ice_controlq.h
@@ -60,8 +60,7 @@ enum ice_ctl_q {
};
/* Control Queue timeout settings - max delay 1s */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT 10000 /* Count 10000 times */
-#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT 100000 /* Count 100000 times */
#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
diff --git a/sys/dev/ice/ice_drv_info.h b/sys/dev/ice/ice_drv_info.h
index ff13fe99ee27..8e1200e08a64 100644
--- a/sys/dev/ice/ice_drv_info.h
+++ b/sys/dev/ice/ice_drv_info.h
@@ -62,16 +62,16 @@
* @var ice_rc_version
* @brief driver release candidate version number
*/
-const char ice_driver_version[] = "1.37.11-k";
+const char ice_driver_version[] = "1.39.13-k";
const uint8_t ice_major_version = 1;
-const uint8_t ice_minor_version = 37;
-const uint8_t ice_patch_version = 11;
+const uint8_t ice_minor_version = 39;
+const uint8_t ice_patch_version = 13;
const uint8_t ice_rc_version = 0;
#define PVIDV(vendor, devid, name) \
- PVID(vendor, devid, name " - 1.37.11-k")
+ PVID(vendor, devid, name " - 1.39.13-k")
#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
- PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.37.11-k")
+ PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.39.13-k")
/**
* @var ice_vendor_info_array
diff --git a/sys/dev/ice/ice_features.h b/sys/dev/ice/ice_features.h
index b58af574976d..03b8c63af291 100644
--- a/sys/dev/ice/ice_features.h
+++ b/sys/dev/ice/ice_features.h
@@ -89,7 +89,6 @@ enum feat_list {
static inline void
ice_disable_unsupported_features(ice_bitmap_t __unused *bitmap)
{
- ice_clear_bit(ICE_FEATURE_SRIOV, bitmap);
#ifndef DEV_NETMAP
ice_clear_bit(ICE_FEATURE_NETMAP, bitmap);
#endif
diff --git a/sys/dev/ice/ice_flex_pipe.c b/sys/dev/ice/ice_flex_pipe.c
index 943b37746f9d..f103e2aa6e71 100644
--- a/sys/dev/ice/ice_flex_pipe.c
+++ b/sys/dev/ice/ice_flex_pipe.c
@@ -672,6 +672,7 @@ enum ice_status ice_replay_tunnels(struct ice_hw *hw)
if (status) {
ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - destroy tunnel port 0x%x\n",
status, port);
+ hw->tnl.tbl[i].ref = refs;
break;
}
diff --git a/sys/dev/ice/ice_flow.c b/sys/dev/ice/ice_flow.c
index 0cf37159b251..c04f86445767 100644
--- a/sys/dev/ice/ice_flow.c
+++ b/sys/dev/ice/ice_flow.c
@@ -404,12 +404,11 @@ struct ice_flow_prof_params {
};
#define ICE_FLOW_SEG_HDRS_L3_MASK \
- (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
- ICE_FLOW_SEG_HDR_ARP)
+ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
ICE_FLOW_SEG_HDR_SCTP)
-/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
+/* mask for L4 protocols that are NOT part of IPv4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
@@ -483,15 +482,13 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
(hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
- src = i ?
- (const ice_bitmap_t *)ice_ptypes_ipv4_il :
+ src = i ? (const ice_bitmap_t *)ice_ptypes_ipv4_il :
(const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
(hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
- src = i ?
- (const ice_bitmap_t *)ice_ptypes_ipv6_il :
+ src = i ? (const ice_bitmap_t *)ice_ptypes_ipv6_il :
(const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
@@ -645,8 +642,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
case ICE_FLOW_FIELD_IDX_ICMP_CODE:
/* ICMP type and code share the same extraction seq. entry */
- prot_id = (params->prof->segs[seg].hdrs &
- ICE_FLOW_SEG_HDR_IPV4) ?
+ prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ?
ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
ICE_FLOW_FIELD_IDX_ICMP_CODE :
@@ -1301,20 +1297,20 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
/* set outer most header */
if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
- ICE_FLOW_SEG_HDR_IPV_FRAG |
- ICE_FLOW_SEG_HDR_IPV_OTHER;
+ ICE_FLOW_SEG_HDR_IPV_FRAG |
+ ICE_FLOW_SEG_HDR_IPV_OTHER;
else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
- ICE_FLOW_SEG_HDR_IPV_FRAG |
- ICE_FLOW_SEG_HDR_IPV_OTHER;
+ ICE_FLOW_SEG_HDR_IPV_FRAG |
+ ICE_FLOW_SEG_HDR_IPV_OTHER;
else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE)
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
- ICE_FLOW_SEG_HDR_GRE |
- ICE_FLOW_SEG_HDR_IPV_OTHER;
+ ICE_FLOW_SEG_HDR_GRE |
+ ICE_FLOW_SEG_HDR_IPV_OTHER;
else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE)
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
- ICE_FLOW_SEG_HDR_GRE |
- ICE_FLOW_SEG_HDR_IPV_OTHER;
+ ICE_FLOW_SEG_HDR_GRE |
+ ICE_FLOW_SEG_HDR_IPV_OTHER;
if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
return ICE_ERR_PARAM;
@@ -1418,11 +1414,14 @@ ice_get_rss_hdr_type(struct ice_flow_prof *prof)
if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
hdr_type = ICE_RSS_OUTER_HEADERS;
} else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
- if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
+ const struct ice_flow_seg_info *s;
+
+ s = &prof->segs[ICE_RSS_OUTER_HEADERS];
+ if (s->hdrs == ICE_FLOW_SEG_HDR_NONE)
hdr_type = ICE_RSS_INNER_HEADERS;
- if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ if (s->hdrs & ICE_FLOW_SEG_HDR_IPV4)
hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
- if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ if (s->hdrs & ICE_FLOW_SEG_HDR_IPV6)
hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
}
@@ -1529,13 +1528,14 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
* [62:63] - Encapsulation flag:
* 0 if non-tunneled
* 1 if tunneled
- * 2 for tunneled with outer ipv4
- * 3 for tunneled with outer ipv6
+ * 2 for tunneled with outer IPv4
+ * 3 for tunneled with outer IPv6
*/
-#define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
- ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
+#define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
+ ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
(((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
- (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
+ (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & \
+ ICE_FLOW_PROF_ENCAP_M)))
/**
* ice_add_rss_cfg_sync - add an RSS configuration
@@ -1559,7 +1559,8 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
return ICE_ERR_PARAM;
segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
- ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
+ ICE_FLOW_SEG_SINGLE :
+ ICE_FLOW_SEG_MAX;
segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
sizeof(*segs));
@@ -1663,18 +1664,16 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
struct ice_rss_hash_cfg local_cfg;
enum ice_status status;
- if (!ice_is_vsi_valid(hw, vsi_handle) ||
- !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !cfg ||
+ cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
cfg->hash_flds == ICE_HASH_INVALID)
return ICE_ERR_PARAM;
+ ice_acquire_lock(&hw->rss_locks);
local_cfg = *cfg;
if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
- ice_acquire_lock(&hw->rss_locks);
status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
- ice_release_lock(&hw->rss_locks);
} else {
- ice_acquire_lock(&hw->rss_locks);
local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
if (!status) {
@@ -1682,8 +1681,8 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
status = ice_add_rss_cfg_sync(hw, vsi_handle,
&local_cfg);
}
- ice_release_lock(&hw->rss_locks);
}
+ ice_release_lock(&hw->rss_locks);
return status;
}
@@ -1707,7 +1706,8 @@ ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
u8 segs_cnt;
segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
- ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
+ ICE_FLOW_SEG_SINGLE :
+ ICE_FLOW_SEG_MAX;
segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
sizeof(*segs));
if (!segs)
@@ -1762,8 +1762,8 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
struct ice_rss_hash_cfg local_cfg;
enum ice_status status;
- if (!ice_is_vsi_valid(hw, vsi_handle) ||
- !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !cfg ||
+ cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
cfg->hash_flds == ICE_HASH_INVALID)
return ICE_ERR_PARAM;
@@ -1774,7 +1774,6 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
} else {
local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
-
if (!status) {
local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
status = ice_rem_rss_cfg_sync(hw, vsi_handle,
diff --git a/sys/dev/ice/ice_flow.h b/sys/dev/ice/ice_flow.h
index 0857b325ca7c..31c369c144e0 100644
--- a/sys/dev/ice/ice_flow.h
+++ b/sys/dev/ice/ice_flow.h
@@ -188,14 +188,14 @@ enum ice_flow_avf_hdr_field {
enum ice_rss_cfg_hdr_type {
ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */
ICE_RSS_INNER_HEADERS, /* take inner headers as inputset. */
- /* take inner headers as inputset for packet with outer ipv4. */
+ /* take inner headers as inputset for packet with outer IPv4. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV4,
- /* take inner headers as inputset for packet with outer ipv6. */
+ /* take inner headers as inputset for packet with outer IPv6. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
/* take outer headers first then inner headers as inputset */
- /* take inner as inputset for GTPoGRE with outer ipv4 + gre. */
+ /* take inner as inputset for GTPoGRE with outer IPv4 + GRE. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE,
- /* take inner as inputset for GTPoGRE with outer ipv6 + gre. */
+ /* take inner as inputset for GTPoGRE with outer IPv6 + GRE. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE,
ICE_RSS_ANY_HEADERS
};
diff --git a/sys/dev/ice/ice_hw_autogen.h b/sys/dev/ice/ice_hw_autogen.h
index 6b04bff4b722..8e59ebc76835 100644
--- a/sys/dev/ice/ice_hw_autogen.h
+++ b/sys/dev/ice/ice_hw_autogen.h
@@ -5476,6 +5476,7 @@
#define GL_MNG_FW_RAM_STAT_MNG_MEM_ECC_ERR_S 1
#define GL_MNG_FW_RAM_STAT_MNG_MEM_ECC_ERR_M BIT(1)
#define GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */
+#define GL_MNG_FWSM_FW_LOADING_M BIT(30)
#define GL_MNG_FWSM_FW_MODES_S 0
#define GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x7, 0)
#define GL_MNG_FWSM_RSV0_S 3
diff --git a/sys/dev/ice/ice_iflib.h b/sys/dev/ice/ice_iflib.h
index e9e09851b82c..4ac5fffe5b7e 100644
--- a/sys/dev/ice/ice_iflib.h
+++ b/sys/dev/ice/ice_iflib.h
@@ -193,6 +193,29 @@ struct ice_rx_queue {
};
/**
+ * @struct ice_mirr_if
+ * @brief structure representing a mirroring interface
+ */
+struct ice_mirr_if {
+ struct ice_softc *back;
+ struct ifnet *ifp;
+ struct ice_vsi *vsi;
+
+ device_t subdev;
+ if_ctx_t subctx;
+ if_softc_ctx_t subscctx;
+
+ u16 num_irq_vectors;
+ u16 *if_imap;
+ u16 *os_imap;
+ struct ice_irq_vector *rx_irqvs;
+
+ u32 state;
+
+ bool if_attached;
+};
+
+/**
* @struct ice_softc
* @brief main structure representing one device
*
@@ -262,7 +285,7 @@ struct ice_softc {
struct ice_resmgr rx_qmgr;
/* Interrupt allocation manager */
- struct ice_resmgr imgr;
+ struct ice_resmgr dev_imgr;
u16 *pf_imap;
int lan_vectors;
@@ -302,7 +325,7 @@ struct ice_softc {
/* NVM link override settings */
struct ice_link_default_override_tlv ldo_tlv;
- u16 fw_debug_dump_cluster_mask;
+ u32 fw_debug_dump_cluster_mask;
struct sx *iflib_ctx_lock;
@@ -310,6 +333,11 @@ struct ice_softc {
ice_declare_bitmap(feat_cap, ICE_FEATURE_COUNT);
ice_declare_bitmap(feat_en, ICE_FEATURE_COUNT);
+ struct ice_resmgr os_imgr;
+ /* For mirror interface */
+ struct ice_mirr_if *mirr_if;
+ int extra_vectors;
+ int last_rid;
};
#endif /* _ICE_IFLIB_H_ */
diff --git a/sys/dev/ice/ice_iflib_txrx.c b/sys/dev/ice/ice_iflib_txrx.c
index f3087c09702a..a090717e7c5e 100644
--- a/sys/dev/ice/ice_iflib_txrx.c
+++ b/sys/dev/ice/ice_iflib_txrx.c
@@ -45,6 +45,18 @@
#include "ice_common_txrx.h"
/*
+ * Driver private implementations
+ */
+static int _ice_ift_txd_encap(struct ice_tx_queue *txq, if_pkt_info_t pi);
+static int _ice_ift_txd_credits_update(struct ice_softc *sc, struct ice_tx_queue *txq, bool clear);
+static int _ice_ift_rxd_available(struct ice_rx_queue *rxq, qidx_t pidx, qidx_t budget);
+static int _ice_ift_rxd_pkt_get(struct ice_rx_queue *rxq, if_rxd_info_t ri);
+static void _ice_ift_rxd_refill(struct ice_rx_queue *rxq, uint32_t pidx,
+ uint64_t *paddrs, uint16_t count);
+static void _ice_ift_rxd_flush(struct ice_softc *sc, struct ice_rx_queue *rxq,
+ uint32_t pidx);
+
+/*
* iflib txrx method declarations
*/
static int ice_ift_txd_encap(void *arg, if_pkt_info_t pi);
@@ -55,6 +67,13 @@ static int ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t
static void ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
static void ice_ift_rxd_refill(void *arg, if_rxd_update_t iru);
static qidx_t ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi);
+static int ice_ift_txd_credits_update_subif(void *arg, uint16_t txqid, bool clear);
+static int ice_ift_txd_encap_subif(void *arg, if_pkt_info_t pi);
+static void ice_ift_txd_flush_subif(void *arg, uint16_t txqid, qidx_t pidx);
+static int ice_ift_rxd_available_subif(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
+static int ice_ift_rxd_pkt_get_subif(void *arg, if_rxd_info_t ri);
+static void ice_ift_rxd_refill_subif(void *arg, if_rxd_update_t iru);
+static void ice_ift_rxd_flush_subif(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
/* Macro to help extract the NIC mode flexible Rx descriptor fields from the
* advanced 32byte Rx descriptors.
@@ -82,8 +101,27 @@ struct if_txrx ice_txrx = {
};
/**
- * ice_ift_txd_encap - prepare Tx descriptors for a packet
- * @arg: the iflib softc structure pointer
+ * @var ice_subif_txrx
+ * @brief Tx/Rx operations for the iflib stack, for subinterfaces
+ *
+ * Structure defining the Tx and Rx related operations that iflib can request
+ * the subinterface driver to perform. These are the main entry points for the
+ * hot path of the transmit and receive paths in the iflib driver.
+ */
+struct if_txrx ice_subif_txrx = {
+ .ift_txd_credits_update = ice_ift_txd_credits_update_subif,
+ .ift_txd_encap = ice_ift_txd_encap_subif,
+ .ift_txd_flush = ice_ift_txd_flush_subif,
+ .ift_rxd_available = ice_ift_rxd_available_subif,
+ .ift_rxd_pkt_get = ice_ift_rxd_pkt_get_subif,
+ .ift_rxd_refill = ice_ift_rxd_refill_subif,
+ .ift_rxd_flush = ice_ift_rxd_flush_subif,
+ .ift_txq_select_v2 = NULL,
+};
+
+/**
+ * _ice_ift_txd_encap - prepare Tx descriptors for a packet
+ * @txq: driver's TX queue context
* @pi: packet info
*
 * Prepares and encapsulates the given packet into Tx descriptors, in
@@ -94,10 +132,8 @@ struct if_txrx ice_txrx = {
* Return 0 on success, non-zero error code on failure.
*/
static int
-ice_ift_txd_encap(void *arg, if_pkt_info_t pi)
+_ice_ift_txd_encap(struct ice_tx_queue *txq, if_pkt_info_t pi)
{
- struct ice_softc *sc = (struct ice_softc *)arg;
- struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[pi->ipi_qsidx];
int nsegs = pi->ipi_nsegs;
bus_dma_segment_t *segs = pi->ipi_segs;
struct ice_tx_desc *txd = NULL;
@@ -157,6 +193,27 @@ ice_ift_txd_encap(void *arg, if_pkt_info_t pi)
}
/**
+ * ice_ift_txd_encap - prepare Tx descriptors for a packet
+ * @arg: the iflib softc structure pointer
+ * @pi: packet info
+ *
+ * Prepares and encapsulates the given packet into Tx descriptors, in
+ * preparation for sending to the transmit engine. Sets the necessary context
+ * descriptors for TSO and other offloads, and prepares the last descriptor
+ * for the writeback status.
+ *
+ * Return 0 on success, non-zero error code on failure.
+ */
+static int
+ice_ift_txd_encap(void *arg, if_pkt_info_t pi)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg;
+ struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[pi->ipi_qsidx];
+
+ return _ice_ift_txd_encap(txq, pi);
+}
+
+/**
* ice_ift_txd_flush - Flush Tx descriptors to hardware
* @arg: device specific softc pointer
* @txqid: the Tx queue to flush
@@ -176,9 +233,9 @@ ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
}
/**
- * ice_ift_txd_credits_update - cleanup Tx descriptors
- * @arg: device private softc
- * @txqid: the Tx queue to update
+ * _ice_ift_txd_credits_update - cleanup Tx descriptors
+ * @sc: device private softc
+ * @txq: the Tx queue to update
* @clear: if false, only report, do not actually clean
*
* If clear is false, iflib is asking if we *could* clean up any Tx
@@ -186,13 +243,12 @@ ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
*
* If clear is true, iflib is requesting to cleanup and reclaim used Tx
* descriptors.
+ *
+ * Called by other txd_credits_update functions passed to iflib.
*/
static int
-ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear)
+_ice_ift_txd_credits_update(struct ice_softc *sc __unused, struct ice_tx_queue *txq, bool clear)
{
- struct ice_softc *sc = (struct ice_softc *)arg;
- struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];
-
qidx_t processed = 0;
qidx_t cur, prev, ntxd, rs_cidx;
int32_t delta;
@@ -235,9 +291,28 @@ ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear)
}
/**
- * ice_ift_rxd_available - Return number of available Rx packets
+ * ice_ift_txd_credits_update - cleanup PF VSI Tx descriptors
* @arg: device private softc
- * @rxqid: the Rx queue id
+ * @txqid: the Tx queue to update
+ * @clear: if false, only report, do not actually clean
+ *
+ * Wrapper for _ice_ift_txd_credits_update() meant for TX queues that
+ * belong to the PF VSI.
+ *
+ * @see _ice_ift_txd_credits_update()
+ */
+static int
+ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg;
+ struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];
+
+ return _ice_ift_txd_credits_update(sc, txq, clear);
+}
+
+/**
+ * _ice_ift_rxd_available - Return number of available Rx packets
+ * @rxq: RX queue driver structure
* @pidx: descriptor start point
* @budget: maximum Rx budget
*
@@ -245,10 +320,8 @@ ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear)
* of the given budget.
*/
static int
-ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
+_ice_ift_rxd_available(struct ice_rx_queue *rxq, qidx_t pidx, qidx_t budget)
{
- struct ice_softc *sc = (struct ice_softc *)arg;
- struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];
union ice_32b_rx_flex_desc *rxd;
uint16_t status0;
int cnt, i, nrxd;
@@ -271,20 +344,53 @@ ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
}
/**
+ * ice_ift_rxd_available - Return number of available Rx packets
+ * @arg: device private softc
+ * @rxqid: the Rx queue id
+ * @pidx: descriptor start point
+ * @budget: maximum Rx budget
+ *
+ * Wrapper for _ice_ift_rxd_available() that provides a function pointer
+ * that iflib requires for RX processing.
+ */
+static int
+ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg;
+ struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];
+
+ return _ice_ift_rxd_available(rxq, pidx, budget);
+}
+
+/**
* ice_ift_rxd_pkt_get - Called by iflib to send data to upper layer
* @arg: device specific softc
* @ri: receive packet info
*
- * This function is called by iflib, and executes in ithread context. It is
- * called by iflib to obtain data which has been DMA'ed into host memory.
- * Returns zero on success, and EBADMSG on failure.
+ * Wrapper function for _ice_ift_rxd_pkt_get() that provides a function pointer
+ * used by iflib for RX packet processing.
*/
static int
ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
struct ice_softc *sc = (struct ice_softc *)arg;
- if_softc_ctx_t scctx = sc->scctx;
struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[ri->iri_qsidx];
+
+ return _ice_ift_rxd_pkt_get(rxq, ri);
+}
+
+/**
+ * _ice_ift_rxd_pkt_get - Called by iflib to send data to upper layer
+ * @rxq: RX queue driver structure
+ * @ri: receive packet info
+ *
+ * This function is called by iflib, and executes in ithread context. It is
+ * called by iflib to obtain data which has been DMA'ed into host memory.
+ * Returns zero on success, and EBADMSG on failure.
+ */
+static int
+_ice_ift_rxd_pkt_get(struct ice_rx_queue *rxq, if_rxd_info_t ri)
+{
union ice_32b_rx_flex_desc *cur;
u16 status0, plen, ptype;
bool eop;
@@ -341,7 +447,7 @@ ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
/* Get packet type and set checksum flags */
ptype = le16toh(cur->wb.ptype_flex_flags0) &
ICE_RX_FLEX_DESC_PTYPE_M;
- if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
+ if ((if_getcapenable(ri->iri_ifp) & IFCAP_RXCSUM) != 0)
ice_rx_checksum(rxq, &ri->iri_csum_flags,
&ri->iri_csum_data, status0, ptype);
@@ -357,16 +463,14 @@ ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
* @arg: device specific softc structure
* @iru: the Rx descriptor update structure
*
- * Update the Rx descriptor indices for a given queue, assigning new physical
- * addresses to the descriptors, preparing them for re-use by the hardware.
+ * Wrapper function for _ice_ift_rxd_refill() that provides a function pointer
+ * used by iflib for RX packet processing.
*/
static void
ice_ift_rxd_refill(void *arg, if_rxd_update_t iru)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_rx_queue *rxq;
- uint32_t next_pidx;
- int i;
uint64_t *paddrs;
uint32_t pidx;
uint16_t qsidx, count;
@@ -378,6 +482,26 @@ ice_ift_rxd_refill(void *arg, if_rxd_update_t iru)
rxq = &(sc->pf_vsi.rx_queues[qsidx]);
+ _ice_ift_rxd_refill(rxq, pidx, paddrs, count);
+}
+
+/**
+ * _ice_ift_rxd_refill - Prepare Rx descriptors for re-use by hardware
+ * @rxq: RX queue driver structure
+ * @pidx: first index to refill
+ * @paddrs: physical addresses to use
+ * @count: number of descriptors to refill
+ *
+ * Update the Rx descriptor indices for a given queue, assigning new physical
+ * addresses to the descriptors, preparing them for re-use by the hardware.
+ */
+static void
+_ice_ift_rxd_refill(struct ice_rx_queue *rxq, uint32_t pidx,
+ uint64_t *paddrs, uint16_t count)
+{
+ uint32_t next_pidx;
+ int i;
+
for (i = 0, next_pidx = pidx; i < count; i++) {
rxq->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
if (++next_pidx == (uint32_t)rxq->desc_count)
@@ -392,8 +516,8 @@ ice_ift_rxd_refill(void *arg, if_rxd_update_t iru)
* @flidx: unused parameter
* @pidx: descriptor index to advance tail to
*
- * Advance the Receive Descriptor Tail (RDT). This indicates to hardware that
- * software is done with the descriptor and it can be recycled.
+ * Wrapper function for _ice_ift_rxd_flush() that provides a function pointer
+ * used by iflib for RX packet processing.
*/
static void
ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused,
@@ -401,11 +525,36 @@ ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused,
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];
- struct ice_hw *hw = &sc->hw;
- wr32(hw, rxq->tail, pidx);
+ _ice_ift_rxd_flush(sc, rxq, (uint32_t)pidx);
+}
+
+/**
+ * _ice_ift_rxd_flush - Flush Rx descriptors to hardware
+ * @sc: device specific softc pointer
+ * @rxq: RX queue driver structure
+ * @pidx: descriptor index to advance tail to
+ *
+ * Advance the Receive Descriptor Tail (RDT). This indicates to hardware that
+ * software is done with the descriptor and it can be recycled.
+ */
+static void
+_ice_ift_rxd_flush(struct ice_softc *sc, struct ice_rx_queue *rxq, uint32_t pidx)
+{
+ wr32(&sc->hw, rxq->tail, pidx);
}
+/**
+ * ice_ift_queue_select - Select queue index to transmit packet on
+ * @arg: device specific softc
+ * @m: transmit packet data
+ * @pi: transmit packet metadata
+ *
+ * Called by iflib to determine which queue index to transmit the packet
+ * pointed to by @m on. In particular, ensures packets go out on the right
+ * queue index for the right transmit class when multiple traffic classes are
+ * enabled in the driver.
+ */
static qidx_t
ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi)
{
@@ -456,3 +605,146 @@ ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi)
else
return (tc_base_queue);
}
+
+/**
+ * ice_ift_txd_credits_update_subif - cleanup subinterface VSI Tx descriptors
+ * @arg: subinterface private structure (struct ice_mirr_if)
+ * @txqid: the Tx queue to update
+ * @clear: if false, only report, do not actually clean
+ *
+ * Wrapper for _ice_ift_txd_credits_update() meant for TX queues that
+ * do not belong to the PF VSI.
+ *
+ * See _ice_ift_txd_credits_update().
+ */
+static int
+ice_ift_txd_credits_update_subif(void *arg, uint16_t txqid, bool clear)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
+ struct ice_softc *sc = mif->back;
+ struct ice_tx_queue *txq = &mif->vsi->tx_queues[txqid];
+
+ return _ice_ift_txd_credits_update(sc, txq, clear);
+}
+
+/**
+ * ice_ift_txd_encap_subif - prepare Tx descriptors for a packet
+ * @arg: subinterface private structure (struct ice_mirr_if)
+ * @pi: packet info
+ *
+ * Wrapper for _ice_ift_txd_encap() meant for TX queues that
+ * do not belong to the PF VSI.
+ *
+ * See _ice_ift_txd_encap().
+ */
+static int
+ice_ift_txd_encap_subif(void *arg, if_pkt_info_t pi)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
+ struct ice_tx_queue *txq = &mif->vsi->tx_queues[pi->ipi_qsidx];
+
+ return _ice_ift_txd_encap(txq, pi);
+}
+
+/**
+ * ice_ift_txd_flush_subif - Flush Tx descriptors to hardware
+ * @arg: subinterface private structure (struct ice_mirr_if)
+ * @txqid: the Tx queue to flush
+ * @pidx: descriptor index to advance tail to
+ *
+ * Advance the Transmit Descriptor Tail (TDT). Functionally identical to
+ * ice_ift_txd_flush() meant for the main PF VSI, but provides a function
+ * pointer to iflib for use with non-main-PF VSI TX queues.
+ */
+static void
+ice_ift_txd_flush_subif(void *arg, uint16_t txqid, qidx_t pidx)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
+ struct ice_tx_queue *txq = &mif->vsi->tx_queues[txqid];
+ struct ice_hw *hw = &mif->back->hw;
+
+ wr32(hw, txq->tail, pidx);
+}
+
+/**
+ * ice_ift_rxd_available_subif - Return number of available Rx packets
+ * @arg: subinterface private structure (struct ice_mirr_if)
+ * @rxqid: the Rx queue id
+ * @pidx: descriptor start point
+ * @budget: maximum Rx budget
+ *
+ * Determines how many Rx packets are available on the queue, up to a maximum
+ * of the given budget.
+ *
+ * See _ice_ift_rxd_available().
+ */
+static int
+ice_ift_rxd_available_subif(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
+ struct ice_rx_queue *rxq = &mif->vsi->rx_queues[rxqid];
+
+ return _ice_ift_rxd_available(rxq, pidx, budget);
+}
+
+/**
+ * ice_ift_rxd_pkt_get_subif - Called by iflib to send data to upper layer
+ * @arg: subinterface private structure (struct ice_mirr_if)
+ * @ri: receive packet info
+ *
+ * Wrapper function for _ice_ift_rxd_pkt_get() that provides a function pointer
+ * used by iflib for RX packet processing, for iflib subinterfaces.
+ */
+static int
+ice_ift_rxd_pkt_get_subif(void *arg, if_rxd_info_t ri)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
+ struct ice_rx_queue *rxq = &mif->vsi->rx_queues[ri->iri_qsidx];
+
+ return _ice_ift_rxd_pkt_get(rxq, ri);
+}
+
+/**
+ * ice_ift_rxd_refill_subif - Prepare Rx descriptors for re-use by hardware
+ * @arg: subinterface private structure (struct ice_mirr_if)
+ * @iru: the Rx descriptor update structure
+ *
+ * Wrapper function for _ice_ift_rxd_refill() that provides a function pointer
+ * used by iflib for RX packet processing, for iflib subinterfaces.
+ */
+static void
+ice_ift_rxd_refill_subif(void *arg, if_rxd_update_t iru)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
+ struct ice_rx_queue *rxq = &mif->vsi->rx_queues[iru->iru_qsidx];
+
+ uint64_t *paddrs;
+ uint32_t pidx;
+ uint16_t count;
+
+ paddrs = iru->iru_paddrs;
+ pidx = iru->iru_pidx;
+ count = iru->iru_count;
+
+ _ice_ift_rxd_refill(rxq, pidx, paddrs, count);
+}
+
+/**
+ * ice_ift_rxd_flush_subif - Flush Rx descriptors to hardware
+ * @arg: subinterface private structure (struct ice_mirr_if)
+ * @rxqid: the Rx queue to flush
+ * @flidx: unused parameter
+ * @pidx: descriptor index to advance tail to
+ *
+ * Wrapper function for _ice_ift_rxd_flush() that provides a function pointer
+ * used by iflib for RX packet processing.
+ */
+static void
+ice_ift_rxd_flush_subif(void *arg, uint16_t rxqid, uint8_t flidx __unused,
+ qidx_t pidx)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
+ struct ice_rx_queue *rxq = &mif->vsi->rx_queues[rxqid];
+
+ _ice_ift_rxd_flush(mif->back, rxq, pidx);
+}
diff --git a/sys/dev/ice/ice_lib.c b/sys/dev/ice/ice_lib.c
index 114d2a319669..659412450fce 100644
--- a/sys/dev/ice/ice_lib.c
+++ b/sys/dev/ice/ice_lib.c
@@ -179,8 +179,9 @@ ice_add_dscp2tc_map_sysctls(struct ice_softc *sc,
static void ice_set_default_local_mib_settings(struct ice_softc *sc);
static bool ice_dscp_is_mapped(struct ice_dcbx_cfg *dcbcfg);
static void ice_start_dcbx_agent(struct ice_softc *sc);
-static void ice_fw_debug_dump_print_cluster(struct ice_softc *sc,
- struct sbuf *sbuf, u16 cluster_id);
+static u16 ice_fw_debug_dump_print_cluster(struct ice_softc *sc,
+ struct sbuf *sbuf, u16 cluster_id);
+static void ice_remove_vsi_mirroring(struct ice_vsi *vsi);
static int ice_module_init(void);
static int ice_module_exit(void);
@@ -242,6 +243,8 @@ static int ice_sysctl_allow_no_fec_mod_in_auto(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_debug_set_link(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_temperature(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_create_mirror_interface(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_destroy_mirror_interface(SYSCTL_HANDLER_ARGS);
/**
* ice_map_bar - Map PCIe BAR memory
@@ -355,6 +358,10 @@ ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi,
sc->all_vsi[idx] = vsi;
vsi->dynamic = dynamic;
+ /* Set default mirroring rule information */
+ vsi->rule_mir_ingress = ICE_INVAL_MIRROR_RULE_ID;
+ vsi->rule_mir_egress = ICE_INVAL_MIRROR_RULE_ID;
+
/* Setup the VSI tunables now */
ice_add_vsi_tunables(vsi, sc->vsi_sysctls);
}
@@ -382,7 +389,7 @@ ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type)
return NULL;
}
- vsi = (struct ice_vsi *)malloc(sizeof(*vsi), M_ICE, M_WAITOK|M_ZERO);
+ vsi = (struct ice_vsi *)malloc(sizeof(*vsi), M_ICE, M_NOWAIT | M_ZERO);
if (!vsi) {
device_printf(sc->dev, "Unable to allocate VSI memory\n");
return NULL;
@@ -550,6 +557,7 @@ ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type)
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
case ICE_VSI_VF:
+ case ICE_VSI_VMDQ2:
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
@@ -571,6 +579,8 @@ ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type)
*
* Configures the context for the given VSI, setting up how the firmware
* should map the queues for this VSI.
+ *
+ * @pre vsi->qmap_type is set to a valid type
*/
static int
ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
@@ -621,10 +631,95 @@ ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
}
/**
+ * ice_setup_vsi_mirroring -- Setup a VSI for mirroring PF VSI traffic
+ * @vsi: VSI to setup
+ *
+ * @pre vsi->mirror_src_vsi is set to the SW VSI num that traffic is to be
+ * mirrored from
+ *
+ * Returns 0 on success, EINVAL on failure.
+ */
+int
+ice_setup_vsi_mirroring(struct ice_vsi *vsi)
+{
+ struct ice_mir_rule_buf rule = { };
+ struct ice_softc *sc = vsi->sc;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ u16 rule_id, dest_vsi;
+ u16 count = 1;
+
+ rule.vsi_idx = ice_get_hw_vsi_num(hw, vsi->mirror_src_vsi);
+ rule.add = true;
+
+ dest_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+ rule_id = ICE_INVAL_MIRROR_RULE_ID;
+ status = ice_aq_add_update_mir_rule(hw, ICE_AQC_RULE_TYPE_VPORT_INGRESS,
+ dest_vsi, count, &rule, NULL,
+ &rule_id);
+ if (status) {
+ device_printf(dev,
+ "Could not add INGRESS rule for mirror vsi %d to vsi %d, err %s aq_err %s\n",
+ rule.vsi_idx, dest_vsi, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EINVAL);
+ }
+
+ vsi->rule_mir_ingress = rule_id;
+
+ rule_id = ICE_INVAL_MIRROR_RULE_ID;
+ status = ice_aq_add_update_mir_rule(hw, ICE_AQC_RULE_TYPE_VPORT_EGRESS,
+ dest_vsi, count, &rule, NULL, &rule_id);
+ if (status) {
+ device_printf(dev,
+ "Could not add EGRESS rule for mirror vsi %d to vsi %d, err %s aq_err %s\n",
+ rule.vsi_idx, dest_vsi, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EINVAL);
+ }
+
+ vsi->rule_mir_egress = rule_id;
+
+ return (0);
+}
+
+/**
+ * ice_remove_vsi_mirroring -- Teardown any VSI mirroring rules
+ * @vsi: VSI to remove mirror rules from
+ */
+static void
+ice_remove_vsi_mirroring(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->sc->hw;
+ enum ice_status status = ICE_SUCCESS;
+ bool keep_alloc = false;
+
+ if (vsi->rule_mir_ingress != ICE_INVAL_MIRROR_RULE_ID)
+ status = ice_aq_delete_mir_rule(hw, vsi->rule_mir_ingress, keep_alloc, NULL);
+
+ if (status)
+ device_printf(vsi->sc->dev, "Could not remove mirror VSI ingress rule, err %s aq_err %s\n",
+ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+
+ status = ICE_SUCCESS;
+
+ if (vsi->rule_mir_egress != ICE_INVAL_MIRROR_RULE_ID)
+ status = ice_aq_delete_mir_rule(hw, vsi->rule_mir_egress, keep_alloc, NULL);
+
+ if (status)
+ device_printf(vsi->sc->dev, "Could not remove mirror VSI egress rule, err %s aq_err %s\n",
+ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+}
+
+/**
* ice_initialize_vsi - Initialize a VSI for use
* @vsi: the vsi to initialize
*
* Initialize a VSI over the adminq and prepare it for operation.
+ *
+ * @pre vsi->num_tx_queues is set
+ * @pre vsi->num_rx_queues is set
*/
int
ice_initialize_vsi(struct ice_vsi *vsi)
@@ -640,6 +735,9 @@ ice_initialize_vsi(struct ice_vsi *vsi)
case ICE_VSI_PF:
ctx.flags = ICE_AQ_VSI_TYPE_PF;
break;
+ case ICE_VSI_VMDQ2:
+ ctx.flags = ICE_AQ_VSI_TYPE_VMDQ2;
+ break;
default:
return (ENODEV);
}
@@ -754,6 +852,9 @@ ice_release_vsi(struct ice_vsi *vsi)
ice_del_vsi_sysctl_ctx(vsi);
+ /* Remove the configured mirror rule, if it exists */
+ ice_remove_vsi_mirroring(vsi);
+
/*
* If we unload the driver after a reset fails, we do not need to do
* this step.
@@ -1264,6 +1365,10 @@ ice_configure_all_rxq_interrupts(struct ice_vsi *vsi)
ice_configure_rxq_interrupt(hw, vsi->rx_qmap[rxq->me],
rxq->irqv->me, ICE_RX_ITR);
+
+ ice_debug(hw, ICE_DBG_INIT,
+ "RXQ(%d) intr enable: me %d rxqid %d vector %d\n",
+ i, rxq->me, vsi->rx_qmap[rxq->me], rxq->irqv->me);
}
ice_flush(hw);
@@ -1462,6 +1567,9 @@ ice_setup_tx_ctx(struct ice_tx_queue *txq, struct ice_tlan_ctx *tlan_ctx, u16 pf
case ICE_VSI_PF:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
+ case ICE_VSI_VMDQ2:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ break;
default:
return (ENODEV);
}
@@ -4458,6 +4566,14 @@ ice_add_device_sysctls(struct ice_softc *sc)
OID_AUTO, "link_active_on_if_down", CTLTYPE_U8 | CTLFLAG_RWTUN,
sc, 0, ice_sysctl_set_link_active, "CU", ICE_SYSCTL_HELP_SET_LINK_ACTIVE);
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "create_mirror_interface", CTLTYPE_STRING | CTLFLAG_RW,
+ sc, 0, ice_sysctl_create_mirror_interface, "A", "");
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "destroy_mirror_interface", CTLTYPE_STRING | CTLFLAG_RW,
+ sc, 0, ice_sysctl_destroy_mirror_interface, "A", "");
+
ice_add_dscp2tc_map_sysctls(sc, ctx, ctx_list);
/* Differentiate software and hardware statistics, by keeping hw stats
@@ -6247,16 +6363,18 @@ ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS)
return (0);
}
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_INVALID (0xFFFFFF)
#define ICE_SYSCTL_HELP_FW_DEBUG_DUMP_CLUSTER_SETTING \
"\nSelect clusters to dump with \"dump\" sysctl" \
"\nFlags:" \
-"\n\t 0x1 - Switch" \
-"\n\t 0x2 - ACL" \
-"\n\t 0x4 - Tx Scheduler" \
-"\n\t 0x8 - Profile Configuration" \
-"\n\t 0x20 - Link" \
-"\n\t 0x80 - DCB" \
-"\n\t 0x100 - L2P" \
+"\n\t 0x1 - Switch" \
+"\n\t 0x2 - ACL" \
+"\n\t 0x4 - Tx Scheduler" \
+"\n\t 0x8 - Profile Configuration" \
+"\n\t 0x20 - Link" \
+"\n\t 0x80 - DCB" \
+"\n\t 0x100 - L2P" \
+"\n\t 0x400000 - Manageability Transactions" \
"\n\t" \
"\nUse \"sysctl -x\" to view flags properly."
@@ -6273,7 +6391,7 @@ ice_sysctl_fw_debug_dump_cluster_setting(SYSCTL_HANDLER_ARGS)
{
struct ice_softc *sc = (struct ice_softc *)arg1;
device_t dev = sc->dev;
- u16 clusters;
+ u32 clusters;
int ret;
UNREFERENCED_PARAMETER(arg2);
@@ -6287,15 +6405,15 @@ ice_sysctl_fw_debug_dump_cluster_setting(SYSCTL_HANDLER_ARGS)
clusters = sc->fw_debug_dump_cluster_mask;
- ret = sysctl_handle_16(oidp, &clusters, 0, req);
+ ret = sysctl_handle_32(oidp, &clusters, 0, req);
if ((ret) || (req->newptr == NULL))
return (ret);
- if (!clusters ||
- (clusters & ~(ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK))) {
+ if (clusters & ~(ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK)) {
device_printf(dev,
"%s: ERROR: Incorrect settings requested\n",
__func__);
+ sc->fw_debug_dump_cluster_mask = ICE_AQC_DBG_DUMP_CLUSTER_ID_INVALID;
return (EINVAL);
}
@@ -6319,7 +6437,7 @@ ice_sysctl_fw_debug_dump_cluster_setting(SYSCTL_HANDLER_ARGS)
* @remark Only intended to be used by the sysctl handler
* ice_sysctl_fw_debug_dump_do_dump
*/
-static void
+static u16
ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 cluster_id)
{
struct ice_hw *hw = &sc->hw;
@@ -6330,20 +6448,21 @@ ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 clu
int counter = 0;
u8 *data_buf;
- /* Other setup */
- data_buf = (u8 *)malloc(data_buf_size, M_ICE, M_NOWAIT | M_ZERO);
- if (!data_buf)
- return;
-
/* Input parameters / loop variables */
u16 table_id = 0;
u32 offset = 0;
/* Output from the Get Internal Data AQ command */
u16 ret_buf_size = 0;
+ u16 ret_next_cluster = 0;
u16 ret_next_table = 0;
u32 ret_next_index = 0;
+ /* Other setup */
+ data_buf = (u8 *)malloc(data_buf_size, M_ICE, M_NOWAIT | M_ZERO);
+ if (!data_buf)
+ return ret_next_cluster;
+
ice_debug(hw, ICE_DBG_DIAG, "%s: dumping cluster id %d\n", __func__,
cluster_id);
@@ -6363,7 +6482,7 @@ ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 clu
status = ice_aq_get_internal_data(hw, cluster_id, table_id,
offset, data_buf, data_buf_size, &ret_buf_size,
- &ret_next_table, &ret_next_index, NULL);
+ &ret_next_cluster, &ret_next_table, &ret_next_index, NULL);
if (status) {
device_printf(dev,
"%s: ice_aq_get_internal_data in cluster %d: err %s aq_err %s\n",
@@ -6429,6 +6548,7 @@ ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 clu
}
free(data_buf, M_ICE);
+ return ret_next_cluster;
}
#define ICE_SYSCTL_HELP_FW_DEBUG_DUMP_DO_DUMP \
@@ -6437,6 +6557,7 @@ ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 clu
"\nthis data is opaque and not a string."
#define ICE_FW_DUMP_BASE_TEXT_SIZE (1024 * 1024)
+#define ICE_FW_DUMP_ALL_TEXT_SIZE (10 * 1024 * 1024)
#define ICE_FW_DUMP_CLUST0_TEXT_SIZE (2 * 1024 * 1024)
#define ICE_FW_DUMP_CLUST1_TEXT_SIZE (128 * 1024)
#define ICE_FW_DUMP_CLUST2_TEXT_SIZE (2 * 1024 * 1024)
@@ -6493,9 +6614,9 @@ ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS)
* sysctl read call.
*/
if (input_buf[0] == '1') {
- if (!sc->fw_debug_dump_cluster_mask) {
+ if (sc->fw_debug_dump_cluster_mask == ICE_AQC_DBG_DUMP_CLUSTER_ID_INVALID) {
device_printf(dev,
- "%s: Debug Dump failed because no cluster was specified with the \"clusters\" sysctl.\n",
+ "%s: Debug Dump failed because an invalid cluster was specified.\n",
__func__);
return (EINVAL);
}
@@ -6513,12 +6634,16 @@ ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS)
/* Caller just wants the upper bound for size */
if (req->oldptr == NULL && req->newptr == NULL) {
size_t est_output_len = ICE_FW_DUMP_BASE_TEXT_SIZE;
- if (sc->fw_debug_dump_cluster_mask & 0x1)
- est_output_len += ICE_FW_DUMP_CLUST0_TEXT_SIZE;
- if (sc->fw_debug_dump_cluster_mask & 0x2)
- est_output_len += ICE_FW_DUMP_CLUST1_TEXT_SIZE;
- if (sc->fw_debug_dump_cluster_mask & 0x4)
- est_output_len += ICE_FW_DUMP_CLUST2_TEXT_SIZE;
+ if (sc->fw_debug_dump_cluster_mask == 0)
+ est_output_len += ICE_FW_DUMP_ALL_TEXT_SIZE;
+ else {
+ if (sc->fw_debug_dump_cluster_mask & 0x1)
+ est_output_len += ICE_FW_DUMP_CLUST0_TEXT_SIZE;
+ if (sc->fw_debug_dump_cluster_mask & 0x2)
+ est_output_len += ICE_FW_DUMP_CLUST1_TEXT_SIZE;
+ if (sc->fw_debug_dump_cluster_mask & 0x4)
+ est_output_len += ICE_FW_DUMP_CLUST2_TEXT_SIZE;
+ }
ret = SYSCTL_OUT(req, 0, est_output_len);
return (ret);
@@ -6529,9 +6654,17 @@ ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS)
ice_debug(&sc->hw, ICE_DBG_DIAG, "%s: Debug Dump running...\n", __func__);
- for_each_set_bit(bit, &sc->fw_debug_dump_cluster_mask,
- sizeof(sc->fw_debug_dump_cluster_mask) * 8)
- ice_fw_debug_dump_print_cluster(sc, sbuf, bit);
+ if (sc->fw_debug_dump_cluster_mask) {
+ for_each_set_bit(bit, &sc->fw_debug_dump_cluster_mask,
+ sizeof(sc->fw_debug_dump_cluster_mask) * 8)
+ ice_fw_debug_dump_print_cluster(sc, sbuf, bit);
+ } else {
+ u16 next_cluster_id = 0;
+ /* We don't support QUEUE_MNG and FULL_CSR_SPACE */
+ do {
+ next_cluster_id = ice_fw_debug_dump_print_cluster(sc, sbuf, next_cluster_id);
+ } while (next_cluster_id != 0 && next_cluster_id < ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG);
+ }
sbuf_finish(sbuf);
sbuf_delete(sbuf);
@@ -6711,7 +6844,7 @@ ice_add_debug_sysctls(struct ice_softc *sc)
dump_list = SYSCTL_CHILDREN(dump_node);
SYSCTL_ADD_PROC(ctx, dump_list, OID_AUTO, "clusters",
- ICE_CTLFLAG_DEBUG | CTLTYPE_U16 | CTLFLAG_RW, sc, 0,
+ ICE_CTLFLAG_DEBUG | CTLTYPE_U32 | CTLFLAG_RW, sc, 0,
ice_sysctl_fw_debug_dump_cluster_setting, "SU",
ICE_SYSCTL_HELP_FW_DEBUG_DUMP_CLUSTER_SETTING);
@@ -6837,6 +6970,7 @@ ice_vsi_set_rss_params(struct ice_vsi *vsi)
vsi->rss_lut_type = ICE_LUT_PF;
break;
case ICE_VSI_VF:
+ case ICE_VSI_VMDQ2:
vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
vsi->rss_lut_type = ICE_LUT_VSI;
break;
@@ -10034,7 +10168,7 @@ ice_alloc_intr_tracking(struct ice_softc *sc)
int err;
/* Initialize the interrupt allocation manager */
- err = ice_resmgr_init_contig_only(&sc->imgr,
+ err = ice_resmgr_init_contig_only(&sc->dev_imgr,
hw->func_caps.common_cap.num_msix_vectors);
if (err) {
device_printf(dev, "Unable to initialize PF interrupt manager: %s\n",
@@ -10066,7 +10200,7 @@ ice_alloc_intr_tracking(struct ice_softc *sc)
return (0);
free_imgr:
- ice_resmgr_destroy(&sc->imgr);
+ ice_resmgr_destroy(&sc->dev_imgr);
return (err);
}
@@ -10083,19 +10217,21 @@ void
ice_free_intr_tracking(struct ice_softc *sc)
{
if (sc->pf_imap) {
- ice_resmgr_release_map(&sc->imgr, sc->pf_imap,
+ ice_resmgr_release_map(&sc->dev_imgr, sc->pf_imap,
sc->lan_vectors);
free(sc->pf_imap, M_ICE);
sc->pf_imap = NULL;
}
if (sc->rdma_imap) {
- ice_resmgr_release_map(&sc->imgr, sc->rdma_imap,
+ ice_resmgr_release_map(&sc->dev_imgr, sc->rdma_imap,
sc->lan_vectors);
free(sc->rdma_imap, M_ICE);
sc->rdma_imap = NULL;
}
- ice_resmgr_destroy(&sc->imgr);
+ ice_resmgr_destroy(&sc->dev_imgr);
+
+ ice_resmgr_destroy(&sc->os_imgr);
}
/**
@@ -10869,6 +11005,7 @@ ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
/* Returned arguments from the Admin Queue */
u16 ret_buf_size = 0;
+ u16 ret_next_cluster = 0;
u16 ret_next_table = 0;
u32 ret_next_index = 0;
@@ -10935,7 +11072,8 @@ ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
memset(ddc->data, 0, ifd_len - sizeof(*ddc));
status = ice_aq_get_internal_data(hw, ddc->cluster_id, ddc->table_id, ddc->offset,
- (u8 *)ddc->data, ddc->data_size, &ret_buf_size, &ret_next_table, &ret_next_index, NULL);
+ (u8 *)ddc->data, ddc->data_size, &ret_buf_size,
+ &ret_next_cluster, &ret_next_table, &ret_next_index, NULL);
ice_debug(hw, ICE_DBG_DIAG, "%s: ret_buf_size %d, ret_next_table %d, ret_next_index %d\n",
__func__, ret_buf_size, ret_next_table, ret_next_index);
if (status) {
@@ -10950,6 +11088,7 @@ ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
ddc->table_id = ret_next_table;
ddc->offset = ret_next_index;
ddc->data_size = ret_buf_size;
+ ddc->cluster_id = ret_next_cluster;
/* Copy the possibly modified contents of the handled request out */
err = copyout(ddc, ifd->ifd_data, ifd->ifd_len);
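The ioctl now also hands the next cluster ID back to the caller, so a userspace consumer can page through the entire dump by resubmitting the structure it received. A hedged sketch of that loop follows; struct ifdrv and SIOCGDRVSPEC are the standard FreeBSD driver-ioctl interface, while the command value, chunk size, and loop-exit condition are illustrative assumptions, and struct ice_debug_dump_cmd comes from the driver's ice_lib.h.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <stdlib.h>
#include <string.h>

#define BUF_SZ	4096			/* illustrative chunk size */

static void
dump_all(int sock, unsigned long ddump_cmd)	/* ddump_cmd: assumed value */
{
	struct ice_debug_dump_cmd *ddc = calloc(1, sizeof(*ddc) + BUF_SZ);
	struct ifdrv ifd;

	memset(&ifd, 0, sizeof(ifd));
	strlcpy(ifd.ifd_name, "ice0", sizeof(ifd.ifd_name));
	ifd.ifd_cmd = ddump_cmd;
	ifd.ifd_len = sizeof(*ddc) + BUF_SZ;
	ifd.ifd_data = ddc;

	do {
		ddc->data_size = BUF_SZ;
		if (ioctl(sock, SIOCGDRVSPEC, &ifd) == -1)
			break;
		/* consume ddc->data_size bytes from ddc->data here;
		 * cluster_id/table_id/offset now describe the next read
		 */
	} while (ddc->data_size != 0);	/* exit condition is illustrative */

	free(ddc);
}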
@@ -11087,3 +11226,146 @@ ice_sysctl_temperature(SYSCTL_HANDLER_ARGS)
return sysctl_handle_8(oidp, &resp.data.s0f0.temp, 0, req);
}
+
+/**
+ * ice_sysctl_create_mirror_interface - Create a new ifnet that monitors
+ * traffic from the main PF VSI
+ */
+static int
+ice_sysctl_create_mirror_interface(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ device_t dev = sc->dev;
+ int ret;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ ret = priv_check(curthread, PRIV_DRIVER);
+ if (ret)
+ return (ret);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ /* If the user hasn't written "1" to this sysctl yet: */
+ if (!ice_test_state(&sc->state, ICE_STATE_DO_CREATE_MIRR_INTFC)) {
+ /* Avoid output on the first set of reads to this sysctl in
+ * order to prevent a null byte from being written to the
+ * end result when called via sysctl(8).
+ */
+ if (req->oldptr == NULL && req->newptr == NULL) {
+ ret = SYSCTL_OUT(req, 0, 0);
+ return (ret);
+ }
+
+ char input_buf[2] = "";
+ ret = sysctl_handle_string(oidp, input_buf, sizeof(input_buf), req);
+ if ((ret) || (req->newptr == NULL))
+ return (ret);
+
+ /* If we get '1', then indicate we'll create the interface in
+ * the next sysctl read call.
+ */
+ if (input_buf[0] == '1') {
+ if (sc->mirr_if) {
+ device_printf(dev,
+ "Mirror interface %s already exists!\n",
+ if_name(sc->mirr_if->ifp));
+ return (EEXIST);
+ }
+ ice_set_state(&sc->state, ICE_STATE_DO_CREATE_MIRR_INTFC);
+ return (0);
+ }
+
+ return (EINVAL);
+ }
+
+ /* --- "Do Create Mirror Interface" is set --- */
+
+ /* Caller just wants the upper bound for size */
+ if (req->oldptr == NULL && req->newptr == NULL) {
+ ret = SYSCTL_OUT(req, 0, 128);
+ return (ret);
+ }
+
+ device_printf(dev, "Creating new mirroring interface...\n");
+
+ ret = ice_create_mirror_interface(sc);
+ if (ret)
+ return (ret);
+
+ ice_clear_state(&sc->state, ICE_STATE_DO_CREATE_MIRR_INTFC);
+
+ ret = sysctl_handle_string(oidp, __DECONST(char *, "Interface attached"), 0, req);
+ return (ret);
+}
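The handler is driven in two steps: a write of "1" arms the state bit, and the following read performs the creation and returns a status string. A small userland sketch using sysctlbyname(3) is below; the OID path is an assumption, since the sysctl node name is not shown in this part of the patch.

#include <sys/types.h>
#include <sys/sysctl.h>

int
create_mirror(void)
{
	/* OID path is assumed; adjust to the driver's actual node name */
	const char *oid = "dev.ice.0.create_mirror_interface";
	char status[128] = "";
	size_t len = sizeof(status);

	/* Step 1: write "1" to arm ICE_STATE_DO_CREATE_MIRR_INTFC */
	if (sysctlbyname(oid, NULL, NULL, "1", 2) == -1)
		return (-1);
	/* Step 2: read back; this triggers ice_create_mirror_interface() */
	return (sysctlbyname(oid, status, &len, NULL, 0));
}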
+
+/**
+ * ice_sysctl_destroy_mirror_interface - Destroy network interface that monitors
+ * traffic from the main PF VSI
+ */
+static int
+ice_sysctl_destroy_mirror_interface(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ device_t dev = sc->dev;
+ int ret;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ ret = priv_check(curthread, PRIV_DRIVER);
+ if (ret)
+ return (ret);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ /* If the user hasn't written "1" to this sysctl yet: */
+ if (!ice_test_state(&sc->state, ICE_STATE_DO_DESTROY_MIRR_INTFC)) {
+ /* Avoid output on the first set of reads to this sysctl in
+ * order to prevent a null byte from being written to the
+ * end result when called via sysctl(8).
+ */
+ if (req->oldptr == NULL && req->newptr == NULL) {
+ ret = SYSCTL_OUT(req, 0, 0);
+ return (ret);
+ }
+
+ char input_buf[2] = "";
+ ret = sysctl_handle_string(oidp, input_buf, sizeof(input_buf), req);
+ if ((ret) || (req->newptr == NULL))
+ return (ret);
+
+		/* If we get '1', then indicate we'll destroy the interface in
+ * the next sysctl read call.
+ */
+ if (input_buf[0] == '1') {
+ if (!sc->mirr_if) {
+ device_printf(dev,
+ "No mirror interface exists!\n");
+ return (EINVAL);
+ }
+ ice_set_state(&sc->state, ICE_STATE_DO_DESTROY_MIRR_INTFC);
+ return (0);
+ }
+
+ return (EINVAL);
+ }
+
+ /* --- "Do Destroy Mirror Interface" is set --- */
+
+ /* Caller just wants the upper bound for size */
+ if (req->oldptr == NULL && req->newptr == NULL) {
+ ret = SYSCTL_OUT(req, 0, 128);
+ return (ret);
+ }
+
+ device_printf(dev, "Destroying mirroring interface...\n");
+
+ ice_destroy_mirror_interface(sc);
+
+ ice_clear_state(&sc->state, ICE_STATE_DO_DESTROY_MIRR_INTFC);
+
+ ret = sysctl_handle_string(oidp, __DECONST(char *, "Interface destroyed"), 0, req);
+ return (ret);
+}
diff --git a/sys/dev/ice/ice_lib.h b/sys/dev/ice/ice_lib.h
index fd8c8881cdbd..cfd848d370bb 100644
--- a/sys/dev/ice/ice_lib.h
+++ b/sys/dev/ice/ice_lib.h
@@ -54,6 +54,7 @@
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ethernet.h>
+#include <net/if_types.h>
#include <sys/bitstring.h>
@@ -265,6 +266,10 @@ struct ice_bar_info {
#define ICE_DEFAULT_VF_QUEUES 4
/*
+ * An invalid VSI number to indicate that mirroring should be disabled.
+ */
+#define ICE_INVALID_MIRROR_VSI ((u16)-1)
+/*
* The maximum number of RX queues allowed per TC in a VSI.
*/
#define ICE_MAX_RXQS_PER_TC 256
@@ -372,7 +377,7 @@ enum ice_rx_dtype {
* Only certain cluster IDs are valid for the FW debug dump functionality,
* so define a mask of those here.
*/
-#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK 0x1af
+#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK 0x4001AF
struct ice_softc;
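For reference, the widened mask 0x4001AF covers clusters 0-3, 5, 7-8, and now 22 (bit 0x400000). A handler validating a user-supplied cluster mask against it could be as simple as the illustrative helper below (the helper name is not part of the patch):

/* Illustrative check: reject any cluster bits outside the valid mask */
static inline bool
ice_fw_dump_mask_valid(u32 requested)
{
	return ((requested & ~ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK) == 0);
}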
@@ -513,8 +518,6 @@ struct ice_vsi {
u16 *tx_qmap; /* Tx VSI to PF queue mapping */
u16 *rx_qmap; /* Rx VSI to PF queue mapping */
- bitstr_t *vmap; /* Vector(s) assigned to VSI */
-
enum ice_resmgr_alloc_type qmap_type;
struct ice_tx_queue *tx_queues; /* Tx queue array */
@@ -556,6 +559,11 @@ struct ice_vsi {
/* VSI-level stats */
struct ice_vsi_hw_stats hw_stats;
+
+ /* VSI mirroring details */
+ u16 mirror_src_vsi;
+ u16 rule_mir_ingress;
+ u16 rule_mir_egress;
};
/**
@@ -564,7 +572,7 @@ struct ice_vsi {
*/
struct ice_debug_dump_cmd {
u32 offset; /* offset to read/write from table, in bytes */
- u16 cluster_id;
+ u16 cluster_id; /* also used to get next cluster id */
u16 table_id;
u16 data_size; /* size of data field, in bytes */
u16 reserved1;
@@ -587,6 +595,7 @@ enum ice_state {
ICE_STATE_RESET_OICR_RECV,
ICE_STATE_RESET_PFR_REQ,
ICE_STATE_PREPARED_FOR_RESET,
+ ICE_STATE_SUBIF_NEEDS_REINIT,
ICE_STATE_RESET_FAILED,
ICE_STATE_DRIVER_INITIALIZED,
ICE_STATE_NO_MEDIA,
@@ -601,6 +610,8 @@ enum ice_state {
ICE_STATE_DO_FW_DEBUG_DUMP,
ICE_STATE_LINK_ACTIVE_ON_DOWN,
ICE_STATE_FIRST_INIT_LINK,
+ ICE_STATE_DO_CREATE_MIRR_INTFC,
+ ICE_STATE_DO_DESTROY_MIRR_INTFC,
/* This entry must be last */
ICE_STATE_LAST,
};
@@ -797,6 +808,16 @@ void ice_request_stack_reinit(struct ice_softc *sc);
/* Details of how to check if the network stack is detaching us */
bool ice_driver_is_detaching(struct ice_softc *sc);
+/* Details of how to setup/teardown a mirror interface */
+/**
+ * @brief Create an interface for mirroring
+ */
+int ice_create_mirror_interface(struct ice_softc *sc);
+/**
+ * @brief Destroy created mirroring interface
+ */
+void ice_destroy_mirror_interface(struct ice_softc *sc);
+
const char * ice_fw_module_str(enum ice_aqc_fw_logging_mod module);
void ice_add_fw_logging_tunables(struct ice_softc *sc,
struct sysctl_oid *parent);
@@ -904,5 +925,6 @@ void ice_cfg_pba_num(struct ice_softc *sc);
int ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd);
u8 ice_dcb_get_tc_map(const struct ice_dcbx_cfg *dcbcfg);
void ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib);
+int ice_setup_vsi_mirroring(struct ice_vsi *vsi);
#endif /* _ICE_LIB_H_ */
diff --git a/sys/dev/ice/ice_nvm.c b/sys/dev/ice/ice_nvm.c
index 91393c1fc8b3..5234cb265f9b 100644
--- a/sys/dev/ice/ice_nvm.c
+++ b/sys/dev/ice/ice_nvm.c
@@ -296,7 +296,7 @@ ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
}
/**
- * ice_check_sr_access_params - verify params for Shadow RAM R/W operations.
+ * ice_check_sr_access_params - verify params for Shadow RAM R/W operations
* @hw: pointer to the HW structure
* @offset: offset in words from module start
* @words: number of words to access
@@ -356,7 +356,7 @@ enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
}
/**
- * ice_write_sr_aq - Writes Shadow RAM.
+ * ice_write_sr_aq - Writes Shadow RAM
* @hw: pointer to the HW structure
* @offset: offset in words from module start
* @words: number of words to write
@@ -425,7 +425,6 @@ enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
if (hw->flash.blank_nvm_mode)
return ICE_SUCCESS;
@@ -873,7 +872,7 @@ static enum ice_status ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select
* @nvm: pointer to NVM info structure
*
* Read the NVM EETRACK ID and map version of the main NVM image bank, filling
- * in the nvm info structure.
+ * in the NVM info structure.
*/
static enum ice_status
ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm)
@@ -1006,7 +1005,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
orom_data, hw->flash.banks.orom_size);
if (status) {
ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
- return status;
+ goto exit_error;
}
/* Scan the memory buffer to locate the CIVD data section */
@@ -1030,7 +1029,8 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
if (sum) {
ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
sum);
- goto err_invalid_checksum;
+ status = ICE_ERR_NVM;
+ goto exit_error;
}
*civd = *tmp;
@@ -1038,11 +1038,12 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
return ICE_SUCCESS;
}
+ status = ICE_ERR_NVM;
ice_debug(hw, ICE_DBG_NVM, "Unable to locate CIVD data within the Option ROM\n");
-err_invalid_checksum:
+exit_error:
ice_free(hw, orom_data);
- return ICE_ERR_NVM;
+ return status;
}
/**
@@ -1200,7 +1201,7 @@ enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netli
}
/**
- * ice_discover_flash_size - Discover the available flash size.
+ * ice_discover_flash_size - Discover the available flash size
* @hw: pointer to the HW struct
*
* The device flash could be up to 16MB in size. However, it is possible that
@@ -1457,6 +1458,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
status = ice_get_netlist_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->netlist);
if (status)
ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");
+
return ICE_SUCCESS;
}
@@ -1708,7 +1710,6 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
-
ice_release_nvm(hw);
if (!status)
@@ -1771,19 +1772,19 @@ ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{
struct ice_aqc_nvm *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ enum ice_status err;
cmd = &desc.params.nvm;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
- cmd->cmd_flags = ICE_LO_BYTE(cmd_flags);
- cmd->offset_high = ICE_HI_BYTE(cmd_flags);
+ cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
+ cmd->offset_high = (u8)((cmd_flags >> 8) & 0xFF);
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
- if (!status && response_flags)
+ err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!err && response_flags)
*response_flags = cmd->cmd_flags;
- return status;
+ return err;
}
/**
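The rewritten ice_nvm_write_activate() in the hunk above splits the 16-bit cmd_flags word across two byte-wide descriptor fields. A worked example with an illustrative value:

/* Worked example of the byte split (value is illustrative) */
u16 cmd_flags = 0x01A5;
u8 lo = (u8)(cmd_flags & 0xFF);		/* 0xA5 -> cmd->cmd_flags   */
u8 hi = (u8)((cmd_flags >> 8) & 0xFF);	/* 0x01 -> cmd->offset_high */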
diff --git a/sys/dev/ice/ice_nvm.h b/sys/dev/ice/ice_nvm.h
index 32f791f63102..f43381c10ac5 100644
--- a/sys/dev/ice/ice_nvm.h
+++ b/sys/dev/ice/ice_nvm.h
@@ -108,6 +108,7 @@ ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
enum ice_status
ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data);
+
enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
diff --git a/sys/dev/ice/ice_protocol_type.h b/sys/dev/ice/ice_protocol_type.h
index ced1bc5037dd..300d61bfb5d9 100644
--- a/sys/dev/ice/ice_protocol_type.h
+++ b/sys/dev/ice/ice_protocol_type.h
@@ -179,6 +179,7 @@ enum ice_prot_id {
#define ICE_TUN_FLAG_MDID_OFF(word) \
(ICE_MDID_SIZE * (ICE_TUN_FLAG_MDID + (word)))
#define ICE_TUN_FLAG_MASK 0xFF
+#define ICE_FROM_NETWORK_FLAG_MASK 0x8
#define ICE_DIR_FLAG_MASK 0x10
#define ICE_TUN_FLAG_IN_VLAN_MASK 0x80 /* VLAN inside tunneled header */
#define ICE_TUN_FLAG_VLAN_MASK 0x01
diff --git a/sys/dev/ice/ice_sched.c b/sys/dev/ice/ice_sched.c
index ed9432292c0c..cd0d7de62b33 100644
--- a/sys/dev/ice/ice_sched.c
+++ b/sys/dev/ice/ice_sched.c
@@ -55,9 +55,8 @@ ice_sched_add_root_node(struct ice_port_info *pi,
if (!root)
return ICE_ERR_NO_MEMORY;
- /* coverity[suspicious_sizeof] */
root->children = (struct ice_sched_node **)
- ice_calloc(hw, hw->max_children[0], sizeof(*root));
+ ice_calloc(hw, hw->max_children[0], sizeof(*root->children));
if (!root->children) {
ice_free(hw, root);
return ICE_ERR_NO_MEMORY;
@@ -213,9 +212,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
if (!node)
return ICE_ERR_NO_MEMORY;
if (hw->max_children[layer]) {
- /* coverity[suspicious_sizeof] */
node->children = (struct ice_sched_node **)
- ice_calloc(hw, hw->max_children[layer], sizeof(*node));
+ ice_calloc(hw, hw->max_children[layer],
+ sizeof(*node->children));
if (!node->children) {
ice_free(hw, node);
return ICE_ERR_NO_MEMORY;
diff --git a/sys/dev/ice/ice_strings.c b/sys/dev/ice/ice_strings.c
index 7feaef163ef6..b341b2815fea 100644
--- a/sys/dev/ice/ice_strings.c
+++ b/sys/dev/ice/ice_strings.c
@@ -1018,6 +1018,8 @@ ice_state_to_str(enum ice_state state)
return "RESET_PFR_REQ";
case ICE_STATE_PREPARED_FOR_RESET:
return "PREPARED_FOR_RESET";
+ case ICE_STATE_SUBIF_NEEDS_REINIT:
+ return "SUBIF_NEEDS_REINIT";
case ICE_STATE_RESET_FAILED:
return "RESET_FAILED";
case ICE_STATE_DRIVER_INITIALIZED:
@@ -1046,6 +1048,10 @@ ice_state_to_str(enum ice_state state)
return "LINK_ACTIVE_ON_DOWN";
case ICE_STATE_FIRST_INIT_LINK:
return "FIRST_INIT_LINK";
+ case ICE_STATE_DO_CREATE_MIRR_INTFC:
+ return "DO_CREATE_MIRR_INTFC";
+ case ICE_STATE_DO_DESTROY_MIRR_INTFC:
+ return "DO_DESTROY_MIRR_INTFC";
case ICE_STATE_LAST:
return NULL;
}
diff --git a/sys/dev/ice/ice_switch.c b/sys/dev/ice/ice_switch.c
index 44be3999d3de..e02390e10ef8 100644
--- a/sys/dev/ice/ice_switch.c
+++ b/sys/dev/ice/ice_switch.c
@@ -1180,6 +1180,7 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
*
* In all other cases, the LAN enable has to be set to false.
*/
+
if (hw->evb_veb) {
if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
fi->lkup_type == ICE_SW_LKUP_PROMISC ||
@@ -1196,6 +1197,13 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
fi->lan_en = true;
}
}
+ /* To be able to receive packets coming from the VF on the same PF,
+	 * a unicast filter needs to be added without the LB_EN bit
+ */
+ if (fi->flag & ICE_FLTR_RX_LB) {
+ fi->lb_en = false;
+ fi->lan_en = true;
+ }
}
/**
@@ -2023,7 +2031,7 @@ ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
new_fltr = &f_entry->fltr_info;
if (new_fltr->flag & ICE_FLTR_RX)
new_fltr->src = lport;
- else if (new_fltr->flag & ICE_FLTR_TX)
+ else if (new_fltr->flag & (ICE_FLTR_TX | ICE_FLTR_RX_LB))
new_fltr->src =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@@ -3259,12 +3267,15 @@ static void ice_determine_promisc_mask(struct ice_fltr_info *fi,
{
u16 vid = fi->l_data.mac_vlan.vlan_id;
u8 *macaddr = fi->l_data.mac.mac_addr;
+ bool is_rx_lb_fltr = false;
bool is_tx_fltr = false;
ice_zero_bitmap(promisc_mask, ICE_PROMISC_MAX);
if (fi->flag == ICE_FLTR_TX)
is_tx_fltr = true;
+ if (fi->flag == ICE_FLTR_RX_LB)
+ is_rx_lb_fltr = true;
if (IS_BROADCAST_ETHER_ADDR(macaddr)) {
ice_set_bit(is_tx_fltr ? ICE_PROMISC_BCAST_TX
@@ -3273,8 +3284,12 @@ static void ice_determine_promisc_mask(struct ice_fltr_info *fi,
ice_set_bit(is_tx_fltr ? ICE_PROMISC_MCAST_TX
: ICE_PROMISC_MCAST_RX, promisc_mask);
} else if (IS_UNICAST_ETHER_ADDR(macaddr)) {
- ice_set_bit(is_tx_fltr ? ICE_PROMISC_UCAST_TX
- : ICE_PROMISC_UCAST_RX, promisc_mask);
+ if (is_tx_fltr)
+ ice_set_bit(ICE_PROMISC_UCAST_TX, promisc_mask);
+ else if (is_rx_lb_fltr)
+ ice_set_bit(ICE_PROMISC_UCAST_RX_LB, promisc_mask);
+ else
+ ice_set_bit(ICE_PROMISC_UCAST_RX, promisc_mask);
}
if (vid) {
@@ -3510,7 +3525,7 @@ _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
struct ice_fltr_list_entry f_list_entry;
struct ice_fltr_info new_fltr;
enum ice_status status = ICE_SUCCESS;
- bool is_tx_fltr;
+ bool is_tx_fltr, is_rx_lb_fltr;
u16 hw_vsi_id;
int pkt_type;
u8 recipe_id;
@@ -3547,6 +3562,7 @@ _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
pkt_type = 0;
is_tx_fltr = false;
+ is_rx_lb_fltr = false;
if (ice_test_and_clear_bit(ICE_PROMISC_UCAST_RX,
p_mask)) {
@@ -3569,6 +3585,10 @@ _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
p_mask)) {
pkt_type = BCAST_FLTR;
is_tx_fltr = true;
+ } else if (ice_test_and_clear_bit(ICE_PROMISC_UCAST_RX_LB,
+ p_mask)) {
+ pkt_type = UCAST_FLTR;
+ is_rx_lb_fltr = true;
}
/* Check for VLAN promiscuous flag */
@@ -3596,6 +3616,9 @@ _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
if (is_tx_fltr) {
new_fltr.flag |= ICE_FLTR_TX;
new_fltr.src = hw_vsi_id;
+ } else if (is_rx_lb_fltr) {
+ new_fltr.flag |= ICE_FLTR_RX_LB;
+ new_fltr.src = hw_vsi_id;
} else {
new_fltr.flag |= ICE_FLTR_RX;
new_fltr.src = lport;
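With the new ICE_FLTR_RX_LB flag threaded through, the filter source now follows a three-way rule: Tx and Rx-loopback filters use the VSI's own hardware number, while plain Rx filters use the logical port. An illustrative helper distilling that selection (not part of the patch; types and flags are the driver's own):

/* Illustrative distillation of the source selection above */
static u16
ice_fltr_src(const struct ice_fltr_info *fi, u16 hw_vsi_id, u8 lport)
{
	if (fi->flag & (ICE_FLTR_TX | ICE_FLTR_RX_LB))
		return (hw_vsi_id);	/* VSI-originated traffic */
	return (lport);			/* traffic from the physical port */
}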
diff --git a/sys/dev/ice/ice_switch.h b/sys/dev/ice/ice_switch.h
index 70b5df769175..60d3dfdf2fb7 100644
--- a/sys/dev/ice/ice_switch.h
+++ b/sys/dev/ice/ice_switch.h
@@ -38,9 +38,11 @@
#define ICE_SW_CFG_MAX_BUF_LEN 2048
#define ICE_MAX_SW 256
#define ICE_DFLT_VSI_INVAL 0xff
-#define ICE_FLTR_RX BIT(0)
-#define ICE_FLTR_TX BIT(1)
-#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
+
+#define ICE_FLTR_RX BIT(0)
+#define ICE_FLTR_TX BIT(1)
+#define ICE_FLTR_RX_LB BIT(2)
+#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
#define ICE_PROFID_IPV4_GTPC_TEID 41
#define ICE_PROFID_IPV4_GTPC_NO_TEID 42
@@ -412,6 +414,7 @@ enum ice_promisc_flags {
ICE_PROMISC_BCAST_TX,
ICE_PROMISC_VLAN_RX,
ICE_PROMISC_VLAN_TX,
+ ICE_PROMISC_UCAST_RX_LB,
/* Max value */
ICE_PROMISC_MAX,
};
diff --git a/sys/dev/ice/ice_type.h b/sys/dev/ice/ice_type.h
index e153a772f746..70312a28e4e4 100644
--- a/sys/dev/ice/ice_type.h
+++ b/sys/dev/ice/ice_type.h
@@ -322,6 +322,7 @@ enum ice_media_type {
enum ice_vsi_type {
ICE_VSI_PF = 0,
ICE_VSI_VF = 1,
+ ICE_VSI_VMDQ2 = 2,
ICE_VSI_LB = 6,
};
@@ -452,6 +453,9 @@ struct ice_hw_common_caps {
/* SR-IOV virtualization */
u8 sr_iov_1_1; /* SR-IOV enabled */
+ /* VMDQ */
+ u8 vmdq; /* VMDQ supported */
+
/* EVB capabilities */
u8 evb_802_1_qbg; /* Edge Virtual Bridging */
u8 evb_802_1_qbh; /* Bridge Port Extension */
@@ -1171,8 +1175,11 @@ struct ice_hw {
struct LIST_HEAD_TYPE fl_profs[ICE_BLK_COUNT];
struct ice_lock rss_locks; /* protect RSS configuration */
struct LIST_HEAD_TYPE rss_list_head;
+ u16 vsi_owning_pf_lut; /* SW IDX of VSI that acquired PF RSS LUT */
struct ice_mbx_snapshot mbx_snapshot;
u8 dvm_ena;
+
+ bool subscribable_recipes_supported;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
@@ -1428,11 +1435,17 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
+/* FW branch number for hardware families */
+#define ICE_FW_VER_BRANCH_E82X 0
+#define ICE_FW_VER_BRANCH_E810 1
+
/* FW version for FEC disable in Auto FEC mode */
-#define ICE_FW_FEC_DIS_AUTO_BRANCH 1
#define ICE_FW_FEC_DIS_AUTO_MAJ 7
#define ICE_FW_FEC_DIS_AUTO_MIN 0
#define ICE_FW_FEC_DIS_AUTO_PATCH 5
+#define ICE_FW_FEC_DIS_AUTO_MAJ_E82X 7
+#define ICE_FW_FEC_DIS_AUTO_MIN_E82X 1
+#define ICE_FW_FEC_DIS_AUTO_PATCH_E82X 2
/* AQ API version for FW health reports */
#define ICE_FW_API_HEALTH_REPORT_MAJ 1
diff --git a/sys/dev/ice/if_ice_iflib.c b/sys/dev/ice/if_ice_iflib.c
index fb710d078b42..4e451bf3fb55 100644
--- a/sys/dev/ice/if_ice_iflib.c
+++ b/sys/dev/ice/if_ice_iflib.c
@@ -84,6 +84,30 @@ static int ice_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int ice_if_suspend(if_ctx_t ctx);
static int ice_if_resume(if_ctx_t ctx);
static bool ice_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
+static int ice_setup_mirror_vsi(struct ice_mirr_if *mif);
+static int ice_wire_mirror_intrs(struct ice_mirr_if *mif);
+static void ice_free_irqvs_subif(struct ice_mirr_if *mif);
+static void *ice_subif_register(device_t);
+static void ice_subif_setup_scctx(struct ice_mirr_if *mif);
+static int ice_subif_rebuild(struct ice_softc *sc);
+static int ice_subif_rebuild_vsi_qmap(struct ice_softc *sc);
+
+/* Iflib API */
+static int ice_subif_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
+ uint64_t *paddrs, int ntxqs, int ntxqsets);
+static int ice_subif_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
+ uint64_t *paddrs, int nrxqs, int nrxqsets);
+static int ice_subif_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
+static int ice_subif_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
+static void ice_subif_if_intr_enable(if_ctx_t ctx);
+static int ice_subif_if_msix_intr_assign(if_ctx_t ctx, int msix);
+static void ice_subif_if_init(if_ctx_t ctx);
+static void ice_subif_if_stop(if_ctx_t ctx);
+static void ice_subif_if_queues_free(if_ctx_t ctx);
+static int ice_subif_if_attach_pre(if_ctx_t);
+static int ice_subif_if_attach_post(if_ctx_t);
+static void ice_subif_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
+static int ice_subif_if_promisc_set(if_ctx_t ctx, int flags);
static int ice_msix_que(void *arg);
static int ice_msix_admin(void *arg);
@@ -575,7 +599,7 @@ reinit_hw:
goto free_tx_qmgr;
}
- /* Initialize the interrupt resource manager */
+ /* Initialize the PF device interrupt resource manager */
err = ice_alloc_intr_tracking(sc);
if (err)
/* Errors are already printed */
@@ -929,6 +953,9 @@ ice_if_detach(if_ctx_t ctx)
mtx_unlock(&sc->admin_mtx);
mtx_destroy(&sc->admin_mtx);
+ /* Remove additional interfaces if they exist */
+ if (sc->mirr_if)
+ ice_destroy_mirror_interface(sc);
ice_rdma_pf_detach(sc);
/* Free allocated media types */
@@ -1300,8 +1327,16 @@ ice_msix_admin(void *arg)
* the ICE_STATE_RESET_*_REQ bits, which will trigger the
* correct type of reset.
*/
- if (!ice_testandset_state(&sc->state, ICE_STATE_RESET_OICR_RECV))
+ if (!ice_testandset_state(&sc->state, ICE_STATE_RESET_OICR_RECV)) {
hw->reset_ongoing = true;
+ /*
+			 * During the NVM update process there is a driver reset, and the
+			 * link goes down and then comes back up. The if-statement below
+			 * prevents a second link flap from occurring in ice_if_init().
+ */
+ if (if_getflags(sc->ifp) & IFF_UP)
+ ice_set_state(&sc->state, ICE_STATE_FIRST_INIT_LINK);
+ }
}
if (oicr & PFINT_OICR_ECC_ERR_M) {
@@ -1346,6 +1381,9 @@ ice_msix_admin(void *arg)
* @remark This driver will only use MSI-X vectors. If this is not possible,
* neither MSI or legacy interrupts will be tried.
*
+ * @remark os_imgr, if used, is initialized here to keep track of the
+ * assignments of extra MSI-X vectors.
+ *
* @post on success this function must set the following scctx parameters:
* isc_vectors, isc_nrxqsets, isc_ntxqsets, and isc_intr.
*
@@ -1424,9 +1462,14 @@ ice_allocate_msix(struct ice_softc *sc)
* to allocate one vector for administrative tasks.
*/
requested = rdma + queues + 1;
+ /* Add extra vectors requested by the user for later subinterface
+ * creation.
+ */
+ if_ctx_t ctx = sc->ctx;
+ u32 extra_vectors = iflib_get_extra_msix_vectors_sysctl(ctx);
+ requested += extra_vectors;
vectors = requested;
-
err = pci_alloc_msix(dev, &vectors);
if (err) {
device_printf(dev, "Failed to allocate %d MSI-X vectors, err %s\n",
@@ -1441,6 +1484,8 @@ ice_allocate_msix(struct ice_softc *sc)
device_printf(dev, "Requested %d MSI-X vectors, but got only %d\n",
requested, vectors);
+ diff += extra_vectors;
+ extra_vectors = 0;
/*
* The OS didn't grant us the requested number of vectors.
* Check to see if we can reduce demands by limiting the
@@ -1481,6 +1526,7 @@ ice_allocate_msix(struct ice_softc *sc)
device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
vectors);
+ /* Split resulting vectors back into requested splits */
scctx->isc_vectors = vectors;
scctx->isc_nrxqsets = queues;
scctx->isc_ntxqsets = queues;
@@ -1496,23 +1542,37 @@ ice_allocate_msix(struct ice_softc *sc)
/* Keep track of which interrupt indices are being used for what */
sc->lan_vectors = vectors - rdma;
- err = ice_resmgr_assign_contiguous(&sc->imgr, sc->pf_imap, sc->lan_vectors);
+ sc->lan_vectors -= extra_vectors;
+ err = ice_resmgr_assign_contiguous(&sc->dev_imgr, sc->pf_imap, sc->lan_vectors);
if (err) {
device_printf(dev, "Unable to assign PF interrupt mapping: %s\n",
ice_err_str(err));
goto err_pci_release_msi;
}
- err = ice_resmgr_assign_contiguous(&sc->imgr, sc->rdma_imap, rdma);
+ err = ice_resmgr_assign_contiguous(&sc->dev_imgr, sc->rdma_imap, rdma);
if (err) {
device_printf(dev, "Unable to assign PF RDMA interrupt mapping: %s\n",
ice_err_str(err));
- ice_resmgr_release_map(&sc->imgr, sc->pf_imap,
- sc->lan_vectors);
- goto err_pci_release_msi;
+ goto err_release_pf_imap;
+ }
+ sc->extra_vectors = extra_vectors;
+ /* Setup another resource manager to track the assignments of extra OS
+ * vectors. These OS interrupt allocations don't need to be contiguous,
+ * unlike the ones that come from the device.
+ */
+ err = ice_resmgr_init(&sc->os_imgr, sc->extra_vectors);
+ if (err) {
+ device_printf(dev, "Unable to initialize OS extra interrupt manager: %s\n",
+ ice_err_str(err));
+ ice_resmgr_release_map(&sc->dev_imgr, sc->rdma_imap,
+ rdma);
+ goto err_release_pf_imap;
}
-
return (0);
+err_release_pf_imap:
+ ice_resmgr_release_map(&sc->dev_imgr, sc->pf_imap,
+ sc->lan_vectors);
err_pci_release_msi:
pci_release_msi(dev);
err_free_msix_table:
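The budget now layers the user-requested spare vectors on top of the usual LAN + RDMA + admin split, and the spares are subtracted back out of lan_vectors once allocation succeeds. A worked count with assumed numbers:

/* Assumed: 8 LAN queues, 2 RDMA vectors, 4 user-requested spares */
int requested = 2 /* rdma */ + 8 /* queues */ + 1 /* admin */ + 4; /* 15 */
int vectors = requested;		/* if fully granted */
int lan_vectors = vectors - 2 - 4;	/* 9 = 8 queues + 1 admin */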
@@ -1586,12 +1646,14 @@ ice_if_msix_intr_assign(if_ctx_t ctx, int msix)
if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
return (0);
+ int rid;
for (i = 0, vector = 1; i < vsi->num_rx_queues; i++, vector++) {
struct ice_rx_queue *rxq = &vsi->rx_queues[i];
struct ice_tx_queue *txq = &vsi->tx_queues[i];
- int rid = vector + 1;
char irq_name[16];
+ rid = vector + 1;
+
snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
err = iflib_irq_alloc_generic(ctx, &sc->irqvs[vector].irq, rid,
IFLIB_INTR_RXTX, ice_msix_que,
@@ -1616,6 +1678,9 @@ ice_if_msix_intr_assign(if_ctx_t ctx, int msix)
txq->irqv = &sc->irqvs[vector];
}
+ /* For future interrupt assignments */
+ sc->last_rid = rid + sc->irdma_vectors;
+
return (0);
fail:
for (; i >= 0; i--, vector--)
@@ -1950,6 +2015,7 @@ ice_update_rx_mbuf_sz(struct ice_softc *sc)
static void
ice_if_init(if_ctx_t ctx)
{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
device_t dev = sc->dev;
int err;
@@ -2042,6 +2108,13 @@ ice_if_init(if_ctx_t ctx)
ice_rdma_pf_init(sc);
ice_set_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED);
+
+ if (sc->mirr_if && ice_testandclear_state(&mif->state, ICE_STATE_SUBIF_NEEDS_REINIT)) {
+ ice_clear_state(&mif->state, ICE_STATE_DRIVER_INITIALIZED);
+ iflib_request_reset(sc->mirr_if->subctx);
+ iflib_admin_intr_deferred(sc->mirr_if->subctx);
+ }
+
return;
err_stop_rx:
@@ -2130,6 +2203,10 @@ ice_if_timer(if_ctx_t ctx, uint16_t qid)
/* Update the primary VSI stats */
ice_update_vsi_hw_stats(&sc->pf_vsi);
+
+ /* Update mirror VSI stats */
+ if (sc->mirr_if && sc->mirr_if->if_attached)
+ ice_update_vsi_hw_stats(sc->mirr_if->vsi);
}
/**
@@ -2211,7 +2288,7 @@ ice_transition_recovery_mode(struct ice_softc *sc)
}
/* Destroy the interrupt manager */
- ice_resmgr_destroy(&sc->imgr);
+ ice_resmgr_destroy(&sc->dev_imgr);
/* Destroy the queue managers */
ice_resmgr_destroy(&sc->tx_qmgr);
ice_resmgr_destroy(&sc->rx_qmgr);
@@ -2367,6 +2444,12 @@ ice_prepare_for_reset(struct ice_softc *sc)
sc->pf_vsi.num_tx_queues);
ice_resmgr_release_map(&sc->rx_qmgr, sc->pf_vsi.rx_qmap,
sc->pf_vsi.num_rx_queues);
+ if (sc->mirr_if) {
+ ice_resmgr_release_map(&sc->tx_qmgr, sc->mirr_if->vsi->tx_qmap,
+ sc->mirr_if->num_irq_vectors);
+ ice_resmgr_release_map(&sc->rx_qmgr, sc->mirr_if->vsi->rx_qmap,
+ sc->mirr_if->num_irq_vectors);
+ }
ice_clear_hw_tbls(hw);
@@ -2557,6 +2640,8 @@ ice_rebuild(struct ice_softc *sc)
goto err_shutdown_ctrlq;
}
+ ice_clean_all_vsi_rss_cfg(sc);
+
ice_clear_pxe_mode(hw);
status = ice_get_caps(hw);
@@ -2634,7 +2719,14 @@ ice_rebuild(struct ice_softc *sc)
/* Now that the rebuild is finished, we're no longer prepared to reset */
ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
- log(LOG_INFO, "%s: device rebuild successful\n", if_name(sc->ifp));
+ /* Reconfigure the subinterface */
+ if (sc->mirr_if) {
+ err = ice_subif_rebuild(sc);
+ if (err)
+ goto err_deinit_pf_vsi;
+ }
+
+	log(LOG_INFO, "%s: device rebuild successful\n", if_name(sc->ifp));
/* In order to completely restore device functionality, the iflib core
* needs to be reset. We need to request an iflib reset. Additionally,
@@ -2928,6 +3020,7 @@ ice_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
static void
ice_if_stop(if_ctx_t ctx)
{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
ASSERT_CTX_LOCKED(sc);
@@ -2977,6 +3070,11 @@ ice_if_stop(if_ctx_t ctx)
if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) &&
!(if_getflags(sc->ifp) & IFF_UP) && sc->link_up)
ice_set_link(sc, false);
+
+ if (sc->mirr_if && ice_test_state(&mif->state, ICE_STATE_SUBIF_NEEDS_REINIT)) {
+ ice_subif_if_stop(sc->mirr_if->subctx);
+		device_printf(sc->dev, "The subinterface will also be brought down and back up after the reset\n");
+ }
}
/**
@@ -3162,17 +3260,1155 @@ ice_if_resume(if_ctx_t ctx)
* @ctx: iflib context pointer
* @event: event code to check
*
- * Defaults to returning false for unknown events.
+ * Defaults to returning true for unknown events.
*
* @returns true if iflib needs to reinit the interface
*/
static bool
-ice_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
+ice_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event)
{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+
switch (event) {
case IFLIB_RESTART_VLAN_CONFIG:
+ if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) &&
+ !(if_getflags(sc->ifp) & IFF_UP))
+ return false;
default:
- return (false);
+ return true;
+ }
+}
+
+extern struct if_txrx ice_subif_txrx;
+
+/**
+ * @var ice_subif_methods
+ * @brief ice driver method entry points
+ */
+static device_method_t ice_subif_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_register, ice_subif_register),
+ DEVMETHOD_END
+};
+
+/**
+ * @var ice_subif_driver
+ * @brief driver structure for the device API
+ */
+static driver_t ice_subif_driver = {
+ .name = "ice_subif",
+ .methods = ice_subif_methods,
+ .size = sizeof(struct ice_mirr_if),
+};
+
+static device_method_t ice_iflib_subif_methods[] = {
+ DEVMETHOD(ifdi_attach_pre, ice_subif_if_attach_pre),
+ DEVMETHOD(ifdi_attach_post, ice_subif_if_attach_post),
+ DEVMETHOD(ifdi_tx_queues_alloc, ice_subif_if_tx_queues_alloc),
+ DEVMETHOD(ifdi_rx_queues_alloc, ice_subif_if_rx_queues_alloc),
+ DEVMETHOD(ifdi_msix_intr_assign, ice_subif_if_msix_intr_assign),
+ DEVMETHOD(ifdi_intr_enable, ice_subif_if_intr_enable),
+ DEVMETHOD(ifdi_rx_queue_intr_enable, ice_subif_if_rx_queue_intr_enable),
+ DEVMETHOD(ifdi_tx_queue_intr_enable, ice_subif_if_tx_queue_intr_enable),
+ DEVMETHOD(ifdi_init, ice_subif_if_init),
+ DEVMETHOD(ifdi_stop, ice_subif_if_stop),
+ DEVMETHOD(ifdi_queues_free, ice_subif_if_queues_free),
+ DEVMETHOD(ifdi_media_status, ice_subif_if_media_status),
+ DEVMETHOD(ifdi_promisc_set, ice_subif_if_promisc_set),
+};
+
+/**
+ * @var ice_iflib_subif_driver
+ * @brief driver structure for the iflib stack
+ *
+ * driver_t definition used to setup the iflib device methods.
+ */
+static driver_t ice_iflib_subif_driver = {
+ .name = "ice_subif",
+ .methods = ice_iflib_subif_methods,
+ .size = sizeof(struct ice_mirr_if),
+};
+
+/**
+ * @var ice_subif_sctx
+ * @brief ice driver shared context
+ *
+ * Similar to the existing ice_sctx, this structure has these differences:
+ * - isc_admin_intrcnt is set to 0
+ * - Uses subif iflib driver methods
+ * - Flagged as a VF for iflib
+ */
+static struct if_shared_ctx ice_subif_sctx = {
+ .isc_magic = IFLIB_MAGIC,
+ .isc_q_align = PAGE_SIZE,
+
+ .isc_tx_maxsize = ICE_MAX_FRAME_SIZE,
+ .isc_tx_maxsegsize = ICE_MAX_FRAME_SIZE,
+ .isc_tso_maxsize = ICE_TSO_SIZE + sizeof(struct ether_vlan_header),
+ .isc_tso_maxsegsize = ICE_MAX_DMA_SEG_SIZE,
+
+ .isc_rx_maxsize = ICE_MAX_FRAME_SIZE,
+ .isc_rx_nsegments = ICE_MAX_RX_SEGS,
+ .isc_rx_maxsegsize = ICE_MAX_FRAME_SIZE,
+
+ .isc_nfl = 1,
+ .isc_ntxqs = 1,
+ .isc_nrxqs = 1,
+
+ .isc_admin_intrcnt = 0,
+ .isc_vendor_info = ice_vendor_info_array,
+ .isc_driver_version = __DECONST(char *, ice_driver_version),
+ .isc_driver = &ice_iflib_subif_driver,
+
+ .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP |
+ IFLIB_ADMIN_ALWAYS_RUN | IFLIB_SKIP_MSIX |
+ IFLIB_IS_VF,
+
+ .isc_nrxd_min = {ICE_MIN_DESC_COUNT},
+ .isc_ntxd_min = {ICE_MIN_DESC_COUNT},
+ .isc_nrxd_max = {ICE_IFLIB_MAX_DESC_COUNT},
+ .isc_ntxd_max = {ICE_IFLIB_MAX_DESC_COUNT},
+ .isc_nrxd_default = {ICE_DEFAULT_DESC_COUNT},
+ .isc_ntxd_default = {ICE_DEFAULT_DESC_COUNT},
+};
+
+static void *
+ice_subif_register(device_t dev __unused)
+{
+ return (&ice_subif_sctx);
+}
+
+static void
+ice_subif_setup_scctx(struct ice_mirr_if *mif)
+{
+ if_softc_ctx_t scctx = mif->subscctx;
+
+ scctx->isc_txrx = &ice_subif_txrx;
+
+ scctx->isc_capenable = ICE_FULL_CAPS;
+ scctx->isc_tx_csum_flags = ICE_CSUM_OFFLOAD;
+
+ scctx->isc_ntxqsets = 4;
+ scctx->isc_nrxqsets = 4;
+ scctx->isc_vectors = scctx->isc_nrxqsets;
+
+ scctx->isc_ntxqsets_max = 256;
+ scctx->isc_nrxqsets_max = 256;
+
+ scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
+ * sizeof(struct ice_tx_desc), DBA_ALIGN);
+ scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
+ * sizeof(union ice_32b_rx_flex_desc), DBA_ALIGN);
+
+ scctx->isc_tx_nsegments = ICE_MAX_TX_SEGS;
+ scctx->isc_tx_tso_segments_max = ICE_MAX_TSO_SEGS;
+ scctx->isc_tx_tso_size_max = ICE_TSO_SIZE;
+ scctx->isc_tx_tso_segsize_max = ICE_MAX_DMA_SEG_SIZE;
+}
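The ring byte sizes above are padded up to the DMA base-address alignment with roundup2(), which rounds to the next multiple of a power of two. Assuming the 16-byte ice_tx_desc and a 128-byte DBA_ALIGN:

#include <sys/param.h>	/* roundup2() */

/* roundup2(x, y): next multiple of power-of-two y at or above x */
size_t aligned = roundup2(1024 * 16, 128);	/* 16384, already aligned */
size_t padded  = roundup2(100 * 16, 128);	/* 1600 -> 1664 */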
+
+static int
+ice_subif_if_attach_pre(if_ctx_t ctx)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
+
+ mif->subctx = ctx;
+ mif->subdev = dev;
+ mif->subscctx = iflib_get_softc_ctx(ctx);
+
+ /* Setup the iflib softc context structure */
+ ice_subif_setup_scctx(mif);
+
+ return (0);
+}
+
+static int
+ice_subif_if_attach_post(if_ctx_t ctx __unused)
+{
+ return (0);
+}
+
+/**
+ * ice_destroy_mirror_interface - destroy mirror interface
+ * @sc: driver private data
+ *
+ * Destroys all resources associated with the mirroring interface.
+ * Will not exit early on failure.
+ *
+ * @pre: Mirror interface already exists and is initialized.
+ */
+void
+ice_destroy_mirror_interface(struct ice_softc *sc)
+{
+ struct ice_mirr_if *mif = sc->mirr_if;
+ struct ice_vsi *vsi = mif->vsi;
+ bool is_locked = false;
+ int ret;
+
+ is_locked = sx_xlocked(sc->iflib_ctx_lock);
+ if (is_locked)
+ IFLIB_CTX_UNLOCK(sc);
+
+ if (mif->ifp) {
+ ret = iflib_device_deregister(mif->subctx);
+ if (ret) {
+ device_printf(sc->dev,
+ "iflib_device_deregister for mirror interface failed: %d\n",
+ ret);
+ }
+ }
+
+ bus_topo_lock();
+ ret = device_delete_child(sc->dev, mif->subdev);
+ bus_topo_unlock();
+ if (ret) {
+ device_printf(sc->dev,
+ "device_delete_child for mirror interface failed: %d\n",
+ ret);
+ }
+
+ if (is_locked)
+ IFLIB_CTX_LOCK(sc);
+
+ if (mif->if_imap) {
+ free(mif->if_imap, M_ICE);
+ mif->if_imap = NULL;
+ }
+ if (mif->os_imap) {
+ free(mif->os_imap, M_ICE);
+ mif->os_imap = NULL;
+ }
+
+	/* These are freed via ice_subif_if_queues_free
+ * vsi:
+ * - rx_irqvs
+ * - tx_queues
+ * - rx_queues
+ */
+ ice_release_vsi(vsi);
+
+ free(mif, M_ICE);
+ sc->mirr_if = NULL;
+
+}
+
+/**
+ * ice_setup_mirror_vsi - Initialize mirror VSI
+ * @mif: driver private data for mirror interface
+ *
+ * Allocates a VSI for a mirror interface, and sets that VSI up for use as a
+ * mirror for the main PF VSI.
+ *
+ * Returns 0 on success, or a standard error code on failure.
+ */
+static int
+ice_setup_mirror_vsi(struct ice_mirr_if *mif)
+{
+ struct ice_softc *sc = mif->back;
+ device_t dev = sc->dev;
+ struct ice_vsi *vsi;
+ int ret = 0;
+
+ /* vsi is for the new mirror vsi, not the PF's main VSI */
+ vsi = ice_alloc_vsi(sc, ICE_VSI_VMDQ2);
+ if (!vsi) {
+ /* Already prints an error message */
+ return (ENOMEM);
+ }
+ mif->vsi = vsi;
+
+ /* Reserve VSI queue allocation from PF queues */
+ ret = ice_alloc_vsi_qmap(vsi, ICE_DEFAULT_VF_QUEUES, ICE_DEFAULT_VF_QUEUES);
+ if (ret) {
+ device_printf(dev, "%s: Unable to allocate mirror VSI queue maps (%d queues): %s\n",
+ __func__, ICE_DEFAULT_VF_QUEUES, ice_err_str(ret));
+ goto release_vsi;
+ }
+ vsi->num_tx_queues = vsi->num_rx_queues = ICE_DEFAULT_VF_QUEUES;
+
+ /* Assign Tx queues from PF space */
+ ret = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap,
+ vsi->num_tx_queues);
+ if (ret) {
+ device_printf(dev, "Unable to assign mirror VSI Tx queues: %s\n",
+ ice_err_str(ret));
+ goto release_vsi;
+ }
+ /* Assign Rx queues from PF space */
+ ret = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap,
+ vsi->num_rx_queues);
+ if (ret) {
+ device_printf(dev, "Unable to assign mirror VSI Rx queues: %s\n",
+ ice_err_str(ret));
+ goto release_vsi;
+ }
+ vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED;
+ vsi->max_frame_size = ICE_MAX_FRAME_SIZE;
+
+ ret = ice_initialize_vsi(vsi);
+ if (ret) {
+ device_printf(dev, "%s: Error in ice_initialize_vsi for mirror VSI: %s\n",
+ __func__, ice_err_str(ret));
+ goto release_vsi;
+ }
+
+ /* Setup this VSI for receiving traffic */
+ ret = ice_config_rss(vsi);
+ if (ret) {
+ device_printf(dev,
+ "Unable to configure RSS for mirror VSI: %s\n",
+ ice_err_str(ret));
+ goto release_vsi;
+ }
+
+ /* Set HW rules for mirroring traffic */
+ vsi->mirror_src_vsi = sc->pf_vsi.idx;
+
+ ice_debug(&sc->hw, ICE_DBG_INIT,
+ "Configuring mirroring from VSI %d to %d\n",
+ vsi->mirror_src_vsi, vsi->idx);
+ ice_debug(&sc->hw, ICE_DBG_INIT, "(HW num: VSI %d to %d)\n",
+ ice_get_hw_vsi_num(&sc->hw, vsi->mirror_src_vsi),
+ ice_get_hw_vsi_num(&sc->hw, vsi->idx));
+
+ ret = ice_setup_vsi_mirroring(vsi);
+ if (ret) {
+ device_printf(dev,
+ "Unable to configure mirroring for VSI: %s\n",
+ ice_err_str(ret));
+ goto release_vsi;
+ }
+
+ return (0);
+
+release_vsi:
+ ice_release_vsi(vsi);
+ mif->vsi = NULL;
+ return (ret);
+}
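As an aside, mirror_src_vsi is the knob that ice_setup_vsi_mirroring() keys off; given the ICE_INVALID_MIRROR_VSI comment in ice_lib.h ("to indicate that mirroring should be disabled"), tearing the rules back down presumably looks like the sketch below, though this patch does not show that path.

/* Hedged sketch, assuming the sentinel disables the mirror rules */
vsi->mirror_src_vsi = ICE_INVALID_MIRROR_VSI;
ice_setup_vsi_mirroring(vsi);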
+
+/**
+ * ice_create_mirror_interface - Initialize mirror interface
+ * @sc: driver private data
+ *
+ * Creates and sets up a mirror interface that will mirror traffic from
+ * the main PF interface. Includes a call to iflib_device_register() in order
+ * to setup necessary iflib structures for this new interface as well.
+ *
+ * If it returns successfully, a new interface will be created and will show
+ * up in the ifconfig interface list.
+ *
+ * Returns 0 on success, or a standard error code on failure.
+ */
+int
+ice_create_mirror_interface(struct ice_softc *sc)
+{
+ device_t dev = sc->dev;
+ struct ice_mirr_if *mif;
+ struct ifmedia *media;
+ struct sbuf *sb;
+ int ret = 0;
+
+ mif = (struct ice_mirr_if *)malloc(sizeof(*mif), M_ICE, M_ZERO | M_NOWAIT);
+ if (!mif) {
+ device_printf(dev, "malloc() error allocating mirror interface\n");
+ return (ENOMEM);
+ }
+
+ /* Set pointers */
+ sc->mirr_if = mif;
+ mif->back = sc;
+
+ /* Do early setup because these will be called during iflib_device_register():
+ * - ice_subif_if_tx_queues_alloc
+ * - ice_subif_if_rx_queues_alloc
+ */
+ ret = ice_setup_mirror_vsi(mif);
+ if (ret)
+ goto out;
+
+ /* Determine name for new interface:
+ * (base interface name)(modifier name)(modifier unit number)
+ * e.g. for ice0 with a new mirror interface (modifier m)
+ * of index 0, this equals "ice0m0"
+ */
+ sb = sbuf_new_auto();
+ MPASS(sb != NULL);
+ sbuf_printf(sb, "%sm", device_get_nameunit(dev));
+ sbuf_finish(sb);
+
+ bus_topo_lock();
+ mif->subdev = device_add_child(dev, sbuf_data(sb), 0);
+ bus_topo_unlock();
+
+ if (!mif->subdev) {
+ device_printf(dev, "device_add_child failed for %s0\n", sbuf_data(sb));
+ sbuf_delete(sb);
+ free(mif, M_ICE);
+ sc->mirr_if = NULL;
+ return (ENOMEM);
+ }
+ sbuf_delete(sb);
+
+ device_set_driver(mif->subdev, &ice_subif_driver);
+
+ /* Use iflib_device_register() directly because the driver already
+ * has an initialized softc to pass to iflib
+ */
+ ret = iflib_device_register(mif->subdev, mif, &ice_subif_sctx, &mif->subctx);
+ if (ret)
+ goto out;
+
+	/* Indicate that the created interface will be used only for monitoring */
+ mif->ifp = iflib_get_ifp(mif->subctx);
+ if_setflagbits(mif->ifp, IFF_MONITOR, 0);
+
+ /* Use autoselect media by default */
+ media = iflib_get_media(mif->subctx);
+ ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(media, IFM_ETHER | IFM_AUTO);
+
+ device_printf(dev, "Created dev %s and ifnet %s for mirroring\n",
+ device_get_nameunit(mif->subdev), if_name(mif->ifp));
+
+ ice_add_vsi_sysctls(mif->vsi);
+
+ ret = ice_wire_mirror_intrs(mif);
+ if (ret)
+ goto out;
+
+ mif->if_attached = true;
+ return (0);
+
+out:
+ ice_destroy_mirror_interface(sc);
+ return (ret);
+}
+
+/**
+ * ice_wire_mirror_intrs
+ * @mif: driver private subinterface structure
+ *
+ * Helper function that sets up driver interrupt data and calls
+ * into iflib in order to setup interrupts in its data structures as well.
+ *
+ * Like ice_if_msix_intr_assign, this currently requires that we get at least
+ * the same number of vectors as we have queues, and that we always have the
+ * same number of Tx and Rx queues. Unlike that function, this calls a special
+ * iflib_irq_alloc_generic_subctx() function for Rx interrupts because the
+ * driver needs to get MSI-X resources from the parent device.
+ *
+ * Tx queues use a softirq instead of their own hardware interrupt, so that
+ * part remains unchanged.
+ *
+ * Returns 0 on success or an error code from iflib_irq_alloc_generic_subctx()
+ * on failure.
+ */
+static int
+ice_wire_mirror_intrs(struct ice_mirr_if *mif)
+{
+ struct ice_softc *sc = mif->back;
+ struct ice_hw *hw = &sc->hw;
+ struct ice_vsi *vsi = mif->vsi;
+ device_t dev = mif->subdev;
+ int err, i, rid;
+
+ if_ctx_t ctx = mif->subctx;
+
+ ice_debug(hw, ICE_DBG_INIT, "%s: Last rid: %d\n", __func__, sc->last_rid);
+
+ rid = sc->last_rid + 1;
+ for (i = 0; i < vsi->num_rx_queues; i++, rid++) {
+ struct ice_rx_queue *rxq = &vsi->rx_queues[i];
+ struct ice_tx_queue *txq = &vsi->tx_queues[i];
+ char irq_name[16];
+
+		/* TODO: Change to use dynamic interface number */
+ snprintf(irq_name, sizeof(irq_name), "m0rxq%d", i);
+ /* First arg is parent device (physical port's) iflib ctx */
+ err = iflib_irq_alloc_generic_subctx(sc->ctx, ctx,
+ &mif->rx_irqvs[i].irq, rid, IFLIB_INTR_RXTX, ice_msix_que,
+ rxq, rxq->me, irq_name);
+ if (err) {
+ device_printf(dev,
+ "Failed to allocate q int %d err: %s\n",
+ i, ice_err_str(err));
+ i--;
+ goto fail;
+ }
+ MPASS(rid - 1 > 0);
+ /* Set vector number used in interrupt enable/disable functions */
+ mif->rx_irqvs[i].me = rid - 1;
+ rxq->irqv = &mif->rx_irqvs[i];
+
+ bzero(irq_name, sizeof(irq_name));
+ snprintf(irq_name, sizeof(irq_name), "m0txq%d", i);
+ iflib_softirq_alloc_generic(ctx, &mif->rx_irqvs[i].irq,
+ IFLIB_INTR_TX, txq, txq->me, irq_name);
+ txq->irqv = &mif->rx_irqvs[i];
+ }
+
+ sc->last_rid = rid - 1;
+
+ ice_debug(hw, ICE_DBG_INIT, "%s: New last rid: %d\n", __func__,
+ sc->last_rid);
+
+ return (0);
+
+fail:
+ for (; i >= 0; i--)
+ iflib_irq_free(ctx, &mif->rx_irqvs[i].irq);
+ return (err);
+}
+
+/**
+ * ice_subif_rebuild - Rebuild subinterface post reset
+ * @sc: The device private softc
+ *
+ * Restore subinterface state after a reset occurred.
+ * Restart the VSI and enable the mirroring.
+ */
+static int
+ice_subif_rebuild(struct ice_softc *sc)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(sc->ctx);
+ struct ice_vsi *vsi = sc->mirr_if->vsi;
+ int err;
+
+ err = ice_subif_rebuild_vsi_qmap(sc);
+ if (err) {
+ device_printf(sc->dev, "Unable to re-assign mirror VSI queues, err %s\n",
+ ice_err_str(err));
+ return (err);
+ }
+
+ err = ice_initialize_vsi(vsi);
+ if (err) {
+ device_printf(sc->dev, "Unable to re-initialize mirror VSI, err %s\n",
+ ice_err_str(err));
+ goto err_release_queue_allocations_subif;
+ }
+
+ err = ice_config_rss(vsi);
+ if (err) {
+ device_printf(sc->dev,
+ "Unable to reconfigure RSS for the mirror VSI, err %s\n",
+ ice_err_str(err));
+ goto err_deinit_subif_vsi;
+ }
+
+ vsi->mirror_src_vsi = sc->pf_vsi.idx;
+
+ err = ice_setup_vsi_mirroring(vsi);
+ if (err) {
+ device_printf(sc->dev,
+ "Unable to configure mirroring for VSI: %s\n",
+ ice_err_str(err));
+ goto err_deinit_subif_vsi;
+ }
+
+ ice_set_state(&mif->state, ICE_STATE_SUBIF_NEEDS_REINIT);
+
+ return (0);
+
+err_deinit_subif_vsi:
+ ice_deinit_vsi(vsi);
+err_release_queue_allocations_subif:
+ ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap,
+ sc->mirr_if->num_irq_vectors);
+ ice_resmgr_release_map(&sc->rx_qmgr, vsi->rx_qmap,
+ sc->mirr_if->num_irq_vectors);
+
+ return (err);
+}
+
+/**
+ * ice_subif_rebuild_vsi_qmap - Rebuild the mirror VSI queue mapping
+ * @sc: the device softc pointer
+ *
+ * Loops over the Tx and Rx queues for the mirror VSI and reassigns the queue
+ * mapping after a reset occurred.
+ */
+static int
+ice_subif_rebuild_vsi_qmap(struct ice_softc *sc)
+{
+ struct ice_vsi *vsi = sc->mirr_if->vsi;
+ struct ice_tx_queue *txq;
+ struct ice_rx_queue *rxq;
+ int err, i;
+
+ err = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap, sc->mirr_if->num_irq_vectors);
+ if (err) {
+ device_printf(sc->dev, "Unable to assign mirror VSI Tx queues: %s\n",
+ ice_err_str(err));
+ return (err);
+ }
+
+ err = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap, sc->mirr_if->num_irq_vectors);
+ if (err) {
+ device_printf(sc->dev, "Unable to assign mirror VSI Rx queues: %s\n",
+ ice_err_str(err));
+ goto err_release_tx_queues;
+ }
+
+ vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED;
+
+ /* Re-assign Tx queue tail pointers */
+ for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++)
+ txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]);
+
+ /* Re-assign Rx queue tail pointers */
+ for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++)
+ rxq->tail = QRX_TAIL(vsi->rx_qmap[i]);
+
+ return (0);
+
+err_release_tx_queues:
+ ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap, vsi->num_tx_queues);
+
+ return (err);
+}
+
+/**
+ * ice_subif_if_tx_queues_alloc - Allocate Tx queue memory for subinterfaces
+ * @ctx: iflib context structure
+ * @vaddrs: virtual addresses for the queue memory
+ * @paddrs: physical addresses for the queue memory
+ * @ntxqs: the number of Tx queues per set (should always be 1)
+ * @ntxqsets: the number of Tx queue sets to allocate
+ *
+ * See ice_if_tx_queues_alloc() description. Similar to that function, but
+ * for subinterfaces instead.
+ */
+static int
+ice_subif_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
+ int __invariant_only ntxqs, int ntxqsets)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
+ struct ice_tx_queue *txq;
+ device_t dev = mif->subdev;
+ struct ice_vsi *vsi;
+ int err, i, j;
+
+ MPASS(mif != NULL);
+ MPASS(ntxqs == 1);
+ MPASS(mif->subscctx->isc_ntxd[0] <= ICE_MAX_DESC_COUNT);
+
+ vsi = mif->vsi;
+
+ MPASS(vsi->num_tx_queues == ntxqsets);
+
+ /* Allocate queue structure memory */
+ if (!(vsi->tx_queues =
+ (struct ice_tx_queue *)malloc(sizeof(struct ice_tx_queue) * ntxqsets, M_ICE, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "%s: Unable to allocate Tx queue memory for subfunction\n",
+ __func__);
+ return (ENOMEM);
+ }
+
+ /* Allocate report status arrays */
+ for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
+ if (!(txq->tx_rsq =
+ (uint16_t *)malloc(sizeof(uint16_t) * mif->subscctx->isc_ntxd[0], M_ICE, M_NOWAIT))) {
+ device_printf(dev,
+ "%s: Unable to allocate tx_rsq memory for subfunction\n", __func__);
+ err = ENOMEM;
+ goto free_tx_queues;
+ }
+ /* Initialize report status array */
+ for (j = 0; j < mif->subscctx->isc_ntxd[0]; j++)
+ txq->tx_rsq[j] = QIDX_INVALID;
}
+
+ /* Add Tx queue sysctls context */
+ ice_vsi_add_txqs_ctx(vsi);
+
+ for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
+ /* q_handle == me when only one TC */
+ txq->me = txq->q_handle = i;
+ txq->vsi = vsi;
+
+ /* store the queue size for easier access */
+ txq->desc_count = mif->subscctx->isc_ntxd[0];
+
+ /* get the virtual and physical address of the hardware queues */
+ txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]);
+ txq->tx_base = (struct ice_tx_desc *)vaddrs[i];
+ txq->tx_paddr = paddrs[i];
+
+ ice_add_txq_sysctls(txq);
+ }
+
+ return (0);
+
+free_tx_queues:
+ for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
+ if (txq->tx_rsq != NULL) {
+ free(txq->tx_rsq, M_ICE);
+ txq->tx_rsq = NULL;
+ }
+ }
+ free(vsi->tx_queues, M_ICE);
+ vsi->tx_queues = NULL;
+ return (err);
+}
+
+/**
+ * ice_subif_if_rx_queues_alloc - Allocate Rx queue memory for subinterfaces
+ * @ctx: iflib context structure
+ * @vaddrs: virtual addresses for the queue memory
+ * @paddrs: physical addresses for the queue memory
+ * @nrxqs: number of Rx queues per set (should always be 1)
+ * @nrxqsets: number of Rx queue sets to allocate
+ *
+ * See ice_if_rx_queues_alloc() for general summary; this is similar to that
+ * but implemented for subinterfaces.
+ */
+static int
+ice_subif_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
+ int __invariant_only nrxqs, int nrxqsets)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
+ struct ice_rx_queue *rxq;
+ device_t dev = mif->subdev;
+ struct ice_vsi *vsi;
+ int i;
+
+ MPASS(mif != NULL);
+ MPASS(nrxqs == 1);
+ MPASS(mif->subscctx->isc_nrxd[0] <= ICE_MAX_DESC_COUNT);
+
+ vsi = mif->vsi;
+
+ MPASS(vsi->num_rx_queues == nrxqsets);
+
+ /* Allocate queue structure memory */
+ if (!(vsi->rx_queues =
+ (struct ice_rx_queue *) malloc(sizeof(struct ice_rx_queue) * nrxqsets, M_ICE, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "%s: Unable to allocate Rx queue memory for subfunction\n",
+ __func__);
+ return (ENOMEM);
+ }
+
+ /* Add Rx queue sysctls context */
+ ice_vsi_add_rxqs_ctx(vsi);
+
+ for (i = 0, rxq = vsi->rx_queues; i < nrxqsets; i++, rxq++) {
+ rxq->me = i;
+ rxq->vsi = vsi;
+
+ /* store the queue size for easier access */
+ rxq->desc_count = mif->subscctx->isc_nrxd[0];
+
+ /* get the virtual and physical address of the hardware queues */
+ rxq->tail = QRX_TAIL(vsi->rx_qmap[i]);
+ rxq->rx_base = (union ice_32b_rx_flex_desc *)vaddrs[i];
+ rxq->rx_paddr = paddrs[i];
+
+ ice_add_rxq_sysctls(rxq);
+ }
+
+ return (0);
+}
+
+/**
+ * ice_subif_if_msix_intr_assign - Assign MSI-X interrupts to new sub interface
+ * @ctx: the iflib context structure
+ * @msix: the number of vectors we were assigned
+ *
+ * Allocates and assigns driver private resources for MSI-X interrupt tracking.
+ *
+ * @pre OS MSI-X resources have been pre-allocated by parent interface.
+ */
+static int
+ice_subif_if_msix_intr_assign(if_ctx_t ctx, int msix)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
+ struct ice_softc *sc = mif->back;
+ struct ice_vsi *vsi = mif->vsi;
+
+ device_t dev = mif->subdev;
+ int ret;
+
+ if (vsi->num_rx_queues != vsi->num_tx_queues) {
+ device_printf(dev,
+ "iflib requested %d Tx queues, and %d Rx queues, but the driver isn't able to support a differing number of Tx and Rx queues\n",
+ vsi->num_tx_queues, vsi->num_rx_queues);
+ return (EOPNOTSUPP);
+ }
+
+ if (msix > sc->extra_vectors) {
+ device_printf(dev,
+ "%s: Not enough spare (%d) msix vectors for new sub-interface requested (%d)\n",
+ __func__, sc->extra_vectors, msix);
+ return (ENOSPC);
+ }
+ device_printf(dev, "%s: Using %d vectors for sub-interface\n", __func__,
+ msix);
+
+ /* Allocate space to store the IRQ vector data */
+ mif->num_irq_vectors = vsi->num_rx_queues;
+ mif->rx_irqvs = (struct ice_irq_vector *)
+ malloc(sizeof(struct ice_irq_vector) * (mif->num_irq_vectors),
+ M_ICE, M_NOWAIT);
+ if (!mif->rx_irqvs) {
+ device_printf(dev,
+ "Unable to allocate RX irqv memory for mirror's %d vectors\n",
+ mif->num_irq_vectors);
+ return (ENOMEM);
+ }
+
+ /* Assign mirror interface interrupts from PF device space */
+ if (!(mif->if_imap =
+ (u16 *)malloc(sizeof(u16) * mif->num_irq_vectors,
+ M_ICE, M_NOWAIT))) {
+ device_printf(dev, "Unable to allocate mirror intfc if_imap memory\n");
+ ret = ENOMEM;
+ goto free_irqvs;
+ }
+ ret = ice_resmgr_assign_contiguous(&sc->dev_imgr, mif->if_imap, mif->num_irq_vectors);
+ if (ret) {
+ device_printf(dev, "Unable to assign mirror intfc PF device interrupt mapping: %s\n",
+ ice_err_str(ret));
+ goto free_if_imap;
+ }
+ /* Assign mirror interface interrupts from OS interrupt allocation space */
+ if (!(mif->os_imap =
+ (u16 *)malloc(sizeof(u16) * mif->num_irq_vectors,
+ M_ICE, M_NOWAIT))) {
+ device_printf(dev, "Unable to allocate mirror intfc os_imap memory\n");
+ ret = ENOMEM;
+ goto free_if_imap;
+ }
+ ret = ice_resmgr_assign_contiguous(&sc->os_imgr, mif->os_imap, mif->num_irq_vectors);
+ if (ret) {
+ device_printf(dev, "Unable to assign mirror intfc OS interrupt mapping: %s\n",
+ ice_err_str(ret));
+ goto free_if_imap;
+ }
+
+ return (0);
+
+free_if_imap:
+ free(mif->if_imap, M_ICE);
+ mif->if_imap = NULL;
+free_irqvs:
+ free(mif->rx_irqvs, M_ICE);
+ mif->rx_irqvs = NULL;
+ return (ret);
+}
+
+/**
+ * ice_subif_if_intr_enable - Enable device interrupts for a subinterface
+ * @ctx: iflib context structure
+ *
+ * Called by iflib to request enabling all interrupts that belong to a
+ * subinterface.
+ */
+static void
+ice_subif_if_intr_enable(if_ctx_t ctx)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
+ struct ice_softc *sc = mif->back;
+ struct ice_vsi *vsi = mif->vsi;
+ struct ice_hw *hw = &sc->hw;
+
+ /* Do not enable queue interrupts in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return;
+
+ /* Enable all queue interrupts */
+ for (int i = 0; i < vsi->num_rx_queues; i++)
+ ice_enable_intr(hw, vsi->rx_queues[i].irqv->me);
+}
+
+/**
+ * ice_subif_if_rx_queue_intr_enable - Enable a specific Rx queue interrupt
+ * @ctx: iflib context structure
+ * @rxqid: the Rx queue to enable
+ *
+ * Enable a specific Rx queue interrupt.
+ *
+ * This function is not protected by the iflib CTX lock.
+ */
+static int
+ice_subif_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
+ struct ice_softc *sc = mif->back;
+ struct ice_vsi *vsi = mif->vsi;
+ struct ice_hw *hw = &sc->hw;
+
+ /* Do not enable queue interrupts in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return (ENOSYS);
+
+ ice_enable_intr(hw, vsi->rx_queues[rxqid].irqv->me);
+ return (0);
+}
+
+/**
+ * ice_subif_if_tx_queue_intr_enable - Enable a specific Tx queue interrupt
+ * @ctx: iflib context structure
+ * @txqid: the Tx queue to enable
+ *
+ * Enable a specific Tx queue interrupt.
+ *
+ * This function is not protected by the iflib CTX lock.
+ */
+static int
+ice_subif_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
+ struct ice_softc *sc = mif->back;
+ struct ice_vsi *vsi = mif->vsi;
+ struct ice_hw *hw = &sc->hw;
+
+ /* Do not enable queue interrupts in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return (ENOSYS);
+
+ ice_enable_intr(hw, vsi->tx_queues[txqid].irqv->me);
+ return (0);
+}
+
+/**
+ * ice_subif_if_init - Initialize the subinterface
+ * @ctx: iflib ctx structure
+ *
+ * Called by iflib to bring the device up, i.e. ifconfig ice0m0 up.
+ * Prepares the Tx and Rx engines and enables interrupts.
+ *
+ * @pre assumes the caller holds the iflib CTX lock
+ */
+static void
+ice_subif_if_init(if_ctx_t ctx)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
+ struct ice_softc *sc = mif->back;
+ struct ice_vsi *vsi = mif->vsi;
+ device_t dev = mif->subdev;
+ int err;
+
+ if (ice_driver_is_detaching(sc))
+ return;
+
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return;
+
+ if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) {
+ device_printf(dev,
+ "request to start interface cannot be completed as the parent device %s failed to reset\n",
+ device_get_nameunit(sc->dev));
+ return;
+ }
+
+ if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
+ device_printf(dev,
+ "request to start interface cannot be completed while parent device %s is prepared for impending reset\n",
+ device_get_nameunit(sc->dev));
+ return;
+ }
+
+ /* XXX: Equiv to ice_update_rx_mbuf_sz */
+ vsi->mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
+
+ /* Initialize software Tx tracking values */
+ ice_init_tx_tracking(vsi);
+
+ err = ice_cfg_vsi_for_tx(vsi);
+ if (err) {
+ device_printf(dev,
+ "Unable to configure subif VSI for Tx: %s\n",
+ ice_err_str(err));
+ return;
+ }
+
+ err = ice_cfg_vsi_for_rx(vsi);
+ if (err) {
+ device_printf(dev,
+ "Unable to configure subif VSI for Rx: %s\n",
+ ice_err_str(err));
+ goto err_cleanup_tx;
+ }
+
+ err = ice_control_all_rx_queues(vsi, true);
+ if (err) {
+ device_printf(dev,
+ "Unable to enable subif Rx rings for receive: %s\n",
+ ice_err_str(err));
+ goto err_cleanup_tx;
+ }
+
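+	/* Associate the Rx queues with their assigned MSI-X vectors and
+	 * program the initial Rx interrupt throttling rate.
+	 */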
+ ice_configure_all_rxq_interrupts(vsi);
+ ice_configure_rx_itr(vsi);
+
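+	/* Mark the subinterface as initialized; ice_subif_if_stop tests
+	 * and clears this bit to decide whether there is anything to tear
+	 * down.
+	 */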
+ ice_set_state(&mif->state, ICE_STATE_DRIVER_INITIALIZED);
+ return;
+
+err_cleanup_tx:
+ ice_vsi_disable_tx(vsi);
+}
+
+/**
+ * ice_subif_if_stop - Stop the subinterface
+ * @ctx: iflib context structure
+ *
+ * Called by iflib to stop the subinterface and bring it down
+ * (e.g. ifconfig ice0m0 down).
+ *
+ * @pre assumes the caller holds the iflib CTX lock
+ */
+static void
+ice_subif_if_stop(if_ctx_t ctx)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
+ struct ice_softc *sc = mif->back;
+ struct ice_vsi *vsi = mif->vsi;
+ device_t dev = mif->subdev;
+
+ if (!ice_testandclear_state(&mif->state, ICE_STATE_DRIVER_INITIALIZED))
+ return;
+
+ if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) {
+ device_printf(dev,
+ "request to stop interface cannot be completed as the parent device %s failed to reset\n",
+ device_get_nameunit(sc->dev));
+ return;
+ }
+
+ if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
+ device_printf(dev,
+ "request to stop interface cannot be completed while parent device %s is prepared for impending reset\n",
+ device_get_nameunit(sc->dev));
+ return;
+ }
+
+ /* Dissociate the Tx and Rx queues from the interrupts */
+ ice_flush_txq_interrupts(vsi);
+ ice_flush_rxq_interrupts(vsi);
+
+ /* Disable the Tx and Rx queues */
+ ice_vsi_disable_tx(vsi);
+ ice_control_all_rx_queues(vsi, false);
+}
+
+/**
+ * ice_free_irqvs_subif - Free IRQ vector memory for subinterfaces
+ * @mif: Mirror interface private structure
+ *
+ * Free IRQ vector memory allocated during ice_subif_if_msix_intr_assign.
+ */
+static void
+ice_free_irqvs_subif(struct ice_mirr_if *mif)
+{
+ struct ice_softc *sc = mif->back;
+ struct ice_vsi *vsi = mif->vsi;
+ if_ctx_t ctx = sc->ctx;
+ int i;
+
+ /* If the irqvs array is NULL, then there are no vectors to free */
+ if (mif->rx_irqvs == NULL)
+ return;
+
+	/* Free the IRQ vectors -- currently subinterfaces have a number
+	 * of vectors equal to the number of Rx queues
+	 *
+	 * XXX: ctx is the parent device's ctx, not the subinterface ctx
+	 */
+ for (i = 0; i < vsi->num_rx_queues; i++)
+ iflib_irq_free(ctx, &mif->rx_irqvs[i].irq);
+
+ ice_resmgr_release_map(&sc->os_imgr, mif->os_imap,
+ mif->num_irq_vectors);
+ ice_resmgr_release_map(&sc->dev_imgr, mif->if_imap,
+ mif->num_irq_vectors);
+
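+	/* Roll back the parent's interrupt rid accounting so the rids
+	 * used by this subinterface's vectors can be reused.
+	 */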
+ sc->last_rid -= vsi->num_rx_queues;
+
+ /* Clear the irqv pointers */
+ for (i = 0; i < vsi->num_rx_queues; i++)
+ vsi->rx_queues[i].irqv = NULL;
+
+ for (i = 0; i < vsi->num_tx_queues; i++)
+ vsi->tx_queues[i].irqv = NULL;
+
+ /* Release the vector array memory */
+ free(mif->rx_irqvs, M_ICE);
+ mif->rx_irqvs = NULL;
+}
+
+/**
+ * ice_subif_if_queues_free - Free queue memory for subinterfaces
+ * @ctx: the iflib context structure
+ *
+ * Free queue memory allocated by ice_subif_if_tx_queues_alloc() and
+ * ice_subif_if_rx_queues_alloc().
+ */
+static void
+ice_subif_if_queues_free(if_ctx_t ctx)
+{
+ struct ice_mirr_if *mif = (struct ice_mirr_if *)iflib_get_softc(ctx);
+ struct ice_vsi *vsi = mif->vsi;
+ struct ice_tx_queue *txq;
+ int i;
+
+ /* Free the Tx and Rx sysctl contexts, and assign NULL to the node
+ * pointers.
+ */
+ ice_vsi_del_txqs_ctx(vsi);
+ ice_vsi_del_rxqs_ctx(vsi);
+
+ /* Release MSI-X IRQ vectors */
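+	/* (Must be done before the queue arrays are freed below, since it
+	 * clears the irqv pointers stored within them.)
+	 */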
+ ice_free_irqvs_subif(mif);
+
+ if (vsi->tx_queues != NULL) {
+ /* free the tx_rsq arrays */
+ for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) {
+ if (txq->tx_rsq != NULL) {
+ free(txq->tx_rsq, M_ICE);
+ txq->tx_rsq = NULL;
+ }
+ }
+ free(vsi->tx_queues, M_ICE);
+ vsi->tx_queues = NULL;
+ }
+ if (vsi->rx_queues != NULL) {
+ free(vsi->rx_queues, M_ICE);
+ vsi->rx_queues = NULL;
+ }
+}
+
+/**
+ * ice_subif_if_media_status - Report subinterface media
+ * @ctx: iflib context structure
+ * @ifmr: ifmedia request structure to update
+ *
+ * Updates the provided ifmr with a fixed active Ethernet/autoselect
+ * media status, in order to prevent a "no media types?" message
+ * from ifconfig.
+ *
+ * Mirror interfaces are always up.
+ */
+static void
+ice_subif_if_media_status(if_ctx_t ctx __unused, struct ifmediareq *ifmr)
+{
+ ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
+ ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
+}
+
+/**
+ * ice_subif_if_promisc_set - Set subinterface promiscuous mode
+ * @ctx: iflib context structure
+ * @flags: promiscuous flags to configure
+ *
+ * Called by iflib to configure device promiscuous mode.
+ *
+ * @remark Mirror interfaces do not need promiscuous mode, so this is
+ * a no-op for now.
+ */
+static int
+ice_subif_if_promisc_set(if_ctx_t ctx __unused, int flags __unused)
+{
+ return (0);
}
diff --git a/sys/dev/ice/virtchnl_inline_ipsec.h b/sys/dev/ice/virtchnl_inline_ipsec.h
deleted file mode 100644
index adcd9f34c00f..000000000000
--- a/sys/dev/ice/virtchnl_inline_ipsec.h
+++ /dev/null
@@ -1,594 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2024, Intel Corporation
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _VIRTCHNL_INLINE_IPSEC_H_
-#define _VIRTCHNL_INLINE_IPSEC_H_
-
-#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM 3
-#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM 16
-#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM 128
-#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER 2
-#define VIRTCHNL_IPSEC_MAX_KEY_LEN 128
-#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM 8
-#define VIRTCHNL_IPSEC_SA_DESTROY 0
-#define VIRTCHNL_IPSEC_BROADCAST_VFID 0xFFFFFFFF
-#define VIRTCHNL_IPSEC_INVALID_REQ_ID 0xFFFF
-#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP 0xFFFFFFFF
-#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP 0xFFFFFFFF
-
-/* crypto type */
-#define VIRTCHNL_AUTH 1
-#define VIRTCHNL_CIPHER 2
-#define VIRTCHNL_AEAD 3
-
-/* caps enabled */
-#define VIRTCHNL_IPSEC_ESN_ENA BIT(0)
-#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA BIT(1)
-#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA BIT(2)
-#define VIRTCHNL_IPSEC_AUDIT_ENA BIT(3)
-#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA BIT(4)
-#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA BIT(5)
-#define VIRTCHNL_IPSEC_ARW_CHECK_ENA BIT(6)
-#define VIRTCHNL_IPSEC_24BIT_SPI_ENA BIT(7)
-
-/* algorithm type */
-/* Hash Algorithm */
-#define VIRTCHNL_HASH_NO_ALG 0 /* NULL algorithm */
-#define VIRTCHNL_AES_CBC_MAC 1 /* AES-CBC-MAC algorithm */
-#define VIRTCHNL_AES_CMAC 2 /* AES CMAC algorithm */
-#define VIRTCHNL_AES_GMAC 3 /* AES GMAC algorithm */
-#define VIRTCHNL_AES_XCBC_MAC 4 /* AES XCBC algorithm */
-#define VIRTCHNL_MD5_HMAC 5 /* HMAC using MD5 algorithm */
-#define VIRTCHNL_SHA1_HMAC 6 /* HMAC using 128 bit SHA algorithm */
-#define VIRTCHNL_SHA224_HMAC 7 /* HMAC using 224 bit SHA algorithm */
-#define VIRTCHNL_SHA256_HMAC 8 /* HMAC using 256 bit SHA algorithm */
-#define VIRTCHNL_SHA384_HMAC 9 /* HMAC using 384 bit SHA algorithm */
-#define VIRTCHNL_SHA512_HMAC 10 /* HMAC using 512 bit SHA algorithm */
-#define VIRTCHNL_SHA3_224_HMAC 11 /* HMAC using 224 bit SHA3 algorithm */
-#define VIRTCHNL_SHA3_256_HMAC 12 /* HMAC using 256 bit SHA3 algorithm */
-#define VIRTCHNL_SHA3_384_HMAC 13 /* HMAC using 384 bit SHA3 algorithm */
-#define VIRTCHNL_SHA3_512_HMAC 14 /* HMAC using 512 bit SHA3 algorithm */
-/* Cipher Algorithm */
-#define VIRTCHNL_CIPHER_NO_ALG 15 /* NULL algorithm */
-#define VIRTCHNL_3DES_CBC 16 /* Triple DES algorithm in CBC mode */
-#define VIRTCHNL_AES_CBC 17 /* AES algorithm in CBC mode */
-#define VIRTCHNL_AES_CTR 18 /* AES algorithm in Counter mode */
-/* AEAD Algorithm */
-#define VIRTCHNL_AES_CCM 19 /* AES algorithm in CCM mode */
-#define VIRTCHNL_AES_GCM 20 /* AES algorithm in GCM mode */
-#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
-
-/* protocol type */
-#define VIRTCHNL_PROTO_ESP 1
-#define VIRTCHNL_PROTO_AH 2
-#define VIRTCHNL_PROTO_RSVD1 3
-
-/* sa mode */
-#define VIRTCHNL_SA_MODE_TRANSPORT 1
-#define VIRTCHNL_SA_MODE_TUNNEL 2
-#define VIRTCHNL_SA_MODE_TRAN_TUN 3
-#define VIRTCHNL_SA_MODE_UNKNOWN 4
-
-/* sa direction */
-#define VIRTCHNL_DIR_INGRESS 1
-#define VIRTCHNL_DIR_EGRESS 2
-#define VIRTCHNL_DIR_INGRESS_EGRESS 3
-
-/* sa termination */
-#define VIRTCHNL_TERM_SOFTWARE 1
-#define VIRTCHNL_TERM_HARDWARE 2
-
-/* sa ip type */
-#define VIRTCHNL_IPV4 1
-#define VIRTCHNL_IPV6 2
-
-/* for virtchnl_ipsec_resp */
-enum inline_ipsec_resp {
- INLINE_IPSEC_SUCCESS = 0,
- INLINE_IPSEC_FAIL = -1,
- INLINE_IPSEC_ERR_FIFO_FULL = -2,
- INLINE_IPSEC_ERR_NOT_READY = -3,
- INLINE_IPSEC_ERR_VF_DOWN = -4,
- INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
- INLINE_IPSEC_ERR_NO_MEM = -6,
-};
-
-/* Detailed opcodes for DPDK and IPsec use */
-enum inline_ipsec_ops {
- INLINE_IPSEC_OP_GET_CAP = 0,
- INLINE_IPSEC_OP_GET_STATUS = 1,
- INLINE_IPSEC_OP_SA_CREATE = 2,
- INLINE_IPSEC_OP_SA_UPDATE = 3,
- INLINE_IPSEC_OP_SA_DESTROY = 4,
- INLINE_IPSEC_OP_SP_CREATE = 5,
- INLINE_IPSEC_OP_SP_DESTROY = 6,
- INLINE_IPSEC_OP_SA_READ = 7,
- INLINE_IPSEC_OP_EVENT = 8,
- INLINE_IPSEC_OP_RESP = 9,
-};
-
-#pragma pack(1)
-/* Not all fields are valid; if a field is invalid, set all of its bits to 1 */
-struct virtchnl_algo_cap {
- u32 algo_type;
-
- u16 block_size;
-
- u16 min_key_size;
- u16 max_key_size;
- u16 inc_key_size;
-
- u16 min_iv_size;
- u16 max_iv_size;
- u16 inc_iv_size;
-
- u16 min_digest_size;
- u16 max_digest_size;
- u16 inc_digest_size;
-
- u16 min_aad_size;
- u16 max_aad_size;
- u16 inc_aad_size;
-};
-#pragma pack()
-
-/* vf record the capability of crypto from the virtchnl */
-struct virtchnl_sym_crypto_cap {
- u8 crypto_type;
- u8 algo_cap_num;
- struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
-};
-
-/* VIRTCHNL_OP_GET_IPSEC_CAP
- * VF pass virtchnl_ipsec_cap to PF
- * and PF return capability of ipsec from virtchnl.
- */
-#pragma pack(1)
-struct virtchnl_ipsec_cap {
- /* max number of SA per VF */
- u16 max_sa_num;
-
- /* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
- u8 virtchnl_protocol_type;
-
- /* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
- u8 virtchnl_sa_mode;
-
- /* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
- u8 virtchnl_direction;
-
- /* termination mode - value ref VIRTCHNL_TERM_XXX */
- u8 termination_mode;
-
- /* number of supported crypto capability */
- u8 crypto_cap_num;
-
- /* descriptor ID */
- u16 desc_id;
-
- /* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
- u32 caps_enabled;
-
- /* crypto capabilities */
- struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
-};
-
-/* configuration of crypto function */
-struct virtchnl_ipsec_crypto_cfg_item {
- u8 crypto_type;
-
- u32 algo_type;
-
- /* Length of valid IV data. */
- u16 iv_len;
-
- /* Length of digest */
- u16 digest_len;
-
- /* SA salt */
- u32 salt;
-
- /* The length of the symmetric key */
- u16 key_len;
-
- /* key data buffer */
- u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
-};
-#pragma pack()
-
-struct virtchnl_ipsec_sym_crypto_cfg {
- struct virtchnl_ipsec_crypto_cfg_item
- items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
-};
-
-#pragma pack(1)
-/* VIRTCHNL_OP_IPSEC_SA_CREATE
- * VF sends this SA configuration to PF using virtchnl;
- * PF creates the SA per this configuration and the PF driver will
- * return a unique index (sa_idx) for the created SA.
- */
-struct virtchnl_ipsec_sa_cfg {
- /* IPsec SA Protocol - AH/ESP */
- u8 virtchnl_protocol_type;
-
- /* termination mode - value ref VIRTCHNL_TERM_XXX */
- u8 virtchnl_termination;
-
- /* type of outer IP - IPv4/IPv6 */
- u8 virtchnl_ip_type;
-
- /* type of esn - !0:enable/0:disable */
- u8 esn_enabled;
-
- /* udp encap - !0:enable/0:disable */
- u8 udp_encap_enabled;
-
- /* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
- u8 virtchnl_direction;
-
- /* reserved */
- u8 reserved1;
-
- /* SA security parameter index */
- u32 spi;
-
- /* outer src ip address */
- u8 src_addr[16];
-
- /* outer dst ip address */
- u8 dst_addr[16];
-
- /* SPD reference. Used to link an SA with its policy.
- * PF drivers may ignore this field.
- */
- u16 spd_ref;
-
- /* high 32 bits of esn */
- u32 esn_hi;
-
- /* low 32 bits of esn */
- u32 esn_low;
-
- /* When enabled, sa_index must be valid */
- u8 sa_index_en;
-
- /* SA index when sa_index_en is true */
- u32 sa_index;
-
- /* auditing mode - enable/disable */
- u8 audit_en;
-
- /* lifetime byte limit - enable/disable
- * When enabled, byte_limit_hard and byte_limit_soft
- * must be valid.
- */
- u8 byte_limit_en;
-
- /* hard byte limit count */
- u64 byte_limit_hard;
-
- /* soft byte limit count */
- u64 byte_limit_soft;
-
- /* drop on authentication failure - enable/disable */
- u8 drop_on_auth_fail_en;
-
-	 * anti-replay window check - enable/disable
- * When enabled, arw_size must be valid.
- */
- u8 arw_check_en;
-
- /* size of arw window, offset by 1. Setting to 0
- * represents ARW window size of 1. Setting to 127
- * represents ARW window size of 128
- */
- u8 arw_size;
-
- /* no ip offload mode - enable/disable
- * When enabled, ip type and address must not be valid.
- */
- u8 no_ip_offload_en;
-
-	/* SA Domain. Used to logically separate an SADB into groups.
- * PF drivers supporting a single group ignore this field.
- */
- u16 sa_domain;
-
- /* crypto configuration */
- struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
-};
-#pragma pack()
-
-/* VIRTCHNL_OP_IPSEC_SA_UPDATE
- * VF send configuration of index of SA to PF
- * PF will update SA according to configuration
- */
-struct virtchnl_ipsec_sa_update {
- u32 sa_index; /* SA to update */
- u32 esn_hi; /* high 32 bits of esn */
- u32 esn_low; /* low 32 bits of esn */
-};
-
-#pragma pack(1)
-/* VIRTCHNL_OP_IPSEC_SA_DESTROY
- * VF send configuration of index of SA to PF
- * PF will destroy SA according to configuration
- * flag bitmap indicates whether all SAs or just the selected SAs
- * will be destroyed
- */
-struct virtchnl_ipsec_sa_destroy {
- /* All zero bitmap indicates all SA will be destroyed.
- * Non-zero bitmap indicates the selected SA in
- * array sa_index will be destroyed.
- */
- u8 flag;
-
- /* selected SA index */
- u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
-};
-
-/* VIRTCHNL_OP_IPSEC_SA_READ
- * VF sends this SA configuration to PF using virtchnl;
- * PF reads the SA and will return its configuration.
- */
-struct virtchnl_ipsec_sa_read {
- /* SA valid - invalid/valid */
- u8 valid;
-
- /* SA active - inactive/active */
- u8 active;
-
- /* SA SN rollover - not_rollover/rollover */
- u8 sn_rollover;
-
- /* IPsec SA Protocol - AH/ESP */
- u8 virtchnl_protocol_type;
-
- /* termination mode - value ref VIRTCHNL_TERM_XXX */
- u8 virtchnl_termination;
-
- /* auditing mode - enable/disable */
- u8 audit_en;
-
- /* lifetime byte limit - enable/disable
- * When set to limit, byte_limit_hard and byte_limit_soft
- * must be valid.
- */
- u8 byte_limit_en;
-
- /* hard byte limit count */
- u64 byte_limit_hard;
-
- /* soft byte limit count */
- u64 byte_limit_soft;
-
- /* drop on authentication failure - enable/disable */
- u8 drop_on_auth_fail_en;
-
- /* anti-replay window check - enable/disable
- * When set to check, arw_size, arw_top, and arw must be valid
- */
- u8 arw_check_en;
-
- /* size of arw window, offset by 1. Setting to 0
- * represents ARW window size of 1. Setting to 127
- * represents ARW window size of 128
- */
- u8 arw_size;
-
- /* reserved */
- u8 reserved1;
-
- /* top of anti-replay-window */
- u64 arw_top;
-
- /* anti-replay-window */
- u8 arw[16];
-
- /* packets processed */
- u64 packets_processed;
-
- /* bytes processed */
- u64 bytes_processed;
-
- /* packets dropped */
- u32 packets_dropped;
-
- /* authentication failures */
- u32 auth_fails;
-
- /* ARW check failures */
- u32 arw_fails;
-
- /* type of esn - enable/disable */
- u8 esn;
-
- /* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
- u8 virtchnl_direction;
-
- /* SA security parameter index */
- u32 spi;
-
- /* SA salt */
- u32 salt;
-
- /* high 32 bits of esn */
- u32 esn_hi;
-
- /* low 32 bits of esn */
- u32 esn_low;
-
-	/* SA Domain. Used to logically separate an SADB into groups.
- * PF drivers supporting a single group ignore this field.
- */
- u16 sa_domain;
-
- /* SPD reference. Used to link an SA with its policy.
- * PF drivers may ignore this field.
- */
- u16 spd_ref;
-
- /* crypto configuration. Salt and keys are set to 0 */
- struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
-};
-#pragma pack()
-
-/* Add allowlist entry in IES */
-struct virtchnl_ipsec_sp_cfg {
- u32 spi;
- u32 dip[4];
-
- /* Drop frame if true or redirect to QAT if false. */
- u8 drop;
-
- /* Congestion domain. For future use. */
- u8 cgd;
-
- /* 0 for IPv4 table, 1 for IPv6 table. */
- u8 table_id;
-
- /* Set TC (congestion domain) if true. For future use. */
- u8 set_tc;
-
- /* 0 for NAT-T unsupported, 1 for NAT-T supported */
- u8 is_udp;
-
- /* reserved */
- u8 reserved;
-
- /* NAT-T UDP port number. Only valid in case NAT-T supported */
- u16 udp_port;
-};
-
-#pragma pack(1)
-/* Delete allowlist entry in IES */
-struct virtchnl_ipsec_sp_destroy {
- /* 0 for IPv4 table, 1 for IPv6 table. */
- u8 table_id;
- u32 rule_id;
-};
-#pragma pack()
-
-/* Response from IES to allowlist operations */
-struct virtchnl_ipsec_sp_cfg_resp {
- u32 rule_id;
-};
-
-struct virtchnl_ipsec_sa_cfg_resp {
- u32 sa_handle;
-};
-
-#define INLINE_IPSEC_EVENT_RESET 0x1
-#define INLINE_IPSEC_EVENT_CRYPTO_ON 0x2
-#define INLINE_IPSEC_EVENT_CRYPTO_OFF 0x4
-
-struct virtchnl_ipsec_event {
- u32 ipsec_event_data;
-};
-
-#define INLINE_IPSEC_STATUS_AVAILABLE 0x1
-#define INLINE_IPSEC_STATUS_UNAVAILABLE 0x2
-
-struct virtchnl_ipsec_status {
- u32 status;
-};
-
-struct virtchnl_ipsec_resp {
- u32 resp;
-};
-
-/* Internal message descriptor for VF <-> IPsec communication */
-struct inline_ipsec_msg {
- u16 ipsec_opcode;
- u16 req_id;
-
- union {
- /* IPsec request */
- struct virtchnl_ipsec_sa_cfg sa_cfg[0];
- struct virtchnl_ipsec_sp_cfg sp_cfg[0];
- struct virtchnl_ipsec_sa_update sa_update[0];
- struct virtchnl_ipsec_sa_destroy sa_destroy[0];
- struct virtchnl_ipsec_sp_destroy sp_destroy[0];
-
- /* IPsec response */
- struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
- struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
- struct virtchnl_ipsec_cap ipsec_cap[0];
- struct virtchnl_ipsec_status ipsec_status[0];
- /* response to del_sa, del_sp, update_sa */
- struct virtchnl_ipsec_resp ipsec_resp[0];
-
- /* IPsec event (no req_id is required) */
- struct virtchnl_ipsec_event event[0];
-
- /* Reserved */
- struct virtchnl_ipsec_sa_read sa_read[0];
- } ipsec_data;
-};
-
-static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
-{
- u16 valid_len = sizeof(struct inline_ipsec_msg);
-
- switch (opcode) {
- case INLINE_IPSEC_OP_GET_CAP:
- case INLINE_IPSEC_OP_GET_STATUS:
- break;
- case INLINE_IPSEC_OP_SA_CREATE:
- valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
- break;
- case INLINE_IPSEC_OP_SP_CREATE:
- valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
- break;
- case INLINE_IPSEC_OP_SA_UPDATE:
- valid_len += sizeof(struct virtchnl_ipsec_sa_update);
- break;
- case INLINE_IPSEC_OP_SA_DESTROY:
- valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
- break;
- case INLINE_IPSEC_OP_SP_DESTROY:
- valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
- break;
-	/* Only for msg length calculation of response to VF in case of
- * inline ipsec failure.
- */
- case INLINE_IPSEC_OP_RESP:
- valid_len += sizeof(struct virtchnl_ipsec_resp);
- break;
- default:
- valid_len = 0;
- break;
- }
-
- return valid_len;
-}
-
-#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
diff --git a/sys/dev/ice/virtchnl_lan_desc.h b/sys/dev/ice/virtchnl_lan_desc.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/sys/dev/ice/virtchnl_lan_desc.h
+++ /dev/null